#include "assert_opt.h"
template<typename SPACE>
struct Virt_addr { typedef Page_number Type; };

template<>
struct Virt_addr<Obj_space> { typedef Obj_space::Addr Type; };
template< typename SPACE, typename M >
inline
Mword v_delete(M &m, Mword flush_rights, bool full_flush)
{
  SPACE* child_space = m->space();
  assert_opt (child_space);
  Mword res = child_space->v_delete(m.page(), m.size(), flush_rights);
  (void) full_flush;
  assert_kdb (full_flush != child_space->v_lookup(m.page()));
  return res;
}
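// Reading aid (not from the original sources): after a full flush the entry
// must be gone, so v_lookup() has to fail; after a mere rights downgrade it
// must still succeed. That is exactly what the assert above expresses with
// "full_flush != v_lookup(...)".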
template< typename SPACE >
class Map_traits
{
public:
  typedef Page_number Addr;
  typedef Page_count Size;

  static Addr get_addr(L4_fpage const &fp);
  static void constraint(Addr &snd_addr, Size &snd_size,
                         Addr &rcv_addr, Size const &rcv_size,
                         Addr const &hot_spot);
  static bool match(L4_fpage const &from, L4_fpage const &to);
  static bool free_object(typename SPACE::Phys_addr o,
                          typename SPACE::Reap_list **reap_list);
};
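// Illustrative note (not from the original sources): the generic map/unmap
// code below only ever talks to these traits, e.g.
//
//   typedef Map_traits<Mem_space> Mt;
//   if (Mt::match(fp_from, fp_to))          // do both fpages name memory?
//     Mt::Addr snd = Mt::get_addr(fp_from); // space-specific address
//
// so supporting a new address-space type amounts to specializing these
// static hooks.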
class Reap_list
{
  Kobject *_h;
  Kobject **_t;

public:
  Reap_list() : _h(0), _t(&_h) {}
  Kobject ***list() { return &_t; }
};
template<>
inline
Mword v_delete<Obj_space>(Kobject_mapdb::Iterator &m, Mword flush_rights,
                          bool /*full_flush*/)
{
  Obj_space::Entry *c = static_cast<Obj_space::Entry*>(*m);

  if (c->valid())
    {
      if (flush_rights & L4_fpage::R)
        c->invalidate();
      else
        c->del_rights(flush_rights & L4_fpage::WX);
    }
  return 0;
}
//------------------------------------------------------------------------
IMPLEMENTATION:
IMPLEMENT template<typename SPACE>
inline
bool
Map_traits<SPACE>::match(L4_fpage const &, L4_fpage const &)
{ return false; }
IMPLEMENT template<typename SPACE>
inline
bool
Map_traits<SPACE>::free_object(typename SPACE::Phys_addr,
                               typename SPACE::Reap_list **)
{ return false; }
PUBLIC template< typename SPACE >
static inline
void
Map_traits<SPACE>::attribs(L4_msg_item /*control*/, L4_fpage const &/*fp*/,
                           unsigned long *del_attr, unsigned long *set_attr)
{ *del_attr = 0; *set_attr = 0; }
PUBLIC template< typename SPACE >
static inline
unsigned long
Map_traits<SPACE>::apply_attribs(unsigned long attribs,
                                 typename SPACE::Phys_addr &,
                                 unsigned long set_attr, unsigned long del_attr)
{ return (attribs & ~del_attr) | set_attr; }
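// Worked example (hypothetical bit values, not from the original sources):
// attribs = 0b0110, del_attr = 0b0010, set_attr = 0b1000 gives
// (0b0110 & ~0b0010) | 0b1000 = 0b1100 -- deletions are applied first, then
// the new attribute bits are OR-ed in, so a bit named in both masks ends up
// set.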
PRIVATE template<typename SPACE>
static inline
void
Map_traits<SPACE>::identity_constraint(Addr &snd_addr, Size &snd_size,
                                       Addr rcv_addr, Size rcv_size)
{
  if (rcv_addr > snd_addr)
    {
      if (rcv_addr - snd_addr < snd_size)
        snd_size -= rcv_addr - snd_addr;
      else
        snd_size = Size(0);

      snd_addr = rcv_addr;
    }

  if (snd_size > rcv_size)
    snd_size = rcv_size;
}
PRIVATE template<typename SPACE>
static inline
void
Map_traits<SPACE>::free_constraint(Addr &snd_addr, Size &snd_size,
                                   Addr &rcv_addr, Size rcv_size,
                                   Addr const &hot_spot)
{
  if (rcv_size >= snd_size)
    rcv_addr += hot_spot.offset(rcv_size).trunc(snd_size);
  else
    {
      snd_addr += hot_spot.offset(snd_size).trunc(rcv_size);
      // reduce the size of the address range to the receive window
      snd_size = rcv_size;
    }
}
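// Worked example (hypothetical page numbers, not from the original sources):
// sending a 4-page range into a 16-page receive window with hot_spot = 6
// gives rcv_addr += trunc(6 mod 16, 4 pages) = 4, i.e. the mapping lands at
// offset 4 within the window. If instead the send range exceeds the window,
// the hot spot selects which part of the send range is used and snd_size is
// clipped to rcv_size.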
IMPLEMENT template<typename SPACE>
inline
void
Map_traits<SPACE>::constraint(Addr &snd_addr, Size &snd_size,
                              Addr &rcv_addr, Size const &rcv_size,
                              Addr const &hot_spot)
{
  if (SPACE::Identity_map)
    identity_constraint(snd_addr, snd_size, rcv_addr, rcv_size);
  else
    free_constraint(snd_addr, snd_size, rcv_addr, rcv_size, hot_spot);
}
//-------------------------------------------------------------------------
IMPLEMENTATION [io && !ux]:
IMPLEMENT template<>
inline
bool
Map_traits<Io_space>::match(L4_fpage const &from, L4_fpage const &to)
{ return from.is_iopage() && (to.is_iopage() || to.is_all_spaces()); }
IMPLEMENT template<>
inline
Map_traits<Io_space>::Addr
Map_traits<Io_space>::get_addr(L4_fpage const &fp)
{ return Addr(fp.io_address()); }
//-------------------------------------------------------------------------
IMPLEMENTATION:
IMPLEMENT template<>
inline
bool
Map_traits<Mem_space>::match(L4_fpage const &from, L4_fpage const &to)
{
  return from.is_mempage() && (to.is_all_spaces() || to.is_mempage());
}

IMPLEMENT template<>
inline
Map_traits<Mem_space>::Addr
Map_traits<Mem_space>::get_addr(L4_fpage const &fp)
{ return Addr(fp.mem_address()); }
PUBLIC template<>
static inline
void
Map_traits<Mem_space>::attribs(L4_msg_item control, L4_fpage const &fp,
                               unsigned long *del_attr, unsigned long *set_attr)
{
  *del_attr = (fp.rights() & L4_fpage::W) ? 0 : Mem_space::Page_writable;
  short cache = control.attr() & 0x70;

  if (cache & L4_msg_item::Caching_opt)
    {
      *del_attr |= Page::Cache_mask;

      if (cache == L4_msg_item::Cached)
        *set_attr = Page::CACHEABLE;
      else if (cache == L4_msg_item::Buffered)
        *set_attr = Page::BUFFERED;
      else
        *set_attr = Page::NONCACHEABLE;
    }
  else
    *set_attr = 0;
}
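// Example (explanatory, not from the original sources): mapping a frame
// read-only puts Mem_space::Page_writable into *del_attr, and a map item
// carrying Caching_opt with the Buffered encoding clears the old cache bits
// (Page::Cache_mask) before setting Page::BUFFERED. Without Caching_opt, the
// destination simply inherits the sender's cacheability.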
IMPLEMENT template<>
inline
bool
Map_traits<Obj_space>::match(L4_fpage const &from, L4_fpage const &to)
{ return from.is_objpage() && (to.is_objpage() || to.is_all_spaces()); }

IMPLEMENT template<>
inline
Map_traits<Obj_space>::Addr
Map_traits<Obj_space>::get_addr(L4_fpage const &fp)
{ return Addr(fp.obj_index()); }
IMPLEMENT template<>
inline
bool
Map_traits<Obj_space>::free_object(Obj_space::Phys_addr o,
                                   Obj_space::Reap_list **reap_list)
{
  if (o->map_root()->no_mappings())
    {
      o->initiate_deletion(reap_list);
      return true;
    }

  return false;
}
PUBLIC template<>
static inline
void
Map_traits<Obj_space>::attribs(L4_msg_item control, L4_fpage const &fp,
                               unsigned long *del_attr, unsigned long *set_attr)
{
  *set_attr = 0;
  *del_attr = (~(fp.rights() | (L4_msg_item::C_weak_ref ^ control.attr())));
}
PUBLIC template<>
static inline
unsigned long
Map_traits<Obj_space>::apply_attribs(unsigned long attribs,
                                     Obj_space::Phys_addr &a,
                                     unsigned long set_attr,
                                     unsigned long del_attr)
{
  if (attribs & del_attr & L4_msg_item::C_obj_specific_rights)
    a = a->downgrade(del_attr);

  return (attribs & ~del_attr) | set_attr;
}
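// Explanatory note (assumption about the capability encoding): object
// capabilities carry their object-specific rights in the entry itself, so
// removing such rights cannot be done by attribute arithmetic alone -- the
// capability value a is replaced by a downgraded variant first, and only the
// generic bits go through the usual (attribs & ~del) | set computation.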
/** Flexpage mapping.
    Divert to mem_map (for memory fpages), io_map (for I/O fpages),
    or obj_map (for object fpages).
    @param from source address space
    @param fp_from flexpage descriptor for virtual-address space range
                   in source address space
    @param to destination address space
    @param fp_to flexpage descriptor for virtual-address space range
                 in destination address space
    @param control message item carrying the sender-specified hot spot and
                   further mapping attributes (grant, caching)
    @param r reap list for objects that lose their last mapping
    @return IPC error code that describes the status of the operation
*/
// Don't inline -- it eats too much stack.
// inline NEEDS ["config.h", io_map]
L4_error
fpage_map(Space *from, L4_fpage fp_from, Space *to,
          L4_fpage fp_to, L4_msg_item control, Reap_list *r)
{
  if (Map_traits<Mem_space>::match(fp_from, fp_to))
    return mem_map(from, fp_from, to, fp_to, control);

  if (Map_traits<Io_space>::match(fp_from, fp_to))
    return io_map(from, fp_from, to, fp_to, control);

  if (Map_traits<Obj_space>::match(fp_from, fp_to))
    return obj_map(from, fp_from, to, fp_to, control, r->list());

  return L4_error::None;
}
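// Usage sketch (illustrative, simplified; the surrounding IPC code is an
// assumption): a typical caller transfers one send item and then reaps
// orphaned objects, roughly
//
//   Reap_list rl;
//   L4_error err = fpage_map(sender_space, snd_fpage,
//                            receiver_space, rcv_fpage, control, &rl);
//   // rl's destructor destroys kernel objects whose last mapping vanished
//
// The three match() tests are mutually exclusive because a flexpage names at
// most one space type; a non-matching fpage falls through to L4_error::None.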
/** Flexpage unmapping.
    Divert to mem_fpage_unmap (for memory fpages),
    io_fpage_unmap (for I/O fpages), or obj_fpage_unmap (for object fpages).
    @param space address space that should be flushed
    @param fp flexpage descriptor of the address-space range that should
              be flushed
    @param mask unmap mask: if its self-unmap bit is unset, only recursive
           mappings are flushed; if set, the region in the given address
           space is flushed as well. The access privileges to remove are
           taken from the flexpage's rights.
    @param rl reap list for objects whose last mapping is removed
    @return combined (bit-ORed) access status of the unmapped physical pages
*/
// Don't inline -- it eats too much stack.
// inline NEEDS ["config.h", io_fpage_unmap]
unsigned
fpage_unmap(Space *space, L4_fpage fp, L4_map_mask mask, Kobject ***rl)
{
  unsigned ret = 0;

  if (fp.is_iopage() || fp.is_all_spaces())
    ret |= io_fpage_unmap(space, fp, mask);

  if (fp.is_objpage() || fp.is_all_spaces())
    ret |= obj_fpage_unmap(space, fp, mask, rl);

  if (fp.is_mempage() || fp.is_all_spaces())
    ret |= mem_fpage_unmap(space, fp, mask);

  return ret;
}
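// Usage note (explanatory, not from the original sources): revoking only
// write access recursively, while keeping the caller's own mapping, means
// passing a flexpage whose rights name W and a mask without the self-unmap
// bit; the return value then carries the accumulated referenced/dirty status
// of every flushed page. Unlike fpage_map, the tests here are not exclusive:
// an all-spaces fpage flushes I/O ports, capabilities, and memory in one
// call.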
PUBLIC
void
Reap_list::del()
{
  if (EXPECT_TRUE(!_h))
    return;

  for (Kobject *reap = _h; reap; reap = reap->_next_to_reap)
    reap->destroy(list());

  current()->rcu_wait();

  for (Kobject *reap = _h; reap;)
    {
      Kobject *d = reap;
      reap = reap->_next_to_reap;
      delete d;
    }
}

PUBLIC inline
Reap_list::~Reap_list()
{ del(); }
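// Why the two-phase teardown above (explanatory note): destroy() first severs
// each object's kernel bindings (and may append further victims to this
// list), then rcu_wait() lets an RCU grace period elapse so that no CPU can
// still be dereferencing any of these objects, and only then is their memory
// actually freed. The second loop saves the successor pointer before delete
// because the node is gone afterwards.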
//////////////////////////////////////////////////////////////////////
// Utility functions for all address-space types
template <typename SPACE, typename MAPDB>
L4_error
map(MAPDB* mapdb,
    SPACE* from, Space *from_id,
    Page_number _snd_addr,
    Page_count snd_size,
    SPACE* to, Space *to_id,
    Page_number _rcv_addr,
    bool grant, unsigned attrib_add, unsigned attrib_del,
    typename SPACE::Reap_list **reap_list = 0)
{
  enum
  {
    PAGE_SIZE = SPACE::Map_page_size,
    PAGE_MASK = ~(PAGE_SIZE - 1),
    SUPERPAGE_SIZE = SPACE::Map_superpage_size,
    SUPERPAGE_MASK = ~((SUPERPAGE_SIZE - 1) >> SPACE::Page_shift)
  };

  typedef typename SPACE::Size Size;
  typedef typename MAPDB::Mapping Mapping;
  typedef typename MAPDB::Frame Frame;
  typedef typename Mu::Virt_addr<SPACE>::Type Vaddr;
  typedef Map_traits<SPACE> Mt;
  L4_error condition = L4_error::None;

  // FIXME: make this debugging code optional
  bool no_page_mapped = true;

  Vaddr rcv_addr(_rcv_addr);
  Vaddr snd_addr(_snd_addr);
  const Vaddr rcv_start = rcv_addr;
  const Page_count rcv_size = snd_size;
  // We now loop through all the pages we want to send from the
  // sender's address space, looking up appropriate parent mappings in
  // the mapping data base, and entering a child mapping and a page
  // table entry for the receiver.

  // Special care is taken for 4MB page table entries we find in the
  // sender's address space: If what we will create in the receiver is
  // not a 4MB-mapping, too, we have to find the correct parent
  // mapping for the new mapping database entry: This is the sigma0
  // mapping for all addresses != the 4MB page base address.

  // When overmapping an existing page, flush the interfering
  // physical page in the receiver, even if it is larger than the
  // page being mapped.

  // Verify that sender and receiver virtual addresses are still within
  // bounds; if not, bail out. Sigma0 may send from any address (even
  // from an out-of-bound one).
  bool need_tlb_flush = false;
  bool need_xcpu_tlb_flush = false;
  Page_number const to_max = to->map_max_address();
  Page_number const from_max = from->map_max_address();
  Size const from_superpage_size(from->superpage_size());
  bool const has_superpages = to->has_superpages();

  for (Size size;
       snd_size                     // pages left for sending?
       && rcv_addr < to_max
       && snd_addr < from_max;
       rcv_addr += size, snd_addr += size, snd_size -= size)
    {
      // Reset the increment size to one page.
      size = Size(PAGE_SIZE);

      // First, look up the page table entries in the sender and
      // receiver address spaces.

      // make gcc happy, initialized later anyway
      typename SPACE::Phys_addr s_phys;
      Size s_size(0);
      unsigned s_attribs = 0;
      // Sigma0 special case: Sigma0 doesn't need to have a
      // fully-constructed page table, and it can fabricate mappings
      // for all physical addresses.
      if (EXPECT_FALSE(! from->v_fabricate(snd_addr, &s_phys,
                                           &s_size, &s_attribs)))
        continue;

      // We have a mapping in the sender's address space.
      // FIXME: make this debugging code optional
      no_page_mapped = false;
      // The "may be used uninitialized" warning for these variables is bogus:
      // the v_lookup function must initialize the values if it returns true.
      typename SPACE::Phys_addr r_phys;
      Size r_size;
      unsigned r_attribs;

      // Compute attributes for the to-be-inserted frame
      typename SPACE::Phys_addr i_phys = s_phys;
      Size i_size = s_size;
      bool const rcv_page_mapped
        = to->v_lookup(rcv_addr, &r_phys, &r_size, &r_attribs);
      // See if we have to degrade to non-superpage mappings
      if (has_superpages && i_size == from_superpage_size)
        {
          if (i_size > snd_size
              // want to send less than a superpage?
              || i_size > r_size  // not enough space for superpage map?
              || snd_addr.offset(Size(SUPERPAGE_SIZE)) // snd page not aligned?
              || rcv_addr.offset(Size(SUPERPAGE_SIZE)) // rcv page not aligned?
              || (rcv_addr + from_superpage_size > rcv_start + rcv_size))
                                  // rcv area too small?
            {
              // We map a 4K mapping from a 4MB mapping
              i_size = Size(PAGE_SIZE);

              if (Size super_offset = snd_addr.offset(Size(SUPERPAGE_SIZE)))
                {
                  // Just use OR here because i_phys may already contain
                  // the offset. (As is on ARM)
                  i_phys = SPACE::subpage_address(i_phys, super_offset);
                }

              if (grant)
                {
                  WARN("XXX Can't GRANT page from superpage (%p: " L4_PTR_FMT
                       " -> %p: " L4_PTR_FMT "), demoting to MAP\n",
                       from_id, snd_addr.value(), to_id, rcv_addr.value());
                  grant = 0;
                }
            }
        }
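      // Illustrative example (not from the original sources): sending a 4MB
      // superpage into a receive window that is only 4KB-aligned trips the
      // alignment test above, so i_size drops to PAGE_SIZE and the outer
      // loop walks the superpage in small steps, offsetting i_phys to the
      // matching subpage each round. A grant is demoted to a map here
      // because granting would take the whole sender superpage away while
      // only one subpage is transferred.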
      // Also, look up the mapping database entry. Depending on whether
      // we can overmap, either look up the destination mapping first
      // (and compute the sender mapping from it) or look up the
      // sender mapping directly.
      Mapping* sender_mapping = 0;
      // mapdb_frame will be initialized by the mapdb lookup function when
      // it returns true, so don't care about "may be used uninitialized"
      // warnings here.
      Frame mapdb_frame;
      bool doing_upgrade = false;
      if (rcv_page_mapped)
        {
          // We have something mapped.

          // Check if we can upgrade the mapping. Otherwise, flush the
          // target mapping.
          if (! grant             // Grant currently always flushes
              && r_size <= i_size // Rcv frame in snd frame
              && SPACE::page_address(r_phys, i_size) == i_phys
              && (sender_mapping = mapdb->check_for_upgrade(r_phys, from_id, snd_addr, to_id, rcv_addr, &mapdb_frame)))
            doing_upgrade = true;

          if (! sender_mapping) // Need to flush
            unmap(mapdb, to, to_id, rcv_addr.trunc(r_size), r_size,
                  L4_fpage::RWX, L4_map_mask::full(), reap_list);
        }
      if (! sender_mapping && mapdb->valid_address(s_phys))
        {
          if (EXPECT_FALSE(! mapdb->lookup(from_id,
                                           snd_addr.trunc(s_size), s_phys,
                                           &sender_mapping, &mapdb_frame)))
            continue; // someone deleted this mapping in the meantime
        }

      // from here on mapdb_frame is always initialized, so ignore the
      // "may be used uninitialized" warning
      // At this point, we have a lookup for the sender frame (s_phys,
      // s_size, s_attribs), the max. size of the receiver frame
      // (r_phys), the sender_mapping, and whether a receiver mapping
      // already exists (doing_upgrade).

      unsigned long i_attribs
        = Mt::apply_attribs(s_attribs, i_phys, attrib_add, attrib_del);
      // Loop increment is size of insertion
      size = i_size;

      // Do the actual insertion.
      typename SPACE::Status status
        = to->v_insert(i_phys, rcv_addr, i_size, i_attribs);
      switch (status)
        {
        case SPACE::Insert_warn_exists:
        case SPACE::Insert_warn_attrib_upgrade:
        case SPACE::Insert_ok:

          assert_kdb (mapdb->valid_address(s_phys) || status == SPACE::Insert_ok);
          // Never doing upgrades for mapdb-unmanaged memory

          if (grant)
            {
              if (mapdb->valid_address(s_phys))
                if (EXPECT_FALSE(!mapdb->grant(mapdb_frame, sender_mapping,
                                               to_id, rcv_addr)))
                  {
                    // Error -- remove mapping again.
                    to->v_delete(rcv_addr, i_size);

                    // may fail due to quota limits
                    condition = L4_error::Map_failed;
                    break;
                  }

              from->v_delete(snd_addr.trunc(s_size), s_size);
              need_tlb_flush = true;
            }
          else if (status == SPACE::Insert_ok)
            {
              assert_kdb (!doing_upgrade);

              if (mapdb->valid_address(s_phys)
                  && !mapdb->insert(mapdb_frame, sender_mapping,
                                    to_id, rcv_addr, i_phys, i_size))
                {
                  // Error -- remove mapping again.
                  to->v_delete(rcv_addr, i_size);

                  // XXX This is not race-free as the mapping could have
                  // been used in the meantime, but we do not care.
                  condition = L4_error::Map_failed;
                  break;
                }
            }

          if (SPACE::Need_xcpu_tlb_flush && SPACE::Need_insert_tlb_flush)
            need_xcpu_tlb_flush = true;

          break;
        case SPACE::Insert_err_nomem:
          condition = L4_error::Map_failed;
          break;

        case SPACE::Insert_err_exists:
          WARN("map (%s) skipping area (%p/%lx): " L4_PTR_FMT
               " -> %p/%lx: " L4_PTR_FMT " (%lx)", SPACE::name,
               from_id, Kobject_dbg::pointer_to_id(from_id), snd_addr.value(),
               to_id, Kobject_dbg::pointer_to_id(to_id), rcv_addr.value(),
               i_size.value());
          // Do not flag an error here -- because according to L4
          // semantics, it isn't.
          break;
        }
      if (sender_mapping)
        mapdb->free(mapdb_frame);
    }

  if (need_tlb_flush)
    // we removed mappings from the sender's space (grant); flush its TLB
    from->tlb_flush();

  if (SPACE::Need_xcpu_tlb_flush && need_xcpu_tlb_flush)
    Context::xcpu_tlb_flush(false, to, from);
  // FIXME: make this debugging code optional
  if (EXPECT_FALSE(no_page_mapped))
    WARN("nothing mapped: (%s) from [%p/%lx]: " L4_PTR_FMT
         " size: " L4_PTR_FMT " to [%p/%lx]\n", SPACE::name,
         from_id, Kobject_dbg::pointer_to_id(from_id), snd_addr.value(),
         rcv_size.value(),
         to_id, Kobject_dbg::pointer_to_id(to_id));

  return condition;
}
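// Instantiation sketch (illustrative; the wrapper name follows the calls in
// fpage_map above, everything else is an assumption): each space type wraps
// this template with its concrete mapdb, e.g. mem_map() computes send and
// receive addresses via Map_traits<Mem_space>::get_addr()/constraint() and
// then calls roughly
//
//   map(mapdb, from->mem_space(), from, snd_addr, snd_size,
//       to->mem_space(), to, rcv_addr, /*grant*/ control.is_grant(),
//       attrib_add, attrib_del);
//
// so the template is never used directly by the IPC path.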
template <typename SPACE, typename MAPDB>
unsigned
unmap(MAPDB* mapdb, SPACE* space, Space *space_id,
      Page_number start, Page_count size, unsigned char rights,
      L4_map_mask mask, typename SPACE::Reap_list **reap_list)
{
  typedef typename SPACE::Size Size;
  typedef typename MAPDB::Mapping Mapping;
  typedef typename MAPDB::Iterator Iterator;
  typedef typename MAPDB::Frame Frame;
  typedef typename Mu::Virt_addr<SPACE>::Type Vaddr;

  bool me_too = mask.self_unmap();

  Mword flushed_rights = 0;
  Page_number end = start + size;
  Page_number const map_max = space->map_max_address();

  // make gcc happy, initialized later anyway
  typename SPACE::Phys_addr phys;
  Page_count phys_size;
  Vaddr page_address;

  Mword const flush_rights = SPACE::xlate_flush(rights);
  bool const full_flush = SPACE::is_full_flush(rights);
  bool need_tlb_flush = false;
  bool need_xcpu_tlb_flush = false;
  // iterate over all pages in "space"'s page table that are mapped
  // into the specified region
  for (Vaddr address(start);
       address < end && address < map_max;
       address = page_address + phys_size)
    {
      // for amd64 mem_spaces this will skip the hole in the address space
      address = SPACE::canonize(address);

      bool have_page;
        {
          Size ps;
          have_page = space->v_fabricate(address, &phys, &ps);
          phys_size = ps;
        }

      page_address = address.trunc(phys_size);

      // phys_size and page_address have now been set up, allowing the
      // use of continue (which evaluates the for-loop's iteration
      // expression involving these two variables).

      if (! have_page)
        continue;
      if (me_too)
        {
          assert_kdb (address == page_address
                      || phys_size == Size(SPACE::Map_superpage_size));

          // Rewind the flush address to the page address. We always flush
          // the whole page, even if it is larger than the specified
          // flush area.
          address = page_address;
          if (end < address + phys_size)
            end = address + phys_size;
        }
      // all pages shall be handled by our mapping data base
      assert_kdb (mapdb->valid_address(phys));

      Mapping *mapping;
      Frame mapdb_frame;

      if (! mapdb->lookup(space_id, page_address, phys,
                          &mapping, &mapdb_frame))
        // someone else unmapped faster
        continue;
      Mword page_rights = 0;

      // Delete from this address space
      if (me_too)
        {
          page_rights |=
            space->v_delete(address, phys_size, flush_rights);

          // assert_kdb (full_flush != space->v_lookup(address));
          need_tlb_flush = true;
          need_xcpu_tlb_flush = true;
        }
      // now delete from the other address spaces
      for (Iterator m(mapdb_frame, mapping, address, end);
           m;
           ++m)
        {
          page_rights |= Mu::v_delete<SPACE>(m, flush_rights, full_flush);
          need_xcpu_tlb_flush = true;
        }

      flushed_rights |= page_rights;
      // Store access attributes for later retrieval
      save_access_attribs(mapdb, mapdb_frame, mapping,
                          space, page_rights, page_address, phys, phys_size,
                          me_too);

      if (full_flush)
        mapdb->flush(mapdb_frame, mapping, mask, address, end);

      if (full_flush)
        Map_traits<SPACE>::free_object(phys, reap_list);

      mapdb->free(mapdb_frame);
    }
  if (need_tlb_flush)
    space->tlb_flush();

  if (SPACE::Need_xcpu_tlb_flush && need_xcpu_tlb_flush)
    Context::xcpu_tlb_flush(true, space, 0);

  return SPACE::xlate_flush_result(flushed_rights);
}
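// Note (explanatory, not from the original sources): unmap() operates on
// whole pages. If "me_too" flushing hits a superpage, the address is rewound
// to the page start and "end" is pushed out past the requested range, so
// callers must not assume the flush stops exactly at start + size.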
//----------------------------------------------------------------------------
IMPLEMENTATION [!io || ux]:

// Empty dummy functions when I/O protection is disabled
inline
void init_mapdb_io(Space *)
{}

inline
L4_error
io_map(Space *, L4_fpage const &, Space *, L4_fpage const &, L4_msg_item)
{
  return L4_error::None;
}

inline
unsigned
io_fpage_unmap(Space * /*space*/, L4_fpage const &/*fp*/, L4_map_mask)
{
  return 0;
}