template<typename SPACE>
struct Virt_addr { typedef Page_number Type; };

template<>
struct Virt_addr<Obj_space> { typedef Obj_space::Addr Type; };

template< typename SPACE, typename M >
inline
Mword v_delete(M &m, Mword flush_rights)
{
  SPACE* child_space = 0;
  check (m->space()->lookup_space(&child_space));
  return child_space->v_delete(m.page(), m.size(), flush_rights);
}
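
/* Note: this generic helper deletes the child mapping from the space it
   lives in; the Obj_space specialization below operates on the
   capability entry directly (invalidate or del_rights), since object
   capabilities have no page table to modify. */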
template< typename SPACE >
class Map_traits
{
public:
  typedef Page_number Addr;
  typedef Page_count Size;

  static Addr get_addr(L4_fpage const &fp);
  static void constraint(Addr &snd_addr, Size &snd_size,
                         Addr &rcv_addr, Size const &rcv_size,
                         Addr const &hot_spot);
  static bool match(L4_fpage const &from, L4_fpage const &to);
  static bool free_object(typename SPACE::Phys_addr o,
                          typename SPACE::Reap_list **reap_list);
};
class Reap_list
{
private:
  Kobject *_h;
  Kobject **_t;

public:
  Reap_list() : _h(0), _t(&_h) {}
  Kobject ***list() { return &_t; }
};
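
/* Note: Reap_list is the classic head/tail-pointer singly-linked list:
   _t always points to the terminating null link, so destroy()
   implementations can append further victims in O(1) through list().
   The reaping pass further below walks _h, destroys the objects, waits
   for an RCU grace period and only then deletes them. */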
template<>
inline
Mword
v_delete<Obj_space>(Kobject_mapdb::Iterator &m, Mword flush_rights)
{
  Obj_space::Entry *c = static_cast<Obj_space::Entry*>(*m);

  if (c->valid())
    {
      if (flush_rights & L4_fpage::R)
        c->invalidate();
      else
        c->del_rights(flush_rights & L4_fpage::WX);
    }
  return 0;
}
//------------------------------------------------------------------------
IMPLEMENT template<typename SPACE>
inline
bool
Map_traits<SPACE>::match(L4_fpage const &, L4_fpage const &)
{ return false; }

IMPLEMENT template<typename SPACE>
inline
bool
Map_traits<SPACE>::free_object(typename SPACE::Phys_addr,
                               typename SPACE::Reap_list **)
{ return false; }
PUBLIC template< typename SPACE >
static inline
void
Map_traits<SPACE>::attribs(L4_msg_item /*control*/, L4_fpage const &/*fp*/,
                           unsigned long *del_attr, unsigned long *set_attr)
{ *del_attr = 0; *set_attr = 0; }

PUBLIC template< typename SPACE >
static inline
unsigned long
Map_traits<SPACE>::apply_attribs(unsigned long attribs,
                                 typename SPACE::Phys_addr &,
                                 unsigned long set_attr, unsigned long del_attr)
{ return (attribs & ~del_attr) | set_attr; }
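
/* Illustration (hypothetical bit values): with attribs = 0b110,
   del_attr = 0b010 and set_attr = 0b001, the default policy yields
   (0b110 & ~0b010) | 0b001 = 0b101 -- deletions are applied before
   additions, so a bit present in both masks ends up set. */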
PRIVATE template<typename SPACE>
static inline
void
Map_traits<SPACE>::identity_constraint(Addr &snd_addr, Size &snd_size,
                                       Addr rcv_addr, Size rcv_size)
{
  if (rcv_addr > snd_addr)
    {
      if (rcv_addr - snd_addr < snd_size)
        snd_size -= rcv_addr - snd_addr;
      else
        snd_size = Size(0);

      snd_addr = rcv_addr;
    }

  if (snd_size > rcv_size)
    snd_size = rcv_size;
}
PRIVATE template<typename SPACE>
static inline
void
Map_traits<SPACE>::free_constraint(Addr &snd_addr, Size &snd_size,
                                   Addr &rcv_addr, Size rcv_size,
                                   Addr const &hot_spot)
{
  if (rcv_size >= snd_size)
    rcv_addr += hot_spot.offset(rcv_size).trunc(snd_size);
  else
    {
      snd_addr += hot_spot.offset(snd_size).trunc(rcv_size);
      // reduce the size of the address range to the receive window
      snd_size = rcv_size;
    }
}
IMPLEMENT template<typename SPACE>
inline
void
Map_traits<SPACE>::constraint(Addr &snd_addr, Size &snd_size,
                              Addr &rcv_addr, Size const &rcv_size,
                              Addr const &hot_spot)
{
  if (SPACE::Identity_map)
    identity_constraint(snd_addr, snd_size, rcv_addr, rcv_size);
  else
    free_constraint(snd_addr, snd_size, rcv_addr, rcv_size, hot_spot);
}
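
/* Worked example for the non-identity case (hypothetical page counts):
   sending a 2-page flexpage into an 8-page receive window with
   hot_spot = 5 gives rcv_addr += hot_spot.offset(8 pages).trunc(2 pages)
   = 4 pages, so the mapping lands at pages 4-5 of the window.  If the
   receive window is the smaller one, the roles flip: snd_addr advances
   by the truncated hot spot and snd_size is clipped to rcv_size. */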
//-------------------------------------------------------------------------
IMPLEMENT template<>
inline
bool
Map_traits<Io_space>::match(L4_fpage const &from, L4_fpage const &to)
{ return from.is_iopage() && (to.is_iopage() || to.is_all_spaces()); }

IMPLEMENT template<>
inline
Map_traits<Io_space>::Addr
Map_traits<Io_space>::get_addr(L4_fpage const &fp)
{ return Addr(fp.io_address()); }
//-------------------------------------------------------------------------
IMPLEMENT template<>
inline
bool
Map_traits<Mem_space>::match(L4_fpage const &from, L4_fpage const &to)
{
  return from.is_mempage() && (to.is_all_spaces() || to.is_mempage());
}

IMPLEMENT template<>
inline
Map_traits<Mem_space>::Addr
Map_traits<Mem_space>::get_addr(L4_fpage const &fp)
{ return Addr(fp.mem_address()); }
PUBLIC template<>
static inline
void
Map_traits<Mem_space>::attribs(L4_msg_item control, L4_fpage const &fp,
                               unsigned long *del_attr, unsigned long *set_attr)
{
  *del_attr = (fp.rights() & L4_fpage::W) ? 0 : Mem_space::Page_writable;
  short cache = control.attr() & 0x70;

  if (cache & L4_msg_item::Caching_opt)
    {
      *del_attr |= Page::Cache_mask;

      if (cache == L4_msg_item::Cached)
        *set_attr = Page::CACHEABLE;
      else if (cache == L4_msg_item::Buffered)
        *set_attr = Page::BUFFERED;
      else
        *set_attr = Page::NONCACHEABLE;
    }
  else
    *set_attr = 0;
}
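
/* Example (illustrative): a map item carrying L4_msg_item::Buffered in
   its attribute field deletes Page::Cache_mask and sets Page::BUFFERED,
   making the receiver's mapping write-buffered regardless of how the
   sender maps the page.  Items without Caching_opt inherit the sender's
   caching attributes unchanged. */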
//-------------------------------------------------------------------------
IMPLEMENT template<>
inline
bool
Map_traits<Obj_space>::match(L4_fpage const &from, L4_fpage const &to)
{ return from.is_objpage() && (to.is_objpage() || to.is_all_spaces()); }

IMPLEMENT template<>
inline
Map_traits<Obj_space>::Addr
Map_traits<Obj_space>::get_addr(L4_fpage const &fp)
{ return Addr(fp.obj_index()); }
IMPLEMENT template<>
inline
bool
Map_traits<Obj_space>::free_object(Obj_space::Phys_addr o,
                                   Obj_space::Reap_list **reap_list)
{
  if (o->map_root()->no_mappings())
    {
      o->initiate_deletion(reap_list);
      return true;
    }

  return false;
}
PUBLIC template<>
static inline
void
Map_traits<Obj_space>::attribs(L4_msg_item control, L4_fpage const &fp,
                               unsigned long *del_attr, unsigned long *set_attr)
{
  *set_attr = 0;
  *del_attr = ~(fp.rights() | (L4_msg_item::C_weak_ref ^ control.attr()));
}
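
/* Note (interpretation, not from the original source): XORing
   control.attr() with L4_msg_item::C_weak_ref turns the requested
   reference strength into an attribute bit to keep, so the new
   capability retains only fp.rights() plus that strength bit;
   everything else is deleted. */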
PUBLIC template<>
static inline
unsigned long
Map_traits<Obj_space>::apply_attribs(unsigned long attribs,
                                     Obj_space::Phys_addr &a,
                                     unsigned long set_attr,
                                     unsigned long del_attr)
{
  if (attribs & del_attr & L4_msg_item::C_obj_specific_rights)
    a = a->downgrade(del_attr);

  return (attribs & ~del_attr) | set_attr;
}
/** Flexpage mapping.
    Divert to mem_map (for memory fpages), io_map (for I/O fpages) or
    obj_map (for object fpages).
    @param from source address space
    @param fp_from flexpage descriptor for virtual-address space range
                   in source address space
    @param to destination address space
    @param fp_to flexpage descriptor for virtual-address space range
                 in destination address space
    @param control message item carrying the sender-specified hot spot
                   into the destination flexpage and the map/grant mode
    @param r reap list collecting objects whose last mapping vanishes
    @pre the hot spot in control is page-aligned
    @return error code that describes the status of the operation
*/
// Don't inline -- it eats too much stack.
// inline NEEDS ["config.h", io_map]
L4_error
fpage_map(Space *from, L4_fpage fp_from, Space *to,
          L4_fpage fp_to, L4_msg_item control, Reap_list *r)
{
  if (Map_traits<Mem_space>::match(fp_from, fp_to))
    return mem_map(from, fp_from, to, fp_to, control);

#ifdef CONFIG_IO_PROT
  if (Map_traits<Io_space>::match(fp_from, fp_to))
    return io_map(from, fp_from, to, fp_to, control);
#endif

  if (Map_traits<Obj_space>::match(fp_from, fp_to))
    return obj_map(from, fp_from, to, fp_to, control, r->list());

  return L4_error::None;
}
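
/* Usage sketch (caller and variable names are hypothetical): the IPC
   path typically stack-allocates the reap list so that its destructor
   reaps outside the mapping-database critical section:

     Reap_list rl;
     L4_error err = fpage_map(snd_space, snd_fp, rcv_space, rcv_fp,
                              item, &rl);
     // rl's destructor destroys objects whose last mapping vanished
*/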
/** Flexpage unmapping.
    Divert to mem_fpage_unmap (for memory fpages), io_fpage_unmap
    (for I/O fpages) or obj_fpage_unmap (for object fpages).
    @param space address space that should be flushed
    @param fp flexpage descriptor of the address-space range that
              should be flushed
    @param mask unmap mask: if the self-unmap bit is unset, only flush
                recursive mappings; if set, additionally flush the
                region in the given address space.  Also determines
                which access privileges to remove.
    @param rl reap list collecting objects whose last mapping vanishes
    @return combined (bit-ORed) access status of the unmapped physical
            pages
*/
// Don't inline -- it eats too much stack.
// inline NEEDS ["config.h", io_fpage_unmap]
Mword
fpage_unmap(Space *space, L4_fpage fp, L4_map_mask mask, Kobject ***rl)
{
  Mword ret = 0;

  if (Config::enable_io_protection && (fp.is_iopage() || fp.is_all_spaces()))
    ret |= io_fpage_unmap(space, fp, mask);

  if (fp.is_objpage() || fp.is_all_spaces())
    ret |= obj_fpage_unmap(space, fp, mask, rl);

  if (fp.is_mempage() || fp.is_all_spaces())
    ret |= mem_fpage_unmap(space, fp, mask);

  return ret;
}
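
/* Note: for an fp.is_all_spaces() flexpage all three paths run and
   their access-status bits are ORed into one Mword, so a single call
   reports the combined status of memory, I/O and object mappings. */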
PUBLIC
void
Reap_list::del()
{
  if (EXPECT_TRUE(!_h))
    return;

  for (Kobject *reap = _h; reap; reap = reap->_next_to_reap)
    reap->destroy(list());

  current()->rcu_wait();

  for (Kobject *reap = _h; reap;)
    {
      Kobject *d = reap;
      reap = reap->_next_to_reap;
      delete d;
    }
}

PUBLIC inline
Reap_list::~Reap_list()
{ del(); }
//////////////////////////////////////////////////////////////////////
//
// Utility functions for all address-space types
//
template <typename SPACE, typename MAPDB>
L4_error
map(MAPDB* mapdb,
    SPACE* from, Space *from_id,
    Page_number _snd_addr,
    Page_count snd_size,
    SPACE* to, Space *to_id,
    Page_number _rcv_addr,
    bool grant, unsigned attrib_add, unsigned attrib_del,
    typename SPACE::Reap_list **reap_list = 0)
{
  enum
  {
    PAGE_SIZE = SPACE::Map_page_size,
    PAGE_MASK = ~(PAGE_SIZE - 1),
    SUPERPAGE_SIZE = SPACE::Map_superpage_size,
    SUPERPAGE_MASK = ~((SUPERPAGE_SIZE - 1) >> SPACE::Page_shift)
  };

  typedef typename SPACE::Size Size;
  typedef typename MAPDB::Mapping Mapping;
  typedef typename MAPDB::Frame Frame;
  typedef typename Mu::Virt_addr<SPACE>::Type Vaddr;
  typedef Map_traits<SPACE> Mt;
  L4_error condition = L4_error::None;

  bool no_page_mapped = true;
  Vaddr rcv_addr(_rcv_addr);
  Vaddr snd_addr(_snd_addr);
  const Vaddr rcv_start = rcv_addr;
  const Page_count rcv_size = snd_size;
  // We now loop through all the pages we want to send from the
  // sender's address space, looking up appropriate parent mappings in
  // the mapping data base, and entering a child mapping and a page
  // table entry for the receiver.

  // Special care is taken for 4MB page table entries we find in the
  // sender's address space: If what we will create in the receiver is
  // not a 4MB-mapping, too, we have to find the correct parent
  // mapping for the new mapping database entry: This is the sigma0
  // mapping for all addresses != the 4MB page base address.

  // When overmapping an existing page, flush the interfering
  // physical page in the receiver, even if it is larger than the
  // newly-mapped page.

  // Verify that sender and receiver virtual addresses are still
  // within bounds; if not, bail out.  Sigma0 may send from any
  // address (even from an out-of-bounds one).
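
  /* Sketch of the superpage demotion below (sizes are x86 defaults and
     purely illustrative): if the sender holds a 4MB superpage at
     0x800000 but the receive window only admits 8KB, the loop falls
     back to PAGE_SIZE steps, derives each frame's physical address via
     SPACE::subpage_address(), and attaches each 4KB child to the
     parent mapping covering the superpage. */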
  bool need_tlb_flush = false;
  bool need_xcpu_tlb_flush = false;
  Page_number const to_max = to->map_max_address();
  Page_number const from_max = from->map_max_address();
  Size const from_superpage_size(from->superpage_size());
  bool const has_superpages = to->has_superpages();
  Size size(0);

  for (;
       snd_size                          // pages left for sending?
       && rcv_addr < to_max
       && snd_addr < from_max;
       rcv_addr += size, snd_addr += size, snd_size -= size)
    {
      // Reset the increment size to one page.
      size = Size(PAGE_SIZE);
      // First, look up the page table entries in the sender and
      // receiver address spaces.

      // make gcc happy, initialized later anyway
      typename SPACE::Phys_addr s_phys;
      Size s_size(0);
      unsigned s_attribs = 0;
      // Sigma0 special case: Sigma0 doesn't need to have a
      // fully-constructed page table, and it can fabricate mappings
      // for all physical addresses.
      if (EXPECT_FALSE(! from->v_fabricate(snd_addr, &s_phys,
                                           &s_size, &s_attribs)))
        continue;

      // We have a mapping in the sender's address space.
      no_page_mapped = false;
      typename SPACE::Phys_addr r_phys;
      Size r_size;
      unsigned r_attribs;

      // Compute attributes for to-be-inserted frame
      typename SPACE::Phys_addr i_phys = s_phys;
      Size i_size = s_size;

      bool const rcv_page_mapped
        = to->v_lookup(rcv_addr, &r_phys, &r_size, &r_attribs);
      // See if we have to degrade to non-superpage mappings
      if (has_superpages && i_size == from_superpage_size)
        {
          if (i_size > snd_size
              // want to send less than a superpage?
              || i_size > r_size          // not enough space for superpage map?
              || snd_addr.offset(Size(SUPERPAGE_SIZE))  // snd page not aligned?
              || rcv_addr.offset(Size(SUPERPAGE_SIZE))  // rcv page not aligned?
              || (rcv_addr + from_superpage_size > rcv_start + rcv_size))
                                          // rcv area too small?
            {
              // We map a 4K mapping from a 4MB mapping
              i_size = Size(PAGE_SIZE);

              if (Size super_offset = snd_addr.offset(Size(SUPERPAGE_SIZE)))
                {
                  // Just use OR here because i_phys may already contain
                  // the offset. (As is on ARM)
                  i_phys = SPACE::subpage_address(i_phys, super_offset);
                }
              if (grant)
                {
                  WARN("XXX Can't GRANT page from superpage (%p: " L4_PTR_FMT
                       " -> %p: " L4_PTR_FMT "), demoting to MAP\n",
                       from_id, snd_addr.value(), to_id, rcv_addr.value());
                  grant = 0;
                }
            }
        }
      // Also, look up mapping database entry.  Depending on whether
      // we can overmap, either look up the destination mapping first
      // (and compute the sender mapping from it) or look up the
      // sender mapping directly.
      Mapping *sender_mapping = 0;
      Frame mapdb_frame;
      bool doing_upgrade = false;
      if (rcv_page_mapped)
        {
          // We have something mapped.

          // Check if we can upgrade mapping.  Otherwise, flush target
          // mapping.
          if (! grant                    // Grant currently always flushes
              && r_size <= i_size        // Rcv frame in snd frame
              && SPACE::page_address(r_phys, i_size) == i_phys
              && (sender_mapping = mapdb->check_for_upgrade(r_phys, from_id, snd_addr, to_id, rcv_addr, &mapdb_frame)))
            doing_upgrade = true;

          if (! sender_mapping)          // Need flush
            {
              unmap(mapdb, to, to_id, rcv_addr.trunc(r_size), r_size,
                    L4_fpage::RWX, L4_map_mask::full(), reap_list);
            }
        }
      if (! sender_mapping && mapdb->valid_address(s_phys))
        {
          if (EXPECT_FALSE(! mapdb->lookup(from_id,
                                           snd_addr.trunc(s_size), s_phys,
                                           &sender_mapping, &mapdb_frame)))
            continue;       // someone deleted this mapping in the meantime
        }
      // At this point, we have a lookup for the sender frame (s_phys,
      // s_size, s_attribs), the max. size of the receiver frame
      // (r_phys), the sender_mapping, and whether a receiver mapping
      // already exists (doing_upgrade).

      unsigned i_attribs
        = Mt::apply_attribs(s_attribs, i_phys, attrib_add, attrib_del);
      // Loop increment is size of insertion
      size = i_size;

      // Do the actual insertion.
      typename SPACE::Status status
        = to->v_insert(i_phys, rcv_addr, i_size, i_attribs);
      switch (status)
        {
        case SPACE::Insert_warn_exists:
        case SPACE::Insert_warn_attrib_upgrade:
        case SPACE::Insert_ok:

          assert (mapdb->valid_address(s_phys) || status == SPACE::Insert_ok);
          // Never doing upgrades for mapdb-unmanaged memory

          if (grant)
            {
              if (mapdb->valid_address(s_phys))
                if (EXPECT_FALSE(!mapdb->grant(mapdb_frame, sender_mapping,
                                               to_id, rcv_addr)))
                  {
                    // Error -- remove mapping again.
                    to->v_delete(rcv_addr, i_size);
                    // may fail due to quota limits
                    condition = L4_error::Map_failed;
                    break;
                  }

              from->v_delete(snd_addr.trunc(s_size), s_size);
              need_tlb_flush = true;
            }
          else if (status == SPACE::Insert_ok)
            {
              assert (!doing_upgrade);

              if (mapdb->valid_address(s_phys)
                  && !mapdb->insert(mapdb_frame, sender_mapping,
                                    to_id, rcv_addr,
                                    i_phys, i_size))
                {
                  // Error -- remove mapping again.
                  to->v_delete(rcv_addr, i_size);

                  // XXX This is not race-free as the mapping could have
                  // been used in the mean-time, but we do not care.
                  condition = L4_error::Map_failed;
                  break;
                }
            }

          if (SPACE::Need_xcpu_tlb_flush && SPACE::Need_insert_tlb_flush)
            need_xcpu_tlb_flush = true;

          break;
        case SPACE::Insert_err_nomem:
          condition = L4_error::Map_failed;
          break;

        case SPACE::Insert_err_exists:
          WARN("map (%s) skipping area (%p/%lx): " L4_PTR_FMT
               " -> %p/%lx: " L4_PTR_FMT " (%lx)", SPACE::name,
               from_id, Kobject_dbg::pointer_to_id(from_id), snd_addr.value(),
               to_id, Kobject_dbg::pointer_to_id(to_id), rcv_addr.value(),
               i_size.value());
          // Do not flag an error here -- because according to L4
          // semantics, it isn't.
          break;
        }

      if (sender_mapping)
        mapdb->free(mapdb_frame);

      if (EXPECT_FALSE(! condition.ok()))
        break;
    }
  if (need_tlb_flush)
    from->tlb_flush();

  if (SPACE::Need_xcpu_tlb_flush && need_xcpu_tlb_flush)
    Context::xcpu_tlb_flush(false, to, from);
  if (EXPECT_FALSE(no_page_mapped))
    {
      WARN("nothing mapped: (%s) from [%p/%lx]: " L4_PTR_FMT
           " size: " L4_PTR_FMT " to [%p/%lx]\n", SPACE::name,
           from_id, Kobject_dbg::pointer_to_id(from_id), snd_addr.value(),
           rcv_size.value(),
           to_id, Kobject_dbg::pointer_to_id(to_id));
    }

  return condition;
}
template <typename SPACE, typename MAPDB>
unsigned
unmap(MAPDB* mapdb, SPACE* space, Space *space_id,
      Page_number start, Page_count size, unsigned char rights,
      L4_map_mask mask, typename SPACE::Reap_list **reap_list)
{
  typedef typename SPACE::Size Size;
  typedef typename SPACE::Addr Addr;
  typedef typename MAPDB::Mapping Mapping;
  typedef typename MAPDB::Iterator Iterator;
  typedef typename MAPDB::Frame Frame;
  typedef typename Mu::Virt_addr<SPACE>::Type Vaddr;
  bool me_too = mask.self_unmap();

  Mword flushed_rights = 0;
  Page_number end = start + size;
  Page_number const map_max = space->map_max_address();

  // make gcc happy, initialized later anyway
  typename SPACE::Phys_addr phys;
  Page_count phys_size;
  Vaddr page_address;

  Mword const flush_rights = SPACE::xlate_flush(rights);
  bool const full_flush = SPACE::is_full_flush(rights);
  bool need_tlb_flush = false;
  bool need_xcpu_tlb_flush = false;
  // iterate over all pages in "space"'s page table that are mapped
  // into the specified region
  for (Vaddr address(start);
       address < end && address < map_max;
       address = page_address + phys_size)
    {
      // for amd64 mem_spaces this skips the hole in the address space
      address = SPACE::canonize(address);
      bool have_page;

      {
        Size ps;
        have_page = space->v_fabricate(address, &phys, &ps);
        phys_size = ps;
      }

      page_address = address.trunc(phys_size);

      // phys_size and page_address have now been set up, allowing the
      // use of continue (which evaluates the for-loop's iteration
      // expression involving these two variables).
      if (! have_page)
        continue;

      if (me_too)
        {
          assert (address == page_address
                  || phys_size == Size(SPACE::Map_superpage_size));

          // Rewind flush address to page address.  We always flush
          // the whole page, even if it is larger than the specified
          // flush area.
          address = page_address;
          if (end < address + phys_size)
            end = address + phys_size;
        }
      // all pages shall be handled by our mapping data base
      assert (mapdb->valid_address(phys));

      Mapping *mapping;
      Frame mapdb_frame;

      if (! mapdb->lookup(space_id, page_address, phys,
                          &mapping, &mapdb_frame))
        // someone else unmapped faster
        continue;
      Mword page_rights = 0;

      // Delete from this address space
      if (me_too)
        {
          page_rights |=
            space->v_delete(address, phys_size, flush_rights);

          need_tlb_flush = true;
          need_xcpu_tlb_flush = true;
        }
      // now delete from the other address spaces
      for (Iterator m(mapdb_frame, mapping, address, end);
           m;
           ++m)
        {
          page_rights |= Mu::v_delete<SPACE>(m, flush_rights);
          need_xcpu_tlb_flush = true;
        }
      flushed_rights |= page_rights;

      // Store access attributes for later retrieval
      save_access_attribs(mapdb, mapdb_frame, mapping,
                          space, page_rights, page_address, phys, phys_size,
                          me_too);

      if (full_flush)
        mapdb->flush(mapdb_frame, mapping, mask, address, end);

      if (full_flush)
        Map_traits<SPACE>::free_object(phys, reap_list);

      mapdb->free(mapdb_frame);
    }
  if (need_tlb_flush)
    space->tlb_flush();

  if (SPACE::Need_xcpu_tlb_flush && need_xcpu_tlb_flush)
    Context::xcpu_tlb_flush(true, space, 0);

  return SPACE::xlate_flush_result(flushed_rights);
}
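
/* Illustrative summary of the code above: a full-rights unmap of one
   page (1) flushes the PTE here and in all child spaces, (2) saves the
   accumulated access bits via save_access_attribs(), (3) removes the
   mapping-database subtree with mapdb->flush(), and (4) via
   free_object() may queue the backing kernel object on the reap list
   once its last mapping is gone. */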
//----------------------------------------------------------------------------
IMPLEMENTATION [!io || ux]:

// Empty dummy functions when I/O protection is disabled
inline
L4_error
io_map(Space *, L4_fpage const &, Space *, L4_fpage const &, L4_msg_item)
{
  return L4_error::None;
}

inline
Mword
io_fpage_unmap(Space * /*space*/, L4_fpage const &/*fp*/, L4_map_mask)
{
  return 0;
}