// kernel/fiasco/src/kern/map_util.cpp
INTERFACE:

#include "assert_opt.h"
#include "l4_types.h"
#include "space.h"

class Mapdb;

namespace Mu {
template<typename SPACE>
struct Virt_addr { typedef Page_number Type; };

template<>
struct Virt_addr<Obj_space> { typedef Obj_space::Addr Type; };


template< typename SPACE, typename M >
inline
Mword v_delete(M &m, Mword flush_rights, bool full_flush)
{
  SPACE* child_space = m->space();
  assert_opt (child_space);
  Mword res = child_space->v_delete(m.page(), m.size(), flush_rights);
  (void) full_flush;
  assert_kdb (full_flush != child_space->v_lookup(m.page()));
  return res;
}


}

template< typename SPACE >
class Map_traits
{
public:
  typedef Page_number Addr;
  typedef Page_count Size;

  static Addr get_addr(L4_fpage const &fp);
  static void constraint(Addr &snd_addr, Size &snd_size,
                         Addr &rcv_addr, Size const &rcv_size,
                         Addr const &hot_spot);
  static bool match(L4_fpage const &from, L4_fpage const &to);
  static bool free_object(typename SPACE::Phys_addr o,
                          typename SPACE::Reap_list **reap_list);
};


class Reap_list
{
private:
  Kobject *_h;
  Kobject **_t;

public:
  Reap_list() : _h(0), _t(&_h) {}
  Kobject ***list() { return &_t; }
};

namespace Mu {
template<>
inline
Mword v_delete<Obj_space>(Kobject_mapdb::Iterator &m, Mword flush_rights, bool /*full_flush*/)
{
  Obj_space::Entry *c = static_cast<Obj_space::Entry*>(*m);

  if (c->valid())
    {
      if (flush_rights & L4_fpage::R)
        c->invalidate();
      else
        c->del_rights(flush_rights & L4_fpage::WX);
    }
  return 0;
}
}
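
// Illustrative sketch (not part of the original source): the Obj_space
// specialisation above never deletes capability entries wholesale.  Only a
// flush that includes the R right invalidates the entry; flushing W and/or
// X merely downgrades it in place:
//
//   Mu::v_delete<Obj_space>(m, L4_fpage::W, false);  // entry stays, loses W
//   Mu::v_delete<Obj_space>(m, L4_fpage::R, false);  // entry invalidated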

//------------------------------------------------------------------------
IMPLEMENTATION:

#include <cassert>

#include "config.h"
#include "context.h"
#include "kobject.h"
#include "paging.h"
#include "warn.h"


IMPLEMENT template<typename SPACE>
inline
bool
Map_traits<SPACE>::match(L4_fpage const &, L4_fpage const &)
{ return false; }

IMPLEMENT template<typename SPACE>
inline
bool
Map_traits<SPACE>::free_object(typename SPACE::Phys_addr,
                               typename SPACE::Reap_list **)
{ return false; }


PUBLIC template< typename SPACE >
static inline
void
Map_traits<SPACE>::attribs(L4_msg_item /*control*/, L4_fpage const &/*fp*/,
                           unsigned long *del_attr, unsigned long *set_attr)
{ *del_attr = 0; *set_attr = 0; }

PUBLIC template< typename SPACE >
static inline
unsigned long
Map_traits<SPACE>::apply_attribs(unsigned long attribs,
                                 typename SPACE::Phys_addr &,
                                 unsigned long set_attr, unsigned long del_attr)
{ return (attribs & ~del_attr) | set_attr; }

PRIVATE template<typename SPACE>
static inline
void
Map_traits<SPACE>::identity_constraint(Addr &snd_addr, Size &snd_size,
                                       Addr rcv_addr, Size rcv_size)
{
  if (rcv_addr > snd_addr)
    {
      if (rcv_addr - snd_addr < snd_size)
        snd_size -= rcv_addr - snd_addr;
      else
        snd_size = Size(0);
      snd_addr = rcv_addr;
    }

  if (snd_size > rcv_size)
    snd_size = rcv_size;
}
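
// Worked example (illustrative): for identity-mapped spaces such as the
// I/O space, identity_constraint() only clips, it never relocates.  With
// snd_addr = 0x2000, snd_size = 0x4000 and a receive window at
// rcv_addr = 0x3000 with rcv_size = 0x2000, the send range is first cut
// at the front (snd_addr = 0x3000, snd_size = 0x3000) and then limited to
// the window (snd_size = 0x2000).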

PRIVATE template<typename SPACE>
static inline
void
Map_traits<SPACE>::free_constraint(Addr &snd_addr, Size &snd_size,
                                   Addr &rcv_addr, Size rcv_size,
                                   Addr const &hot_spot)
{
  if (rcv_size >= snd_size)
    rcv_addr += hot_spot.offset(rcv_size).trunc(snd_size);
  else
    {
      snd_addr += hot_spot.offset(snd_size).trunc(rcv_size);
      snd_size = rcv_size;
      // reduce size of address range
    }
}
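
// Worked example (illustrative): in free_constraint() the hot spot picks
// where the smaller range lands inside the larger one.  With
// rcv_size = 0x10000, snd_size = 0x1000 and hot_spot = 0x5400,
// hot_spot.offset(rcv_size).trunc(snd_size) yields 0x5000, so the single
// send page is placed at rcv_addr + 0x5000.  If the send range is the
// larger one, the hot spot symmetrically selects which of its parts fits
// into the receive window.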

IMPLEMENT template<typename SPACE>
inline
void
Map_traits<SPACE>::constraint(Addr &snd_addr, Size &snd_size,
                              Addr &rcv_addr, Size const &rcv_size,
                              Addr const &hot_spot)
{
  if (SPACE::Identity_map)
    identity_constraint(snd_addr, snd_size, rcv_addr, rcv_size);
  else
    free_constraint(snd_addr, snd_size, rcv_addr, rcv_size, hot_spot);
}



//-------------------------------------------------------------------------
IMPLEMENTATION [io]:

IMPLEMENT template<>
inline
bool
Map_traits<Io_space>::match(L4_fpage const &from, L4_fpage const &to)
{ return from.is_iopage() && (to.is_iopage() || to.is_all_spaces()); }

IMPLEMENT template<>
inline
Map_traits<Io_space>::Addr
Map_traits<Io_space>::get_addr(L4_fpage const &fp)
{ return Addr(fp.io_address()); }



//-------------------------------------------------------------------------
IMPLEMENTATION:

IMPLEMENT template<>
inline
bool
Map_traits<Mem_space>::match(L4_fpage const &from, L4_fpage const &to)
{
  return from.is_mempage() && (to.is_all_spaces() || to.is_mempage());
}

IMPLEMENT template<>
inline
Map_traits<Mem_space>::Addr
Map_traits<Mem_space>::get_addr(L4_fpage const &fp)
{ return Addr(fp.mem_address()); }

IMPLEMENT template<>
inline
void
Map_traits<Mem_space>::attribs(L4_msg_item control, L4_fpage const &fp,
    unsigned long *del_attr, unsigned long *set_attr)
{
  *del_attr = (fp.rights() & L4_fpage::W) ? 0 : Mem_space::Page_writable;
  short cache = control.attr() & 0x70;

  if (cache & L4_msg_item::Caching_opt)
    {
      *del_attr |= Page::Cache_mask;

      if (cache == L4_msg_item::Cached)
        *set_attr = Page::CACHEABLE;
      else if (cache == L4_msg_item::Buffered)
        *set_attr = Page::BUFFERED;
      else
        *set_attr = Page::NONCACHEABLE;
    }
  else
    *set_attr = 0;
}
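
// Example (illustrative): a map item that has a caching option set but
// names neither Cached nor Buffered results in
//   *del_attr == Page::Cache_mask (plus Mem_space::Page_writable if the
//   fpage lacks the W right) and *set_attr == Page::NONCACHEABLE,
// i.e. the receiver gets an uncached -- and possibly read-only -- view
// of memory that the sender may still have mapped cacheable.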


IMPLEMENT template<>
inline
bool
Map_traits<Obj_space>::match(L4_fpage const &from, L4_fpage const &to)
{ return from.is_objpage() && (to.is_objpage() || to.is_all_spaces()); }


IMPLEMENT template<>
inline
Map_traits<Obj_space>::Addr
Map_traits<Obj_space>::get_addr(L4_fpage const &fp)
{ return Addr(fp.obj_index()); }

IMPLEMENT template<>
inline
bool
Map_traits<Obj_space>::free_object(Obj_space::Phys_addr o,
                                   Obj_space::Reap_list **reap_list)
{
  if (o->map_root()->no_mappings())
    {
      o->initiate_deletion(reap_list);
      return true;
    }

  return false;
}

IMPLEMENT template<>
inline
void
Map_traits<Obj_space>::attribs(L4_msg_item control, L4_fpage const &fp,
    unsigned long *del_attr, unsigned long *set_attr)
{
  *set_attr = 0;
  *del_attr = (~(fp.rights() | (L4_msg_item::C_weak_ref ^ control.attr())));
}

IMPLEMENT template<>
static inline
unsigned long
Map_traits<Obj_space>::apply_attribs(unsigned long attribs,
                                     Obj_space::Phys_addr &a,
                                     unsigned long set_attr,
                                     unsigned long del_attr)
{
  if (attribs & del_attr & L4_msg_item::C_obj_specific_rights)
    a = a->downgrade(del_attr);

  return (attribs & ~del_attr) | set_attr;
}

/** Flexpage mapping.
    Divert to mem_map (for memory fpages), io_map (for I/O fpages) or
    obj_map (for object fpages).
    @param from source address space
    @param fp_from flexpage descriptor for virtual-address space range
        in source address space
    @param to destination address space
    @param fp_to flexpage descriptor for virtual-address space range
        in destination address space
    @param control message item carrying the map/grant flag and the
        sender-specified hot spot into the destination flexpage
    @param r reap list collecting objects that lose their last mapping
    @return IPC error code that describes the status of the operation
*/
// Don't inline -- it eats too much stack.
// inline NEEDS ["config.h", io_map]
L4_error
fpage_map(Space *from, L4_fpage fp_from, Space *to,
          L4_fpage fp_to, L4_msg_item control, Reap_list *r)
{
  if (Map_traits<Mem_space>::match(fp_from, fp_to))
    return mem_map(from, fp_from, to, fp_to, control);

#ifdef CONFIG_PF_PC
  if (Map_traits<Io_space>::match(fp_from, fp_to))
    return io_map(from, fp_from, to, fp_to, control);
#endif

  if (Map_traits<Obj_space>::match(fp_from, fp_to))
    return obj_map(from, fp_from, to, fp_to, control, r->list());

  return L4_error::None;
}
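
// Usage sketch (illustrative; names of the spaces and fpages are assumed,
// not taken from this file): the IPC path funnels every map item through
// fpage_map():
//
//   Reap_list rl;
//   L4_error e = fpage_map(snd_space, fp_from, rcv_space, fp_to,
//                          control, &rl);
//   if (!e.ok())
//     { /* report the map error to the sender */ }
//
// Objects that lost their last mapping through overmapping are collected
// in `rl` and destroyed when it goes out of scope (see Reap_list below).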

/** Flexpage unmapping.
    Divert to mem_fpage_unmap (for memory fpages),
    io_fpage_unmap (for I/O fpages) or obj_fpage_unmap (for object fpages).
    @param space address space that should be flushed
    @param fp    flexpage descriptor of address-space range that should
                 be flushed; its rights select which access privileges
                 to remove
    @param mask  unmap mask; if mask.self_unmap() is false, only flush
                 recursive mappings, if true, additionally flush the
                 region in the given address space
    @param rl    reap list collecting objects that lose their last mapping
    @return combined (bit-ORed) access status of unmapped physical pages
*/
// Don't inline -- it eats too much stack.
// inline NEEDS ["config.h", io_fpage_unmap]
unsigned
fpage_unmap(Space *space, L4_fpage fp, L4_map_mask mask, Kobject ***rl)
{
  unsigned ret = 0;

  if (fp.is_iopage() || fp.is_all_spaces())
    ret |= io_fpage_unmap(space, fp, mask);

  if (fp.is_objpage() || fp.is_all_spaces())
    ret |= obj_fpage_unmap(space, fp, mask, rl);

  if (fp.is_mempage() || fp.is_all_spaces())
    ret |= mem_fpage_unmap(space, fp, mask);

  return ret;
}
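
// Usage sketch (illustrative): unmapping follows the same pattern, with
// the raw reap-list tail passed via Reap_list::list():
//
//   Reap_list rl;
//   unsigned rights = fpage_unmap(space, fp, L4_map_mask::full(),
//                                 rl.list());
//   rl.del();   // reap objects that lost their last mapping
//
// Reap_list::del() below works in two phases: destroy all collected
// objects, wait for an RCU grace period, then drop the last references
// and delete the kernel objects.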

PUBLIC
void
Reap_list::del()
{
  if (EXPECT_TRUE(!_h))
    return;

  for (Kobject *reap = _h; reap; reap = reap->_next_to_reap)
    reap->destroy(list());

  current()->rcu_wait();

  for (Kobject *reap = _h; reap;)
    {
      Kobject *d = reap;
      reap = reap->_next_to_reap;
      if (d->put())
        delete d;
    }

  _h = 0;
  _t = &_h;
}

PUBLIC inline
Reap_list::~Reap_list()
{ del(); }

//////////////////////////////////////////////////////////////////////
//
// Utility functions for all address-space types
//

#include "mapdb.h"

inline
template <typename SPACE, typename MAPDB>
L4_error
map(MAPDB* mapdb,
    SPACE* from, Space *from_id,
    Page_number _snd_addr,
    Page_count snd_size,
    SPACE* to, Space *to_id,
    Page_number _rcv_addr,
    bool grant, unsigned attrib_add, unsigned attrib_del,
    typename SPACE::Reap_list **reap_list = 0)
{
  enum
  {
    PAGE_SIZE = SPACE::Map_page_size,
    PAGE_MASK = ~(PAGE_SIZE - 1),
    SUPERPAGE_SIZE = SPACE::Map_superpage_size,
    SUPERPAGE_MASK = ~((SUPERPAGE_SIZE - 1) >> SPACE::Page_shift)
  };

  typedef typename SPACE::Size Size;
  typedef typename MAPDB::Mapping Mapping;
  typedef typename MAPDB::Frame Frame;
  typedef typename Mu::Virt_addr<SPACE>::Type Vaddr;
  typedef Map_traits<SPACE> Mt;

  L4_error condition = L4_error::None;

  // FIXME: make this debugging code optional
  bool no_page_mapped = true;

  Vaddr rcv_addr(_rcv_addr);
  Vaddr snd_addr(_snd_addr);
  const Vaddr rcv_start = rcv_addr;
  const Page_count rcv_size = snd_size;

  // We now loop through all the pages we want to send from the
  // sender's address space, looking up appropriate parent mappings in
  // the mapping data base, and entering a child mapping and a page
  // table entry for the receiver.

  // Special care is taken for 4MB page table entries we find in the
  // sender's address space: If what we will create in the receiver is
  // not a 4MB-mapping, too, we have to find the correct parent
  // mapping for the new mapping database entry: This is the sigma0
  // mapping for all addresses != the 4MB page base address.

  // When overmapping an existing page, flush the interfering
  // physical page in the receiver, even if it is larger than the
  // mapped page.
  // verify sender and receiver virtual addresses are still within
  // bounds; if not, bail out.  Sigma0 may send from any address (even
  // from an out-of-bound one)
  Page_count size;
  bool need_tlb_flush = false;
  bool need_xcpu_tlb_flush = false;
  Page_number const to_max = to->map_max_address();
  Page_number const from_max = from->map_max_address();
  Size const from_superpage_size(from->superpage_size());
  bool const has_superpages = to->has_superpages();

  for (;
       snd_size                               // pages left for sending?
       && rcv_addr < to_max
       && snd_addr < from_max;

       rcv_addr += size,
       snd_addr += size,
       snd_size -= size)
    {
      // Reset the increment size to one page.
      size = Size(PAGE_SIZE);

      // First, look up the page table entries in the sender and
      // receiver address spaces.

      // Sender lookup.
      // make gcc happy, initialized later anyway
      typename SPACE::Phys_addr s_phys;
      Size s_size(0);
      unsigned s_attribs = 0;

      // Sigma0 special case: Sigma0 doesn't need to have a
      // fully-constructed page table, and it can fabricate mappings
      // for all physical addresses.
      if (EXPECT_FALSE(! from->v_fabricate(snd_addr, &s_phys,
                                           &s_size, &s_attribs)))
        continue;

      // We have a mapping in the sender's address space.
      // FIXME: make this debugging code optional
      no_page_mapped = false;

      // Receiver lookup.

      // The "may be used uninitialized" warning for this variable is bogus:
      // the v_lookup function must initialize the value if it returns true.
      typename SPACE::Phys_addr r_phys;
      Size r_size;
      unsigned r_attribs;

      // Compute attributes for to-be-inserted frame
      typename SPACE::Phys_addr i_phys = s_phys;
      Size i_size = s_size;
      bool const rcv_page_mapped
        = to->v_lookup(rcv_addr, &r_phys, &r_size, &r_attribs);
      // See if we have to degrade to non-superpage mappings
      if (has_superpages && i_size == from_superpage_size)
        {
          if (i_size > snd_size
              // want to send less than a superpage?
              || i_size > r_size         // not enough space for superpage map?
              || snd_addr.offset(Size(SUPERPAGE_SIZE)) // snd page not aligned?
              || rcv_addr.offset(Size(SUPERPAGE_SIZE)) // rcv page not aligned?
              || (rcv_addr + from_superpage_size > rcv_start + rcv_size))
                                                      // rcv area too small?
            {
              // We map a 4K mapping from a 4MB mapping
              i_size = Size(PAGE_SIZE);

              if (Size super_offset = snd_addr.offset(Size(SUPERPAGE_SIZE)))
                {
                  // Just use OR here because i_phys may already contain
                  // the offset. (As is the case on ARM.)
                  i_phys = SPACE::subpage_address(i_phys, super_offset);
                }

              if (grant)
                {
                  WARN("XXX Can't GRANT page from superpage (%p: " L4_PTR_FMT
                       " -> %p: " L4_PTR_FMT "), demoting to MAP\n",
                       from_id, snd_addr.value(), to_id, rcv_addr.value());
                  grant = 0;
                }
            }
        }
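
      // Example (illustrative): when sending a single page out of a sender
      // superpage at super-offset 0x2000, i_size is demoted to PAGE_SIZE
      // and i_phys becomes subpage_address(superpage_base, 0x2000); a
      // GRANT is demoted to MAP because the sender's superpage cannot be
      // split.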

      // Also, look up mapping database entry.  Depending on whether
      // we can overmap, either look up the destination mapping first
      // (and compute the sender mapping from it) or look up the
      // sender mapping directly.
      Mapping* sender_mapping = 0;
      // mapdb_frame will be initialized by the mapdb lookup function when
      // it returns true, so don't care about "may be used uninitialized..."
      Frame mapdb_frame;
      bool doing_upgrade = false;

      if (rcv_page_mapped)
        {
          // We have something mapped.

          // Check if we can upgrade mapping.  Otherwise, flush target
          // mapping.
          if (! grant                         // Grant currently always flushes
              && r_size <= i_size             // Rcv frame in snd frame
              && SPACE::page_address(r_phys, i_size) == i_phys
              && (sender_mapping = mapdb->check_for_upgrade(r_phys, from_id, snd_addr, to_id, rcv_addr, &mapdb_frame)))
            doing_upgrade = true;

          if (! sender_mapping) // Need flush
            {
              unmap(mapdb, to, to_id, rcv_addr.trunc(r_size), r_size,
                    L4_fpage::RWX, L4_map_mask::full(), reap_list);
            }
        }

      if (! sender_mapping && mapdb->valid_address(s_phys))
        {
          if (EXPECT_FALSE(! mapdb->lookup(from_id,
                                           snd_addr.trunc(s_size), s_phys,
                                           &sender_mapping, &mapdb_frame)))
            continue;           // someone deleted this mapping in the meantime
        }

      // From here on, mapdb_frame is always initialized, so ignore the
      // warning in grant / insert.

      // At this point, we have a lookup for the sender frame (s_phys,
      // s_size, s_attribs), the max. size of the receiver frame
      // (r_phys), the sender_mapping, and whether a receiver mapping
      // already exists (doing_upgrade).

      unsigned i_attribs
        = Mt::apply_attribs(s_attribs, i_phys, attrib_add, attrib_del);

      // Loop increment is size of insertion
      size = i_size;

      // Do the actual insertion.
      typename SPACE::Status status
        = to->v_insert(i_phys, rcv_addr, i_size, i_attribs);

      switch (status)
        {
        case SPACE::Insert_warn_exists:
        case SPACE::Insert_warn_attrib_upgrade:
        case SPACE::Insert_ok:

          assert_kdb (mapdb->valid_address(s_phys) || status == SPACE::Insert_ok);
          // Never doing upgrades for mapdb-unmanaged memory

          if (grant)
            {
              if (mapdb->valid_address(s_phys))
                if (EXPECT_FALSE(!mapdb->grant(mapdb_frame, sender_mapping,
                                               to_id, rcv_addr)))
                  {
                    // Error -- remove mapping again.
                    to->v_delete(rcv_addr, i_size);

                    // may fail due to quota limits
                    condition = L4_error::Map_failed;
                    break;
                  }

              from->v_delete(snd_addr.trunc(s_size), s_size);
              need_tlb_flush = true;
            }
          else if (status == SPACE::Insert_ok)
            {
              assert_kdb (!doing_upgrade);

              if (mapdb->valid_address(s_phys)
                  && !mapdb->insert(mapdb_frame, sender_mapping,
                                    to_id, rcv_addr,
                                    i_phys, i_size))
                {
                  // Error -- remove mapping again.
                  to->v_delete(rcv_addr, i_size);

                  // XXX This is not race-free as the mapping could have
                  // been used in the meantime, but we do not care.
                  condition = L4_error::Map_failed;
                  break;
                }
            }

          if (SPACE::Need_xcpu_tlb_flush && SPACE::Need_insert_tlb_flush)
            need_xcpu_tlb_flush = true;

          break;

        case SPACE::Insert_err_nomem:
          condition = L4_error::Map_failed;
          break;

        case SPACE::Insert_err_exists:
          WARN("map (%s) skipping area (%p/%lx): " L4_PTR_FMT
               " -> %p/%lx: " L4_PTR_FMT "(%lx)", SPACE::name,
               from_id, Kobject_dbg::pointer_to_id(from_id), snd_addr.value(),
               to_id, Kobject_dbg::pointer_to_id(to_id), rcv_addr.value(), i_size.value());
          // Do not flag an error here -- because according to L4
          // semantics, it isn't.
          break;
        }

      if (sender_mapping)
        mapdb->free(mapdb_frame);

      if (!condition.ok())
        break;
    }

  if (need_tlb_flush)
    from->tlb_flush();

  if (SPACE::Need_xcpu_tlb_flush && need_xcpu_tlb_flush)
    Context::xcpu_tlb_flush(false, to, from);

  // FIXME: make this debugging code optional
  if (EXPECT_FALSE(no_page_mapped))
    {
      WARN("nothing mapped: (%s) from [%p/%lx]: " L4_PTR_FMT
           " size: " L4_PTR_FMT " to [%p/%lx]\n", SPACE::name,
           from_id, Kobject_dbg::pointer_to_id(from_id), snd_addr.value(), rcv_size.value(),
           to_id, Kobject_dbg::pointer_to_id(to_id));
    }

  return condition;
}


template <typename SPACE, typename MAPDB>
unsigned
unmap(MAPDB* mapdb, SPACE* space, Space *space_id,
      Page_number start, Page_count size, unsigned char rights,
      L4_map_mask mask, typename SPACE::Reap_list **reap_list)
{
  typedef typename SPACE::Size Size;
  typedef typename MAPDB::Mapping Mapping;
  typedef typename MAPDB::Iterator Iterator;
  typedef typename MAPDB::Frame Frame;
  typedef typename Mu::Virt_addr<SPACE>::Type Vaddr;

  bool me_too = mask.self_unmap();

  Mword flushed_rights = 0;
  Page_number end = start + size;
  Page_number const map_max = space->map_max_address();

  // make gcc happy, initialized later anyway
  typename SPACE::Phys_addr phys;
  Page_count phys_size;
  Vaddr page_address;

  Mword const flush_rights = SPACE::xlate_flush(rights);
  bool const full_flush = SPACE::is_full_flush(rights);
  bool need_tlb_flush = false;
  bool need_xcpu_tlb_flush = false;

  // iterate over all pages in "space"'s page table that are mapped
  // into the specified region
  for (Vaddr address(start);
       address < end && address < map_max;
       address = page_address + phys_size)
    {
      // for amd64 mem_spaces this will skip the hole in the address space
      address = SPACE::canonize(address);

      bool have_page;

        {
          Size ps;
          have_page = space->v_fabricate(address, &phys, &ps);
          phys_size = ps;
        }

      page_address = address.trunc(phys_size);

      // phys_size and page_address have now been set up, allowing the
      // use of continue (which evaluates the for-loop's iteration
      // expression involving these two variables).

      if (! have_page)
        continue;

      if (me_too)
        {
          assert_kdb (address == page_address
                  || phys_size == Size(SPACE::Map_superpage_size));

          // Rewind flush address to page address.  We always flush
          // the whole page, even if it is larger than the specified
          // flush area.
          address = page_address;
          if (end < address + phys_size)
            end = address + phys_size;
        }

      // all pages shall be handled by our mapping data base
      assert_kdb (mapdb->valid_address(phys));

      Mapping *mapping;
      Frame mapdb_frame;

      if (! mapdb->lookup(space_id, page_address, phys,
                          &mapping, &mapdb_frame))
        // someone else unmapped faster
        continue;               // skip

      Mword page_rights = 0;

      // Delete from this address space
      if (me_too)
        {
          page_rights |=
            space->v_delete(address, phys_size, flush_rights);

          // assert_kdb (full_flush != space->v_lookup(address));
          need_tlb_flush = true;
          need_xcpu_tlb_flush = true;
        }

      // now delete from the other address spaces
      for (Iterator m(mapdb_frame, mapping, address, end);
           m;
           ++m)
        {
          page_rights |= Mu::v_delete<SPACE>(m, flush_rights, full_flush);
          need_xcpu_tlb_flush = true;
        }

      flushed_rights |= page_rights;

      // Store access attributes for later retrieval
      save_access_attribs(mapdb, mapdb_frame, mapping,
                          space, page_rights, page_address, phys, phys_size,
                          me_too);

      if (full_flush)
        mapdb->flush(mapdb_frame, mapping, mask, address, end);

      if (full_flush)
        Map_traits<SPACE>::free_object(phys, reap_list);

      mapdb->free(mapdb_frame);
    }

  if (need_tlb_flush)
    space->tlb_flush();

  if (SPACE::Need_xcpu_tlb_flush && need_xcpu_tlb_flush)
    Context::xcpu_tlb_flush(true, space, 0);

  return SPACE::xlate_flush_result(flushed_rights);
}
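
// Note (illustrative): map() above uses this unmap() to flush an
// interfering receiver mapping before overmapping:
//
//   unmap(mapdb, to, to_id, rcv_addr.trunc(r_size), r_size,
//         L4_fpage::RWX, L4_map_mask::full(), reap_list);
//
// i.e. all rights on the whole page are revoked, including all recursive
// child mappings recorded in the mapping database.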

//----------------------------------------------------------------------------
IMPLEMENTATION[!io || ux]:

// Empty dummy functions when I/O protection is disabled
inline
void init_mapdb_io(Space *)
{}

inline
L4_error
io_map(Space *, L4_fpage const &, Space *, L4_fpage const &, L4_msg_item)
{
  return L4_error::None;
}

inline
unsigned
io_fpage_unmap(Space * /*space*/, L4_fpage const &/*fp*/, L4_map_mask)
{
  return 0;
}