INTERFACE:

#include "l4_types.h"
#include "space.h"

class Mapdb;

namespace Mu {
template<typename SPACE>
struct Virt_addr { typedef Page_number Type; };

template<>
struct Virt_addr<Obj_space> { typedef Obj_space::Addr Type; };


template< typename SPACE, typename M >
inline
Mword v_delete(M &m, Mword flush_rights)
{
  SPACE* child_space = 0;
  check (m->space()->lookup_space(&child_space));

  return child_space->v_delete(m.page(), m.size(), flush_rights);
}


}

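// Map_traits is a small policy class, specialized below for each
// address-space type (Mem_space, Io_space, Obj_space).  It answers how
// a flexpage of that type is addressed (get_addr), how send and
// receive windows are constrained against each other (constraint),
// whether a send/receive flexpage pair concerns this space type at all
// (match), and how page attributes are applied (attribs/apply_attribs).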
template< typename SPACE >
class Map_traits
{
public:
  typedef Page_number Addr;
  typedef Page_count Size;

  static Addr get_addr(L4_fpage const &fp);
  static void constraint(Addr &snd_addr, Size &snd_size,
                         Addr &rcv_addr, Size const &rcv_size,
                         Addr const &hot_spot);
  static bool match(L4_fpage const &from, L4_fpage const &to);
  static bool free_object(typename SPACE::Phys_addr o,
                          typename SPACE::Reap_list **reap_list);
};


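// Reap_list collects kernel objects that lose their last mapping while
// a map or unmap operation is in progress.  It is a singly linked list
// with a tail pointer; Reap_list::del() (below) destroys the collected
// objects after an RCU grace period, and the destructor calls del().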
class Reap_list
{
private:
  Kobject *_h;
  Kobject **_t;

public:
  Reap_list() : _h(0), _t(&_h) {}
  Kobject ***list() { return &_t; }
};

namespace Mu {
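// Specialization for capabilities: object-space entries are reached
// directly through the mapping-database iterator.  Revoking the R
// right removes the capability entirely; otherwise only the W and X
// bits are taken away.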
template<>
inline
Mword v_delete<Obj_space>(Kobject_mapdb::Iterator &m, Mword flush_rights)
{
  Obj_space::Entry *c = static_cast<Obj_space::Entry*>(*m);

  if (c->valid())
    {
      if (flush_rights & L4_fpage::R)
        c->invalidate();
      else
        c->del_rights(flush_rights & L4_fpage::WX);
    }
  return 0;
}
}

//------------------------------------------------------------------------
IMPLEMENTATION:

#include <cassert>

#include "config.h"
#include "context.h"
#include "kobject.h"
#include "paging.h"
#include "warn.h"


IMPLEMENT template<typename SPACE>
inline
bool
Map_traits<SPACE>::match(L4_fpage const &, L4_fpage const &)
{ return false; }

IMPLEMENT template<typename SPACE>
inline
bool
Map_traits<SPACE>::free_object(typename SPACE::Phys_addr,
                               typename SPACE::Reap_list **)
{ return false; }


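// Default attribute handling for address-space types without extra
// page attributes: nothing to delete, nothing to set.  Mem_space and
// Obj_space provide specialized versions further below.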
PUBLIC template< typename SPACE >
static inline
void
Map_traits<SPACE>::attribs(L4_msg_item /*control*/, L4_fpage const &/*fp*/,
                           unsigned long *del_attr, unsigned long *set_attr)
{ *del_attr = 0; *set_attr = 0; }

PUBLIC template< typename SPACE >
static inline
unsigned long
Map_traits<SPACE>::apply_attribs(unsigned long attribs,
                                 typename SPACE::Phys_addr &,
                                 unsigned long set_attr, unsigned long del_attr)
{ return (attribs & ~del_attr) | set_attr; }

PRIVATE template<typename SPACE>
static inline
void
Map_traits<SPACE>::identity_constraint(Addr &snd_addr, Size &snd_size,
                                       Addr rcv_addr, Size rcv_size)
{
  if (rcv_addr > snd_addr)
    {
      if (rcv_addr - snd_addr < snd_size)
        snd_size -= rcv_addr - snd_addr;
      else
        snd_size = Size(0);
      snd_addr = rcv_addr;
    }

  if (snd_size > rcv_size)
    snd_size = rcv_size;
}
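// identity_constraint: for spaces that map 1:1 (SPACE::Identity_map),
// the receive window cannot relocate the mapping, so the send range is
// simply clipped to the part that overlaps the receive range.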

PRIVATE template<typename SPACE>
static inline
void
Map_traits<SPACE>::free_constraint(Addr &snd_addr, Size &snd_size,
                                   Addr &rcv_addr, Size rcv_size,
                                   Addr const &hot_spot)
{
  if (rcv_size >= snd_size)
    rcv_addr += hot_spot.offset(rcv_size).trunc(snd_size);
  else
    {
      snd_addr += hot_spot.offset(snd_size).trunc(rcv_size);
      snd_size = rcv_size;
      // reduce size of address range
    }
}
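// Illustration (made-up numbers, assuming offset() yields the offset
// within an aligned region of the given size and trunc() rounds down
// to that alignment): with snd_size = 4 pages, rcv_size = 16 pages and
// hot_spot = 6, the receive window is larger, so rcv_addr advances by
// hot_spot.offset(16).trunc(4) = 4 pages -- the 4-page send window
// lands on the snd_size-aligned slot of the receive window selected by
// the hot spot.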

IMPLEMENT template<typename SPACE>
inline
void
Map_traits<SPACE>::constraint(Addr &snd_addr, Size &snd_size,
                              Addr &rcv_addr, Size const &rcv_size,
                              Addr const &hot_spot)
{
  if (SPACE::Identity_map)
    identity_constraint(snd_addr, snd_size, rcv_addr, rcv_size);
  else
    free_constraint(snd_addr, snd_size, rcv_addr, rcv_size, hot_spot);
}



//-------------------------------------------------------------------------
IMPLEMENTATION [io]:

IMPLEMENT template<>
inline
bool
Map_traits<Io_space>::match(L4_fpage const &from, L4_fpage const &to)
{ return from.is_iopage() && (to.is_iopage() || to.is_all_spaces()); }

IMPLEMENT template<>
inline
Map_traits<Io_space>::Addr
Map_traits<Io_space>::get_addr(L4_fpage const &fp)
{ return Addr(fp.io_address()); }



//-------------------------------------------------------------------------
IMPLEMENTATION:

IMPLEMENT template<>
inline
bool
Map_traits<Mem_space>::match(L4_fpage const &from, L4_fpage const &to)
{
  return from.is_mempage() && (to.is_all_spaces() || to.is_mempage());
}

IMPLEMENT template<>
inline
Map_traits<Mem_space>::Addr
Map_traits<Mem_space>::get_addr(L4_fpage const &fp)
{ return Addr(fp.mem_address()); }

IMPLEMENT template<>
inline
void
Map_traits<Mem_space>::attribs(L4_msg_item control, L4_fpage const &fp,
    unsigned long *del_attr, unsigned long *set_attr)
{
  *del_attr = (fp.rights() & L4_fpage::W) ? 0 : Mem_space::Page_writable;
  short cache = control.attr() & 0x70;

  if (cache & L4_msg_item::Caching_opt)
    {
      *del_attr |= Page::Cache_mask;

      if (cache == L4_msg_item::Cached)
        *set_attr = Page::CACHEABLE;
      else if (cache == L4_msg_item::Buffered)
        *set_attr = Page::BUFFERED;
      else
        *set_attr = Page::NONCACHEABLE;
    }
  else
    *set_attr = 0;
}


IMPLEMENT template<>
inline
bool
Map_traits<Obj_space>::match(L4_fpage const &from, L4_fpage const &to)
{ return from.is_objpage() && (to.is_objpage() || to.is_all_spaces()); }


IMPLEMENT template<>
inline
Map_traits<Obj_space>::Addr
Map_traits<Obj_space>::get_addr(L4_fpage const &fp)
{ return Addr(fp.obj_index()); }

IMPLEMENT template<>
inline
bool
Map_traits<Obj_space>::free_object(Obj_space::Phys_addr o,
                                   Obj_space::Reap_list **reap_list)
{
  if (o->map_root()->no_mappings())
    {
      o->initiate_deletion(reap_list);
      return true;
    }

  return false;
}
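// Once an object's mapping tree is empty, no capability to it is left
// anywhere, so it is queued on the reap list and destroyed later by
// Reap_list::del().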

IMPLEMENT template<>
inline
void
Map_traits<Obj_space>::attribs(L4_msg_item control, L4_fpage const &fp,
    unsigned long *del_attr, unsigned long *set_attr)
{
  *set_attr = 0;
  *del_attr = (~(fp.rights() | (L4_msg_item::C_weak_ref ^ control.attr())));
}

IMPLEMENT template<>
static inline
unsigned long
Map_traits<Obj_space>::apply_attribs(unsigned long attribs,
                                     Obj_space::Phys_addr &a,
                                     unsigned long set_attr,
                                     unsigned long del_attr)
{
  if (attribs & del_attr & L4_msg_item::C_obj_specific_rights)
    a = a->downgrade(del_attr);

  return (attribs & ~del_attr) | set_attr;
}


/** Flexpage mapping.
    Divert to mem_map (for memory fpages), io_map (for IO fpages) or
    obj_map (for object fpages).
    @param from source address space
    @param fp_from flexpage descriptor for virtual-address space range
        in source address space
    @param to destination address space
    @param fp_to flexpage descriptor for virtual-address space range
        in destination address space
    @param control message item of the sender; carries the
        sender-specified hot spot and mapping attributes such as grant
        and caching options
    @param r reap list collecting kernel objects whose last mapping is
        removed during the operation
    @pre the hot spot in control is page-aligned
    @return IPC error code that describes the status of the operation
*/
// Don't inline -- it eats too much stack.
// inline NEEDS ["config.h", io_map]
L4_error
fpage_map(Space *from, L4_fpage fp_from, Space *to,
          L4_fpage fp_to, L4_msg_item control, Reap_list *r)
{
  if (Map_traits<Mem_space>::match(fp_from, fp_to))
    return mem_map(from, fp_from, to, fp_to, control);

#ifdef CONFIG_IO_PROT
  if (Map_traits<Io_space>::match(fp_from, fp_to))
    return io_map(from, fp_from, to, fp_to, control);
#endif

  if (Map_traits<Obj_space>::match(fp_from, fp_to))
    return obj_map(from, fp_from, to, fp_to, control, r->list());

  return L4_error::None;
}
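// Typical call pattern (sketch only, not taken from this file): an IPC
// transfer path keeps a Reap_list on its stack, maps each flexpage item
// via fpage_map(), and lets the Reap_list destructor reap objects whose
// last capability was removed (e.g. by overmapping) along the way:
//
//   Reap_list rl;
//   L4_error err = fpage_map(sender_space, snd_item_fp, receiver_space,
//                            rcv_window_fp, snd_item_control, &rl);
//   if (!err.ok())
//     ; // report the map error to the sender
//   // ~Reap_list() runs del() and deletes unreferenced objects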

/** Flexpage unmapping.
    Divert to mem_fpage_unmap (for memory fpages), io_fpage_unmap (for
    IO fpages) or obj_fpage_unmap (for object fpages).
    @param space address space that should be flushed
    @param fp    flexpage descriptor of address-space range that should
                 be flushed; its access bits select which privileges to
                 remove
    @param mask  unmap mask; if the self-unmap bit is set, additionally
                 flush the region in the given address space, otherwise
                 only flush recursive mappings
    @param rl    reap list collecting kernel objects whose last mapping
                 is removed
    @return combined (bit-ORed) access status of unmapped physical pages
*/
// Don't inline -- it eats too much stack.
// inline NEEDS ["config.h", io_fpage_unmap]
unsigned
fpage_unmap(Space *space, L4_fpage fp, L4_map_mask mask, Kobject ***rl)
{
  unsigned ret = 0;

  if (Config::enable_io_protection && (fp.is_iopage() || fp.is_all_spaces()))
    ret |= io_fpage_unmap(space, fp, mask);

  if (fp.is_objpage() || fp.is_all_spaces())
    ret |= obj_fpage_unmap(space, fp, mask, rl);

  if (fp.is_mempage() || fp.is_all_spaces())
    ret |= mem_fpage_unmap(space, fp, mask);

  return ret;
}

PUBLIC
void
Reap_list::del()
{
  if (EXPECT_TRUE(!_h))
    return;

  for (Kobject *reap = _h; reap; reap = reap->_next_to_reap)
    reap->destroy(list());

  current()->rcu_wait();

  for (Kobject *reap = _h; reap;)
    {
      Kobject *d = reap;
      reap = reap->_next_to_reap;
      if (d->put())
        delete d;
    }

  _h = 0;
  _t = &_h;
}

PUBLIC inline
Reap_list::~Reap_list()
{ del(); }

//////////////////////////////////////////////////////////////////////
//
// Utility functions for all address-space types
//

#include "mapdb.h"

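/**
 * Generic mapping routine shared by all address-space types.
 * Transfers the region [_snd_addr, _snd_addr + snd_size) of "from" into
 * "to" at _rcv_addr, entering each new mapping into the mapping
 * database as a child of the corresponding sender mapping.  If grant is
 * set, the sender's own mapping is removed afterwards; attrib_add and
 * attrib_del adjust the page attributes of the inserted mappings.
 */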
inline
template <typename SPACE, typename MAPDB>
L4_error
map(MAPDB* mapdb,
    SPACE* from, Space *from_id,
    Page_number _snd_addr,
    Page_count snd_size,
    SPACE* to, Space *to_id,
    Page_number _rcv_addr,
    bool grant, unsigned attrib_add, unsigned attrib_del,
    typename SPACE::Reap_list **reap_list = 0)
{
  enum
  {
    PAGE_SIZE = SPACE::Map_page_size,
    PAGE_MASK = ~(PAGE_SIZE - 1),
    SUPERPAGE_SIZE = SPACE::Map_superpage_size,
    SUPERPAGE_MASK = ~((SUPERPAGE_SIZE - 1) >> SPACE::Page_shift)
  };

  typedef typename SPACE::Size Size;
  typedef typename MAPDB::Mapping Mapping;
  typedef typename MAPDB::Frame Frame;
  typedef typename Mu::Virt_addr<SPACE>::Type Vaddr;
  typedef Map_traits<SPACE> Mt;

  L4_error condition = L4_error::None;

  bool no_page_mapped = true;
  Vaddr rcv_addr(_rcv_addr);
  Vaddr snd_addr(_snd_addr);
  const Vaddr rcv_start = rcv_addr;
  const Page_count rcv_size = snd_size;
  // We now loop through all the pages we want to send from the
  // sender's address space, looking up appropriate parent mappings in
  // the mapping data base, and entering a child mapping and a page
  // table entry for the receiver.

  // Special care is taken for 4MB page table entries we find in the
  // sender's address space: If what we will create in the receiver is
  // not a 4MB-mapping, too, we have to find the correct parent
  // mapping for the new mapping database entry: This is the sigma0
  // mapping for all addresses != the 4MB page base address.

  // When overmapping an existing page, flush the interfering
  // physical page in the receiver, even if it is larger than the
  // mapped page.

  // verify sender and receiver virtual addresses are still within
  // bounds; if not, bail out.  Sigma0 may send from any address (even
  // from an out-of-bound one)
  Page_count size;
  bool need_tlb_flush = false;
  bool need_xcpu_tlb_flush = false;
  Page_number const to_max = to->map_max_address();
  Page_number const from_max = from->map_max_address();
  Size const from_superpage_size(from->superpage_size());
  bool const has_superpages = to->has_superpages();

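  // Each loop iteration transfers one frame: normally a single page,
  // or a whole superpage when both sides are suitably sized and
  // aligned.  "size" is set to the amount actually handled and
  // advances both the send and the receive address at the end of the
  // iteration.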
  for (;
       snd_size                               // pages left for sending?
       && rcv_addr < to_max
       && snd_addr < from_max;

       rcv_addr += size,
       snd_addr += size,
       snd_size -= size)
    {
      // Reset the increment size to one page.
      size = Size(PAGE_SIZE);

      // First, look up the page table entries in the sender and
      // receiver address spaces.

      // Sender lookup.
      // make gcc happy, initialized later anyway
      typename SPACE::Phys_addr s_phys;
      Size s_size(0);
      unsigned s_attribs = 0;

      // Sigma0 special case: Sigma0 doesn't need to have a
      // fully-constructed page table, and it can fabricate mappings
      // for all physical addresses.
      if (EXPECT_FALSE(! from->v_fabricate(snd_addr, &s_phys,
                                           &s_size, &s_attribs)))
        continue;

      // We have a mapping in the sender's address space.
      no_page_mapped = false;

      // Receiver lookup.
      typename SPACE::Phys_addr r_phys;
      Size r_size;
      unsigned r_attribs;

      // Compute attributes for to-be-inserted frame
      typename SPACE::Phys_addr i_phys = s_phys;
      Size i_size = s_size;
      bool const rcv_page_mapped
        = to->v_lookup(rcv_addr, &r_phys, &r_size, &r_attribs);
      // See if we have to degrade to non-superpage mappings
      if (has_superpages && i_size == from_superpage_size)
        {
          if (i_size > snd_size
              // want to send less than a superpage?
              || i_size > r_size         // not enough space for superpage map?
              || snd_addr.offset(Size(SUPERPAGE_SIZE)) // snd page not aligned?
              || rcv_addr.offset(Size(SUPERPAGE_SIZE)) // rcv page not aligned?
              || (rcv_addr + from_superpage_size > rcv_start + rcv_size))
                                                       // rcv area too small?
            {
              // We map a 4K mapping from a 4MB mapping
              i_size = Size(PAGE_SIZE);

              if (Size super_offset = snd_addr.offset(Size(SUPERPAGE_SIZE)))
                {
                  // Just use OR here because i_phys may already contain
                  // the offset. (As is on ARM)
                  i_phys = SPACE::subpage_address(i_phys, super_offset);
                }

              if (grant)
                {
                  WARN("XXX Can't GRANT page from superpage (%p: "L4_PTR_FMT
                       " -> %p: "L4_PTR_FMT"), demoting to MAP\n",
                       from_id, snd_addr.value(), to_id, rcv_addr.value());
                  grant = 0;
                }
            }
        }

      // Also, look up mapping database entry.  Depending on whether
      // we can overmap, either look up the destination mapping first
      // (and compute the sender mapping from it) or look up the
      // sender mapping directly.
      Mapping* sender_mapping = 0;
      Frame mapdb_frame;
      bool doing_upgrade = false;

      if (rcv_page_mapped)
        {
          // We have something mapped.

          // Check if we can upgrade mapping.  Otherwise, flush target
          // mapping.
          if (! grant                         // Grant currently always flushes
              && r_size <= i_size             // Rcv frame in snd frame
              && SPACE::page_address(r_phys, i_size) == i_phys
              && (sender_mapping = mapdb->check_for_upgrade(r_phys, from_id, snd_addr, to_id, rcv_addr, &mapdb_frame)))
            doing_upgrade = true;

          if (! sender_mapping) // Need flush
            {
              unmap(mapdb, to, to_id, rcv_addr.trunc(r_size), r_size,
                    L4_fpage::RWX, L4_map_mask::full(), reap_list);
            }
        }

      if (! sender_mapping && mapdb->valid_address(s_phys))
        {
          if (EXPECT_FALSE(! mapdb->lookup(from_id,
                                           snd_addr.trunc(s_size), s_phys,
                                           &sender_mapping, &mapdb_frame)))
            continue;           // someone deleted this mapping in the meantime
        }

      // At this point, we have a lookup for the sender frame (s_phys,
      // s_size, s_attribs), the max. size of the receiver frame
      // (r_phys), the sender_mapping, and whether a receiver mapping
      // already exists (doing_upgrade).

      unsigned i_attribs
        = Mt::apply_attribs(s_attribs, i_phys, attrib_add, attrib_del);

      // Loop increment is size of insertion
      size = i_size;

      // Do the actual insertion.
      typename SPACE::Status status
        = to->v_insert(i_phys, rcv_addr, i_size, i_attribs);

      switch (status)
        {
        case SPACE::Insert_warn_exists:
        case SPACE::Insert_warn_attrib_upgrade:
        case SPACE::Insert_ok:

          assert (mapdb->valid_address(s_phys) || status == SPACE::Insert_ok);
          // Never doing upgrades for mapdb-unmanaged memory

          if (grant)
            {
              if (mapdb->valid_address(s_phys))
                if (EXPECT_FALSE(!mapdb->grant(mapdb_frame, sender_mapping,
                                               to_id, rcv_addr)))
                  {
                    // Error -- remove mapping again.
                    to->v_delete(rcv_addr, i_size);
                    // may fail due to quota limits
                    condition = L4_error::Map_failed;
                    break;
                  }

              from->v_delete(snd_addr.trunc(s_size), s_size);
              need_tlb_flush = true;
            }
          else if (status == SPACE::Insert_ok)
            {
              assert (!doing_upgrade);

              if (mapdb->valid_address(s_phys)
                  && !mapdb->insert(mapdb_frame, sender_mapping,
                                    to_id, rcv_addr,
                                    i_phys, i_size))
                {
                  // Error -- remove mapping again.
                  to->v_delete(rcv_addr, i_size);

                  // XXX This is not race-free as the mapping could have
                  // been used in the mean-time, but we do not care.
                  condition = L4_error::Map_failed;
                  break;
                }
            }

          if (SPACE::Need_xcpu_tlb_flush && SPACE::Need_insert_tlb_flush)
            need_xcpu_tlb_flush = true;

          break;

        case SPACE::Insert_err_nomem:
          condition = L4_error::Map_failed;
          break;

        case SPACE::Insert_err_exists:
          WARN("map (%s) skipping area (%p/%lx): "L4_PTR_FMT
               " -> %p/%lx: "L4_PTR_FMT"(%lx)", SPACE::name,
               from_id, Kobject_dbg::pointer_to_id(from_id), snd_addr.value(),
               to_id, Kobject_dbg::pointer_to_id(to_id), rcv_addr.value(), i_size.value());
          // Do not flag an error here -- because according to L4
          // semantics, it isn't.
          break;
        }

      if (sender_mapping)
        mapdb->free(mapdb_frame);

      if (!condition.ok())
        break;
    }

  if (need_tlb_flush)
    from->tlb_flush();

  if (SPACE::Need_xcpu_tlb_flush && need_xcpu_tlb_flush)
    Context::xcpu_tlb_flush(false, to, from);

  if (EXPECT_FALSE(no_page_mapped))
    {
      WARN("nothing mapped: (%s) from [%p/%lx]: "L4_PTR_FMT
           " size: "L4_PTR_FMT" to [%p/%lx]\n", SPACE::name,
           from_id, Kobject_dbg::pointer_to_id(from_id), snd_addr.value(), rcv_size.value(),
           to_id, Kobject_dbg::pointer_to_id(to_id));
    }

  return condition;
}


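/**
 * Generic unmapping routine shared by all address-space types.
 * Walks the region [start, start + size) of "space", removes the given
 * rights from all mappings derived from it (and, if mask.self_unmap()
 * is set, from "space" itself), and records objects freed along the
 * way on the reap list.
 * @return accumulated access status of the flushed pages, translated
 *         through SPACE::xlate_flush_result().
 */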
template <typename SPACE, typename MAPDB>
unsigned
unmap(MAPDB* mapdb, SPACE* space, Space *space_id,
      Page_number start, Page_count size, unsigned char rights,
      L4_map_mask mask, typename SPACE::Reap_list **reap_list)
{

  typedef typename SPACE::Size Size;
  typedef typename SPACE::Addr Addr;
  typedef typename MAPDB::Mapping Mapping;
  typedef typename MAPDB::Iterator Iterator;
  typedef typename MAPDB::Frame Frame;
  typedef typename Mu::Virt_addr<SPACE>::Type Vaddr;

  bool me_too = mask.self_unmap();

  Mword flushed_rights = 0;
  Page_number end = start + size;
  Page_number const map_max = space->map_max_address();

  // make gcc happy, initialized later anyway
  typename SPACE::Phys_addr phys;
  Page_count phys_size;
  Vaddr page_address;

  Mword const flush_rights = SPACE::xlate_flush(rights);
  bool const full_flush = SPACE::is_full_flush(rights);
  bool need_tlb_flush = false;
  bool need_xcpu_tlb_flush = false;

  // iterate over all pages in "space"'s page table that are mapped
  // into the specified region
  for (Vaddr address(start);
       address < end && address < map_max;
       address = page_address + phys_size)
    {
      // for amd64-mem_space's this will skip the hole in the address space
      address = SPACE::canonize(address);

      bool have_page;

        {
          Size ps;
          have_page = space->v_fabricate(address, &phys, &ps);
          phys_size = ps;
        }

      page_address = address.trunc(phys_size);

      // phys_size and page_address have now been set up, allowing the
      // use of continue (which evaluates the for-loop's iteration
      // expression involving these two variables).
700
701       if (! have_page)
702         continue;
703
704       if (me_too)
705         {
706           assert (address == page_address
707                   || phys_size == Size(SPACE::Map_superpage_size));
708
709           // Rewind flush address to page address.  We always flush
710           // the whole page, even if it is larger than the specified
711           // flush area.
712           address = page_address;
713           if (end < address + phys_size)
714             end = address + phys_size;
715         }
716
717       // all pages shall be handled by our mapping data base
718       assert (mapdb->valid_address(phys));
719
720       Mapping *mapping;
721       Frame mapdb_frame;
722
723       if (! mapdb->lookup(space_id, page_address, phys,
724                           &mapping, &mapdb_frame))
725         // someone else unmapped faster
726         continue;               // skip
727
728       Mword page_rights = 0;
729
730       // Delete from this address space
731       if (me_too)
732         {
733           page_rights |=
734             space->v_delete(address, phys_size, flush_rights);
735
736           need_tlb_flush = true;
737           need_xcpu_tlb_flush = true;
738         }
739
740       // now delete from the other address spaces
741       for (Iterator m(mapdb_frame, mapping, address, end);
742            m;
743            ++m)
744         {
745           page_rights |= Mu::v_delete<SPACE>(m, flush_rights);
746           need_xcpu_tlb_flush = true;
747         }
748
749       flushed_rights |= page_rights;
750
751       // Store access attributes for later retrieval
752       save_access_attribs(mapdb, mapdb_frame, mapping,
753                           space, page_rights, page_address, phys, phys_size,
754                           me_too);
755
756       if (full_flush)
757         mapdb->flush(mapdb_frame, mapping, mask, address, end);
758
759       if (full_flush)
760         Map_traits<SPACE>::free_object(phys, reap_list);
761
762       mapdb->free(mapdb_frame);
763     }
764
765   if (need_tlb_flush)
766     space->tlb_flush();
767
768   if (SPACE::Need_xcpu_tlb_flush && need_xcpu_tlb_flush)
769     Context::xcpu_tlb_flush(true, space, 0);
770
771   return SPACE::xlate_flush_result(flushed_rights);
772 }
773
774 //----------------------------------------------------------------------------
775 IMPLEMENTATION[!io || ux]:
776
777 // Empty dummy functions when I/O protection is disabled
778
779 inline
780 L4_error
781 io_map(Space *, L4_fpage const &, Space *, L4_fpage const &, L4_msg_item)
782 {
783   return L4_error::None;
784 }
785
786 inline
787 unsigned
788 io_fpage_unmap(Space * /*space*/, L4_fpage const &/*fp*/, L4_map_mask)
789 {
790   return 0;
791 }
792