3 #include "slab_cache.h"
7 #include "auto_quota.h"
9 struct Mapping_tree; // forward decls
14 /** A mapping database.
18 friend class Jdb_mapdb;
21 typedef ::Mapping Mapping;
23 /** Iterator for mapping trees. This iterator knows how to descend
28 Mapping_tree* _mapping_tree;
34 Page_number _submap_index;
35 Page_number _offs_begin, _offs_end;
38 inline Mapping* operator->() const { return _cursor; }
39 inline operator Mapping*() const { return _cursor; }
40 inline Iterator() : _submap (0), _submap_index(Page_number::create(0)) {}
41 inline Iterator(const Mapdb::Frame& f, Mapping* parent,
42 Page_number va_begin, Page_number va_end);
44 inline void neutralize();
45 inline Page_count size() const;
46 inline size_t shift() const { return _page_shift; }
47 inline Page_number page() const
48 { return Page_number(_cursor->page() << shift()); }
50 inline Iterator &operator++ ();
57 friend class Mapdb::Iterator;
62 inline Page_number vaddr(Mapping* m) const;
67 Treemap* const _treemap;
73 /* The mapping database.
75 * This implementation encodes mapping trees in very compact arrays of
76 * fixed sizes, prefixed by a tree header (Mapping_tree). Array
77 * sizes can vary from 4 mappings to 4<<15 mappings. For each size,
78 * we set up a slab allocator. To grow or shrink the size of an
79 * array, we have to allocate a larger or smaller tree from the
80 * corresponding allocator and then copy the array elements.
82 * The array elements (Mapping) contain a tree depth element. This
83 * depth and the relative position in the array is all information we
84 * need to derive tree structure information. Here is an example:
88 * number value comment
89 * --------------------------
91 * 1 1 child of element #0 with depth 0
92 * 2 2 child of element #1 with depth 1
93 * 3 2 child of element #1 with depth 1
94 * 4 3 child of element #3 with depth 2
95 * 5 2 child of element #1 with depth 1
96 * 6 3 child of element #5 with depth 2
97 * 7 1 child of element #0 with depth 0
99 * This array is a pre-order encoding of the following tree:
109 * The mapping database (Mapdb) is organized in a hierarchy of
110 * frame-number-keyed maps of Mapping_trees (Treemap). The top-level
111 * Treemap contains mapping trees for superpages. These mapping trees
112 * may contain references to Treemaps for subpages. (Original credits
113 * for this idea: Christan Szmajda.)
116 * -------------------------------
117 * | | | | | | | | | | | | | | | | array of ptr to 4M Mapping_tree's
118 * ---|---------------------------
124 * | | 4M Mapping *or* ptr to nested Treemap
126 * | ----------------| Treemap
127 * | | v array of ptr to 4K Mapping_tree's
128 * --------------- -------------------------------
129 * | | | | | | | | | | | | | | | |
130 * ---|---------------------------
142 * IDEAS for enhancing this implementation:
144 * We often have to find a tree header corresponding to a mapping.
145 * Currently, we do this by iterating backwards over the array
146 * containing the mappings until we find the Sigma0 mapping, from
147 * whose address we can compute the address of the tree header. If
148 * this becomes a problem, we could add one more byte to the mappings
149 * with a hint (negative array offset) where to find the sigma0
150 * mapping. (If the hint value overflows, just iterate using the hint
151 * value of the mapping we find with the first hint value.) Another
152 * idea (from Adam) would be to just look up the tree header by using
153 * the physical address from the page-table lookup, but we would need
154 * to change the interface of the mapping database for that (pass in
155 * the physical address at all times), or we would have to include the
156 * physical address (or just the address of the tree header) in the
157 * Mapdb-user-visible Mapping (which could be different from the
158 * internal tree representation). (XXX: Implementing one of these
159 * ideas is probably worthwhile doing!)
161 * Instead of copying whole trees around when they grow or shrink a
162 * lot, or copying parts of trees when inserting an element, we could
163 * give up the array representation and add a "next" pointer to the
164 * elements -- that is, keep the tree of mappings in a
165 * pre-order-encoded singly-linked list (credits to: Christan Szmajda
166 * and Adam Wiggins). 24 bits would probably be enough to encode that
167 * pointer. Disadvantages: Mapping entries would be larger, and the
168 * cache-friendly space-locality of tree entries would be lost.
176 #include "helping_lock.h"
177 #include "kmem_alloc.h"
178 #include "mapping_tree.h"
180 #include "kmem_slab.h"
181 #include "ram_quota.h"
182 #include "std_macros.h"
189 /** Array elements for holding frame-specific data. */
190 class Physframe : public Base_mappable
193 friend class Mapdb::Iterator;
194 friend class Treemap;
195 friend class Jdb_mapdb;
196 }; // struct Physframe
198 #if 0 // Optimization: do this using memset in Physframe::alloc()
200 Physframe::Physframe ()
205 Physframe::operator new (size_t, void *p)
211 Physframe::mem_size(size_t size)
213 return (size*sizeof(Physframe) + 1023) & ~1023;
218 Physframe::alloc(size_t size)
221 = (Physframe*)Kmem_alloc::allocator()->unaligned_alloc(mem_size(size));
223 #if 1 // Optimization: See constructor
225 memset(block, 0, size * sizeof(Physframe));
228 for (unsigned i = 0; i < size; ++i)
229 new (block + i) Physframe();
236 NEEDS["mapping_tree.h", Treemap::~Treemap, Treemap::operator delete]
237 Physframe::~Physframe()
241 auto guard = lock_guard(lock);
243 // Find next-level trees.
244 for (Mapping* m = tree->mappings();
245 m && !m->is_end_tag();
256 static // inline NEEDS[Physframe::~Physframe]
258 Physframe::free(Physframe *block, size_t size)
260 for (unsigned i = 0; i < size; ++i)
261 block[i].~Physframe();
263 Kmem_alloc::allocator()->unaligned_free(Physframe::mem_size(size), block);
272 friend class Jdb_mapdb;
273 friend class Treemap_ops;
276 Page_count _key_end; ///< Number of Physframe entries
277 Space *_owner_id; ///< ID of owner of mapping trees
278 Page_number _page_offset; ///< Virt. page address in owner's addr space
279 size_t _page_shift; ///< Page size of mapping trees
280 Physframe* _physframe; ///< Pointer to Physframe array
281 const size_t* const _sub_shifts; ///< Pointer to array of sub-page sizes
282 const unsigned _sub_shifts_max; ///< Number of valid _page_sizes entries
284 friend class Mapdb::Iterator;
285 Mapdb::Iterator _unwind; ///< Chaining information for Mapdb_iterator
291 // Helper operations for Treemaps (used in Mapping_tree::flush)
295 unsigned long _page_shift;
299 Treemap_ops::Treemap_ops(unsigned long page_shift)
300 : _page_shift(page_shift)
305 Treemap_ops::mem_size(Treemap const *submap) const
308 = submap->_key_end.value() * sizeof(Physframe) + sizeof(Treemap);
310 for (Page_number key = Page_number::create(0);
311 key < submap->_key_end;
314 Physframe* subframe = submap->frame(key);
316 quota += sizeof(Mapping);
324 Treemap_ops::grant(Treemap *submap, Space *new_space, Page_number va) const
326 submap->_owner_id = new_space;
327 submap->_page_offset = va << _page_shift;
329 for (Page_number key = Page_number::create(0);
330 key < submap->_key_end;
333 Physframe* subframe = submap->frame(key);
334 if (Mapping_tree* tree = subframe->tree.get())
336 auto guard = lock_guard(subframe->lock);
337 tree->mappings()->set_space(new_space);
338 tree->mappings()->set_page(va + (key >> (_page_shift - submap->_page_shift)));
// Return the Space that owns the submap's mapping trees
// (delegates to Treemap::owner()).
345 Treemap_ops::owner(Treemap const *submap) const
346 { return submap->owner(); }
// True iff [offs_begin, offs_end) covers only part of the submap's
// range (_key_end << _page_shift), i.e. a flush over this window would
// not remove the whole submap.
350 Treemap_ops::is_partial(Treemap const *submap, Page_count offs_begin,
351                         Page_count offs_end) const
353   return offs_begin > Page_count::create(0)
354     || offs_end < (submap->_key_end << submap->_page_shift);
359 Treemap_ops::del(Treemap *submap) const
364 Treemap_ops::flush(Treemap *submap,
365 Page_number offs_begin, Page_number offs_end) const
367 for (Page_number i = offs_begin >> submap->_page_shift;
368 i < (offs_end + Page_count::create((1UL << submap->_page_shift) - 1)
369 >> submap->_page_shift);
372 Page_count page_offs_begin = i << submap->_page_shift;
373 Page_count page_offs_end = page_offs_begin;
374 page_offs_end += Page_count::create(1UL << submap->_page_shift);
376 Physframe* subframe = submap->frame(i);
378 auto guard = lock_guard(subframe->lock);
380 if (offs_begin <= page_offs_begin && offs_end >= page_offs_end)
381 subframe->tree.reset();
384 Page_count psz = Page_count::create(1UL << _page_shift);
385 // FIXME: do we have to check for subframe->tree != 0 here ?
386 submap->flush(subframe, subframe->tree->mappings(), false,
387 page_offs_begin > offs_begin
388 ? Page_number::create(0) : offs_begin.offset(psz),
389 page_offs_end < offs_end
391 : (offs_end - Page_count::create(1)).offset(psz)
392 + Page_count::create(1));
403 Treemap::Treemap(Page_number key_end, Space *owner_id,
404 Page_number page_offset, size_t page_shift,
405 const size_t* sub_shifts, unsigned sub_shifts_max,
406 Physframe *physframe)
409 _page_offset(page_offset),
410 _page_shift(page_shift),
411 _physframe(physframe),
412 _sub_shifts(sub_shifts),
413 _sub_shifts_max(sub_shifts_max)
// Quota charge for a Treemap covering key_end frames: the Physframe
// array (Physframe::mem_size) plus the Treemap object itself.
418 PRIVATE static inline
420 Treemap::quota_size(Page_number key_end)
421 { return Physframe::mem_size(key_end.value()) + sizeof(Treemap); }
425 Treemap::create(Page_number key_end, Space *owner_id,
426 Page_number page_offset, size_t page_shift,
427 const size_t* sub_shifts, unsigned sub_shifts_max)
429 Auto_quota<Ram_quota> quota(Mapping_tree::quota(owner_id), quota_size(key_end));
431 if (EXPECT_FALSE(!quota))
435 Physframe *pf = Physframe::alloc(key_end.value());
437 if (EXPECT_FALSE(!pf))
440 void *m = allocator()->alloc();
441 if (EXPECT_FALSE(!m))
443 Physframe::free(pf, key_end.value());
448 return new (m) Treemap(key_end, owner_id, page_offset, page_shift, sub_shifts,
455 inline NEEDS[Physframe::free, Mapdb] //::Iterator::neutralize]
458 Physframe::free(_physframe, _key_end.value());
460 // Make sure that _unwind.~Mapdb_iterator is harmless: Reinitialize it.
461 _unwind.neutralize();
464 static Kmem_slab_t<Treemap> _treemap_allocator("Treemap");
469 { return &_treemap_allocator; }
475 Treemap::operator delete (void *block)
477 Treemap *t = reinterpret_cast<Treemap*>(block);
478 Space *id = t->_owner_id;
479 allocator()->free(block);
480 Mapping_tree::quota(id)->free(Treemap::quota_size(t->_key_end));
485 Treemap::page_shift() const
492 Treemap::owner() const
499 Treemap::end_addr() const
501 return (_key_end << _page_shift) + _page_offset;
// Virtual address of mapping m in its owner's address space:
// the stored page number scaled by this treemap's page size.
504 PUBLIC inline NEEDS["paging.h"]
506 Treemap::vaddr(Mapping* m) const
508   return m->page() << _page_shift;
// Inverse of vaddr(): store the page number corresponding to the given
// virtual address (address scaled down by this treemap's page size).
513 Treemap::set_vaddr(Mapping* m, Page_number address) const
515   m->set_page(address >> _page_shift);
520 Treemap::tree(Page_number key)
522 assert (key < _key_end);
524 Physframe *f = &_physframe[key.value()];
530 Auto_quota<Ram_quota> q(Mapping_tree::quota(_owner_id), sizeof(Mapping));
531 if (EXPECT_FALSE(!q))
537 cxx::unique_ptr<Mapping_tree> new_tree
538 (new (Mapping_tree::Size_id_min) Mapping_tree(Mapping_tree::Size_id_min, key + (_page_offset >> _page_shift),
541 if (EXPECT_FALSE(!new_tree))
548 f->tree = cxx::move(new_tree);
// Return the Physframe slot for the given key (already expressed in
// this treemap's page-size units). Key must lie inside the map.
// Unlike tree(), this does not lock the frame or allocate a tree.
556 Treemap::frame(Page_number key) const
558   assert (key < _key_end);
560   return &_physframe[key.value()];
565 Treemap::lookup(Page_number key, Space *search_space, Page_number search_va,
566 Mapping** out_mapping, Treemap** out_treemap,
567 Physframe** out_frame)
569 // get and lock the tree.
570 assert (key >> _page_shift < _key_end);
571 Physframe *f = tree(key >> _page_shift); // returns locked frame
574 Mapping_tree *t = f->tree.get();
575 Page_count const psz = Page_count::create(1UL << _page_shift);
578 assert (t->mappings()[0].space() == _owner_id);
579 assert (vaddr(t->mappings()) == key.trunc(psz) + _page_offset);
584 for (m = t->mappings(); m; m = t->next (m))
586 if (Treemap *sub = m->submap())
588 // XXX Recursion. The max. recursion depth should better be
590 if ((ret = sub->lookup(key.offset(psz),
591 search_space, search_va,
592 out_mapping, out_treemap, out_frame)))
595 else if (m->space() == search_space
596 && vaddr(m) == search_va.trunc(psz))
601 return true; // found! -- return locked
605 // not found, or found in submap -- unlock tree
613 Treemap::insert(Physframe* frame, Mapping* parent, Space *space,
614 Page_number va, Page_number phys, Page_count size)
616 Mapping_tree* t = frame->tree.get();
619 Page_count const psz = Page_count::create(1UL << _page_shift);
620 bool insert_submap = psz != size;
622 // Inserting subpage mapping? See if we can find a submap. If so,
623 // we don't have to allocate a new Mapping entry.
628 submap = t->find_submap(parent);
631 if (! submap) // Need allocation of new entry -- for submap or
634 // first check quota! In case of a new submap the parent pays for
636 payer = Mapping_tree::quota(insert_submap ? parent->space() : space);
638 Mapping *free = t->allocate(payer, parent, insert_submap);
639 if (EXPECT_FALSE(!free))
642 // Check for submap entry.
643 if (! insert_submap) // Not a submap entry
645 free->set_space(space);
648 t->check_integrity(_owner_id);
652 assert (_sub_shifts_max > 0);
655 = Treemap::create(Page_number::create(1UL << (_page_shift - _sub_shifts[0])),
656 parent->space(), vaddr(parent), _sub_shifts[0],
657 _sub_shifts + 1, _sub_shifts_max - 1);
660 // free the mapping got with allocate
661 t->free_mapping(payer, free);
665 free->set_submap(submap);
668 Physframe* subframe = submap->tree(phys.offset(psz) >> submap->_page_shift);
672 Mapping_tree* subtree = subframe->tree.get();
678 Mapping* ret = submap->insert(subframe, subtree->mappings(), space, va,
679 phys.offset(psz), size);
680 submap->free(subframe);
683 } // Treemap::insert()
687 Treemap::free(Physframe* f)
690 f->tree->check_integrity(_owner_id);
// Delete mappings from the tree held in frame f.
// @param parent      subtree root whose children are flushed
// @param me_too      if true, delete parent itself as well
// @param offs_begin, offs_end  offset window (relative to parent's page)
//        restricting which children are affected; submap recursion is
//        handled by the Treemap_ops functor passed to Mapping_tree::flush.
699 Treemap::flush(Physframe* f, Mapping* parent, bool me_too,
700               Page_number offs_begin, Page_number offs_end)
702   assert (! parent->unused());
704   // This is easy to do: We just have to iterate over the array
705   // encoding the tree.
706   Mapping_tree *t = f->tree.get();
707   t->flush(parent, me_too, offs_begin, offs_end, Treemap_ops(_page_shift));
709   t->check_integrity(_owner_id);
711 } // Treemap::flush()
// Transfer ownership of mapping m (in frame f) to new_space at virtual
// address va; va is converted to page units here, submaps are handled
// by the Treemap_ops functor inside Mapping_tree::grant.
715 Treemap::grant(Physframe* f, Mapping* m, Space *new_space, Page_number va)
717   return f->tree->grant(m, new_space, va >> _page_shift,
718                         Treemap_ops(_page_shift));
724 // class Mapdb_iterator
727 /** Create a new mapping-tree iterator. If parent is the result of a
728 fresh lookup (and not the result of an insert operation), you can
729 pass the corresponding Mapdb::Frame for optimization.
731 IMPLEMENT inline NEEDS[Treemap::vaddr]
732 Mapdb::Iterator::Iterator(const Mapdb::Frame& f, Mapping* parent,
733 Page_number va_begin, Page_number va_end)
734 : _mapping_tree(f.frame->tree.get()),
737 _page_shift(f.treemap->_page_shift),
740 _submap_index(Page_number::create(0))
742 assert (_mapping_tree == Mapping_tree::head_of(parent));
743 assert (! parent->submap());
745 if (va_begin < f.treemap->vaddr(parent))
746 va_begin = f.treemap->vaddr(parent);
747 if (va_begin > va_end)
750 _offs_begin = va_begin - f.treemap->vaddr(parent);
751 _offs_end = va_end - f.treemap->vaddr(parent);
756 IMPLEMENT inline NEEDS[Physframe, "helping_lock.h", Treemap]
757 Mapdb::Iterator::~Iterator()
759 // unwind lock information
763 _subframe->lock.clear();
765 *this = _submap->_unwind;
769 /** Make sure that the destructor does nothing. Handy for scratch
770 iterators such as Treemap::_unwind. */
773 Mapdb::Iterator::neutralize()
780 Mapdb::Iterator::size() const
782 return Page_count(1UL << _page_shift);
785 IMPLEMENT inline NEEDS ["mapping_tree.h"]
787 Mapdb::Iterator::operator++ ()
791 _cursor = _mapping_tree->next_child(_parent, _cursor);
793 if (_cursor && ! _cursor->submap())
794 { // Found a new regular child mapping.
799 if (_cursor) // _cursor is a submap
801 Treemap* submap = _cursor->submap();
804 // Before descending into array of subpages, save iterator
805 // state for later unwinding to this point.
806 submap->_unwind = *this;
808 // For subpages of original parent mapping, apply possible tag and
809 // virtual-address restrictions. (Do not apply restrictions
810 // to other subpages because operations on a part of a
811 // superpage always affect *complete* child-superpages, as
812 // we do not support splitting out parts of superpages.
813 Page_count parent_size = submap->_key_end << submap->_page_shift;
815 if (_offs_end > parent_size)
816 _offs_end = parent_size;
818 if (_cursor->parent() == _parent)
819 { // Submap of iteration root
820 _offs_begin = _offs_begin.offset(parent_size);
821 _offs_end = (_offs_end - Page_count::create(1)).offset(parent_size) + Page_count::create(1);
823 else // Submap of a child mapping
825 _offs_begin = Page_number::create(0);
826 _offs_end = parent_size;
829 // Initialize rest of iterator for subframe iteration
831 _page_shift = _submap->_page_shift;
833 _submap_index = _offs_begin >> _page_shift;
835 _parent = _cursor = 0;
838 else if (! _submap) // End of iteration
841 // Clear previously acquired lock.
844 _subframe->lock.clear();
848 // Find a new subframe.
851 Page_count end_offs = _offs_end;
852 end_offs += Page_count::create((1UL << _submap->_page_shift) - 1);
853 end_offs >>= _submap->_page_shift;
855 assert (end_offs <= _submap->_key_end);
857 for (; _submap_index < end_offs;)
859 f = _submap->frame(_submap_index++);
864 if (f && f->tree) // Found a subframe
867 f->lock.lock(); // Lock it
868 _mapping_tree = f->tree.get();
869 _parent = _cursor = _mapping_tree->mappings();
873 // No new subframes found -- unwind to superpage tree
874 *this = _submap->_unwind;
884 @param End physical end address of RAM. (Used only if
885 Config::Mapdb_ram_only is true.)
888 Mapdb::Mapdb(Space *owner, Page_number end_frame, size_t const *page_shifts,
889 unsigned page_shifts_max)
890 : _treemap(Treemap::create(end_frame, owner,
891 Page_number::create(0),
892 page_shifts[0], page_shifts + 1,
893 page_shifts_max - 1))
895 // assert (boot_time);
906 /** Insert a new mapping entry with the given values as child of
908 We assume that there is at least one free entry at the end of the
909 array so that at least one insert() operation can succeed between a
910 lookup()/free() pair of calls. This is guaranteed by the free()
911 operation which allocates a larger tree if the current one becomes
913 @param parent Parent mapping of the new mapping.
914 @param space Number of the address space into which the mapping is entered
915 @param va Virtual address of the mapped page.
916 @param size Size of the mapping. For memory mappings, 4K or 4M.
917 @return If successful, new mapping. If out of memory or mapping
919 @post All Mapping* pointers pointing into this mapping tree,
920 except "parent" and its parents, will be invalidated.
// Thin forwarder: the frame returned by lookup() carries the treemap and
// locked Physframe; the actual insertion logic lives in Treemap::insert.
924 Mapdb::insert(const Mapdb::Frame& frame, Mapping* parent, Space *space,
925               Page_number va, Page_number phys, Page_count size)
927   return frame.treemap->insert(frame.frame, parent, space, va, phys, size);
932 * Lookup a mapping and lock the corresponding mapping tree. The returned
933 * mapping pointer, and all other mapping pointers derived from it, remain
934 * valid until free() is called on one of them. We guarantee that at most
935 * one insert() operation succeeds between one lookup()/free() pair of calls
936 * (it succeeds unless the mapping tree is full).
937 * @param space Number of virtual address space in which the mapping
939 * @param va Virtual address of the mapping
940 * @param phys Physical address of the mapped page frame
941 * @return mapping, if found; otherwise, 0
// Delegate to the top-level treemap, keyed by physical address; on
// success *out_lock records the treemap and the (locked) frame so the
// caller can later insert()/flush()/grant() and finally free().
945 Mapdb::lookup(Space *space, Page_number va, Page_number phys,
946               Mapping** out_mapping, Mapdb::Frame* out_lock)
948   return _treemap->lookup(phys, space, va, out_mapping,
949                           & out_lock->treemap, & out_lock->frame);
952 /** Unlock the mapping tree to which the mapping belongs. Once a tree
953 has been unlocked, all Mapping instances pointing into it become
956 A mapping tree is locked during lookup(). When the tree is
957 locked, the tree may be traversed (using member functions of
958 Mapping, which serves as an iterator over the tree) or
959 manipulated (using insert(), free(), flush(), grant()). Note that
960 only one insert() is allowed during each locking cycle.
962 @param mapping_of_tree Any mapping belonging to a mapping tree.
// Unlock the mapping tree acquired by lookup(); afterwards all Mapping
// pointers derived from that lookup become invalid.
966 Mapdb::free(const Mapdb::Frame& f)
968   f.treemap->free(f.frame);
971 /** Delete mappings from a tree. This function deletes mappings
973 @param m Mapping that denotes the subtree that should be deleted.
974 @param me_too If true, delete m as well; otherwise, delete only
// Translate the absolute window [va_start, va_end) into offsets relative
// to m's own virtual address, clamped to m's page ([0, size)), then
// delegate to Treemap::flush. mask.self_unmap() selects whether m itself
// is deleted too.
979 Mapdb::flush(const Mapdb::Frame& f, Mapping *m, L4_map_mask mask,
980              Page_number va_start, Page_number va_end)
982   Page_count size = Page_count::create(1UL << f.treemap->page_shift());
983   Page_number offs_begin = va_start > f.treemap->vaddr(m)
984                            ? va_start - f.treemap->vaddr(m) : Page_number::create(0);
985   Page_number offs_end = va_end > f.treemap->vaddr(m) + size
986                          ? size : va_end - f.treemap->vaddr(m);
988   f.treemap->flush(f.frame, m, mask.self_unmap(), offs_begin, offs_end);
991 /** Change ownership of a mapping.
992 @param m Mapping to be modified.
993 @param new_space Number of address space the mapping should be
995 @param va Virtual address of the mapping in the new address space
// Forwarder to Treemap::grant on the looked-up frame.
// (va parameter is declared on a line not shown in this view.)
999 Mapdb::grant(const Mapdb::Frame& f, Mapping *m, Space *new_space,
1002   return f.treemap->grant(f.frame, m, new_space, va);
1005 /** Return page size of given mapping and frame. */
// Page-size shift of the treemap the frame belongs to; the Mapping
// argument is currently ignored (see XXX below).
1006 PUBLIC static inline NEEDS[Treemap::page_shift]
1008 Mapdb::shift(const Mapdb::Frame& f, Mapping * /*m*/)
1010   // XXX add special case for _mappings[0]: Return superpage size.
1011   return f.treemap->page_shift();
// Virtual address of mapping m, resolved via the treemap recorded in
// the lookup frame.
1014 PUBLIC static inline NEEDS[Treemap::vaddr]
1016 Mapdb::vaddr(const Mapdb::Frame& f, Mapping* m)
1018   return f.treemap->vaddr(m);
1022 // class Mapdb::Frame
// Convenience member form of Mapdb::vaddr() for a held frame.
1025 IMPLEMENT inline NEEDS[Treemap::vaddr]
1027 Mapdb::Frame::vaddr(Mapping* m) const
1029   return treemap->vaddr(m);
// True iff phys falls below the end of the top-level treemap's range,
// i.e. the address can be looked up in this mapping database.
1033 PUBLIC inline NEEDS [Treemap::end_addr]
1035 Mapdb::valid_address(Page_number phys)
1037   return phys < _treemap->end_addr();
1043 Mapdb::check_for_upgrade(Page_number phys,
1044 Space *from_id, Page_number snd_addr,
1045 Space *to_id, Page_number rcv_addr,
1048 // Check if we can upgrade mapping. Otherwise, flush target
1050 Mapping* receiver_mapping;
1051 if (valid_address(phys) // Can lookup in mapdb
1052 && lookup(to_id, rcv_addr, phys, &receiver_mapping, mapdb_frame))
1054 Mapping* receiver_parent = receiver_mapping->parent();
1055 if (receiver_parent->space() == from_id
1056 && vaddr(*mapdb_frame, receiver_parent) == snd_addr)
1057 return receiver_parent;
1058 else // Not my child -- cannot upgrade
1065 IMPLEMENTATION [debug]:
1069 Treemap::find_space(Space *s)
1071 bool bug = _owner_id == s;
1072 for (unsigned i = 0; i < _key_end.value(); ++i)
1074 bool tree_bug = false;
1075 Mapping_tree *t = _physframe[i].tree.get();
1079 t->check_integrity();
1080 Mapping *m = t->mappings();
1081 for (unsigned mx = 0; mx < t->number_of_entries(); ++mx)
1083 bool mapping_bug = false;
1084 Mapping *ma = m + mx;
1085 if (ma->is_end_tag())
1092 mapping_bug = ma->submap()->find_space(s);
1093 else if (ma->space() == s)
1096 printf("MAPDB: found space %p in mapdb (idx=%d, mapping=%p, depth=%d, page=%lx)\n",
1097 s, mx, ma, ma->depth(), ma->page().value());
1100 tree_bug |= mapping_bug;
1104 printf("MAPDB: error in mapping tree: index=%d\n", i);
1110 printf("MAPDB: error in treemap: owner(space)=%p, offset=%lx, size=%lx, pageshift=%zd\n",
1111 _owner_id, _page_offset.value(), _key_end.value(), _page_shift);
// Debug helper: scan the whole database for stale references to space s
// (delegates to the recursive Treemap::find_space above).
1118 Mapdb::find_space(Space *s)
1119 { return _treemap->find_space(s); }