1 INTERFACE [ia32 || ux || amd64]:
// Extension of Mem_space with the ia32/ux/amd64 page-table backend:
// v_insert status codes, page-attribute masks built from the hardware
// Pt_entry bits, constants consumed by map_util, and the
// machine-dependent mapping hooks.
3 EXTENSION class Mem_space
8 /** Return status of v_insert. */
11 Insert_ok = 0, ///< Mapping was added successfully.
12 Insert_warn_exists, ///< Mapping already existed
13 Insert_warn_attrib_upgrade, ///< Mapping already existed, attribs upgrade
14 Insert_err_nomem, ///< Couldn't alloc new page table
15 Insert_err_exists ///< A mapping already exists at the target addr
18 /** Attribute masks for page mappings. */
// Each mask aliases the corresponding hardware page-table-entry bit.
23 Page_writable = Pt_entry::Writable,
25 /// Page is noncacheable.
26 Page_noncacheable = Pt_entry::Noncacheable | Pt_entry::Write_through,
28 Page_user_accessible = Pt_entry::User,
29 /// Page has been referenced
30 Page_referenced = Pt_entry::Referenced,
// Page has been written to (hardware dirty bit).
32 Page_dirty = Pt_entry::Dirty,
33 Page_references = Page_referenced | Page_dirty,
34 /// A mask which contains all mask bits
35 Page_all_attribs = Page_writable | Page_noncacheable |
36 Page_user_accessible | Page_referenced | Page_dirty,
41 enum // Definitions for map_util
// 0: presumably no TLB flush is needed right after inserting a new
// mapping on this architecture — TODO confirm against map_util's use.
43 Need_insert_tlb_flush = 0,
44 Map_page_size = Config::PAGE_SIZE,
45 Page_shift = Config::PAGE_SHIFT,
46 Map_superpage_size = Config::SUPERPAGE_SIZE,
47 Map_max_address = Mem_layout::User_max,
48 Whole_space = MWORD_BITS,
// Machine-dependent hooks called from v_insert/v_delete; no-ops on
// native ia32/amd64 (see the IMPLEMENTATION section below), real work
// on Fiasco-UX.
53 void page_map (Address phys, Address virt,
54 Address size, unsigned page_attribs);
56 void page_unmap (Address virt, Address size);
58 void page_protect (Address virt, Address size,
59 unsigned page_attribs);
66 //----------------------------------------------------------------------------
67 IMPLEMENTATION [ia32 || ux || amd64]:
74 #include "mem_layout.h"
76 #include "std_macros.h"
81 PUBLIC explicit inline
82 Mem_space::Mem_space(Ram_quota *q) : _quota(q), _dir(0) {}
// Allocate and zero this space's page directory.
// Returns true on success, false if the allocation (charged to this
// space's RAM quota) fails.
86 Mem_space::initialize()
// One page of order PAGE_SHIFT from the kernel allocator, accounted
// against _quota; bail out gracefully on exhaustion.
89 if (EXPECT_FALSE(!(b = Kmem_alloc::allocator()
90 ->q_alloc(_quota, Config::PAGE_SHIFT))))
// Adopt the fresh page as the directory and clear it so no stale
// entries leak into the new address space.
93 _dir = static_cast<Dir_type*>(b);
94 _dir->clear(); // initialize to zero
95 return true; // success
// Kernel-space constructor: adopt an already existing page directory
// instead of allocating one, and register this object as the kernel
// space and as the current space on CPU 0 (boot CPU).
99 Mem_space::Mem_space(Ram_quota *q, Dir_type* pdir)
100 : _quota(q), _dir(pdir)
102 _kernel_space = this;
103 _current.cpu(0) = this;
// Translate L4 unmap rights into the set of page-attribute bits that
// must be revoked/collected.
109 Mem_space::xlate_flush(unsigned char rights)
// The referenced/dirty bits are always collected.
111 Mword a = Page_references;
// Revoking execute or read invalidates the mapping completely ...
112 if (rights & L4_fpage::RX)
113 a |= Page_all_attribs;
// ... revoking only write presumably adds just Page_writable — the
// branch body is outside this excerpt; TODO confirm.
114 else if (rights & L4_fpage::W)
// True if revoking these rights removes the mapping entirely (read or
// execute revoked), as opposed to a mere write downgrade.
121 Mem_space::is_full_flush(unsigned char rights)
123 return rights & L4_fpage::RX;
// Translate collected page-attribute bits back into result flags
// reporting which accesses the hardware recorded (the branch bodies
// are not visible in this excerpt).
128 Mem_space::xlate_flush_result(Mword attribs)
131 if (attribs & Page_referenced)
134 if (attribs & Page_dirty)
140 PUBLIC inline NEEDS["cpu.h"]
// Whether the CPU supports superpage mappings; forwarded to Cpu.
142 Mem_space::has_superpages()
144 return Cpu::have_superpages();
148 PUBLIC static inline NEEDS["mem_unit.h"]
// Flush the whole TLB.  The (unnamed) bool parameter exists only for
// interface compatibility and is ignored here.
150 Mem_space::tlb_flush(bool = false)
152 Mem_unit::tlb_flush();
// Per-space TLB flush hook; all parameters are unnamed, so this is
// presumably a no-op in this implementation — body not visible here.
157 Mem_space::tlb_flush_spaces(bool, Mem_space *, Mem_space *)
// Return the Mem_space that is currently active on the given CPU.
165 Mem_space::current_mem_space(unsigned cpu) /// XXX: do not fix, deprecated, remove!
167 return _current.cpu(cpu);
// Replace the attribute bits of the leaf PTE mapping `virt` with
// exactly `page_attribs`.
172 Mem_space::set_attributes(Addr virt, unsigned page_attribs)
174 Pdir::Iter i = _dir->walk(virt);
// Only a valid 4K leaf entry qualifies; invalid slots and superpage
// entries are rejected (failure path not visible in this excerpt).
176 if (!i.e->valid() || i.shift() != Config::PAGE_SHIFT)
// Wipe all old attribute bits, then set precisely the requested ones.
179 i.e->del_attr(Page::MAX_ATTRIBS);
180 i.e->add_attr(page_attribs);
191 * Destructor. Deletes the address space and unregisters it from
// Tear down the page-table hierarchy of the user part of this space.
196 Mem_space::dir_shutdown()
198 // free all page tables we have allocated for this address space
199 // except the ones in kernel space which are always shared
// Destroy covers only [0, mem_user_max); freed pages are refunded to
// this space's quota via the quota-aware allocator.
200 _dir->destroy(Virt_addr(0),
201 Virt_addr(Kmem::mem_user_max), Pdir::Depth - 1,
202 Kmem_alloc::q_allocator(_quota));
// Establish (or upgrade) a virtual-to-physical mapping of `size` bytes
// with the given attribute bits.  Returns one of the Insert_* codes:
// Insert_ok for a fresh mapping, Insert_warn_* when an identical
// mapping is (partially) in place already, Insert_err_* on conflict or
// page-table allocation failure.
208 Mem_space::v_insert(Phys_addr phys, Vaddr virt, Vsize size,
209 unsigned page_attribs, bool upgrade_ignore_size)
211 // insert page into page table
213 // XXX should modify page table using compare-and-swap
// Only the two hardware-supported mapping sizes are legal.
215 assert_kdb (size == Size(Config::PAGE_SIZE)
216 || size == Size(Config::SUPERPAGE_SIZE));
217 if (size == Size(Config::SUPERPAGE_SIZE))
// Superpages require CPU support and superpage alignment of both the
// virtual and the physical address.
219 assert (Cpu::have_superpages());
220 assert (virt.offset(Size(Config::SUPERPAGE_SIZE)) == 0);
221 assert (phys.offset(Size(Config::SUPERPAGE_SIZE)) == 0);
// Select the page-table level, the shift, and the extra entry bit
// (PSE marks a superpage entry) matching the requested size.
224 unsigned level = (size == Size(Config::SUPERPAGE_SIZE) ? (int)Pdir::Super_level : (int)Pdir::Depth);
225 unsigned shift = (size == Size(Config::SUPERPAGE_SIZE) ? Config::SUPERPAGE_SHIFT : Config::PAGE_SHIFT);
226 unsigned attrs = (size == Size(Config::SUPERPAGE_SIZE) ? (unsigned long)Pt_entry::Pse_bit : 0);
// Walk to the target level, allocating intermediate page tables on
// demand, charged to this space's quota.
228 Pdir::Iter i = _dir->walk(virt, level,
229 Kmem_alloc::q_allocator(_quota));
// Walk stopped above the requested level on an empty slot: an
// intermediate page table could not be allocated.
231 if (EXPECT_FALSE(!i.e->valid() && i.shift() != shift))
232 return Insert_err_nomem;
// A different mapping (other size or other frame) already occupies the
// slot and the caller did not ask for an unconditional upgrade.
234 if (EXPECT_FALSE(!upgrade_ignore_size
235 && i.e->valid() && (i.shift() != shift || i.addr() != phys.value())))
236 return Insert_err_exists;
// Entry already carries every requested attribute bit: nothing to do.
240 if (EXPECT_FALSE((i.e->raw() | page_attribs) == i.e->raw()))
241 return Insert_warn_exists;
// Same frame with weaker attributes: upgrade in place and propagate
// the new protection to the machine-dependent layer.
243 i.e->add_attr(page_attribs);
244 page_protect (Addr(virt).value(), Size(size).value(), i.e->raw() & Page_all_attribs);
246 return Insert_warn_attrib_upgrade;
// Fresh mapping: write the complete entry with one store, then notify
// the machine-dependent layer.
250 *i.e = Addr(phys).value() | Pt_entry::Valid | attrs | page_attribs;
251 page_map (Addr(phys).value(), Addr(virt).value(), Size(size).value(), page_attribs);
258 * Simple page-table lookup.
260 * @param virt Virtual address. This address does not need to be page-aligned.
261 * @return Physical address corresponding to virt.
263 PUBLIC inline NEEDS ["paging.h"]
265 Mem_space::virt_to_phys(Address virt) const
267 return dir()->virt_to_phys(virt);
271 * Translate a kernel pmem address to its physical address via
271 * Mem_layout (no page-table walk involved).
273 * @param virt Virtual address. This address does not need to be page-aligned.
274 * @return Physical address corresponding to virt.
276 PUBLIC inline NEEDS ["mem_layout.h"]
278 Mem_space::pmem_to_phys (Address virt) const
280 return Mem_layout::pmem_to_phys(virt);
284 * Simple page-table lookup.
286 * This method is similar to Space_context's virt_to_phys().
287 * The difference is that this version handles Sigma0's
288 * address space with a special case: For Sigma0, we do not
289 * actually consult the page table -- it is meaningless because we
290 * create new mappings for Sigma0 transparently; instead, we return the
291 * logically-correct result of physical address == virtual address.
291 *
291 * NOTE(review): the visible body below performs a plain page-table
291 * lookup with no Sigma0 special case; the description above looks
291 * stale or the special case is handled elsewhere -- confirm.
293 * @param a Virtual address. This address does not need to be page-aligned.
294 * @return Physical address corresponding to a.
298 Mem_space::virt_to_phys_s0(void *a) const
300 return dir()->virt_to_phys((Address)a);
// Look up the page-table entry mapping `virt` and report the mapping
// size, physical frame, and attribute bits through the optional out
// parameters (validity check / return value not visible here).
305 Mem_space::v_lookup(Vaddr virt, Phys_addr *phys,
306 Size *size, unsigned *page_attribs)
308 Pdir::Iter i = _dir->walk(virt);
// The level the walk stopped at determines the mapping size.
309 if (size) *size = Size(1UL << i.shift());
// Mask the frame address down to the mapping-size boundary.
314 if (phys) *phys = Addr(i.e->addr() & (~0UL << i.shift()));
315 if (page_attribs) *page_attribs = (i.e->raw() & Page_all_attribs);
// Revoke the attribute bits in `page_attribs` from the mapping at
// `virt` — or remove the mapping entirely when user access is among
// the revoked rights.  Returns (via `ret`, assembled below) the bits
// of `page_attribs` that were actually set in the entry.
322 Mem_space::v_delete(Vaddr virt, Vsize size,
323 unsigned long page_attribs = Page_all_attribs)
327 // delete pages from page tables
328 assert (size == Size(Config::PAGE_SIZE) || size == Size(Config::SUPERPAGE_SIZE));
330 if (size == Size(Config::SUPERPAGE_SIZE))
// Superpage removal requires CPU support and superpage alignment.
332 assert (Cpu::have_superpages());
333 assert (!virt.offset(Size(Config::SUPERPAGE_SIZE)));
336 Pdir::Iter i = _dir->walk(virt);
// Nothing mapped here: nothing to revoke (early-out path not visible).
338 if (EXPECT_FALSE (! i.e->valid()))
341 assert (! (i.e->raw() & Pt_entry::global())); // Cannot unmap shared ptables
// Remember which of the requested bits were present in the entry.
343 ret = i.e->raw() & page_attribs;
// User access stays: merely downgrade the rights in place.
345 if (! (page_attribs & Page_user_accessible))
347 // downgrade PDE (superpage) rights
348 i.e->del_attr(page_attribs);
349 page_protect (Addr(virt).value(), Size(size).value(), i.e->raw() & Page_all_attribs);
// User access revoked: remove the whole entry.
353 // delete PDE (superpage)
355 page_unmap (Addr(virt).value(), Size(size).value());
362 * \brief Free all memory allocated for this Mem_space.
363 * \pre Runs after the destructor!
366 Mem_space::~Mem_space()
// Return the page-directory page to the kernel allocator, refunding
// this space's quota (the page tables underneath are released by
// dir_shutdown()).
371 Kmem_alloc::allocator()->q_free(_quota, Config::PAGE_SHIFT, _dir);
376 // --------------------------------------------------------------------
377 IMPLEMENTATION [ia32 || amd64]:
380 #include "l4_types.h"
382 #include "mem_unit.h"
383 #include "cpu_lock.h"
384 #include "lock_guard.h"
392 IMPLEMENT inline NEEDS ["cpu.h", "kmem.h"]
// Activate this address space: load the page-directory base register
// with the physical address of _dir and record this space as current
// on the executing CPU.
394 Mem_space::make_current()
396 Cpu::set_pdbr((Mem_layout::pmem_to_phys(_dir)));
397 _current.cpu(current_cpu()) = this;
400 PUBLIC inline NEEDS ["kmem.h"]
// Physical address of this space's page directory (suitable for
// loading into the page-directory base register).
402 Mem_space::phys_dir()
404 return Mem_layout::pmem_to_phys(_dir);
408 * The following functions are all no-ops on native ia32.
409 * Pages appear in an address space when the corresponding PTE is made
410 * ... unlike Fiasco-UX which needs these special tricks
// Machine-dependent hooks invoked by v_insert/v_delete/set_attributes;
// intentionally empty on native hardware (all parameters unnamed).
415 Mem_space::page_map (Address, Address, Address, unsigned)
420 Mem_space::page_protect (Address, Address, unsigned)
425 Mem_space::page_unmap (Address, Address)
428 IMPLEMENT inline NEEDS["kmem.h", "logdefs.h"]
// Switch the hardware address space to this Mem_space when coming from
// `from` (the remainder of the body is outside this excerpt).
430 Mem_space::switchin_context(Mem_space *from)
432 // FIXME: this optimization breaks SMP task deletion, an idle thread
433 // may run on an already deleted page table
435 // never switch to kernel space (context of the idle thread)
436 if (dir() == Kmem::dir())
// Accounting: count address-space switches.
442 CNT_ADDR_SPACE_SWITCH;
// Copy the kernel portion (from User_max upward) of the master kernel
// directory into this directory down to superpage level, so kernel
// mappings stay visible inside this address space; any page tables
// needed for the copy are charged to this space's quota.
449 Mem_space::sync_kernel()
451 _dir->sync(Virt_addr(Mem_layout::User_max), Kmem::dir(),
452 Virt_addr(Mem_layout::User_max),
453 Virt_addr(-Mem_layout::User_max), Pdir::Super_level,
454 Kmem_alloc::q_allocator(_quota));
457 // --------------------------------------------------------------------
458 IMPLEMENTATION [amd64]:
// amd64: sign-extend the address — if bit 48 is set, fill all bits
// from 48 upward so the value is in canonical form (NOTE(review):
// hardware canonicality replicates bit 47; confirm 48 is intended for
// this kernel's address-space layout).
462 Mem_space::canonize(Page_number v)
464 if (v & Virt_addr(1UL << 48))
465 v = v | Virt_addr(~0UL << 48);
469 // --------------------------------------------------------------------
470 IMPLEMENTATION [ia32 || ux]:
474 Mem_space::canonize(Page_number v)