1 INTERFACE [ia32 || ux || amd64]:
3 EXTENSION class Mem_space
8 /** Return status of v_insert. */
// Ordered so that Insert_ok and the two warnings describe states where a
// (possibly pre-existing) mapping is in place, while the err_* values mean
// no new mapping was established.
11 Insert_ok = 0, ///< Mapping was added successfully.
12 Insert_warn_exists, ///< Mapping already existed
13 Insert_warn_attrib_upgrade, ///< Mapping already existed, attribs upgrade
14 Insert_err_nomem, ///< Couldn't alloc new page table
15 Insert_err_exists ///< A mapping already exists at the target addr
18 /** Attribute masks for page mappings. */
// Each attribute aliases the corresponding hardware page-table entry bit
// defined in Pt_entry, so these masks can be OR-ed directly into raw PTEs
// (see v_insert/v_delete below).
23 Page_writable = Pt_entry::Writable,
25 /// Page is noncacheable.
26 Page_noncacheable = Pt_entry::Noncacheable | Pt_entry::Write_through,
28 Page_user_accessible = Pt_entry::User,
29 /// Page has been referenced
30 Page_referenced = Pt_entry::Referenced,
// Page has been written to (hardware dirty bit).
32 Page_dirty = Pt_entry::Dirty,
// Both hardware-maintained usage bits combined.
33 Page_references = Page_referenced | Page_dirty,
34 /// A mask which contains all mask bits
35 Page_all_attribs = Page_writable | Page_noncacheable |
36 Page_user_accessible | Page_referenced | Page_dirty,
41 enum // Definitions for map_util
// Constants consumed by the generic map_util mapping code.
// Need_insert_tlb_flush == 0: on this architecture no TLB flush is issued
// after inserting a fresh mapping (presumably because an invalid->valid
// transition cannot be cached in the TLB -- NOTE(review): confirm against
// map_util's use of this flag).
43 Need_insert_tlb_flush = 0,
44 Map_page_size = Config::PAGE_SIZE,
45 Page_shift = Config::PAGE_SHIFT,
46 Map_superpage_size = Config::SUPERPAGE_SIZE,
// Highest user-mappable virtual address.
47 Map_max_address = Mem_layout::User_max,
// Size of the whole address space in address bits.
48 Whole_space = MWORD_BITS,
// Architecture hooks invoked after page-table modifications; on native
// ia32/amd64 these are no-ops (see the implementations further down),
// they exist for the sake of Fiasco-UX which needs host-side syscalls.
53 void page_map (Address phys, Address virt,
54 Address size, unsigned page_attribs);
56 void page_unmap (Address virt, Address size);
58 void page_protect (Address virt, Address size,
59 unsigned page_attribs);
66 //----------------------------------------------------------------------------
67 IMPLEMENTATION [ia32 || ux || amd64]:
74 #include "mem_layout.h"
76 #include "std_macros.h"
// Per-CPU pointer to the Mem_space whose page directory is currently
// loaded on that CPU (kept up to date by make_current()).
79 Per_cpu<Mem_space *> DEFINE_PER_CPU Mem_space::_current;
// Construct a user Mem_space: allocate one page from quota `q` for the
// page directory and zero it. (Error path and the use of `sync_kernel`
// are outside this view of the file.)
83 Mem_space::Mem_space (Ram_quota *q, bool sync_kernel = true)
// Allocate the page-directory page against the space's quota; failure
// leaves b == 0 and takes the (elided) error path.
88 if (EXPECT_FALSE(! (b = Mapped_allocator::allocator()
89 ->q_alloc(_quota, Config::PAGE_SHIFT))))
92 _dir = static_cast<Dir_type*>(b);
93 _dir->clear(); // initialize to zero
// Construct the kernel Mem_space around an existing page directory
// `pdir`; registers itself as the global kernel space and as the
// current space of the boot CPU (CPU 0).
99 Mem_space::Mem_space(Ram_quota *q, Dir_type* pdir)
100 : _quota(q), _dir (pdir)
102 _kernel_space = this;
103 _current.cpu(0) = this;
// Translate L4 fpage revocation rights into the page-attribute mask that
// must be cleared: revoking execute/read (RX) removes everything, a pure
// write revocation only downgrades (the W branch body is elided from this
// view). The Page_references bits are always included so accesses are
// re-detected after the flush.
109 Mem_space::xlate_flush(unsigned char rights)
111 Mword a = Page_references;
112 if (rights & L4_fpage::RX)
113 a |= Page_all_attribs;
114 else if (rights & L4_fpage::W)
// A flush counts as "full" (mapping removed, not just downgraded) exactly
// when the read/execute right is being revoked.
121 Mem_space::is_full_flush(unsigned char rights)
123 return rights & L4_fpage::RX;
// Translate hardware attribute bits gathered during a flush back into
// L4-level status: referenced and dirty bits are reported separately
// (the per-branch results are elided from this view).
128 Mem_space::xlate_flush_result(Mword attribs)
131 if (attribs & Page_referenced)
134 if (attribs & Page_dirty)
// True iff the CPU supports superpages (PSE); gates the superpage paths
// in v_insert/v_delete.
140 PUBLIC inline NEEDS["cpu.h"]
142 Mem_space::has_superpages()
144 return Cpu::have_superpages();
// Flush the TLB of the current CPU; the bool parameter is accepted for
// interface compatibility and ignored here.
148 PUBLIC static inline NEEDS["mem_unit.h"]
150 Mem_space::tlb_flush(bool = false)
152 Mem_unit::tlb_flush();
// Flush TLBs on behalf of the given spaces; all parameters are unnamed
// (ignored) in this implementation variant -- body elided from this view.
157 Mem_space::tlb_flush_spaces(bool, Mem_space *, Mem_space *)
// Return the Mem_space currently active on `cpu`, straight from the
// per-CPU cache maintained by make_current().
165 Mem_space::current_mem_space(unsigned cpu) /// XXX: do not fix, deprecated, remove!
167 return _current.cpu(cpu);
// Replace the software attribute bits of the 4K page mapped at `virt`
// with `page_attribs`. Bails out (elided branch) if no valid 4K-level
// entry exists -- superpage entries (shift != PAGE_SHIFT) are refused.
172 Mem_space::set_attributes(Addr virt, unsigned page_attribs)
174 Pdir::Iter i = _dir->walk(virt);
176 if (!i.e->valid() || i.shift() != Config::PAGE_SHIFT)
// Clear all old attribute bits, then install exactly the requested set.
179 i.e->del_attr(Page::MAX_ATTRIBS);
180 i.e->add_attr(page_attribs);
186 * Destructor. Deletes the address space and unregisters it from
191 Mem_space::dir_shutdown()
193 // free all page tables we have allocated for this address space
194 // except the ones in kernel space which are always shared
// The destroy walk covers only [0, Kmem::mem_user_max) down to the
// page-table level (Depth - 1), returning page-table pages to the
// quota-tracked allocator; the kernel part of the directory is untouched.
195 _dir->alloc_cast<Mem_space_q_alloc>()
196 ->destroy(Virt_addr(0),
197 Virt_addr(Kmem::mem_user_max), Pdir::Depth - 1,
198 Mem_space_q_alloc(_quota, Mapped_allocator::allocator()));
// Map `phys` at `virt` with `page_attribs`, as a 4K page or one
// superpage. Returns a Status value: Insert_ok on a fresh mapping,
// Insert_warn_exists if an identical mapping (attribs included) already
// exists, Insert_warn_attrib_upgrade if only attribute bits were added,
// Insert_err_nomem if a page table could not be allocated, and
// Insert_err_exists if a conflicting mapping is in the way.
// `upgrade_ignore_size` suppresses the size/address conflict check.
// NOTE: several lines of the original (braces, the valid-entry guard
// before the upgrade path, the final return) are elided from this view.
204 Mem_space::v_insert(Phys_addr phys, Vaddr virt, Vsize size,
205 unsigned page_attribs, bool upgrade_ignore_size)
207 // insert page into page table
209 // XXX should modify page table using compare-and-swap
211 assert_kdb (size == Size(Config::PAGE_SIZE)
212 || size == Size(Config::SUPERPAGE_SIZE));
213 if (size == Size(Config::SUPERPAGE_SIZE))
// Superpage mappings require PSE support and superpage-aligned
// physical and virtual addresses.
215 assert (Cpu::have_superpages());
216 assert (virt.offset(Size(Config::SUPERPAGE_SIZE)) == 0);
217 assert (phys.offset(Size(Config::SUPERPAGE_SIZE)) == 0);
// Choose the page-table level, entry shift and extra PTE bits (PSE for
// superpages) according to the requested mapping size.
220 unsigned level = (size == Size(Config::SUPERPAGE_SIZE) ? (int)Pdir::Super_level : (int)Pdir::Depth);
221 unsigned shift = (size == Size(Config::SUPERPAGE_SIZE) ? Config::SUPERPAGE_SHIFT : Config::PAGE_SHIFT);
222 unsigned attrs = (size == Size(Config::SUPERPAGE_SIZE) ? (unsigned long)Pt_entry::Pse_bit : 0);
// Walk (and allocate, charged to _quota) down to the target level.
224 Pdir::Iter i = _dir->alloc_cast<Mem_space_q_alloc>()
226 Mem_space_q_alloc(_quota, Mapped_allocator::allocator()));
// Walk stopped above the target level without a valid entry: a page
// table could not be allocated.
228 if (EXPECT_FALSE(!i.e->valid() && i.shift() != shift))
229 return Insert_err_nomem;
// A valid entry of different size or different physical address is a
// genuine conflict (unless the caller asked to ignore it).
231 if (EXPECT_FALSE(!upgrade_ignore_size
232 && i.e->valid() && (i.shift() != shift || i.addr() != phys.value())))
233 return Insert_err_exists;
// Upgrade path (guard elided from view): attributes already all set?
237 if (EXPECT_FALSE((i.e->raw() | page_attribs) == i.e->raw()))
238 return Insert_warn_exists;
// Otherwise add the new attribute bits and re-apply protection.
240 i.e->add_attr(page_attribs);
241 page_protect (Addr(virt).value(), Size(size).value(), i.e->raw() & Page_all_attribs);
243 return Insert_warn_attrib_upgrade;
// Fresh mapping: write the complete PTE and notify the arch hook.
247 *i.e = Addr(phys).value() | Pt_entry::Valid | attrs | page_attribs;
248 page_map (Addr(phys).value(), Addr(virt).value(), Size(size).value(), page_attribs);
255 * Simple page-table lookup.
257 * @param virt Virtual address. This address does not need to be page-aligned.
258 * @return Physical address corresponding to a.
260 PUBLIC inline NEEDS ["paging.h"]
262 Mem_space::virt_to_phys(Address virt) const
264 return dir()->virt_to_phys(virt);
268 * Simple page-table lookup.
270 * @param virt Virtual address. This address does not need to be page-aligned.
271 * @return Physical address corresponding to a.
// Note: unlike virt_to_phys() this does not consult the page table at
// all -- it converts a kernel pmem (physical-memory window) address via
// the fixed Mem_layout translation.
273 PUBLIC inline NEEDS ["mem_layout.h"]
275 Mem_space::pmem_to_phys (Address virt) const
277 return Mem_layout::pmem_to_phys(virt);
281 * Simple page-table lookup.
283 * This method is similar to Space_context's virt_to_phys().
284 * The difference is that this version handles Sigma0's
285 * address space with a special case: For Sigma0, we do not
286 * actually consult the page table -- it is meaningless because we
287 * create new mappings for Sigma0 transparently; instead, we return the
288 * logically-correct result of physical address == virtual address.
290 * @param a Virtual address. This address does not need to be page-aligned.
291 * @return Physical address corresponding to a.
// NOTE(review): the visible body below does NOT contain the Sigma0
// identity special case described above -- it is either in lines elided
// from this view or the doc comment is stale; verify before relying on it.
295 Mem_space::virt_to_phys_s0(void *a) const
297 return dir()->virt_to_phys((Address)a);
// Look up the mapping covering `virt`. On success the out-parameters
// (each optional, may be null) receive the physical frame base, the
// mapping size, and the attribute bits. The invalid-entry early return
// between the two statements below is elided from this view.
302 Mem_space::v_lookup(Vaddr virt, Phys_addr *phys,
303 Size *size, unsigned *page_attribs)
305 Pdir::Iter i = _dir->walk(virt);
// Report the size of the region this entry covers even on failure.
306 if (size) *size = Size(1UL << i.shift());
// Mask off the low bits so a superpage entry yields its frame base.
311 if (phys) *phys = Addr(i.e->addr() & (~0UL << i.shift()));
312 if (page_attribs) *page_attribs = (i.e->raw() & Page_all_attribs);
// Remove or downgrade the mapping at `virt`. If `page_attribs` includes
// Page_user_accessible the whole mapping is removed; otherwise only the
// listed attribute bits are revoked. Returns (elided return path) the
// attribute bits that were actually set before the operation.
319 Mem_space::v_delete(Vaddr virt, Vsize size,
320 unsigned long page_attribs = Page_all_attribs)
324 // delete pages from page tables
325 assert (size == Size(Config::PAGE_SIZE) || size == Size(Config::SUPERPAGE_SIZE));
327 if (size == Size(Config::SUPERPAGE_SIZE))
329 assert (Cpu::have_superpages());
330 assert (!virt.offset(Size(Config::SUPERPAGE_SIZE)));
333 Pdir::Iter i = _dir->walk(virt);
335 if (EXPECT_FALSE (! i.e->valid()))
// In conservative mode a delete on a non-existent mapping drops into
// the kernel debugger; otherwise it is silently tolerated.
337 if (Config::conservative)
338 kdb_ke("v_delete did not find anything");
343 assert (! (i.e->raw() & Pt_entry::global())); // Cannot unmap shared ptables
// Capture which of the requested attribute bits were actually set.
345 ret = i.e->raw() & page_attribs;
347 if (! (page_attribs & Page_user_accessible))
349 // downgrade PDE (superpage) rights
350 i.e->del_attr(page_attribs);
351 page_protect (Addr(virt).value(), Size(size).value(), i.e->raw() & Page_all_attribs);
355 // delete PDE (superpage)
// Full removal path (entry clear elided from view); notify arch hook.
357 page_unmap (Addr(virt).value(), Size(size).value());
364 * \brief Free all memory allocated for this Mem_space.
365 * \pre Runs after the destructor!
368 Mem_space::~Mem_space()
// Return the page-directory page itself to the quota-tracked allocator;
// the page tables were already released by dir_shutdown().
373 Mapped_allocator::allocator()->q_free(_quota, Config::PAGE_SHIFT, _dir);
378 // --------------------------------------------------------------------
379 IMPLEMENTATION [ia32 || amd64]:
382 #include "l4_types.h"
384 #include "mem_unit.h"
385 #include "cpu_lock.h"
386 #include "lock_guard.h"
// Read the hardware page-directory base register (CR3) and translate
// the physical address back into a kernel-virtual Pdir pointer.
394 PRIVATE static inline NEEDS ["cpu.h", "kmem.h"]
396 Mem_space::current_pdir()
398 return reinterpret_cast<Pdir*>(Kmem::phys_to_virt(Cpu::get_pdbr()));
// Activate this address space: load its page directory into CR3 and
// record it in the per-CPU current-space cache.
401 IMPLEMENT inline NEEDS ["cpu.h", "kmem.h"]
403 Mem_space::make_current()
405 Cpu::set_pdbr((Mem_layout::pmem_to_phys(_dir)));
406 _current.cpu(current_cpu()) = this;
// Physical address of this space's page directory (the value that goes
// into CR3 for this space).
409 PUBLIC inline NEEDS ["kmem.h"]
411 Mem_space::phys_dir()
413 return Mem_layout::pmem_to_phys(_dir);
417 * The following functions are all no-ops on native ia32.
418 * Pages appear in an address space when the corresponding PTE is made
419 * ... unlike Fiasco-UX which needs these special tricks
// Empty bodies (braces elided from this view): updating the PTE alone is
// sufficient on native hardware.
424 Mem_space::page_map (Address, Address, Address, unsigned)
429 Mem_space::page_protect (Address, Address, unsigned)
434 Mem_space::page_unmap (Address, Address)
// Propagate the kernel mapping covering `addr` from the master kernel
// directory into this space's directory: both directories are walked at
// superpage level (the copy of the entry is elided from this view).
437 IMPLEMENT inline NEEDS ["kmem.h"]
438 void Mem_space::kmem_update (void *addr)
440 Pdir::Iter dir = _dir->walk(Addr::create((Address)addr), Pdir::Super_level);
441 Pdir::Iter kdir = Kmem::dir()->walk(Addr::create((Address)addr), Pdir::Super_level);
// Switch the CPU into this address space when coming from `from`.
// The kernel space itself is never activated (idle-thread context keeps
// the previous user directory loaded); the actual make_current() call
// and its guard are elided from this view.
445 IMPLEMENT inline NEEDS["kmem.h", "logdefs.h", Mem_space::current_pdir]
447 Mem_space::switchin_context(Mem_space *from)
449 // FIXME: this optimization breaks SMP task deletion, an idle thread
450 // may run on an already deleted page table
452 // never switch to kernel space (context of the idle thread)
453 if (dir() == Kmem::dir())
459 CNT_ADDR_SPACE_SWITCH;
// Copy the kernel part of the address space (everything from User_max
// upward) from the master kernel directory into this freshly created
// directory, charging any page-table allocations to this space's quota.
464 PRIVATE inline NOEXPORT
466 Mem_space::initial_sync()
468 _dir->alloc_cast<Mem_space_q_alloc>()
469 ->sync(Virt_addr(Mem_layout::User_max), Kmem::dir(),
470 Virt_addr(Mem_layout::User_max),
471 Virt_addr(-Mem_layout::User_max), Pdir::Super_level,
472 Mem_space_q_alloc(_quota, Mapped_allocator::allocator()));
475 // --------------------------------------------------------------------
476 IMPLEMENTATION [amd64]:
// Sign-extend the address value upward from bit 48 so it is in the
// canonical amd64 form (the return is elided from this view).
// NOTE(review): 48-bit virtual addressing canonicalizes on bit 47 of a
// byte address; whether 48 is correct here depends on the units of
// Page_number (bytes vs. pages) -- confirm before changing.
480 Mem_space::canonize(Page_number v)
482 if (v & Virt_addr(1UL << 48))
483 v = v | Virt_addr(~0UL << 48);
487 // --------------------------------------------------------------------
488 IMPLEMENTATION [ia32 || ux]:
492 Mem_space::canonize(Page_number v)