INTERFACE [ia32 || ux || amd64]:
EXTENSION class Mem_space
{
public:
  typedef Pdir Dir_type;
  /** Return status of v_insert. */
  enum Status
  {
    Insert_ok = 0,              ///< Mapping was added successfully.
    Insert_warn_exists,         ///< Mapping already existed.
    Insert_warn_attrib_upgrade, ///< Mapping already existed, attributes were upgraded.
    Insert_err_nomem,           ///< Couldn't allocate a new page table.
    Insert_err_exists           ///< A mapping already exists at the target address.
  };
  /** Attribute masks for page mappings (combined as in the sketch after this class). */
  enum Page_attrib
  {
    /// Page is writable.
    Page_writable = Pt_entry::Writable,
    /// Page is noncacheable.
    Page_noncacheable = Pt_entry::Noncacheable | Pt_entry::Write_through,
    /// Page is accessible from user mode.
    Page_user_accessible = Pt_entry::User,
    /// Page has been referenced.
    Page_referenced = Pt_entry::Referenced,
    /// Page is dirty.
    Page_dirty = Pt_entry::Dirty,
    Page_references = Page_referenced | Page_dirty,
    /// A mask that contains all attribute bits.
    Page_all_attribs = Page_writable | Page_noncacheable |
                       Page_user_accessible | Page_referenced | Page_dirty,
  };
  enum // Definitions for map_util
  {
    Need_insert_tlb_flush = 0,
    Map_page_size = Config::PAGE_SIZE,
    Page_shift = Config::PAGE_SHIFT,
    Whole_space = MWORD_BITS,
  };
  void page_map(Address phys, Address virt, Address size, Attr page_attribs);
  void page_unmap(Address virt, Address size);
  void page_protect(Address virt, Address size, unsigned page_attribs);

protected:
  // DATA
  Dir_type *_dir;
};
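// Illustrative sketch, not part of the original file: how the Page_attrib
// masks above are meant to be combined. Page_all_attribs extracts exactly
// the software-visible attribute bits from a raw PTE value; the function
// name and its raw-PTE parameter are hypothetical.
static inline Mword
example_attribs_of(Mword raw_pte)
{
  // Everything outside the attribute mask (the physical frame address,
  // reserved bits) is discarded.
  return raw_pte & Mem_space::Page_all_attribs;
}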
//----------------------------------------------------------------------------
IMPLEMENTATION [ia32 || ux || amd64]:

#include "mem_layout.h"
#include "std_macros.h"
PUBLIC explicit inline
Mem_space::Mem_space(Ram_quota *q) : _quota(q), _dir(0) {}
PUBLIC inline NEEDS["kmem_alloc.h"]
bool
Mem_space::initialize()
{
  void *b;
  if (EXPECT_FALSE(!(b = Kmem_alloc::allocator()
                         ->q_alloc(_quota, Config::PAGE_SHIFT))))
    return false;

  _dir = static_cast<Dir_type*>(b);
  _dir->clear(false); // initialize to zero
  return true;        // success
}
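// Illustrative sketch, not part of the original file: the intended two-phase
// construction. The object only becomes usable once initialize() has charged
// the page-directory allocation against the quota; the caller below is
// hypothetical.
static bool
example_create_space(Ram_quota *q)
{
  Mem_space s(q);
  if (!s.initialize())   // q_alloc() failed: quota exhausted or out of memory
    return false;        // no page directory was allocated, s stays unusable
  // ... use the space (v_insert() etc.); it is torn down again when s goes
  // out of scope, which is fine for this sketch only.
  return true;
}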
PUBLIC
Mem_space::Mem_space(Ram_quota *q, Dir_type* pdir)
: _quota(q), _dir(pdir)
{
  _kernel_space = this;
  _current.cpu(Cpu_number::boot_cpu()) = this;
}
PUBLIC static inline
bool
Mem_space::is_full_flush(L4_fpage::Rights rights)
{
  return rights & L4_fpage::Rights::R();
}
111 PUBLIC inline NEEDS["cpu.h"]
113 Mem_space::has_superpages()
115 return Cpu::have_superpages();
119 PUBLIC inline NEEDS["mem_unit.h"]
121 Mem_space::tlb_flush(bool = false)
123 if (_current.current() == this)
124 Mem_unit::tlb_flush();
PUBLIC static inline
Mem_space *
Mem_space::current_mem_space(Cpu_number cpu)
{
  return _current.cpu(cpu);
}
PUBLIC inline
bool
Mem_space::set_attributes(Virt_addr virt, Attr page_attribs)
{
  auto i = _dir->walk(virt);

  if (!i.is_valid())
    return false;

  i.set_attribs(page_attribs);
  return true;
}
/**
 * Destructor helper. Deletes the page tables of this address space and
 * releases their memory back to the quota.
 */
PRIVATE
void
Mem_space::dir_shutdown()
{
  // Free all page tables we have allocated for this address space,
  // except the ones in kernel space, which are always shared.
  _dir->destroy(Virt_addr(0UL),
                Virt_addr(Mem_layout::User_max), 0, Pdir::Depth,
                Kmem_alloc::q_allocator(_quota));

  // Free all unshared page-table levels for the kernel part of the space.
  _dir->destroy(Virt_addr(Mem_layout::User_max + 1),
                Virt_addr(~0UL), 0, Pdir::Super_level,
                Kmem_alloc::q_allocator(_quota));
}
IMPLEMENT inline
Mem_space::Status
Mem_space::v_insert(Phys_addr phys, Vaddr virt, Page_order size,
                    Attr page_attribs)
{
  // Insert page into the page table.

  // XXX should modify page table using compare-and-swap

  assert (cxx::get_lsb(Phys_addr(phys), size) == 0);
  assert (cxx::get_lsb(Virt_addr(virt), size) == 0);

  // Find the page-table level whose page size matches the requested order.
  int level;
  for (level = 0; level <= Pdir::Depth; ++level)
    if (Page_order(Pdir::page_order_for_level(level)) <= size)
      break;

  auto i = _dir->walk(virt, level, false,
                      Kmem_alloc::q_allocator(_quota));

  if (EXPECT_FALSE(!i.is_valid() && i.level != level))
    return Insert_err_nomem;

  if (EXPECT_FALSE(i.is_valid()
                   && (i.level != level || Phys_addr(i.page_addr()) != phys)))
    return Insert_err_exists;

  if (i.is_valid())
    {
      // Same frame already mapped: at most an attribute upgrade is needed.
      if (EXPECT_FALSE(!i.add_attribs(page_attribs)))
        return Insert_warn_exists;

      page_protect(Virt_addr::val(virt), Address(1) << Page_order::val(size),
                   *i.pte & Page_all_attribs);

      return Insert_warn_attrib_upgrade;
    }

  i.create_page(phys, page_attribs);
  page_map(Virt_addr::val(phys), Virt_addr::val(virt),
           Address(1) << Page_order::val(size), page_attribs);

  return Insert_ok;
}
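// Illustrative sketch, not part of the original file: how a caller might act
// on the Status values defined at the top of this file. The wrapper function
// and its variable names are hypothetical.
static bool
example_map_page(Mem_space *ms, Mem_space::Phys_addr pa, Mem_space::Vaddr va,
                 Mem_space::Attr attr)
{
  switch (ms->v_insert(pa, va, Mem_space::Page_order(Config::PAGE_SHIFT), attr))
    {
    case Mem_space::Insert_ok:                  // new PTE was written
    case Mem_space::Insert_warn_exists:         // identical mapping was there
    case Mem_space::Insert_warn_attrib_upgrade: // only attributes were widened
      return true;
    case Mem_space::Insert_err_nomem:           // page-table allocation failed
    case Mem_space::Insert_err_exists:          // a conflicting mapping exists
    default:
      return false;
    }
}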
IMPLEMENT inline
void
Mem_space::v_set_access_flags(Vaddr virt, L4_fpage::Rights access_flags)
{
  auto i = _dir->walk(virt);

  if (EXPECT_FALSE(!i.is_valid()))
    return;

  unsigned page_attribs = 0;

  if (access_flags & L4_fpage::Rights::R())
    page_attribs |= Page_referenced;
  if (access_flags & L4_fpage::Rights::W())
    page_attribs |= Page_dirty;

  i.add_attribs(page_attribs);
}
/**
 * Simple page-table lookup.
 *
 * @param virt Virtual address. This address does not need to be page-aligned.
 * @return Physical address corresponding to virt.
 */
PUBLIC inline NEEDS["paging.h"]
Address
Mem_space::virt_to_phys(Address virt) const
{
  return dir()->virt_to_phys(virt);
}
/**
 * Translate a kernel pmem address to its physical address.
 *
 * @param virt Virtual address inside the kernel's physical-memory mapping;
 *             it does not need to be page-aligned.
 * @return Physical address corresponding to virt.
 */
PUBLIC inline NEEDS["mem_layout.h"]
Address
Mem_space::pmem_to_phys(Address virt) const
{
  return Mem_layout::pmem_to_phys(virt);
}
/**
 * Simple page-table lookup.
 *
 * This method is similar to Space_context's virt_to_phys().
 * The difference is that this version handles Sigma0's address space with
 * a special case: for Sigma0, we do not actually consult the page table,
 * because we create new mappings for Sigma0 transparently; instead, we
 * return the logically correct result of physical address == virtual
 * address.
 *
 * @param a Virtual address. This address does not need to be page-aligned.
 * @return Physical address corresponding to a.
 */
PUBLIC inline
Address
Mem_space::virt_to_phys_s0(void *a) const
{
  return dir()->virt_to_phys((Address)a);
}
IMPLEMENT
bool
Mem_space::v_lookup(Vaddr virt, Phys_addr *phys,
                    Page_order *order, Attr *page_attribs)
{
  auto i = _dir->walk(virt);
  if (order) *order = Page_order(i.page_order());

  if (!i.is_valid())
    return false;

  if (phys) *phys = Phys_addr(i.page_addr());
  if (page_attribs) *page_attribs = i.attribs();

  return true;
}
IMPLEMENT
L4_fpage::Rights
Mem_space::v_delete(Vaddr virt, Page_order size, L4_fpage::Rights page_attribs)
{
  assert (cxx::get_lsb(Virt_addr(virt), size) == 0);

  auto i = _dir->walk(virt);
  if (EXPECT_FALSE(!i.is_valid()))
    return L4_fpage::Rights(0);

  assert (!(*i.pte & Pt_entry::global())); // Cannot unmap shared pages.
  L4_fpage::Rights ret = i.access_flags();

  if (!(page_attribs & L4_fpage::Rights::R()))
    {
      // Downgrade PDE (superpage) rights.
      i.del_rights(page_attribs);
      page_protect(Virt_addr::val(virt), Address(1) << Page_order::val(size),
                   *i.pte & Page_all_attribs);
    }
  else
    {
      // Delete PDE (superpage).
      i.clear();
      page_unmap(Virt_addr::val(virt), Address(1) << Page_order::val(size));
    }

  return ret;
}
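// Illustrative sketch, not part of the original file: the two modes of
// v_delete() seen above. A rights mask without R() only strips the named
// rights in place; a mask including R() removes the mapping entirely. The
// wrapper is hypothetical.
static L4_fpage::Rights
example_revoke_write(Mem_space *ms, Mem_space::Vaddr va, Mem_space::Page_order o)
{
  // W() alone: the PTE is downgraded, the page stays mapped read-only.
  return ms->v_delete(va, o, L4_fpage::Rights::W());
}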
/**
 * \brief Free all memory allocated for this Mem_space.
 */
PUBLIC
Mem_space::~Mem_space()
{
  if (_dir)
    {
      dir_shutdown();
      Kmem_alloc::allocator()->q_free(_quota, Config::PAGE_SHIFT, _dir);
    }
}
// --------------------------------------------------------------------
IMPLEMENTATION [ia32 || amd64]:

#include "l4_types.h"
#include "mem_unit.h"
#include "cpu_lock.h"
#include "lock_guard.h"
368 IMPLEMENT inline NEEDS ["cpu.h", "kmem.h"]
370 Mem_space::make_current()
372 Cpu::set_pdbr((Mem_layout::pmem_to_phys(_dir)));
373 _current.cpu(current_cpu()) = this;
376 PUBLIC inline NEEDS ["kmem.h"]
378 Mem_space::phys_dir()
380 return Mem_layout::pmem_to_phys(_dir);
/*
 * The following functions are all no-ops on native ia32: pages appear in an
 * address space as soon as the corresponding PTE is written. Fiasco-UX, in
 * contrast, needs these hooks to keep the host mappings in sync.
 */
IMPLEMENT inline
void Mem_space::page_map(Address, Address, Address, Attr) {}

IMPLEMENT inline
void Mem_space::page_protect(Address, Address, unsigned) {}

IMPLEMENT inline
void Mem_space::page_unmap(Address, Address) {}
404 IMPLEMENT inline NEEDS["kmem.h", "logdefs.h"]
406 Mem_space::switchin_context(Mem_space *from)
408 // FIXME: this optimization breaks SMP task deletion, an idle thread
409 // may run on an already deleted page table
411 // never switch to kernel space (context of the idle thread)
412 if (dir() == Kmem::dir())
418 CNT_ADDR_SPACE_SWITCH;
PUBLIC
bool
Mem_space::sync_kernel()
{
  return _dir->sync(Virt_addr(Mem_layout::User_max + 1), Kmem::dir(),
                    Virt_addr(Mem_layout::User_max + 1),
                    Virt_size(-(Mem_layout::User_max + 1)), Pdir::Super_level,
                    Kmem_alloc::q_allocator(_quota));
}
// --------------------------------------------------------------------
IMPLEMENTATION [amd64]:

PROTECTED static inline
Page_number
Mem_space::canonize(Page_number v)
{
  if (v & Page_number(Virt_addr(1UL << 48)))
    v = v | Page_number(Virt_addr(~0UL << 48));
  return v;
}
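// Worked example, not part of the original file: canonize() sign-extends
// bit 48. In plain Mword arithmetic (wrapper types stripped for clarity):
//
//   v                = 0x000123456789a000  (bit 48 set)
//   ~0UL << 48       = 0xffff000000000000
//   v | (~0UL << 48) = 0xffff23456789a000  (canonical upper-half address)
//
// A value with bit 48 clear passes through unchanged.
static_assert((0x000123456789a000UL | (~0UL << 48)) == 0xffff23456789a000UL,
              "canonize() fills bits 48..63 when bit 48 is set");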
PUBLIC static
void
Mem_space::init_page_sizes()
{
  add_page_size(Page_order(Config::PAGE_SHIFT));

  if (Cpu::cpus.cpu(Cpu_number::boot_cpu()).superpages())
    add_page_size(Page_order(21)); // 2 MB pages

  if (Cpu::cpus.cpu(Cpu_number::boot_cpu()).ext_8000_0001_edx() & (1UL << 26))
    add_page_size(Page_order(30)); // 1 GB pages
}
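// Sanity notes, not part of the original file: a Page_order is the log2 of
// the mapping size, so the orders registered above mean
//   12 -> 4 KB,  21 -> 2 MB,  30 -> 1 GB,
// and EDX bit 26 of CPUID leaf 0x80000001 is the 1-GB-pages feature flag.
static_assert((1UL << 21) == 2048UL * 1024, "order 21 is a 2 MB page");
static_assert((1UL << 30) == 1024UL * 1024 * 1024, "order 30 is a 1 GB page");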
// --------------------------------------------------------------------
IMPLEMENTATION [ia32 || ux]:

PROTECTED static inline
Page_number
Mem_space::canonize(Page_number v)
{
  // All 32-bit addresses are canonical; nothing to do.
  return v;
}

PUBLIC static
void
Mem_space::init_page_sizes()
{
  add_page_size(Page_order(Config::PAGE_SHIFT));

  if (Cpu::cpus.cpu(Cpu_number::boot_cpu()).superpages())
    add_page_size(Page_order(22)); // 4 MB pages
}