3 #include "entry_frame.h"
6 pagefault_entry(Address, Mword, Mword, Return_frame *);
// PPC32-specific extension of the generic Mem_space interface:
// per-address-space page-table (Pdir) management backed by the
// PowerPC hashed page table (HTAB) implementation below.
8 EXTENSION class Mem_space
12 typedef Pdir Dir_type;
14 /** Return status of v_insert. */
17 Insert_ok = 0, ///< Mapping was added successfully.
18 Insert_warn_exists, ///< Mapping already existed
19 Insert_warn_attrib_upgrade, ///< Mapping already existed, attribs upgrade
20 Insert_err_nomem, ///< Couldn't alloc new page table
21 Insert_err_exists ///< A mapping already exists at the target addr
24 /** Attribute masks for page mappings. */
// Attribute bits alias the hardware PTE bits (Pt_entry::*).
29 Page_writable = Pt_entry::Writable,
31 /// Page is noncacheable.
32 Page_noncacheable = Pt_entry::Noncacheable | Pt_entry::Write_through,
// Page is accessible from user mode.
34 Page_user_accessible = Pt_entry::User,
35 /// Page has been referenced
36 Page_referenced = Pt_entry::Referenced,
// Page has been written to.
38 Page_dirty = Pt_entry::Dirty,
39 Page_references = Page_referenced | Page_dirty,
40 /// A mask which contains all mask bits
41 Page_all_attribs = Page_writable | Page_noncacheable |
42 Page_user_accessible | Page_referenced | Page_dirty,
46 enum // Definitions for map_util
48 Need_insert_tlb_flush = 0,
49 Map_page_size = Config::PAGE_SIZE,
50 Page_shift = Config::PAGE_SHIFT,
51 Map_superpage_size = Config::SUPERPAGE_SIZE,
52 Map_max_address = Mem_layout::User_max,
53 Whole_space = MWORD_BITS,
// PPC32-specific: try to resolve a page fault via the HTAB (see
// implementation below).
57 bool try_htab_fault(Address virt);
58 Address lookup( void *);
65 //----------------------------------------------------------------------------
66 IMPLEMENTATION [ppc32]:
73 #include "mem_layout.h"
75 #include "std_macros.h"
79 #include "lock_guard.h"
// Per-CPU pointer to the Mem_space currently active on that CPU
// (read by current_mem_space(), set for CPU 0 in the kernel-space ctor).
83 Per_cpu<Mem_space *> DEFINE_PER_CPU Mem_space::_current;
// Constructor for a user address space: allocate one page (PAGE_SHIFT)
// from the quota q to serve as the page directory, then zero it.
// NOTE(review): intervening lines are not visible in this excerpt; the
// failure branch of the allocation presumably bails out early -- confirm.
86 Mem_space::Mem_space(Ram_quota *q) //, bool sync_kernel = true)
// Charge the directory page against this space's quota.
91 if (EXPECT_FALSE(!(b = Mapped_allocator::allocator()
92 ->q_alloc(_quota, Config::PAGE_SHIFT))))
95 _dir = static_cast<Dir_type*>(b);
96 _dir->clear(); // initialize to zero
// Constructor for the kernel address space: adopts an already existing
// page directory instead of allocating one, and registers this object
// as the kernel space and as the current space on CPU 0.
100 Mem_space::Mem_space(Ram_quota *q, Dir_type* pdir)
101 : _quota(q), _dir (pdir)
103 _kernel_space = this;
104 _current.cpu(0) = this;
// Translate L4 fpage rights being revoked into the page-attribute mask
// that must be flushed. Revoking read/execute strips every attribute;
// revoking only write presumably strips the writable bit -- the
// else-branch body is not visible in this excerpt, confirm in full file.
110 Mem_space::xlate_flush(unsigned char rights)
// Referenced/dirty bits are always collected on a flush.
112 Mword a = Page_references;
113 if (rights & L4_fpage::RX)
114 a |= Page_all_attribs;
115 else if (rights & L4_fpage::W)
// True if revoking the given rights removes the mapping entirely,
// i.e. the read/execute right is being taken away.
124 Mem_space::is_full_flush(unsigned char rights)
126 return rights & L4_fpage::RX;
// Translate removed page-attribute bits (referenced/dirty) back into
// L4 fpage rights for the caller. NOTE(review): the branch bodies and
// the return statement are not visible in this excerpt.
131 Mem_space::xlate_flush_result(Mword attribs)
134 if (attribs & Page_referenced)
137 if (attribs & Page_dirty)
// Whether the CPU supports superpage (large-page) mappings;
// delegates to the CPU feature query.
143 PUBLIC inline NEEDS["cpu.h"]
145 Mem_space::has_superpages()
147 return Cpu::have_superpages();
150 //we flush tlb in htab implementation
151 PUBLIC static inline NEEDS["mem_unit.h"]
// Intentionally a no-op on ppc32: TLB invalidation is performed by the
// HTAB manipulation code (see comment above), so the generic flush call
// is disabled here.
153 Mem_space::tlb_flush(bool = false)
155 //Mem_unit::tlb_flush();
// Replace the attribute bits of an existing 4KB mapping at virt:
// clear all attributes, then install page_attribs. Does nothing useful
// when no valid page-granular entry exists (the early-exit body between
// these lines is not visible in this excerpt).
162 Mem_space::set_attributes(Address virt, unsigned page_attribs)
165 Pdir::Iter i = _dir->walk(virt);
// Only valid 4KB leaf entries can be modified here.
167 if (!i.e->valid() || i.shift() != Config::PAGE_SHIFT)
170 i.e->del_attr(Page::MAX_ATTRIBS);
171 i.e->add_attr(page_attribs);
180 * Destructor. Deletes the address space and unregisters it from
// Tear down this address space's page tables: destroy() walks the user
// region only (0 .. mem_user_max), returning the table pages to the
// quota-tracked allocator. Kernel-space tables are shared and kept.
185 Mem_space::dir_shutdown()
188 // free ldt memory if it was allocated
191 // free all page tables we have allocated for this address space
192 // except the ones in kernel space which are always shared
194 _dir->alloc_cast<Mem_space_q_alloc>()
195 ->destroy(0, Kmem::mem_user_max, Pdir::Depth - 1,
196 Mem_space_q_alloc(_quota, Mapped_allocator::allocator()));
// Return the Mem_space that is currently active on the given CPU
// (reads the per-CPU _current pointer).
203 Mem_space::current_mem_space(unsigned cpu) /// XXX: do not fix, deprecated, remove!
205 return _current.cpu(cpu);
208 /** Insert a page-table entry, or upgrade an existing entry with new
210 @param phys Physical address (page-aligned).
211 @param virt Virtual address for which an entry should be created.
212 @param size Size of the page frame -- 4KB or 4MB.
213 @param page_attribs Attributes for the mapping (see
214 Mem_space::Page_attrib).
215 @return Insert_ok if a new mapping was created;
216 Insert_warn_exists if the mapping already exists;
217 Insert_warn_attrib_upgrade if the mapping already existed but
218 attributes could be upgraded;
219 Insert_err_nomem if the mapping could not be inserted because
220 the kernel is out of memory;
221 Insert_err_exists if the mapping could not be inserted because
222 another mapping occupies the virtual-address
224 @pre phys and virt need to be size-aligned according to the size argument.
228 Mem_space::v_insert(Phys_addr phys, Vaddr virt, Vsize size,
229 unsigned page_attribs, bool /*upgrade_ignore_size*/)
// Only 4KB pages and superpages are supported on this architecture.
231 assert(size == Size(Config::PAGE_SIZE)
232 || size == Size(Config::SUPERPAGE_SIZE))
234 printf("v_insert: phys %08lx virt %08lx (%s) %p\n", phys, virt,
235 page_attribs & Page_writable?"rw":"ro", this);*/
// Convert the generic attributes into HTAB-entry format, then insert
// the entry into the software cache page table (HTAB proper is filled
// lazily on fault, see try_htab_fault).
236 Pte_base e(phys.value());
237 unsigned attribs = e.to_htab_entry(page_attribs);
239 Status status = v_insert_cache(&e, Virt_addr(virt).value(),
240 Virt_size(size).value(), attribs);
// Try to resolve a page fault at virt by installing the corresponding
// entry into the hashed page table (HTAB). Looks the address up in the
// software cache page table; on HTAB overflow the evicted entry is
// written back into the cache so it can be re-faulted later.
// NOTE(review): many interior lines are missing from this excerpt; the
// return paths between the visible statements are not shown.
246 Mem_space::try_htab_fault(Address virt)
250 Address pte_ptr, phys;
251 Dir_type *dir = _dir;
// Kernel addresses are never resolved here.
253 if(virt >= Mem_layout::User_max)
// First check whether a superpage covers this address.
256 Pdir::Iter i = dir->walk(Addr(virt), Pdir::Super_level);
261 super = i.e->is_super_page();
263 i = dir->walk(Addr(virt));
265 if(!i.e->is_htab_entry() && !super)
// For a superpage without a 4KB entry, derive the physical address of
// the faulting 4KB frame from the superpage's base.
268 if(super && !i.e->valid())
270 i = dir->walk(Addr(virt & Config::SUPERPAGE_MASK));
271 phys = Pte_htab::pte_to_addr(i.e);
272 phys += (virt & Config::PAGE_MASK) - (phys & Config::PAGE_MASK);
// HTAB insertion must not race with other CPUs/interrupts.
280 Lock_guard<Cpu_lock> guard(&cpu_lock);
282 status = v_insert_htab(phys, virt, &pte_ptr, &evict);
284 // something had to be replaced update in cache-page table
285 if(EXPECT_FALSE(status == Insert_err_nomem))
287 Pte_base e(evict.phys);
289 //printf("EVICTING: virt: %lx phys: %lx\n", evict.virt, e.raw());
// Write the evicted mapping back to its cache page table so a later
// fault on it can re-install it into the HTAB.
290 status = v_insert_cache(&e, evict.virt, Config::PAGE_SIZE, 0, evict.dir);
295 if(EXPECT_FALSE(status != Insert_ok))
298 // set pointer in cache
303 status = v_insert_cache(&e, virt, Config::PAGE_SIZE, 0, dir);
305 if(EXPECT_FALSE(status != Insert_ok))
312 * Simple page-table lookup.
314 * @param virt Virtual address. This address does not need to be page-aligned.
315 * @return Physical address corresponding to a.
317 PUBLIC inline NEEDS ["paging.h"]
319 Mem_space::virt_to_phys (Address virt) const
// Delegates to the cached page directory's translation.
321 return dir()->virt_to_phys(virt);
// Sigma0 variant of virt_to_phys: accepts a raw pointer and performs
// the same directory-based translation.
326 Mem_space::virt_to_phys_s0(void *a) const
328 return dir()->virt_to_phys((Address)a);
// Translate a kernel (pmem) virtual address to its physical address.
// NOTE(review): the body is not visible in this excerpt.
333 Mem_space::pmem_to_phys (Address virt) const
340 /** Look up a page-table entry.
341 @param virt Virtual address for which we try the look up.
342 @param phys Meaningful only if we find something (and return true).
343 If not 0, we fill in the physical address of the found page
345 @param page_attribs Meaningful only if we find something (and return true).
346 If not 0, we fill in the page attributes for the found page
347 frame (see Mem_space::Page_attrib).
348 @param size If not 0, we fill in the size of the page-table slot. If an
349 entry was found (and we return true), this is the size
350 of the page frame. If no entry was found (and we
351 return false), this is the size of the free slot. In
352 either case, it is either 4KB or 4MB.
353 @return True if an entry was found, false otherwise.
357 Mem_space::v_lookup(Vaddr virt, Phys_addr *phys = 0, Size *size = 0,
358 unsigned *page_attribs = 0)
// First probe at superpage level to learn the slot size.
360 Pdir::Iter i = _dir->walk(virt, Pdir::Super_level);
362 if(size) *size = Size(1UL << i.shift());
// Re-walk at the granularity the entry actually has (4MB vs 4KB).
367 unsigned shift = i.e->is_super_page() ? Config::SUPERPAGE_SHIFT
368 : Config::PAGE_SHIFT;
369 unsigned mask = (~0UL << shift);
371 i = _dir->walk(virt & Addr(mask));
376 if(size) *size = Size(1UL << i.shift());
378 if(phys || page_attribs)
// NOTE(review): this reads phys->value() before pte_lookup fills it;
// presumably pte_lookup uses it only as an out-slot -- confirm, a null
// phys with non-null page_attribs would dereference 0 here.
380 Address addr = phys->value();
381 Pte_htab::pte_lookup(i.e, &addr, page_attribs);
382 *phys = Phys_addr(addr);
385 *page_attribs = to_kernel_fmt(*page_attribs, i.e->is_htab_entry());
390 /** v_lookup wrapper */
// Convenience wrapper: return the physical address mapped at a.
// NOTE(review): the success return and the not-found value are not
// visible in this excerpt.
393 Mem_space::lookup(void *a)
397 if(!v_lookup(Vaddr((Address)a), &phys))
403 /** Delete page-table entries, or some of the entries' attributes. This
404 function works for one or multiple mappings (in contrast to v_insert!).
405 @param virt Virtual address of the memory region that should be changed.
406 @param size Size of the memory region that should be changed.
407 @param page_attribs If nonzero, delete only the given page attributes.
408 Otherwise, delete the whole entries.
409 @return Combined (bit-ORed) page attributes that were removed. In
410 case of errors, ~Page_all_attribs is additionally bit-ORed in.
414 Mem_space::v_delete(Vaddr virt, Vsize size,
415 unsigned long page_attribs = Page_all_attribs)
418 // delete pages from page tables
419 //printf("v_delete: %lx dir: %p\n", virt, _dir);
// Only whole 4KB pages or 4MB superpages can be deleted.
420 assert (size == Size(Config::PAGE_SIZE)
421 || size == Size(Config::SUPERPAGE_SIZE));
423 unsigned shift = (size == Virt_size(Config::SUPERPAGE_SIZE)) ?
424 Config::SUPERPAGE_SHIFT : Config::PAGE_SHIFT;
// Align the start address down to the mapping's granularity.
426 Address offs = Virt_addr(virt).value() & (~0UL << shift);
427 Pdir::Iter i = _dir->walk(Addr(offs));
428 Pt_entry *e = nonull_static_cast<Pt_entry*>(i.e);
// Iterate over all page-table slots covered by the region.
// NOTE(review): loop header lines are partially missing here; the
// offs-based pointer arithmetic below looks suspicious (e + offs with
// offs in bytes over a Pt_entry*) -- verify against the full file.
430 offs < ((Virt_size(size).value() / Config::PAGE_SIZE) *sizeof(Mword));
431 offs += sizeof(Mword))
433 e = reinterpret_cast<Pt_entry*>(e + offs);
// HTAB-resident entries must be removed from the hash table; cache
// entries are removed from the cache page table.
439 if(!e->is_htab_entry())
441 ret = v_delete_htab(e->raw(), page_attribs);
443 if(page_attribs & Page_user_accessible)
444 v_delete_cache(e, page_attribs);
447 ret = v_delete_cache(e, page_attribs);
// When a 4KB delete removed user access, also check for an enclosing
// superpage entry to clean up.
450 if(size != Virt_size(Config::SUPERPAGE_SIZE) && !(page_attribs & Page_user_accessible))
453 //check for and delete super page
454 i = _dir->walk(virt, Pdir::Super_level);
461 /** we assume that v_lookup was called on kernel_space beforehand */
// Synchronize a kernel mapping into this address space.
// NOTE(review): the body is not visible in this excerpt.
464 Mem_space::kmem_update(void * /* *addr */)
471 Mem_space::canonize(Page_number v)