3 #include "kmem.h" // for "_unused_*" virtual memory regions
4 #include "member_offs.h"
10 EXTENSION class Mem_space
// ARM-specific extension of Mem_space: binds the address space to the
// ARM Page_table backend and defines the architecture's insert-status
// codes and abstract page-attribute masks.
15 typedef Page_table Dir_type; // the page directory is an ARM Page_table
17 /** Return status of v_insert. */
20 Insert_ok = Page_table::E_OK, ///< Mapping was added successfully.
21 Insert_err_nomem = Page_table::E_NOMEM, ///< Couldn't alloc new page table
22 Insert_err_exists = Page_table::E_EXISTS, ///< A mapping already exists at the target addr
23 Insert_warn_attrib_upgrade = Page_table::E_UPGRADE, ///< Mapping already existed, attribs upgrade
24 Insert_warn_exists, ///< Mapping already existed
28 /** Attribute masks for page mappings. */
33 Page_writable = Mem_page_attr::Write,
34 Page_user_accessible = Mem_page_attr::User,
35 /// Page is noncacheable.
36 Page_noncacheable = Page::NONCACHEABLE,
37 Page_cacheable = Page::CACHEABLE,
38 /// it's a user page (USER_NO | USER_RO = USER_RW).
39 /// A mask which contains all mask bits
40 Page_all_attribs = Page_user_accessible | Page_writable | Page_cacheable,
48 enum // Definitions for map_util
// Need_insert_tlb_flush = 1: callers of v_insert must expect an explicit
// TLB flush to be necessary on this architecture.
50 Need_insert_tlb_flush = 1,
51 Map_page_size = Config::PAGE_SIZE,
52 Page_shift = Config::PAGE_SHIFT,
53 Map_superpage_size = Config::SUPERPAGE_SIZE,
54 Map_max_address = Mem_layout::User_max, // mappings are confined to the user area
// Register the kernel's Mem_space; consulted by switchin_context().
60 static void kernel_space(Mem_space *);
61 static bool has_superpages() { return true; } // ARM page tables support superpages
69 //---------------------------------------------------------------------------
79 #include "mapped_alloc.h"
// Translate L4 fpage unmap rights into the mask of abstract page
// attributes to revoke. Revoking read/execute strips all attributes;
// a pure write revocation takes the else-branch.
// NOTE(review): the W-only branch body is on lines not visible here --
// presumably it adds only Page_writable; confirm against the full file.
89 Mem_space::xlate_flush(unsigned char rights)
91 Mword a = Page_references;
92 if (rights & L4_fpage::RX)
93 a |= Page_all_attribs;
94 else if (rights & L4_fpage::W)
// Translate attribute bits returned by a flush operation back into the
// generic referenced/dirty indication (branch bodies not visible here).
101 Mem_space::xlate_flush_result(Mword attribs)
104 if (attribs & Page_referenced)
107 if (attribs & Page_dirty)
116 PUBLIC inline NEEDS["mem_unit.h"]
// Flush TLB entries for this space. In the common case nothing happens:
// the v_* methods already perform ASID-local flushes themselves. Only a
// forced flush with a valid ASID touches the hardware here.
118 Mem_space::tlb_flush(bool force = false)
121 Mem_unit::tlb_flush(); // full TLB flush (guarding condition is on a line not shown)
122 else if (force && c_asid())
123 Mem_unit::tlb_flush(c_asid()); // flush only this space's ASID
125 // else do nothing, we manage ASID local flushes in v_* already
126 // Mem_unit::tlb_flush();
// Store a back-pointer from the page directory to this Mem_space so the
// owning space can be recovered from the currently active page table
// (see current_mem_space()). The pointer is stashed in an *invalid* PTE
// at the reserved Space_index slot; the alignment assert guarantees the
// low two bits of the pointer are free for the PTE's status encoding.
131 Mem_space::enable_reverse_lookup()
133 // Store reverse pointer to Space in page directory
134 assert(((unsigned long)this & 0x03) == 0);
135 Pte pte = _dir->walk((void*)Mem_layout::Space_index,
136 Config::SUPERPAGE_SIZE, false, 0 /*does never allocate*/);
138 pte.set_invalid((unsigned long)this, false);
// Recover the Mem_space that owns the page table currently active on
// 'cpu' by reading back the raw pointer that enable_reverse_lookup()
// stored in the invalid PTE at the Space_index slot.
142 Mem_space *Mem_space::current_mem_space(unsigned cpu)
144 Pte pte = Page_table::current(cpu)->walk((void*)Mem_layout::Space_index,
145 Config::SUPERPAGE_SIZE, false, 0 /*does never allocate*/);
146 return reinterpret_cast<Mem_space*>(pte.raw());
// Return the page directory currently active on this CPU.
151 Page_table *Mem_space::current_pdir()
153 return Page_table::current();
156 IMPLEMENT inline NEEDS ["kmem.h", Mem_space::c_asid, Mem_space::need_tlb_flush]
// Switch the addressing context over to this space (thread switch path).
157 void Mem_space::switchin_context(Mem_space *from)
160 // never switch to kernel space (context of the idle thread)
161 if (this == kernel_space())
// On a pending TLB flush, the IPC-window region (4 superpages starting
// at Kmem::ipc_window(0)) is invalidated in the directory.
// NOTE(review): the branch bodies and the invalidate() trailing
// arguments are on lines not visible here -- confirm before relying on
// the exact flush semantics.
167 else if (need_tlb_flush())
170 _dir->invalidate((void*)Kmem::ipc_window(0), Config::SUPERPAGE_SIZE * 4,
// Register the kernel's Mem_space in the static _kernel_space pointer;
// switchin_context() uses it to avoid switching into the kernel context.
178 void Mem_space::kernel_space( Mem_space *_k_space )
180 _kernel_space = _k_space;
// Insert a new mapping or upgrade the attributes of an existing one.
//
// @param phys          physical address to map
// @param virt          virtual address of the mapping
// @param size          mapping size (page or superpage)
// @param page_attribs  abstract attribute bits (Page_* above)
// @param upgrade_ignore_size  permit an attribute upgrade without
//        requiring size and phys of the existing mapping to match
// @return one of the Insert_* status codes
188 Mem_space::v_insert(Phys_addr phys, Vaddr virt, Vsize size, unsigned page_attribs,
189 bool upgrade_ignore_size)
// Keep the hardware coherent only if this directory is the active one.
191 bool flush = Page_table::current() == _dir;
192 Pte pte = _dir->walk((void*)virt.value(), size.value(), flush, ram_quota());
// A pre-existing mapping with different size or target is a hard error;
195 if (EXPECT_FALSE(!upgrade_ignore_size
196 && (pte.size() != size.value() || pte.phys() != phys.value())))
197 return Insert_err_exists;
// an identical mapping (same abstract attributes) is only a warning.
198 if (pte.attr().get_abstract() == page_attribs)
199 return Insert_warn_exists;
// Fresh mapping: build the attributes from scratch.
203 Mem_page_attr a(Page::Local_page);
204 a.set_abstract(page_attribs);
205 pte.set(phys.value(), size.value(), a, flush);
// Upgrade path: OR the new abstract attributes into the existing ones.
209 Mem_page_attr a = pte.attr();
210 a.set_abstract(a.get_abstract() | page_attribs);
211 pte.set(phys.value(), size.value(), a, flush);
// Drop the stale TLB entry for this address in our ASID.
214 Mem_unit::tlb_flush((void*)virt.value(), c_asid());
216 return Insert_warn_attrib_upgrade;
221 * Simple page-table lookup.
223 * @param virt Virtual address. This address does not need to be page-aligned.
224 * @return Physical address corresponding to virt.
226 PUBLIC inline NEEDS ["paging.h"]
228 Mem_space::virt_to_phys (Address virt) const
230 Pte pte = _dir->walk((void*)virt, 0, false, 0 /*does never allocate*/);
// NOTE(review): the failure-branch body is on a line not shown here --
// presumably an invalid-address sentinel is returned; confirm.
231 if (EXPECT_FALSE(!pte.valid()))
234 return (Address)pte.phys((void*)virt); // phys() re-applies the page offset of virt
237 PUBLIC inline NEEDS [Mem_space::virt_to_phys]
// Kernel-memory variant of the lookup; identical to virt_to_phys() here.
239 Mem_space::pmem_to_phys (Address virt) const
241 return virt_to_phys(virt);
244 /** Simple page-table lookup. This method is similar to Mem_space's
245 lookup(). The difference is that this version handles
246 Sigma0's address space with a special case: For Sigma0, we do not
247 actually consult the page table -- it is meaningless because we
248 create new mappings for Sigma0 transparently; instead, we return the
249 logically-correct result of physical address == virtual address.
250 @param a Virtual address. This address does not need to be page-aligned.
251 @return Physical address corresponding to a.
// NOTE(review): only the plain-lookup path is visible here; the sigma0
// identity-mapping special case described above must be on elided lines.
255 Mem_space::virt_to_phys_s0(void *a) const
257 return virt_to_phys((Address)a);
// Look up the mapping at 'virt' and report its size, abstract attributes
// and physical address through the optional out parameters. The return
// value (validity of the entry) is produced on a line not visible here.
262 Mem_space::v_lookup(Vaddr virt, Phys_addr *phys,
263 Size *size, unsigned *page_attribs)
265 Pte p = _dir->walk( (void*)virt.value(), 0, false,0); // never allocates
267 if (size) *size = Size(p.size());
268 if (page_attribs) *page_attribs = p.attr().get_abstract();
269 // FIXME: we should not use virt but 0 as offset for phys return value!
270 if (phys) *phys = Phys_addr(p.phys((void*)virt.value()));
// Remove a mapping or downgrade its access rights.
//
// @param virt          virtual address of the mapping
// @param size          expected size of the mapping (mismatch traps to kdb)
// @param page_attribs  attributes to revoke; when Page_user_accessible is
//        included the whole mapping is removed, otherwise only the
//        access permissions are restricted
// @return the previous abstract attributes masked with page_attribs
276 Mem_space::v_delete(Vaddr virt, Vsize size,
277 unsigned long page_attribs)
279 bool flush = Page_table::current() == _dir;
// NOTE(review): 'flush' is passed to set_invalid() below, while walk()
// itself gets a literal false -- looks intentional, confirm.
280 Pte pte = _dir->walk((void*)virt.value(), 0, false, ram_quota());
281 if (EXPECT_FALSE(!pte.valid()))
284 if (EXPECT_FALSE(pte.size() != size.value()))
286 kdb_ke("v_del: size mismatch\n");
// Write back/invalidate the cache over the whole naturally-aligned
// mapped region before the translation disappears.
290 Mem_unit::flush_vcache((void*)(virt.value() & ~(pte.size()-1)),
291 (void*)((virt.value() & ~(pte.size()-1)) + pte.size()));
293 Mem_page_attr a = pte.attr();
294 unsigned long abs_a = a.get_abstract();
// Rights-downgrade only: clear the revoked bits in the access permissions.
296 if (!(page_attribs & Page_user_accessible))
298 a.set_ap(abs_a & ~page_attribs);
// Full unmap: invalidate the PTE (syncing hardware iff this dir is active).
302 pte.set_invalid(0, flush);
305 Mem_unit::tlb_flush((void*)virt.value(), c_asid());
307 return abs_a & page_attribs;
// Replace the access permissions of the mapping at 'virt' with
// 'page_attribs' (the write-back of the modified PTE is on lines not
// visible here).
313 Mem_space::set_attributes(Address virt, unsigned page_attribs)
315 Pte p = _dir->walk( (void*)virt, 0, false,0); // lookup only, never allocates
317 // copy current shared kernel page directory
320 Mem_page_attr a = p.attr();
321 a.set_ap(page_attribs);
326 IMPLEMENT inline NEEDS[Mem_space::c_asid]
// Propagate a kernel-mapping change into this space: copy the superpage
// directory slot covering 'addr' from the kernel space's directory.
327 void Mem_space::kmem_update (void *addr)
329 _dir->copy_in(addr, kernel_space()->_dir,
330 addr, Config::SUPERPAGE_SIZE, c_asid());
336 * Tests if a task is the sigma0 task.
337 * @return true if the task is sigma0, false otherwise.
// Simple identity comparison against the registered sigma0 space.
340 bool Mem_space::is_sigma0()
342 return this == sigma0_space;
346 * \brief Free all memory allocated for this Mem_space.
347 * \pre Runs after the destructor!
// Releases the user part of the page-table hierarchy and returns the
// directory's quota charge (taken in the constructor) to the quota.
350 Mem_space::~Mem_space()
354 _dir->free_page_tables(0, (void*)Mem_layout::User_max);
356 ram_quota()->free(sizeof(Page_table));
361 /** Constructor. Creates a new address space and registers it with
364 * Registration may fail (if a task with the given number already
365 * exists, or if another thread creates an address space for the same
366 * task number concurrently). In this case, the newly-created
367 * address space should be deleted again.
369 * @param new_number Task number of the new address space
372 Mem_space::Mem_space(Ram_quota *q)
// Charge the directory against the quota first; the failure branch
// (on lines not visible here) must bail out before allocating.
378 if (EXPECT_FALSE(!ram_quota()->alloc(sizeof(Page_table))))
381 _dir = new Page_table();
384 // copy current shared kernel page directory
// The kernel region [User_max, Kernel_max) is shared by all spaces.
385 _dir->copy_in((void*)Mem_layout::User_max,
386 kernel_space()->_dir,
387 (void*)Mem_layout::User_max,
388 Mem_layout::Kernel_max - Mem_layout::User_max)
390 enable_reverse_lookup ();
// Wrap an already-existing page directory (e.g. the boot/kernel one) in
// a Mem_space without allocating; only the reverse lookup is installed.
394 Mem_space::Mem_space (Ram_quota *q, Dir_type* pdir)
395 : _quota(q), _dir (pdir)
398 enable_reverse_lookup ();
// Canonicalize a page number (body on lines not visible in this view).
403 Mem_space::canonize(Page_number v)
406 //----------------------------------------------------------------------------
407 IMPLEMENTATION [armv5]:
// ARMv5 cores have no hardware ASIDs; these are the no-ASID variants of
// the ASID interface (bodies not visible here -- presumably trivial
// stubs/constant values; confirm against the full file).
411 Mem_space::asid(unsigned long)
416 Mem_space::c_asid() const
420 void Mem_space::make_current()
426 //----------------------------------------------------------------------------
427 INTERFACE [armv6 || armv7]:
429 EXTENSION class Mem_space
// ARMv6/v7 support hardware ASIDs (address-space identifiers).
432 enum { Have_asids = 1 };
// Per-CPU ASID assigned to this space; ~0UL is used as "no ASID"
// (see the allocation logic in asid()).
434 unsigned long _asid[Config::Max_num_cpus];
// Per-CPU allocation cursor and reverse map ASID -> owning space;
// 256 entries match the 8-bit ARM ASID space.
436 static Per_cpu<unsigned char> _next_free_asid;
437 static Per_cpu<Mem_space *[256]> _active_asids;
440 //----------------------------------------------------------------------------
441 INTERFACE [!(armv6 || armv7)]:
443 EXTENSION class Mem_space
// Pre-ARMv6 configurations have no ASID support.
446 enum { Have_asids = 0 };
450 //----------------------------------------------------------------------------
451 IMPLEMENTATION [armv6 || armv7]:
// Definitions of the per-CPU ASID bookkeeping declared in the interface.
454 Per_cpu<unsigned char> DEFINE_PER_CPU Mem_space::_next_free_asid;
455 Per_cpu<Mem_space *[256]> DEFINE_PER_CPU Mem_space::_active_asids;
// Set this space's ASID on every CPU (loop body not visible here;
// presumably assigns 'a' to _asid[i] -- confirm).
459 Mem_space::asid(unsigned long a)
461 for (unsigned i = 0; i < Config::Max_num_cpus; ++i)
// Current ASID of this space on the executing CPU (~0UL if none assigned).
467 Mem_space::c_asid() const
468 { return _asid[current_cpu()]; }
470 PRIVATE inline static
// Hand out the next ASID on 'cpu'. The unsigned char cursor wraps at
// 256, which implements the round-robin (FIFO) reuse of ASIDs.
472 Mem_space::next_asid(unsigned cpu)
473 { return _next_free_asid.cpu(cpu)++; }
475 PRIVATE inline NEEDS[Mem_space::next_asid]
// Lazily allocate -- and if all 256 are in use, steal -- an ASID for
// this space on the current CPU. (The function's signature line is not
// visible in this view; presumably this is asid() -- confirm.)
479 unsigned cpu = current_cpu();
480 if (EXPECT_FALSE(_asid[cpu] == ~0UL)) // this space has no valid ASID yet
482 // FIFO ASID replacement strategy
483 unsigned char new_asid = next_asid(cpu);
// 'bad_guy' is the space currently owning the candidate ASID, if any.
484 Mem_space **bad_guy = &_active_asids.cpu(cpu)[new_asid];
487 // need ASID replacement
488 if (*bad_guy == current_mem_space(cpu))
490 // do not replace the ASID of the current space
491 new_asid = next_asid(cpu);
492 bad_guy = &_active_asids.cpu(cpu)[new_asid];
496 //LOG_MSG_3VAL(current(), "ASIDr", new_asid, (Mword)*bad_guy, (Mword)this);
// Purge stale TLB entries tagged with the reused ASID and invalidate
// the former owner's ASID slot before taking it over.
497 Mem_unit::tlb_flush(new_asid);
498 (*bad_guy)->_asid[cpu] = ~0UL;
504 _asid[cpu] = new_asid;
507 //LOG_MSG_3VAL(current(), "ASID", (Mword)this, _asid, (Mword)__builtin_return_address(0));
511 IMPLEMENT inline NEEDS[Mem_space::asid]
// Activate this space's page directory under its (possibly freshly
// allocated) ASID.
512 void Mem_space::make_current()
514 _dir->activate(asid());