3 #include "auto_quota.h"
4 #include "kmem.h" // for "_unused_*" virtual memory regions
5 #include "member_offs.h"
11 EXTENSION class Mem_space
16 typedef Page_table Dir_type;
18 /** Return status of v_insert. */
21 Insert_ok = Page_table::E_OK, ///< Mapping was added successfully.
22 Insert_err_nomem = Page_table::E_NOMEM, ///< Couldn't alloc new page table
23 Insert_err_exists = Page_table::E_EXISTS, ///< A mapping already exists at the target addr
24 Insert_warn_attrib_upgrade = Page_table::E_UPGRADE, ///< Mapping already existed, attribs upgrade
25 Insert_warn_exists, ///< Mapping already existed
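    // Illustrative sketch (hypothetical caller, not from this file):
    // mapping code is expected to treat the warn codes as success and
    // only the err codes as failures, roughly:
    //
    //   switch (space->v_insert(phys, virt, size, attribs, false))
    //     {
    //     case Insert_ok:
    //     case Insert_warn_exists:
    //     case Insert_warn_attrib_upgrade:
    //       break;                      // mapping is in place
    //     case Insert_err_nomem:        // no memory for a page table
    //     case Insert_err_exists:       // conflicting mapping present
    //       /* propagate the error */
    //       break;
    //     }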
  /** Attribute masks for page mappings. */

    Page_writable = Mem_page_attr::Write,
    Page_user_accessible = Mem_page_attr::User,
    /// Page is noncacheable.
    Page_noncacheable = Page::NONCACHEABLE,
    Page_cacheable = Page::CACHEABLE,
    /// A mask containing all attribute bits. For the user access
    /// bits, USER_NO | USER_RO = USER_RW.
    Page_all_attribs = Page_user_accessible | Page_writable | Page_cacheable,
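    // Illustrative use of the masks above: a normal user read-write
    // mapping carries Page_user_accessible | Page_writable |
    // Page_cacheable (i.e. Page_all_attribs), while a device mapping
    // would use Page_noncacheable instead of Page_cacheable.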
  enum // Definitions for map_util

    Need_insert_tlb_flush = 1,
    Map_page_size = Config::PAGE_SIZE,
    Page_shift = Config::PAGE_SHIFT,
    Map_superpage_size = Config::SUPERPAGE_SIZE,
    Map_max_address = Mem_layout::User_max,

  static void kernel_space(Mem_space *);
  static bool has_superpages() { return true; }

//---------------------------------------------------------------------------
86 #include "kmem_alloc.h"
92 Mem_space::xlate_flush(unsigned char rights)
94 Mword a = Page_references;
95 if (rights & L4_fpage::RX)
96 a |= Page_all_attribs;
97 else if (rights & L4_fpage::W)
104 Mem_space::is_full_flush(unsigned char rights)
106 return rights & L4_fpage::RX;
111 Mem_space::xlate_flush_result(Mword attribs)
114 if (attribs & Page_referenced)
117 if (attribs & Page_dirty)
PUBLIC inline NEEDS["mem_unit.h"]

Mem_space::tlb_flush(bool force = false)

    Mem_unit::tlb_flush();
  else if (force && c_asid() != ~0UL)
    Mem_unit::tlb_flush(c_asid());

  // else do nothing, we manage ASID-local flushes in v_* already
  // Mem_unit::tlb_flush();
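  // In summary: a full TLB flush when ASIDs are unavailable, an
  // ASID-selective flush when one is forced and this space already has
  // an ASID (c_asid() == ~0UL means none is assigned, so the TLB holds
  // nothing for this space), and otherwise nothing, because the v_*
  // operations below already flush locally.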
PUBLIC static inline NEEDS["mem_unit.h"]

Mem_space::tlb_flush_spaces(bool all, Mem_space *s1, Mem_space *s2)

  if (all || !Have_asids)
    Mem_unit::tlb_flush();

Mem_space *Mem_space::current_mem_space(unsigned cpu)

  return _current.cpu(cpu);

IMPLEMENT inline NEEDS["kmem.h", Mem_space::c_asid]
void Mem_space::switchin_context(Mem_space *from)

  // never switch to kernel space (the context of the idle thread)
  if (this == kernel_space())

  _dir->invalidate((void*)Kmem::ipc_window(0), Config::SUPERPAGE_SIZE * 4,

void Mem_space::kernel_space(Mem_space *_k_space)

  _kernel_space = _k_space;

static unsigned pd_index(void const *address)
{ return (Mword)address >> 20; /* 1 MB steps */ }

static unsigned pt_index(void const *address)
{ return ((Mword)address >> 12) & 255; /* 4 KB steps within a coarse page table */ }
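// Worked example: for address 0x80123456,
//   pd_index = 0x80123456 >> 20          = 0x801  (1 MB section index)
//   pt_index = (0x80123456 >> 12) & 255  = 0x23   (4 KB page within the section)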
Mem_space::v_insert(Phys_addr phys, Vaddr virt, Vsize size, unsigned page_attribs,
                    bool upgrade_ignore_size)

  Mem_space *c = _current.current();
  bool flush = c == this;

  Pte pte = _dir->walk((void*)virt.value(), size.value(), flush,
                       Kmem_alloc::q_allocator(ram_quota()),

      if (EXPECT_FALSE(!upgrade_ignore_size
                       && (pte.size() != size.value() || pte.phys() != phys.value())))
        return Insert_err_exists;
      if (pte.attr().get_abstract() == page_attribs)
        return Insert_warn_exists;

      Mem_page_attr a = pte.attr();
      a.set_abstract(a.get_abstract() | page_attribs);
      pte.set(phys.value(), size.value(), a, flush);

      BUG_ON(pte.phys() != phys.value(), "overwrite phys addr: %lx with %lx\n",
             pte.phys(), phys.value());

        Mem_unit::tlb_flush((void*)virt.value(), c_asid());

      return Insert_warn_attrib_upgrade;

  else if (pte.size() != size.value())
    return Insert_err_nomem;

      // we found an invalid entry of the right size
      Mem_page_attr a(Page::Local_page);
      a.set_abstract(page_attribs);
      pte.set(phys.value(), size.value(), a, flush);
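      // Note the upgrade semantics above: attribute bits can only be
      // added (OR-ed in) through v_insert(); removing access rights is
      // the job of v_delete() below.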
 * Simple page-table lookup.

 * @param virt Virtual address. This address does not need to be page-aligned.
 * @return Physical address corresponding to virt.

Mem_space::virt_to_phys(Address virt) const

  Pte pte = _dir->walk((void*)virt, 0, false, Ptab::Null_alloc(), 0);
  if (EXPECT_FALSE(!pte.valid()))

  return (Address)pte.phys((void*)virt);
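// Note: the walk above uses Ptab::Null_alloc(), i.e. this is a pure
// lookup that never allocates page tables as a side effect, and
// pte.phys((void*)virt) folds the sub-page offset of virt into the
// returned physical address.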
PUBLIC inline NEEDS[Mem_space::virt_to_phys]

Mem_space::pmem_to_phys(Address virt) const

  return virt_to_phys(virt);

/** Simple page-table lookup. This method is similar to Mem_space's
    lookup(). The difference is that this version handles Sigma0's
    address space with a special case: for Sigma0, we do not actually
    consult the page table -- it is meaningless because we create new
    mappings for Sigma0 transparently; instead, we return the
    logically correct result of physical address == virtual address.
    @param a Virtual address. This address does not need to be page-aligned.
    @return Physical address corresponding to a.

Mem_space::virt_to_phys_s0(void *a) const

  return virt_to_phys((Address)a);
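// Illustration of the documented contract: for Sigma0,
// virt_to_phys_s0(a) == (Address)a. Here the call simply delegates to
// virt_to_phys(), which yields the same result as long as Sigma0's
// address space is mapped idempotently.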
Mem_space::v_lookup(Vaddr virt, Phys_addr *phys,
                    Size *size, unsigned *page_attribs)

  Pte p = _dir->walk((void*)virt.value(), 0, false, Ptab::Null_alloc(), 0);

  if (size) *size = Size(p.size());
  if (page_attribs) *page_attribs = p.attr().get_abstract();
  // FIXME: we should not use virt but 0 as offset for phys return value!
  if (phys) *phys = Phys_addr(p.phys((void*)virt.value()));

Mem_space::v_delete(Vaddr virt, Vsize size,
                    unsigned long del_attribs)

  bool flush = _current.current() == this;
  Pte pte = _dir->walk((void*)virt.value(), 0, false, Ptab::Null_alloc(), 0);
  if (EXPECT_FALSE(!pte.valid()))

  BUG_ON(pte.size() != size.value(), "size mismatch: va=%lx sz=%lx dir=%p\n",
         virt.value(), size.value(), _dir);

  Mem_unit::flush_vcache((void*)(virt.value() & ~(pte.size()-1)),
                         (void*)((virt.value() & ~(pte.size()-1)) + pte.size()));

  Mem_page_attr a = pte.attr();
  unsigned long abs_a = a.get_abstract();

  if (!(del_attribs & Page_user_accessible))

      a.set_ap(abs_a & ~del_attribs);

    pte.set_invalid(0, flush);

    Mem_unit::tlb_flush((void*)virt.value(), c_asid());

  return abs_a & del_attribs;
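  // The return value reports which of the requested attribute bits
  // were actually set on the mapping, and the data cache is flushed
  // over the whole (aligned) page before the mapping goes away.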
Mem_space::set_attributes(Address virt, unsigned page_attribs)

  Pte p = _dir->walk((void*)virt, 0, false, Ptab::Null_alloc(), 0);

  Mem_page_attr a = p.attr();
  a.set_ap(page_attribs);

/**
 * \brief Free all memory allocated for this Mem_space.
 * \pre Runs after the destructor!
 */
Mem_space::~Mem_space()

    _dir->free_page_tables(0, (void*)Mem_layout::User_max,
                           Kmem_alloc::q_allocator(ram_quota()));
    Kmem_alloc::allocator()->q_unaligned_free(ram_quota(), sizeof(Page_table), _dir);
/** Constructor. Creates a new address space and registers it.
 * Registration may fail (if a task with the given number already
 * exists, or if another thread concurrently creates an address space
 * for the same task number). In this case, the newly created
 * address space should be deleted again.

Mem_space::Mem_space(Ram_quota *q)

PROTECTED inline NEEDS[<new>, "kmem_alloc.h", Mem_space::asid]

Mem_space::initialize()

  Auto_quota<Ram_quota> q(ram_quota(), sizeof(Page_table));
  if (EXPECT_FALSE(!q))

  _dir = (Page_table*)Kmem_alloc::allocator()->unaligned_alloc(sizeof(Page_table));

  new (_dir) Page_table;
Mem_space::sync_kernel()

  // copy the current shared kernel page directory
  _dir->copy_in((void*)Mem_layout::User_max,
                kernel_space()->_dir,
                (void*)Mem_layout::User_max,
                Mem_layout::Kernel_max - Mem_layout::User_max);
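  // Note: the copy covers exactly the kernel region
  // [User_max, Kernel_max); entries for user addresses below User_max
  // are left untouched.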
Mem_space::Mem_space(Ram_quota *q, Dir_type *pdir)
: _quota(q), _dir(pdir)

  _current.cpu(0) = this;

Mem_space::canonize(Page_number v)
//----------------------------------------------------------------------------
IMPLEMENTATION [armv5]:

Mem_space::asid(unsigned long)

Mem_space::reset_asid()

Mem_space::c_asid() const

void Mem_space::make_current()

  _current.current() = this;

//----------------------------------------------------------------------------
INTERFACE [armv6 || armv7]:

EXTENSION class Mem_space

  enum { Have_asids = 1 };

  unsigned long _asid[Config::Max_num_cpus];

  static Per_cpu<unsigned char> _next_free_asid;
  static Per_cpu<Mem_space *[256]> _active_asids;

//----------------------------------------------------------------------------
INTERFACE [!(armv6 || armv7)]:

EXTENSION class Mem_space

  enum { Have_asids = 0 };

//----------------------------------------------------------------------------
IMPLEMENTATION [armv6 || armca8]:

PRIVATE inline static

Mem_space::next_asid(unsigned cpu)

  return _next_free_asid.cpu(cpu)++;
//----------------------------------------------------------------------------
IMPLEMENTATION [armv7 && armca9]:

PRIVATE inline static

Mem_space::next_asid(unsigned cpu)

  if (_next_free_asid.cpu(cpu) == 0)
    ++_next_free_asid.cpu(cpu);
  return _next_free_asid.cpu(cpu)++;
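// Note: ASID 0 is skipped here, so on this configuration user spaces
// only ever receive ASIDs 1..255 (presumably ASID 0 is reserved, e.g.
// for the transitional state during an ASID switch).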
//----------------------------------------------------------------------------
IMPLEMENTATION [armv6 || armv7]:

DEFINE_PER_CPU Per_cpu<unsigned char> Mem_space::_next_free_asid;
DEFINE_PER_CPU Per_cpu<Mem_space *[256]> Mem_space::_active_asids;

Mem_space::asid(unsigned long a)

  for (unsigned i = 0; i < Config::Max_num_cpus; ++i)

Mem_space::c_asid() const
{ return _asid[current_cpu()]; }

PRIVATE inline NEEDS[Mem_space::next_asid, "types.h"]

  unsigned cpu = current_cpu();
  if (EXPECT_FALSE(_asid[cpu] == ~0UL))

      // FIFO ASID replacement strategy
      unsigned char new_asid = next_asid(cpu);
      Mem_space **bad_guy = &_active_asids.cpu(cpu)[new_asid];
      while (Mem_space *victim = access_once(bad_guy))

          // need ASID replacement
          if (victim == current_mem_space(cpu))

              // do not replace the ASID of the current space
              new_asid = next_asid(cpu);
              bad_guy = &_active_asids.cpu(cpu)[new_asid];

          //LOG_MSG_3VAL(current(), "ASIDr", new_asid, (Mword)*bad_guy, (Mword)this);
          Mem_unit::tlb_flush(new_asid);

          // If the victim is valid and we managed to write the marker
          // value 1 into the ASID array, we have to reset the victim's
          // ASID ourselves; otherwise reset_asid() is currently
          // resetting the victim's ASIDs on a different CPU.
          if (victim != reinterpret_cast<Mem_space*>(~0UL)
              && mp_cas(bad_guy, victim, reinterpret_cast<Mem_space*>(1)))
            write_now(&victim->_asid[cpu], ~0UL);

      _asid[cpu] = new_asid;
      write_now(bad_guy, this);

  //LOG_MSG_3VAL(current(), "ASID", (Mword)this, _asid[cpu], (Mword)__builtin_return_address(0));
Mem_space::reset_asid()

  for (unsigned i = 0; i < Config::Max_num_cpus; ++i)

      unsigned asid = access_once(&_asid[i]);

      Mem_space **a = &_active_asids.cpu(i)[asid];
      if (!mp_cas(a, this, reinterpret_cast<Mem_space*>(~0UL)))
        // Our ASID may be in the process of being preempted;
        // wait until that is done.
        while (access_once(a) == reinterpret_cast<Mem_space*>(1))

IMPLEMENT inline NEEDS[Mem_space::asid]
void Mem_space::make_current()

  _dir->activate(asid());
  _current.current() = this;