INTERFACE [arm]:

#include "auto_quota.h"
#include "kmem.h"               // for "_unused_*" virtual memory regions
#include "member_offs.h"
#include "paging.h"
#include "types.h"
#include "pagetable.h"
#include "ram_quota.h"

EXTENSION class Mem_space
{
  friend class Jdb;

public:
  typedef Page_table Dir_type;

  /** Return status of v_insert. */
  enum // Status
  {
    Insert_ok = Page_table::E_OK,             ///< Mapping was added successfully.
    Insert_err_nomem  = Page_table::E_NOMEM,  ///< Couldn't alloc new page table
    Insert_err_exists = Page_table::E_EXISTS, ///< A mapping already exists at the target addr
    Insert_warn_attrib_upgrade = Page_table::E_UPGRADE, ///< Mapping already existed, attribs upgrade
    Insert_warn_exists,                       ///< Mapping already existed
  };

  /** Attribute masks for page mappings. */
  enum Page_attrib
  {
    Page_no_attribs = 0,
    /// Page is writable.
    Page_writable = Mem_page_attr::Write,
    /// It's a user page (USER_NO | USER_RO = USER_RW).
    Page_user_accessible = Mem_page_attr::User,
    /// Page is noncacheable.
    Page_noncacheable = Page::NONCACHEABLE,
    Page_cacheable = Page::CACHEABLE,
    /// A mask which contains all mask bits.
    Page_all_attribs = Page_user_accessible | Page_writable | Page_cacheable,
    Page_referenced = 0,
    Page_dirty = 0,
    Page_references = 0,
  };
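  // Note: the ARM page-table formats targeted here (ARMv5-ARMv7) have no
  // hardware-managed referenced/dirty bits, hence the zero masks for
  // Page_referenced, Page_dirty and Page_references above.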

  // Mapping utilities

  enum                          // Definitions for map_util
  {
    Need_insert_tlb_flush = 1,
    Map_page_size = Config::PAGE_SIZE,
    Page_shift = Config::PAGE_SHIFT,
    Map_superpage_size = Config::SUPERPAGE_SIZE,
    Map_max_address = Mem_layout::User_max,
    Whole_space = 32,
    Identity_map = 0,
  };


  static void kernel_space(Mem_space *);
  static bool has_superpages() { return true; }


private:
  // DATA
  Dir_type *_dir;
};

//---------------------------------------------------------------------------
IMPLEMENTATION [arm]:

#include <cassert>
#include <cstring>
#include <new>

#include "atomic.h"
#include "bug.h"
#include "config.h"
#include "globals.h"
#include "kdb_ke.h"
#include "l4_types.h"
#include "panic.h"
#include "paging.h"
#include "kmem.h"
#include "kmem_alloc.h"
#include "mem_unit.h"

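// The xlate_flush()/xlate_flush_result() pair translates L4 flush rights into
// page-attribute masks and back: revoking RX clears all attributes, revoking
// only W clears just the write permission.  Since Page_referenced and
// Page_dirty are zero masks on ARM (see the note in Page_attrib above),
// xlate_flush_result() always reports an empty rights set here.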
PUBLIC static inline
Mword
Mem_space::xlate_flush(unsigned char rights)
{
  Mword a = Page_references;
  if (rights & L4_fpage::RX)
    a |= Page_all_attribs;
  else if (rights & L4_fpage::W)
    a |= Page_writable;
  return a;
}

PUBLIC static inline
Mword
Mem_space::is_full_flush(unsigned char rights)
{
  return rights & L4_fpage::RX;
}

PUBLIC static inline
unsigned char
Mem_space::xlate_flush_result(Mword attribs)
{
  unsigned char r = 0;
  if (attribs & Page_referenced)
    r |= L4_fpage::RX;

  if (attribs & Page_dirty)
    r |= L4_fpage::W;

  return r;
}

// Mapping utilities

PUBLIC inline NEEDS["mem_unit.h"]
void
Mem_space::tlb_flush(bool force = false)
{
  if (!Have_asids)
    Mem_unit::tlb_flush();
  else if (force && c_asid() != ~0UL)
    Mem_unit::tlb_flush(c_asid());

  // else do nothing, we manage ASID local flushes in v_* already
  // Mem_unit::tlb_flush();
}

PUBLIC static inline NEEDS["mem_unit.h"]
void
Mem_space::tlb_flush_spaces(bool all, Mem_space *s1, Mem_space *s2)
{
  if (all || !Have_asids)
    Mem_unit::tlb_flush();
  else
    {
      if (s1)
        s1->tlb_flush(true);
      if (s2)
        s2->tlb_flush(true);
    }
}
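
// Typical use (sketch; s1/s2 stand for two spaces touched by an unmap):
//   Mem_space::tlb_flush_spaces(false, s1, s2);
// On ASID-capable cores this flushes only the two spaces' ASIDs; with
// all == true, or without ASID support, it falls back to a full TLB flush.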


IMPLEMENT inline
Mem_space *Mem_space::current_mem_space(unsigned cpu)
{
  return _current.cpu(cpu);
}


IMPLEMENT inline NEEDS ["kmem.h", Mem_space::c_asid]
void Mem_space::switchin_context(Mem_space *from)
{
#if 0
  // never switch to kernel space (context of the idle thread)
  if (this == kernel_space())
    return;
#endif

  if (from != this)
    make_current();
  else
    tlb_flush(true);
#if 0
  _dir->invalidate((void*)Kmem::ipc_window(0), Config::SUPERPAGE_SIZE * 4,
      c_asid());
#endif
}


IMPLEMENT inline
void Mem_space::kernel_space(Mem_space *_k_space)
{
  _kernel_space = _k_space;
}


inline
static unsigned pd_index(void const *address)
{ return (Mword)address >> 20; /* 1MB steps */ }

inline
static unsigned pt_index(void const *address)
{ return ((Mword)address >> 12) & 255; /* 4KB steps for coarse pts */ }
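
// Worked example: for address 0x00123456, pd_index() yields 0x1 (the 1 MB
// section at 0x00100000) and pt_index() yields 0x23 (the 4 KB page at
// 0x00123000 within that section's coarse page table).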


IMPLEMENT
Mem_space::Status
Mem_space::v_insert(Phys_addr phys, Vaddr virt, Vsize size, unsigned page_attribs,
                    bool upgrade_ignore_size)
{
  Mem_space *c = _current.current();
  bool flush = c == this;

  Pte pte = _dir->walk((void*)virt.value(), size.value(), flush,
                       Kmem_alloc::q_allocator(ram_quota()),
                       c->dir());
  if (pte.valid())
    {
      if (EXPECT_FALSE(!upgrade_ignore_size
            && (pte.size() != size.value() || pte.phys() != phys.value())))
        return Insert_err_exists;
      if (pte.attr().get_abstract() == page_attribs)
        return Insert_warn_exists;

      Mem_page_attr a = pte.attr();
      a.set_abstract(a.get_abstract() | page_attribs);
      pte.set(phys.value(), size.value(), a, flush);

      BUG_ON(pte.phys() != phys.value(), "overwrite phys addr: %lx with %lx\n",
             pte.phys(), phys.value());

      if (Have_asids)
        Mem_unit::tlb_flush((void*)virt.value(), c_asid());

      return Insert_warn_attrib_upgrade;
    }
  else if (pte.size() != size.value())
    return Insert_err_nomem;
  else
    {
      // we found an invalid entry for the right size
      Mem_page_attr a(Page::Local_page);
      a.set_abstract(page_attribs);
      pte.set(phys.value(), size.value(), a, flush);
      return Insert_ok;
    }
}
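
// Call sketch (hypothetical values) for mapping one user page read-write:
//   v_insert(Phys_addr(0x80200000), Vaddr(0x00200000),
//            Vsize(Config::PAGE_SIZE),
//            Page_user_accessible | Page_writable | Page_cacheable, false);
// This returns Insert_ok for a fresh entry, Insert_warn_attrib_upgrade if the
// same frame was already mapped with fewer rights, Insert_warn_exists if
// nothing changed, Insert_err_exists if a different mapping occupies the
// slot, and Insert_err_nomem if a needed page table could not be allocated.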


/**
 * Simple page-table lookup.
 *
 * @param virt Virtual address.  This address does not need to be page-aligned.
 * @return Physical address corresponding to virt, or ~0UL if no valid
 *         mapping exists.
 */
PUBLIC inline
Address
Mem_space::virt_to_phys(Address virt) const
{
  Pte pte = _dir->walk((void*)virt, 0, false, Ptab::Null_alloc(), 0);
  if (EXPECT_FALSE(!pte.valid()))
    return ~0UL;

  return (Address)pte.phys((void*)virt);
}
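
// Usage sketch: callers must check for the ~0UL failure value, e.g.
//   Address pa = space->virt_to_phys(va);
//   if (pa == ~0UL)
//     ; // no mapping for va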

PUBLIC inline NEEDS [Mem_space::virt_to_phys]
Address
Mem_space::pmem_to_phys(Address virt) const
{
  return virt_to_phys(virt);
}

/** Simple page-table lookup.  This method is similar to virt_to_phys().
    The difference is that this version handles Sigma0's address space with
    a special case: For Sigma0, we do not actually consult the page table --
    it is meaningless because we create new mappings for Sigma0
    transparently; instead, we return the logically-correct result of
    physical address == virtual address.
    @param a Virtual address.  This address does not need to be page-aligned.
    @return Physical address corresponding to a.
 */
PUBLIC inline
virtual Address
Mem_space::virt_to_phys_s0(void *a) const
{
  return virt_to_phys((Address)a);
}

IMPLEMENT
bool
Mem_space::v_lookup(Vaddr virt, Phys_addr *phys,
                    Size *size, unsigned *page_attribs)
{
  Pte p = _dir->walk((void*)virt.value(), 0, false, Ptab::Null_alloc(), 0);

  if (size) *size = Size(p.size());
  if (page_attribs) *page_attribs = p.attr().get_abstract();
  // FIXME: we should not use virt but 0 as offset for phys return value!
  if (phys) *phys = Phys_addr(p.phys((void*)virt.value()));
  return p.valid();
}

IMPLEMENT
unsigned long
Mem_space::v_delete(Vaddr virt, Vsize size,
                    unsigned long del_attribs)
{
  (void)size;
  bool flush = _current.current() == this;
  Pte pte = _dir->walk((void*)virt.value(), 0, false, Ptab::Null_alloc(), 0);
  if (EXPECT_FALSE(!pte.valid()))
    return 0;

  BUG_ON(pte.size() != size.value(), "size mismatch: va=%lx sz=%lx dir=%p\n",
         virt.value(), size.value(), _dir);

  Mem_unit::flush_vcache((void*)(virt.value() & ~(pte.size()-1)),
      (void*)((virt.value() & ~(pte.size()-1)) + pte.size()));

  Mem_page_attr a = pte.attr();
  unsigned long abs_a = a.get_abstract();

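  // Revoking the user-accessible right removes the mapping entirely;
  // revoking only a subset of rights downgrades the entry in place.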
  if (!(del_attribs & Page_user_accessible))
    {
      a.set_ap(abs_a & ~del_attribs);
      pte.attr(a, flush);
    }
  else
    pte.set_invalid(0, flush);

  if (Have_asids)
    Mem_unit::tlb_flush((void*)virt.value(), c_asid());

  return abs_a & del_attribs;
}


PUBLIC inline
bool
Mem_space::set_attributes(Address virt, unsigned page_attribs)
{
  Pte p = _dir->walk((void*)virt, 0, false, Ptab::Null_alloc(), 0);
  if (!p.valid())
    return false;

  Mem_page_attr a = p.attr();
  a.set_ap(page_attribs);
  p.attr(a, true);
  return true;
}

/**
 * \brief Free all memory allocated for this Mem_space.
 * \pre Runs after the destructor!
 */
PUBLIC
Mem_space::~Mem_space()
{
  reset_asid();
  if (_dir)
    {
      _dir->free_page_tables(0, (void*)Mem_layout::User_max,
                             Kmem_alloc::q_allocator(ram_quota()));
      Kmem_alloc::allocator()->q_unaligned_free(ram_quota(), sizeof(Page_table), _dir);
    }
}


/** Constructor.  Creates a new address space and registers it with
  * Space_index.
  *
  * Registration may fail (if a task with the given number already
  * exists, or if another thread creates an address space for the same
  * task number concurrently).  In this case, the newly-created
  * address space should be deleted again.
  */
PUBLIC inline
Mem_space::Mem_space(Ram_quota *q)
: _quota(q), _dir(0)
{
  asid(~0UL);
}

PROTECTED inline NEEDS[<new>, "kmem_alloc.h", Mem_space::asid]
bool
Mem_space::initialize()
{
  Auto_quota<Ram_quota> q(ram_quota(), sizeof(Page_table));
  if (EXPECT_FALSE(!q))
    return false;

  _dir = (Page_table*)Kmem_alloc::allocator()->unaligned_alloc(sizeof(Page_table));
  if (!_dir)
    return false;

  new (_dir) Page_table;

  q.release();
  return true;
}
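
// Note on the quota pattern above: Auto_quota charges sizeof(Page_table)
// against the RAM quota in its constructor (the !q check tests whether that
// charge succeeded); if the page-table allocation then fails, the charge is
// rolled back when q goes out of scope, while q.release() on the success
// path makes the charge permanent.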

PROTECTED inline
void
Mem_space::sync_kernel()
{
  // copy current shared kernel page directory
  _dir->copy_in((void*)Mem_layout::User_max,
                kernel_space()->_dir,
                (void*)Mem_layout::User_max,
                Mem_layout::Kernel_max - Mem_layout::User_max);
}

PUBLIC
Mem_space::Mem_space(Ram_quota *q, Dir_type* pdir)
  : _quota(q), _dir(pdir)
{
  asid(~0UL);
  _current.cpu(0) = this;
}

PUBLIC static inline
Page_number
Mem_space::canonize(Page_number v)
{ return v; }

//----------------------------------------------------------------------------
IMPLEMENTATION [armv5]:

PRIVATE inline
void
Mem_space::asid(unsigned long)
{}

PRIVATE inline
void
Mem_space::reset_asid()
{}

PUBLIC inline
unsigned long
Mem_space::c_asid() const
{ return 0; }

IMPLEMENT inline
void Mem_space::make_current()
{
  _dir->activate();
  _current.current() = this;
}


//----------------------------------------------------------------------------
INTERFACE [armv6 || armv7]:

EXTENSION class Mem_space
{
public:
  enum { Have_asids = 1 };
private:
  unsigned long _asid[Config::Max_num_cpus];

  static Per_cpu<unsigned char> _next_free_asid;
  static Per_cpu<Mem_space *[256]> _active_asids;
};

//----------------------------------------------------------------------------
INTERFACE [!(armv6 || armv7)]:

EXTENSION class Mem_space
{
public:
  enum { Have_asids = 0 };
};

//----------------------------------------------------------------------------
IMPLEMENTATION [armv6 || armca8]:

PRIVATE inline static
unsigned long
Mem_space::next_asid(unsigned cpu)
{
  return _next_free_asid.cpu(cpu)++;
}

//----------------------------------------------------------------------------
IMPLEMENTATION [armv7 && armca9]:

PRIVATE inline static
unsigned long
Mem_space::next_asid(unsigned cpu)
{
  if (_next_free_asid.cpu(cpu) == 0)
    ++_next_free_asid.cpu(cpu);
  return _next_free_asid.cpu(cpu)++;
}
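
// Note: on Cortex-A9 the allocator above skips ASID 0 whenever the per-CPU
// counter wraps, so ASID 0 is never handed out to a space (presumably kept
// reserved, e.g. for the transient window during an ASID switch).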

//----------------------------------------------------------------------------
IMPLEMENTATION [armv6 || armv7]:

DEFINE_PER_CPU Per_cpu<unsigned char>    Mem_space::_next_free_asid;
DEFINE_PER_CPU Per_cpu<Mem_space *[256]> Mem_space::_active_asids;

PRIVATE inline
void
Mem_space::asid(unsigned long a)
{
  for (unsigned i = 0; i < Config::Max_num_cpus; ++i)
    _asid[i] = a;
}

PUBLIC inline
unsigned long
Mem_space::c_asid() const
{ return _asid[current_cpu()]; }

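// ASID management: each CPU hands out its 256 hardware ASIDs in FIFO order
// and records the owning space in _active_asids.  Two sentinel values appear
// in that array: (Mem_space *)1 marks a slot whose previous owner is being
// evicted by asid() right now, and (Mem_space *)~0UL marks a slot whose
// owner already reset itself via reset_asid().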
PRIVATE inline NEEDS[Mem_space::next_asid, "types.h"]
unsigned long
Mem_space::asid()
{
  unsigned cpu = current_cpu();
  if (EXPECT_FALSE(_asid[cpu] == ~0UL))
    {
      // FIFO ASID replacement strategy
      unsigned char new_asid = next_asid(cpu);
      Mem_space **bad_guy = &_active_asids.cpu(cpu)[new_asid];
      while (Mem_space *victim = access_once(bad_guy))
        {
          // need ASID replacement
          if (victim == current_mem_space(cpu))
            {
              // do not replace the ASID of the current space
              new_asid = next_asid(cpu);
              bad_guy = &_active_asids.cpu(cpu)[new_asid];
              continue;
            }

          //LOG_MSG_3VAL(current(), "ASIDr", new_asid, (Mword)*bad_guy, (Mword)this);
          Mem_unit::tlb_flush(new_asid);

          // If the victim is valid and we get a 1 written to the ASID array
          // then we have to reset the ASID of our victim, else the
          // reset_asid function is currently resetting the ASIDs of the
          // victim on a different CPU.
          if (victim != reinterpret_cast<Mem_space*>(~0UL) &&
              mp_cas(bad_guy, victim, reinterpret_cast<Mem_space*>(1)))
            write_now(&victim->_asid[cpu], ~0UL);
          break;
        }

      _asid[cpu] = new_asid;
      write_now(bad_guy, this);
    }

  //LOG_MSG_3VAL(current(), "ASID", (Mword)this, _asid[cpu], (Mword)__builtin_return_address(0));
  return _asid[cpu];
}

PRIVATE inline
void
Mem_space::reset_asid()
{
  for (unsigned i = 0; i < Config::Max_num_cpus; ++i)
    {
      unsigned asid = access_once(&_asid[i]);
      if (asid == ~0UL)
        continue;

      Mem_space **a = &_active_asids.cpu(i)[asid];
      if (!mp_cas(a, this, reinterpret_cast<Mem_space*>(~0UL)))
        // It could be our ASID is in the process of being preempted,
        // so wait until this is done.
        while (access_once(a) == reinterpret_cast<Mem_space*>(1))
          ;
    }
}

IMPLEMENT inline NEEDS[Mem_space::asid]
void Mem_space::make_current()
{
  _dir->activate(asid());
  _current.current() = this;
}