l4.git: kernel/fiasco/src/kern/arm/mem_space-arm.cpp
INTERFACE [arm]:

#include "kmem.h"               // for "_unused_*" virtual memory regions
#include "member_offs.h"
#include "paging.h"
#include "types.h"
#include "pagetable.h"
#include "ram_quota.h"

EXTENSION class Mem_space
{
  friend class Jdb;

public:
  typedef Page_table Dir_type;

  /** Return status of v_insert. */
  enum // Status
  {
    Insert_ok = Page_table::E_OK,               ///< Mapping was added successfully.
    Insert_err_nomem  = Page_table::E_NOMEM,  ///< Couldn't allocate a new page table.
    Insert_err_exists = Page_table::E_EXISTS, ///< A mapping already exists at the target address.
    Insert_warn_attrib_upgrade = Page_table::E_UPGRADE, ///< Mapping already existed, attributes upgraded.
    Insert_warn_exists,         ///< Mapping already existed.
  };

  /** Attribute masks for page mappings. */
  enum Page_attrib
  {
    Page_no_attribs = 0,
    /// Page is writable.
    Page_writable = Mem_page_attr::Write,
    /// It's a user page (USER_NO | USER_RO = USER_RW).
    Page_user_accessible = Mem_page_attr::User,
    /// Page is noncacheable.
    Page_noncacheable = Page::NONCACHEABLE,
    Page_cacheable = Page::CACHEABLE,
    /// A mask which contains all attribute bits.
    Page_all_attribs = Page_user_accessible | Page_writable | Page_cacheable,
    Page_referenced = 0,
    Page_dirty = 0,
    Page_references = 0,
  };

  // Mapping utilities

  enum                          // Definitions for map_util
  {
    Need_insert_tlb_flush = 1,
    Map_page_size = Config::PAGE_SIZE,
    Page_shift = Config::PAGE_SHIFT,
    Map_superpage_size = Config::SUPERPAGE_SIZE,
    Map_max_address = Mem_layout::User_max,
    Whole_space = 32,
    Identity_map = 0,
  };


  static void kernel_space(Mem_space *);
  static bool has_superpages() { return true; }


private:
  // DATA
  Dir_type *_dir;
};

//---------------------------------------------------------------------------
IMPLEMENTATION [arm]:

#include <cassert>
#include <cstring>

#include "atomic.h"
#include "config.h"
#include "globals.h"
#include "kdb_ke.h"
#include "mapped_alloc.h"
#include "l4_types.h"
#include "panic.h"
#include "paging.h"
#include "kmem.h"
#include "mem_unit.h"


PUBLIC static inline
Mword
Mem_space::xlate_flush(unsigned char rights)
{
  Mword a = Page_references;
  if (rights & L4_fpage::RX)
    a |= Page_all_attribs;
  else if (rights & L4_fpage::W)
    a |= Page_writable;
  return a;
}

PUBLIC static inline
unsigned char
Mem_space::xlate_flush_result(Mword attribs)
{
  unsigned char r = 0;
  if (attribs & Page_referenced)
    r |= L4_fpage::RX;

  if (attribs & Page_dirty)
    r |= L4_fpage::W;

  return r;
}
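
// Illustrative sketch (not part of the original source): how the xlate_*
// helpers above are meant to pair up during a rights revocation.  The
// caller below is hypothetical.  Note that because Page_referenced and
// Page_dirty are both 0 on ARM, xlate_flush_result() always returns 0 in
// this implementation.
#if 0
static void example_flush_rights(Mem_space *s, Mem_space::Vaddr va,
                                 Mem_space::Vsize sz)
{
  // Revoking W maps to the Page_writable attribute bit.
  Mword mask = Mem_space::xlate_flush(L4_fpage::W);
  // v_delete() returns the attribute bits that were actually set.
  unsigned long old_attribs = s->v_delete(va, sz, mask);
  // Translate back into fpage rights for the caller.
  unsigned char rights = Mem_space::xlate_flush_result(old_attribs);
  (void)rights;
}
#endif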

// Mapping utilities


PUBLIC inline NEEDS["mem_unit.h"]
void
Mem_space::tlb_flush(bool force = false)
{
  if (!Have_asids)
    Mem_unit::tlb_flush();
  else if (force && c_asid())
    Mem_unit::tlb_flush(c_asid());

  // else do nothing, we manage ASID-local flushes in v_* already
  // Mem_unit::tlb_flush();
}

PUBLIC inline
void
Mem_space::enable_reverse_lookup()
{
  // Store reverse pointer to Space in page directory
  assert(((unsigned long)this & 0x03) == 0);
  Pte pte = _dir->walk((void*)Mem_layout::Space_index,
      Config::SUPERPAGE_SIZE, false, 0 /* never allocates */);

  pte.set_invalid((unsigned long)this, false);
}

IMPLEMENT inline
Mem_space *Mem_space::current_mem_space(unsigned cpu)
{
  Pte pte = Page_table::current(cpu)->walk((void*)Mem_layout::Space_index,
      Config::SUPERPAGE_SIZE, false, 0 /* never allocates */);
  return reinterpret_cast<Mem_space*>(pte.raw());
}
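
// Illustrative sketch (not part of the original source): the reverse-lookup
// trick above stores the owning Mem_space pointer in an *invalid* PTE at the
// reserved Mem_layout::Space_index slot, so it never appears as a real
// mapping; current_mem_space() reads it back out of whichever page table is
// active.  The invariant check below is hypothetical.
#if 0
static void example_reverse_lookup_invariant(Mem_space *s, unsigned cpu)
{
  s->make_current();
  // Walking the active page directory at Space_index must recover
  // exactly the Mem_space that was just made current.
  assert(Mem_space::current_mem_space(cpu) == s);
}
#endif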


PRIVATE inline
Page_table *Mem_space::current_pdir()
{
  return Page_table::current();
}

IMPLEMENT inline NEEDS ["kmem.h", Mem_space::c_asid, Mem_space::need_tlb_flush]
void Mem_space::switchin_context(Mem_space *from)
{
#if 0
  // never switch to kernel space (context of the idle thread)
  if (this == kernel_space())
    return;
#endif

  if (from != this)
    make_current();
  else if (need_tlb_flush())
    tlb_flush(true);
#if 0
  _dir->invalidate((void*)Kmem::ipc_window(0), Config::SUPERPAGE_SIZE * 4,
      c_asid());
#endif
}


IMPLEMENT inline
void Mem_space::kernel_space(Mem_space *_k_space)
{
  _kernel_space = _k_space;
}


IMPLEMENT
Mem_space::Status
Mem_space::v_insert(Phys_addr phys, Vaddr virt, Vsize size, unsigned page_attribs,
                    bool upgrade_ignore_size)
{
  bool flush = Page_table::current() == _dir;
  Pte pte = _dir->walk((void*)virt.value(), size.value(), flush, ram_quota());
  if (pte.valid())
    {
      if (EXPECT_FALSE(!upgrade_ignore_size
            && (pte.size() != size.value() || pte.phys() != phys.value())))
        return Insert_err_exists;
      if (pte.attr().get_abstract() == page_attribs)
        return Insert_warn_exists;
    }
  else
    {
      Mem_page_attr a(Page::Local_page);
      a.set_abstract(page_attribs);
      pte.set(phys.value(), size.value(), a, flush);
      return Insert_ok;
    }

  Mem_page_attr a = pte.attr();
  a.set_abstract(a.get_abstract() | page_attribs);
  pte.set(phys.value(), size.value(), a, flush);

  if (Have_asids)
    Mem_unit::tlb_flush((void*)virt.value(), c_asid());

  return Insert_warn_attrib_upgrade;
}
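
// Illustrative sketch (not part of the original source): how a caller might
// dispatch on v_insert()'s Status codes; example_map_one_page() and its
// attribute choice are hypothetical.
#if 0
static bool example_map_one_page(Mem_space *s, Mem_space::Phys_addr pa,
                                 Mem_space::Vaddr va, Mem_space::Vsize sz)
{
  switch (s->v_insert(pa, va, sz,
                      Mem_space::Page_user_accessible
                      | Mem_space::Page_cacheable, false))
    {
    case Mem_space::Insert_ok:
    case Mem_space::Insert_warn_exists:
    case Mem_space::Insert_warn_attrib_upgrade:
      return true;                  // mapping is present afterwards
    case Mem_space::Insert_err_nomem:
      return false;                 // quota exhausted, no page table
    case Mem_space::Insert_err_exists:
      return false;                 // conflicting mapping at this address
    }
  return false;
}
#endif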


/**
 * Simple page-table lookup.
 *
 * @param virt Virtual address.  This address does not need to be page-aligned.
 * @return Physical address corresponding to virt.
 */
PUBLIC inline NEEDS ["paging.h"]
Address
Mem_space::virt_to_phys(Address virt) const
{
  Pte pte = _dir->walk((void*)virt, 0, false, 0 /* never allocates */);
  if (EXPECT_FALSE(!pte.valid()))
    return ~0UL;

  return (Address)pte.phys((void*)virt);
}
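
// Illustrative sketch (not part of the original source): virt_to_phys()
// signals a failed lookup with the ~0UL sentinel rather than faulting, so
// callers must check for it; translate_or_die() is hypothetical.
#if 0
static Address translate_or_die(Mem_space const *s, Address va)
{
  Address pa = s->virt_to_phys(va);
  assert(pa != ~0UL);   // no mapping at va
  return pa;
}
#endif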

PUBLIC inline NEEDS [Mem_space::virt_to_phys]
Address
Mem_space::pmem_to_phys(Address virt) const
{
  return virt_to_phys(virt);
}

/** Simple page-table lookup.  This method is similar to Mem_space's
    lookup().  The difference is that this version handles
    Sigma0's address space with a special case: For Sigma0, we do not
    actually consult the page table -- it is meaningless because we
    create new mappings for Sigma0 transparently; instead, we return the
    logically-correct result of physical address == virtual address.
    @param a Virtual address.  This address does not need to be page-aligned.
    @return Physical address corresponding to a.
 */
PUBLIC inline
virtual Address
Mem_space::virt_to_phys_s0(void *a) const
{
  return virt_to_phys((Address)a);
}

IMPLEMENT
bool
Mem_space::v_lookup(Vaddr virt, Phys_addr *phys,
                    Size *size, unsigned *page_attribs)
{
  Pte p = _dir->walk((void*)virt.value(), 0, false, 0);

  if (size) *size = Size(p.size());
  if (page_attribs) *page_attribs = p.attr().get_abstract();
  // FIXME: we should not use virt but 0 as offset for phys return value!
  if (phys) *phys = Phys_addr(p.phys((void*)virt.value()));
  return p.valid();
}

IMPLEMENT
unsigned long
Mem_space::v_delete(Vaddr virt, Vsize size,
                    unsigned long page_attribs)
{
  bool flush = Page_table::current() == _dir;
  Pte pte = _dir->walk((void*)virt.value(), 0, false, ram_quota());
  if (EXPECT_FALSE(!pte.valid()))
    return 0;

  if (EXPECT_FALSE(pte.size() != size.value()))
    {
      kdb_ke("v_del: size mismatch\n");
      return 0;
    }

  // Write back and invalidate the cached range covered by this mapping.
  Mem_unit::flush_vcache((void*)(virt.value() & ~(pte.size()-1)),
      (void*)((virt.value() & ~(pte.size()-1)) + pte.size()));

  Mem_page_attr a = pte.attr();
  unsigned long abs_a = a.get_abstract();

  if (!(page_attribs & Page_user_accessible))
    {
      // Only some rights are revoked: downgrade the access permissions.
      a.set_ap(abs_a & ~page_attribs);
      pte.attr(a, flush);
    }
  else
    // User access itself is revoked: remove the mapping entirely.
    pte.set_invalid(0, flush);

  if (Have_asids)
    Mem_unit::tlb_flush((void*)virt.value(), c_asid());

  return abs_a & page_attribs;
}
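
// Illustrative sketch (not part of the original source): v_delete() either
// downgrades access rights or removes the mapping, depending on whether
// Page_user_accessible is in the revocation mask.  example_revoke() is
// hypothetical.
#if 0
static void example_revoke(Mem_space *s, Mem_space::Vaddr va,
                           Mem_space::Vsize sz)
{
  // Revoke only write access: the page stays mapped, read-only.
  s->v_delete(va, sz, Mem_space::Page_writable);

  // Revoke everything including user access: the PTE becomes invalid.
  s->v_delete(va, sz, Mem_space::Page_all_attribs);
}
#endif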


PUBLIC inline
bool
Mem_space::set_attributes(Address virt, unsigned page_attribs)
{
  Pte p = _dir->walk((void*)virt, 0, false, 0);
  if (!p.valid())
    return false;

  Mem_page_attr a = p.attr();
  a.set_ap(page_attribs);
  p.attr(a, true);
  return true;
}

IMPLEMENT inline NEEDS[Mem_space::c_asid]
void Mem_space::kmem_update(void *addr)
{
  _dir->copy_in(addr, kernel_space()->_dir,
                addr, Config::SUPERPAGE_SIZE, c_asid());
}


/**
 * Tests if a task is the sigma0 task.
 * @return true if the task is sigma0, false otherwise.
 */
PUBLIC inline
bool Mem_space::is_sigma0()
{
  return this == sigma0_space;
}

/**
 * \brief Free all memory allocated for this Mem_space.
 * \pre Runs after the destructor!
 */
PUBLIC
Mem_space::~Mem_space()
{
  if (_dir)
    {
      _dir->free_page_tables(0, (void*)Mem_layout::User_max);
      delete _dir;
      ram_quota()->free(sizeof(Page_table));
    }
}


/** Constructor.  Creates a new address space, charging the page
  * directory to the given RAM quota.
  *
  * Allocation may fail if the quota is exhausted.  In this case _dir
  * stays 0 and the newly-created address space must be deleted again.
  *
  * @param q RAM quota to charge for the page directory
  */
PUBLIC
Mem_space::Mem_space(Ram_quota *q)
  : _quota(q),
    _dir(0)
{
  asid(~0UL);

  if (EXPECT_FALSE(!ram_quota()->alloc(sizeof(Page_table))))
    return;

  _dir = new Page_table();
  assert(_dir);

  // copy current shared kernel page directory
  _dir->copy_in((void*)Mem_layout::User_max,
                kernel_space()->_dir,
                (void*)Mem_layout::User_max,
                Mem_layout::Kernel_max - Mem_layout::User_max);

  enable_reverse_lookup();
}
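
// Illustrative sketch (not part of the original source): on quota
// exhaustion the constructor returns early with _dir still 0, and the
// destructor checks _dir before freeing, so a partially constructed space
// can simply be deleted.  example_create() is hypothetical.
#if 0
static void example_create(Ram_quota *q)
{
  Mem_space *s = new Mem_space(q);  // may fail internally if q is exhausted
  // ... detect failure via whatever validity check the generic layer
  // provides, then:
  delete s;                         // safe even when _dir was never set
}
#endif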

PUBLIC
Mem_space::Mem_space(Ram_quota *q, Dir_type *pdir)
  : _quota(q), _dir(pdir)
{
  asid(0);
  enable_reverse_lookup();
}

PUBLIC static inline
Page_number
Mem_space::canonize(Page_number v)
{ return v; }

//----------------------------------------------------------------------------
IMPLEMENTATION [armv5]:

PRIVATE inline
void
Mem_space::asid(unsigned long)
{}

PUBLIC inline
unsigned long
Mem_space::c_asid() const
{ return 0; }

IMPLEMENT inline
void Mem_space::make_current()
{
  _dir->activate();
}


//----------------------------------------------------------------------------
INTERFACE [armv6 || armv7]:

EXTENSION class Mem_space
{
public:
  enum { Have_asids = 1 };
private:
  unsigned long _asid[Config::Max_num_cpus];

  static Per_cpu<unsigned char> _next_free_asid;
  static Per_cpu<Mem_space *[256]>   _active_asids;
};

//----------------------------------------------------------------------------
INTERFACE [!(armv6 || armv7)]:

EXTENSION class Mem_space
{
public:
  enum { Have_asids = 0 };
};


//----------------------------------------------------------------------------
IMPLEMENTATION [armv6 || armv7]:


Per_cpu<unsigned char>    DEFINE_PER_CPU Mem_space::_next_free_asid;
Per_cpu<Mem_space *[256]> DEFINE_PER_CPU Mem_space::_active_asids;

PRIVATE inline
void
Mem_space::asid(unsigned long a)
{
  for (unsigned i = 0; i < Config::Max_num_cpus; ++i)
    _asid[i] = a;
}

PUBLIC inline
unsigned long
Mem_space::c_asid() const
{ return _asid[current_cpu()]; }

PRIVATE inline static
unsigned long
Mem_space::next_asid(unsigned cpu)
{
  unsigned long ret = _next_free_asid.cpu(cpu)++;
  return ret;
}

PRIVATE inline NEEDS[Mem_space::next_asid]
unsigned long
Mem_space::asid()
{
  unsigned cpu = current_cpu();
  if (EXPECT_FALSE(_asid[cpu] == ~0UL))
    {
      // FIFO ASID replacement strategy
      unsigned char new_asid = next_asid(cpu);
      Mem_space **bad_guy = &_active_asids.cpu(cpu)[new_asid];
      while (*bad_guy)
        {
          // need ASID replacement
          if (*bad_guy == current_mem_space(cpu))
            {
              // do not replace the ASID of the current space
              new_asid = next_asid(cpu);
              bad_guy = &_active_asids.cpu(cpu)[new_asid];
              continue;
            }

          //LOG_MSG_3VAL(current(), "ASIDr", new_asid, (Mword)*bad_guy, (Mword)this);
          Mem_unit::tlb_flush(new_asid);
          (*bad_guy)->_asid[cpu] = ~0UL;

          break;
        }

      *bad_guy = this;
      _asid[cpu] = new_asid;
    }

  //LOG_MSG_3VAL(current(), "ASID", (Mword)this, _asid, (Mword)__builtin_return_address(0));
  return _asid[cpu];
}
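
// Illustrative sketch (not part of the original source): the per-CPU ASID
// pool has 256 entries because _next_free_asid is an unsigned char and
// wraps modulo 256.  The scenario below is a hypothetical eviction trace.
#if 0
static void example_asid_eviction(Mem_space *a, Mem_space *b)
{
  // Suppose next_asid() hands out ASID 5 to space 'a' ...
  a->make_current();              // 'a' claims slot 5 in _active_asids
  // ... and, 256 allocations later, the counter wraps back to 5 while
  // 'b' needs an ASID.  Since 'a' is not current, slot 5 is evicted:
  // the TLB entries tagged 5 are flushed and a's _asid is reset to ~0UL,
  // so 'a' transparently re-allocates an ASID on its next switch.
  b->make_current();
}
#endif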

IMPLEMENT inline NEEDS[Mem_space::asid]
void Mem_space::make_current()
{
  _dir->activate(asid());
}