]> rtime.felk.cvut.cz Git - l4.git/blob - kernel/fiasco/src/kern/ia32/mem_space-ia32.cpp
update
[l4.git] / kernel / fiasco / src / kern / ia32 / mem_space-ia32.cpp
INTERFACE [ia32 || ux || amd64]:

// x86-specific part of Mem_space: a user/kernel address space backed by
// an ia32/amd64 page directory (Pdir).
EXTENSION class Mem_space
{
public:
  typedef Pdir Dir_type;

  /** Return status of v_insert. */
  enum // Status
  {
    Insert_ok = 0,              ///< Mapping was added successfully.
    Insert_warn_exists,         ///< Mapping already existed
    Insert_warn_attrib_upgrade, ///< Mapping already existed, attribs upgrade
    Insert_err_nomem,           ///< Couldn't alloc new page table
    Insert_err_exists           ///< A mapping already exists at the target addr
  };

  /** Attribute masks for page mappings (bits taken from Pt_entry). */
  enum Page_attrib
  {
    Page_no_attribs = 0,
    /// Page is writable.
    Page_writable = Pt_entry::Writable,
    Page_cacheable = 0,
    /// Page is noncacheable.
    Page_noncacheable = Pt_entry::Noncacheable | Pt_entry::Write_through,
    /// it's a user page.
    Page_user_accessible = Pt_entry::User,
    /// Page has been referenced
    Page_referenced = Pt_entry::Referenced,
    /// Page is dirty
    Page_dirty = Pt_entry::Dirty,
    Page_references = Page_referenced | Page_dirty,
    /// A mask which contains all mask bits
    Page_all_attribs = Page_writable | Page_noncacheable |
                       Page_user_accessible | Page_referenced | Page_dirty,
  };

  // Mapping utilities

  enum                          // Definitions for map_util
  {
    Need_insert_tlb_flush = 0,  ///< no TLB flush needed after v_insert here
    Map_page_size = Config::PAGE_SIZE,
    Page_shift = Config::PAGE_SHIFT,
    Whole_space = MWORD_BITS,
    Identity_map = 0,
  };


  // Hooks invoked on mapping changes.  They are no-ops on native
  // ia32/amd64 (see the IMPLEMENTATION section below); UX needs them.
  void  page_map        (Address phys, Address virt,
                         Address size, Attr);

  void  page_unmap      (Address virt, Address size);

  void  page_protect    (Address virt, Address size,
                         unsigned page_attribs);

protected:
  // DATA
  Dir_type *_dir;   ///< page directory of this address space
};
63
64 //----------------------------------------------------------------------------
65 IMPLEMENTATION [ia32 || ux || amd64]:
66
67 #include <cstring>
68 #include <cstdio>
69 #include "cpu.h"
70 #include "kdb_ke.h"
71 #include "l4_types.h"
72 #include "mem_layout.h"
73 #include "paging.h"
74 #include "std_macros.h"
75
76
77
78
// Create an empty Mem_space charged against quota `q`; the page
// directory is allocated later by initialize().
PUBLIC explicit inline
Mem_space::Mem_space(Ram_quota *q) : _quota(q), _dir(0) {}
81
82 PROTECTED inline
83 bool
84 Mem_space::initialize()
85 {
86   void *b;
87   if (EXPECT_FALSE(!(b = Kmem_alloc::allocator()
88           ->q_alloc(_quota, Config::PAGE_SHIFT))))
89     return false;
90
91   _dir = static_cast<Dir_type*>(b);
92   _dir->clear(false);   // initialize to zero
93   return true; // success
94 }
95
// Construct a Mem_space around an already existing page directory.
// Registers this object as the kernel space and as the current space of
// the boot CPU (used for the initial/kernel address space).
PUBLIC
Mem_space::Mem_space(Ram_quota *q, Dir_type* pdir)
  : _quota(q), _dir(pdir)
{
  _kernel_space = this;
  _current.cpu(Cpu_number::boot_cpu()) = this;
}
103
// An unmap that revokes the R(ead) right removes the whole mapping
// rather than just downgrading its attributes.
PUBLIC static inline
bool
Mem_space::is_full_flush(L4_fpage::Rights rights)
{
  return rights & L4_fpage::Rights::R();
}
110
// Whether the CPU supports superpage mappings (queried from the Cpu
// abstraction).
PUBLIC inline NEEDS["cpu.h"]
static bool
Mem_space::has_superpages()
{
  return Cpu::have_superpages();
}
117
118
// Flush the TLB, but only if this space is the one currently active on
// this CPU; an inactive space needs no flush here, since switching it in
// reloads CR3 anyway (see make_current()).
PUBLIC inline NEEDS["mem_unit.h"]
void
Mem_space::tlb_flush(bool = false)
{
  if (_current.current() == this)
    Mem_unit::tlb_flush();
}
126
127
// Per-CPU pointer to the Mem_space whose page table is currently loaded
// on `cpu`.
IMPLEMENT inline
Mem_space *
Mem_space::current_mem_space(Cpu_number cpu) /// XXX: do not fix, deprecated, remove!
{
  return _current.cpu(cpu);
}
134
135 PUBLIC inline
136 bool
137 Mem_space::set_attributes(Virt_addr virt, Attr page_attribs)
138 {
139   auto i = _dir->walk(virt);
140
141   if (!i.is_valid())
142     return false;
143
144   i.set_attribs(page_attribs);
145   return true;
146 }
147
148
// Nothing to do here on x86; the actual page-table teardown happens in
// the destructor via dir_shutdown().
PROTECTED inline
void
Mem_space::destroy()
{}
153
/**
 * Free the page-table levels owned by this address space.
 *
 * The user part is destroyed down to the leaf level; for the kernel
 * part only the unshared upper levels (down to Super_level) are freed,
 * because the deeper kernel page tables are shared between all spaces.
 */
PRIVATE
void
Mem_space::dir_shutdown()
{
  // free all page tables we have allocated for this address space
  // except the ones in kernel space which are always shared
  _dir->destroy(Virt_addr(0UL),
                Virt_addr(Mem_layout::User_max), 0, Pdir::Depth,
                Kmem_alloc::q_allocator(_quota));

  // free all unshared page table levels for the kernel space
  _dir->destroy(Virt_addr(Mem_layout::User_max + 1),
                Virt_addr(~0UL), 0, Pdir::Super_level,
                Kmem_alloc::q_allocator(_quota));

}
174
/**
 * Insert a mapping of `phys` at `virt` with the given order and
 * attributes, allocating intermediate page tables from this space's
 * quota as needed.
 *
 * \return Insert_ok on a fresh mapping, Insert_warn_exists /
 *         Insert_warn_attrib_upgrade when the same frame was already
 *         mapped, Insert_err_exists when a conflicting mapping is in
 *         the way, Insert_err_nomem when a page table could not be
 *         allocated.
 */
IMPLEMENT
Mem_space::Status
Mem_space::v_insert(Phys_addr phys, Vaddr virt, Page_order size,
                    Attr page_attribs)
{
  // insert page into page table

  // XXX should modify page table using compare-and-swap

  assert (cxx::get_lsb(Phys_addr(phys), size) == 0);
  assert (cxx::get_lsb(Virt_addr(virt), size) == 0);

  // Find the deepest page-table level whose page size still covers the
  // requested mapping size.
  int level;
  for (level = 0; level <= Pdir::Depth; ++level)
    if (Page_order(Pdir::page_order_for_level(level)) <= size)
      break;

  auto i = _dir->walk(virt, level, false,
                            Kmem_alloc::q_allocator(_quota));

  // Walk stopped above the requested level: page-table allocation failed.
  if (EXPECT_FALSE(!i.is_valid() && i.level != level))
    return Insert_err_nomem;

  // A different mapping (other level or other frame) is already present.
  if (EXPECT_FALSE(i.is_valid()
                   && (i.level != level || Phys_addr(i.page_addr()) != phys)))
    return Insert_err_exists;

  if (i.is_valid())
    {
      // Same frame already mapped: try an attribute upgrade.
      if (EXPECT_FALSE(!i.add_attribs(page_attribs)))
        return Insert_warn_exists;  // nothing new to add

      // hook for UX; no-op on native ia32/amd64
      page_protect(Virt_addr::val(virt), Address(1) << Page_order::val(size),
                   *i.pte & Page_all_attribs);

      return Insert_warn_attrib_upgrade;
    }
  else
    {
      i.create_page(phys, page_attribs);
      // hook for UX; no-op on native ia32/amd64
      page_map(Virt_addr::val(phys), Virt_addr::val(virt),
               Address(1) << Page_order::val(size), page_attribs);

      return Insert_ok;
    }

}
222
223 IMPLEMENT
224 void
225 Mem_space::v_set_access_flags(Vaddr virt, L4_fpage::Rights access_flags)
226 {
227   auto i = _dir->walk(virt);
228
229   if (EXPECT_FALSE(!i.is_valid()))
230     return;
231
232   unsigned page_attribs = 0;
233
234   if (access_flags & L4_fpage::Rights::R())
235     page_attribs |= Page_referenced;
236   if (access_flags & L4_fpage::Rights::W())
237     page_attribs |= Page_dirty;
238
239   i.add_attribs(page_attribs);
240 }
241
/**
 * Simple page-table lookup.
 *
 * @param virt Virtual address.  This address does not need to be page-aligned.
 * @return Physical address corresponding to virt.
 */
PUBLIC inline NEEDS ["paging.h"]
Address
Mem_space::virt_to_phys(Address virt) const
{
  return dir()->virt_to_phys(virt);
}
254
/**
 * Translate a kernel "pmem" (physically mapped) virtual address to its
 * physical address.
 *
 * Unlike virt_to_phys(), this does not walk the page table; it uses the
 * linear kernel memory mapping maintained by Mem_layout.
 *
 * @param virt Virtual address inside the kernel's pmem area.
 * @return Physical address corresponding to virt.
 */
PUBLIC inline NEEDS ["mem_layout.h"]
Address
Mem_space::pmem_to_phys(Address virt) const
{
  return Mem_layout::pmem_to_phys(virt);
}
267
/**
 * Page-table lookup, virtual variant used for Sigma0 translations.
 *
 * NOTE(review): the historical comment here claimed that Sigma0 is
 * special-cased (physical address == virtual address without consulting
 * the page table).  The code below performs a plain page-table lookup
 * for every space; presumably Sigma0's page table is populated so that
 * the lookup yields the identity result -- confirm before relying on
 * that behavior.
 *
 * @param a Virtual address.  This address does not need to be page-aligned.
 * @return Physical address corresponding to a.
 */
PUBLIC inline
virtual Address
Mem_space::virt_to_phys_s0(void *a) const
{
  return dir()->virt_to_phys((Address)a);
}
287
288 IMPLEMENT
289 bool
290 Mem_space::v_lookup(Vaddr virt, Phys_addr *phys,
291                     Page_order *order, Attr *page_attribs)
292 {
293   auto i = _dir->walk(virt);
294   if (order) *order = Page_order(i.page_order());
295
296   if (!i.is_valid())
297     return false;
298
299   if (phys) *phys = Phys_addr(i.page_addr());
300   if (page_attribs) *page_attribs = i.attribs();
301
302   return true;
303 }
304
/**
 * Revoke rights from (or remove) the mapping at `virt`.
 *
 * Revoking R() deletes the whole entry; revoking anything less only
 * downgrades the entry's rights.
 *
 * \return the accessed/dirty flags the entry had before the change,
 *         or 0 if nothing was mapped at `virt`.
 */
IMPLEMENT
L4_fpage::Rights
Mem_space::v_delete(Vaddr virt, Page_order size, L4_fpage::Rights page_attribs)
{
  assert (cxx::get_lsb(Virt_addr(virt), size) == 0);

  auto i = _dir->walk(virt);

  if (EXPECT_FALSE (! i.is_valid()))
    return L4_fpage::Rights(0);

  assert (! (*i.pte & Pt_entry::global())); // Cannot unmap shared pages

  // Remember accessed/dirty state to report back to the caller.
  L4_fpage::Rights ret = i.access_flags();

  if (! (page_attribs & L4_fpage::Rights::R()))
    {
      // downgrade PDE (superpage) rights
      i.del_rights(page_attribs);
      page_protect(Virt_addr::val(virt), Address(1) << Page_order::val(size),
                   *i.pte & Page_all_attribs);
    }
  else
    {
      // delete PDE (superpage)
      i.clear();
      page_unmap(Virt_addr::val(virt), Address(1) << Page_order::val(size));
    }

  return ret;
}
336
/**
 * \brief Free all memory allocated for this Mem_space.
 * \pre Runs after the destructor!
 */
PUBLIC
Mem_space::~Mem_space()
{
  if (_dir)
    {
      // Free all owned page tables, then the page-directory page
      // itself, returning the memory to this space's quota.
      dir_shutdown();
      Kmem_alloc::allocator()->q_free(_quota, Config::PAGE_SHIFT, _dir);
    }
}
350
351
352 // --------------------------------------------------------------------
353 IMPLEMENTATION [ia32 || amd64]:
354
355 #include <cassert>
356 #include "l4_types.h"
357 #include "kmem.h"
358 #include "mem_unit.h"
359 #include "cpu_lock.h"
360 #include "lock_guard.h"
361 #include "logdefs.h"
362 #include "paging.h"
363
364 #include <cstring>
365 #include "config.h"
366 #include "kmem.h"
367
// Activate this address space: load the page-directory base register
// (CR3) with the physical address of our directory and record this
// space as current on this CPU.
IMPLEMENT inline NEEDS ["cpu.h", "kmem.h"]
void
Mem_space::make_current()
{
  Cpu::set_pdbr((Mem_layout::pmem_to_phys(_dir)));
  _current.cpu(current_cpu()) = this;
}
375
// Physical address of this space's page directory (the value loaded
// into CR3 by make_current()).
PUBLIC inline NEEDS ["kmem.h"]
Address
Mem_space::phys_dir()
{
  return Mem_layout::pmem_to_phys(_dir);
}
382
/*
 * The following functions are all no-ops on native ia32.
 * Pages appear in an address space as soon as the corresponding PTE is
 * written ... unlike Fiasco-UX which needs these special tricks
 */
388
// No-op on native ia32/amd64 (see comment above); hook for UX.
IMPLEMENT inline
void
Mem_space::page_map(Address, Address, Address, Attr)
{}
393
// No-op on native ia32/amd64 (see comment above); hook for UX.
IMPLEMENT inline
void
Mem_space::page_protect(Address, Address, unsigned)
{}
398
// No-op on native ia32/amd64 (see comment above); hook for UX.
IMPLEMENT inline
void
Mem_space::page_unmap(Address, Address)
{}
403
// Switch the CPU to this address space, skipping the CR3 reload when we
// are already the active space.
IMPLEMENT inline NEEDS["kmem.h", "logdefs.h"]
void
Mem_space::switchin_context(Mem_space *from)
{
  // FIXME: this optimization breaks SMP task deletion, an idle thread
  // may run on an already deleted page table
#if 0
  // never switch to kernel space (context of the idle thread)
  if (dir() == Kmem::dir())
    return;
#endif

  if (from != this)
    {
      CNT_ADDR_SPACE_SWITCH;
      make_current();
    }
}
422
// Synchronize the kernel part of this space's page directory (everything
// above User_max) with the master kernel directory (Kmem::dir()), down
// to the superpage level, so kernel mappings are visible while this
// space is active.
PROTECTED inline
int
Mem_space::sync_kernel()
{
  return _dir->sync(Virt_addr(Mem_layout::User_max + 1), Kmem::dir(),
                    Virt_addr(Mem_layout::User_max + 1),
                    Virt_size(-(Mem_layout::User_max + 1)), Pdir::Super_level,
                    false,
                    Kmem_alloc::q_allocator(_quota));
}
433
434 // --------------------------------------------------------------------
435 IMPLEMENTATION [amd64]:
436
437 #include "cpu.h"
438
// Sign-extend a page number into x86-64 canonical form.
// NOTE(review): this tests and extends from bit 48 of the corresponding
// virtual address, while the canonical boundary for 4-level paging is
// bit 47 (bits 63:47 must be equal) -- presumably the Virt_addr ->
// Page_number conversion accounts for the offset; confirm against the
// Page_number/Virt_addr definitions.
PUBLIC static inline
Page_number
Mem_space::canonize(Page_number v)
{
  if (v & Page_number(Virt_addr(1UL << 48)))
    v = v | Page_number(Virt_addr(~0UL << 48));
  return v;
}
447
// Register the page sizes this CPU supports: 4KB always, 2MB when
// superpages are available, 1GB when the CPU advertises gigabyte pages.
PUBLIC static
void
Mem_space::init_page_sizes()
{
  add_page_size(Page_order(Config::PAGE_SHIFT));
  if (Cpu::cpus.cpu(Cpu_number::boot_cpu()).superpages())
    add_page_size(Page_order(21)); // 2MB

  // CPUID leaf 0x80000001, EDX bit 26: 1GB page support
  if (Cpu::cpus.cpu(Cpu_number::boot_cpu()).ext_8000_0001_edx() & (1UL<<26))
    add_page_size(Page_order(30)); // 1GB
}
459
460 // --------------------------------------------------------------------
461 IMPLEMENTATION [ia32 || ux]:
462
463 #include "cpu.h"
464
// 32-bit addresses are always canonical; nothing to do here.
PUBLIC static inline
Page_number
Mem_space::canonize(Page_number v)
{ return v; }
469
// Register the page sizes this CPU supports: 4KB always, 4MB when the
// CPU has superpage (PSE) support.
PUBLIC static
void
Mem_space::init_page_sizes()
{
  add_page_size(Page_order(Config::PAGE_SHIFT));
  if (Cpu::cpus.cpu(Cpu_number::boot_cpu()).superpages())
    add_page_size(Page_order(22)); // 4MB
}