]> rtime.felk.cvut.cz Git - l4.git/blob - kernel/fiasco/src/kern/ia32/mem_space-ia32.cpp
a3b3695ac1258961160f1affb9d81b9003405655
[l4.git] / kernel / fiasco / src / kern / ia32 / mem_space-ia32.cpp
1 INTERFACE [ia32 || ux || amd64]:
2
EXTENSION class Mem_space
{
public:
  /// Architecture-specific page-directory type for this address space.
  typedef Pdir Dir_type;

  /** Return status of v_insert. */
  enum // Status
  {
    Insert_ok = 0,              ///< Mapping was added successfully.
    Insert_warn_exists,         ///< Mapping already existed
    Insert_warn_attrib_upgrade, ///< Mapping already existed, attribs upgrade
    Insert_err_nomem,           ///< Couldn't alloc new page table
    Insert_err_exists           ///< A mapping already exists at the target addr
  };

  /** Attribute masks for page mappings. */
  enum Page_attrib
  {
    Page_no_attribs = 0,
    /// Page is writable.
    Page_writable = Pt_entry::Writable,
    Page_cacheable = 0,
    /// Page is noncacheable.
    Page_noncacheable = Pt_entry::Noncacheable | Pt_entry::Write_through,
    /// it's a user page.
    Page_user_accessible = Pt_entry::User,
    /// Page has been referenced
    Page_referenced = Pt_entry::Referenced,
    /// Page is dirty
    Page_dirty = Pt_entry::Dirty,
    /// Combined hardware reference/dirty bits.
    Page_references = Page_referenced | Page_dirty,
    /// A mask which contains all mask bits
    Page_all_attribs = Page_writable | Page_noncacheable |
                       Page_user_accessible | Page_referenced | Page_dirty,
  };

  // Mapping utilities

  enum                          // Definitions for map_util
  {
    Need_insert_tlb_flush = 0,
    Map_page_size = Config::PAGE_SIZE,
    Page_shift = Config::PAGE_SHIFT,
    Map_superpage_size = Config::SUPERPAGE_SIZE,
    Map_max_address = Mem_layout::User_max,
    Whole_space = MWORD_BITS,
    Identity_map = 0,
  };

  // Architecture hooks called by v_insert/v_delete; implemented as
  // no-ops on native ia32/amd64 (see the IMPLEMENTATION section below).

  void  page_map        (Address phys, Address virt,
                         Address size, unsigned page_attribs);

  void  page_unmap      (Address virt, Address size);

  void  page_protect    (Address virt, Address size,
                         unsigned page_attribs);

protected:
  // DATA
  Dir_type *_dir;               ///< Page directory of this address space.
};
65
66 //----------------------------------------------------------------------------
67 IMPLEMENTATION [ia32 || ux || amd64]:
68
69 #include <cstring>
70 #include <cstdio>
71 #include "cpu.h"
72 #include "kdb_ke.h"
73 #include "l4_types.h"
74 #include "mem_layout.h"
75 #include "paging.h"
76 #include "std_macros.h"
77
78
79
80
/**
 * Construct an empty address space charged to quota @a q.
 * The page directory is allocated later by initialize().
 */
PUBLIC explicit inline
Mem_space::Mem_space(Ram_quota *q) : _quota(q), _dir(0) {}
83
84 PROTECTED inline
85 bool
86 Mem_space::initialize()
87 {
88   void *b;
89   if (EXPECT_FALSE(!(b = Kmem_alloc::allocator()
90           ->q_alloc(_quota, Config::PAGE_SHIFT))))
91     return false;
92
93   _dir = static_cast<Dir_type*>(b);
94   _dir->clear();        // initialize to zero
95   return true; // success
96 }
97
/**
 * Construct the kernel Mem_space around an existing page directory.
 *
 * Side effects: registers this object as the kernel space and as the
 * space currently active on CPU 0.
 *
 * @param q     Quota to charge future allocations against.
 * @param pdir  Pre-existing page directory to adopt (not allocated here).
 */
PUBLIC
Mem_space::Mem_space(Ram_quota *q, Dir_type* pdir)
  : _quota(q), _dir(pdir)
{
  _kernel_space = this;
  _current.cpu(0) = this;
}
105
106
107 PUBLIC static inline
108 Mword
109 Mem_space::xlate_flush(unsigned char rights)
110 {
111   Mword a = Page_references;
112   if (rights & L4_fpage::RX)
113     a |= Page_all_attribs;
114   else if (rights & L4_fpage::W)
115     a |= Page_writable;
116   return a;
117 }
118
/**
 * Does revoking @a rights remove the mapping entirely?
 *
 * @return Nonzero iff the RX rights are among those revoked; callers
 *         treat the result as a boolean.
 */
PUBLIC static inline
Mword
Mem_space::is_full_flush(unsigned char rights)
{
  return rights & L4_fpage::RX;
}
125
126 PUBLIC static inline
127 unsigned char
128 Mem_space::xlate_flush_result(Mword attribs)
129 {
130   unsigned char r = 0;
131   if (attribs & Page_referenced)
132     r |= L4_fpage::RX;
133
134   if (attribs & Page_dirty)
135     r |= L4_fpage::W;
136
137   return r;
138 }
139
/// @return true if the CPU supports superpage (4M/2M) mappings.
PUBLIC inline NEEDS["cpu.h"]
static bool
Mem_space::has_superpages()
{
  return Cpu::have_superpages();
}
146
147
/// Flush the whole TLB; the bool parameter is ignored on this arch.
PUBLIC static inline NEEDS["mem_unit.h"]
void
Mem_space::tlb_flush(bool = false)
{
  Mem_unit::tlb_flush();
}
154
/// Flush TLBs for the given spaces; on this arch all parameters are
/// ignored and the whole TLB is flushed unconditionally.
PUBLIC static inline
void
Mem_space::tlb_flush_spaces(bool, Mem_space *, Mem_space *)
{
  tlb_flush();
}
161
162
/// @return The Mem_space currently active on @a cpu.
IMPLEMENT inline
Mem_space *
Mem_space::current_mem_space(unsigned cpu) /// XXX: do not fix, deprecated, remove!
{
  return _current.cpu(cpu);
}
169
170 PUBLIC inline
171 bool
172 Mem_space::set_attributes(Addr virt, unsigned page_attribs)
173 {
174   Pdir::Iter i = _dir->walk(virt);
175
176   if (!i.e->valid() || i.shift() != Config::PAGE_SHIFT)
177     return 0;
178
179   i.e->del_attr(Page::MAX_ATTRIBS);
180   i.e->add_attr(page_attribs);
181   return true;
182 }
183
184
// No per-space teardown work here; the page tables are freed by
// dir_shutdown(), which the destructor invokes.
PROTECTED inline
void
Mem_space::destroy()
{}
189
/**
 * Free all page tables allocated for the user portion of this address
 * space.  Kernel-space page tables are shared between all spaces and
 * are therefore left untouched.
 */
PRIVATE
void
Mem_space::dir_shutdown()
{
  // free all page tables we have allocated for this address space
  // except the ones in kernel space which are always shared
  _dir->destroy(Virt_addr(0),
                Virt_addr(Kmem::mem_user_max), Pdir::Depth - 1,
                Kmem_alloc::q_allocator(_quota));

}
205
/**
 * Insert a page mapping into the page table, or upgrade the attributes
 * of an existing one.
 *
 * @param phys  Physical address of the frame (must be size-aligned for
 *              superpages, asserted below).
 * @param virt  Virtual address to map at.
 * @param size  Mapping size; must be PAGE_SIZE or SUPERPAGE_SIZE.
 * @param page_attribs  Attribute bits for the entry.
 * @param upgrade_ignore_size  If true, skip the size/address conflict
 *              check and treat any existing entry as upgradable.
 * @return Insert_ok on a new mapping, Insert_warn_exists /
 *         Insert_warn_attrib_upgrade for existing mappings,
 *         Insert_err_nomem if a page table could not be allocated,
 *         Insert_err_exists on a conflicting mapping.
 */
IMPLEMENT
Mem_space::Status
Mem_space::v_insert(Phys_addr phys, Vaddr virt, Vsize size,
                    unsigned page_attribs, bool upgrade_ignore_size)
{
  // insert page into page table

  // XXX should modify page table using compare-and-swap

  assert_kdb (size == Size(Config::PAGE_SIZE)
              || size == Size(Config::SUPERPAGE_SIZE));
  if (size == Size(Config::SUPERPAGE_SIZE))
    {
      assert (Cpu::have_superpages());
      assert (virt.offset(Size(Config::SUPERPAGE_SIZE)) == 0);
      assert (phys.offset(Size(Config::SUPERPAGE_SIZE)) == 0);
    }

  // Target level, entry shift, and extra PSE bit depend on mapping size.
  unsigned level = (size == Size(Config::SUPERPAGE_SIZE) ? (int)Pdir::Super_level : (int)Pdir::Depth);
  unsigned shift = (size == Size(Config::SUPERPAGE_SIZE) ? Config::SUPERPAGE_SHIFT : Config::PAGE_SHIFT);
  unsigned attrs = (size == Size(Config::SUPERPAGE_SIZE) ? (unsigned long)Pt_entry::Pse_bit : 0);

  // Walk down to the target level, allocating intermediate page tables
  // charged against our quota.
  Pdir::Iter i = _dir->walk(virt, level,
                            Kmem_alloc::q_allocator(_quota));

  // Walk stopped above the requested level on an invalid entry:
  // a page-table allocation failed.
  if (EXPECT_FALSE(!i.e->valid() && i.shift() != shift))
    return Insert_err_nomem;

  // A valid entry of different size or pointing elsewhere conflicts,
  // unless the caller explicitly asked to ignore that.
  if (EXPECT_FALSE(!upgrade_ignore_size
        && i.e->valid() && (i.shift() != shift || i.addr() != phys.value())))
    return Insert_err_exists;

  if (i.e->valid())
    {
      // Entry exists: either nothing to add, or an attribute upgrade.
      if (EXPECT_FALSE((i.e->raw() | page_attribs) == i.e->raw()))
        return Insert_warn_exists;

      i.e->add_attr(page_attribs);
      page_protect (Addr(virt).value(), Size(size).value(), i.e->raw() & Page_all_attribs);

      return Insert_warn_attrib_upgrade;
    }
  else
    {
      // Fresh entry: write phys frame, valid bit, PSE bit (if super),
      // and the requested attributes.
      *i.e = Addr(phys).value() | Pt_entry::Valid | attrs | page_attribs;
      page_map (Addr(phys).value(), Addr(virt).value(), Size(size).value(), page_attribs);

      return Insert_ok;
    }
}
256
/**
 * Simple page-table lookup.
 *
 * @param virt Virtual address.  This address does not need to be page-aligned.
 * @return Physical address corresponding to @a virt.
 */
PUBLIC inline NEEDS ["paging.h"]
Address
Mem_space::virt_to_phys(Address virt) const
{
  return dir()->virt_to_phys(virt);
}
269
/**
 * Translate an address inside the kernel's physical-memory window
 * (pmem) back to its physical address.  No page-table walk is
 * performed; this is a pure Mem_layout address computation.
 *
 * @param virt Virtual address within the pmem mapping.
 * @return Physical address corresponding to @a virt.
 */
PUBLIC inline NEEDS ["mem_layout.h"]
Address
Mem_space::pmem_to_phys (Address virt) const
{
  return Mem_layout::pmem_to_phys(virt);
}
282
/**
 * Page-table lookup used for sigma0-style translations.
 *
 * NOTE(review): the original comment here described a special case for
 * Sigma0 (returning phys == virt without consulting the page table);
 * this implementation unconditionally performs an ordinary page-table
 * walk via dir()->virt_to_phys().  Confirm whether the special case is
 * handled elsewhere or the comment was simply stale.
 *
 * @param a Virtual address.  This address does not need to be page-aligned.
 * @return Physical address corresponding to a.
 */
PUBLIC inline
virtual Address
Mem_space::virt_to_phys_s0(void *a) const
{
  return dir()->virt_to_phys((Address)a);
}
302
303 IMPLEMENT
304 bool
305 Mem_space::v_lookup(Vaddr virt, Phys_addr *phys,
306                     Size *size, unsigned *page_attribs)
307 {
308   Pdir::Iter i = _dir->walk(virt);
309   if (size) *size = Size(1UL << i.shift());
310
311   if (!i.e->valid())
312     return false;
313
314   if (phys) *phys = Addr(i.e->addr() & (~0UL << i.shift()));
315   if (page_attribs) *page_attribs = (i.e->raw() & Page_all_attribs);
316
317   return true;
318 }
319
320 IMPLEMENT
321 unsigned long
322 Mem_space::v_delete(Vaddr virt, Vsize size,
323                     unsigned long page_attribs = Page_all_attribs)
324 {
325   unsigned ret;
326
327   // delete pages from page tables
328   assert (size == Size(Config::PAGE_SIZE) || size == Size(Config::SUPERPAGE_SIZE));
329
330   if (size == Size(Config::SUPERPAGE_SIZE))
331     {
332       assert (Cpu::have_superpages());
333       assert (!virt.offset(Size(Config::SUPERPAGE_SIZE)));
334     }
335
336   Pdir::Iter i = _dir->walk(virt);
337
338   if (EXPECT_FALSE (! i.e->valid()))
339     return 0;
340
341   assert (! (i.e->raw() & Pt_entry::global())); // Cannot unmap shared ptables
342
343   ret = i.e->raw() & page_attribs;
344
345   if (! (page_attribs & Page_user_accessible))
346     {
347       // downgrade PDE (superpage) rights
348       i.e->del_attr(page_attribs);
349       page_protect (Addr(virt).value(), Size(size).value(), i.e->raw() & Page_all_attribs);
350     }
351   else
352     {
353       // delete PDE (superpage)
354       *i.e = 0;
355       page_unmap (Addr(virt).value(), Size(size).value());
356     }
357
358   return ret;
359 }
360
/**
 * \brief Free all memory allocated for this Mem_space.
 * \pre Runs after the destructor!
 */
PUBLIC
Mem_space::~Mem_space()
{
  if (_dir)
    {
      // Free user-space page tables, then the directory page itself,
      // returning both to this space's quota.
      dir_shutdown();
      Kmem_alloc::allocator()->q_free(_quota, Config::PAGE_SHIFT, _dir);
    }
}
374
375
376 // --------------------------------------------------------------------
377 IMPLEMENTATION [ia32 || amd64]:
378
379 #include <cassert>
380 #include "l4_types.h"
381 #include "kmem.h"
382 #include "mem_unit.h"
383 #include "cpu_lock.h"
384 #include "lock_guard.h"
385 #include "logdefs.h"
386 #include "paging.h"
387
388 #include <cstring>
389 #include "config.h"
390 #include "kmem.h"
391
/**
 * Switch the CPU to this address space: load CR3 with the physical
 * address of our page directory and record this space as current on
 * the executing CPU.
 */
IMPLEMENT inline NEEDS ["cpu.h", "kmem.h"]
void
Mem_space::make_current()
{
  Cpu::set_pdbr((Mem_layout::pmem_to_phys(_dir)));
  _current.cpu(current_cpu()) = this;
}
399
/// @return Physical address of this space's page directory.
PUBLIC inline NEEDS ["kmem.h"]
Address
Mem_space::phys_dir()
{
  return Mem_layout::pmem_to_phys(_dir);
}
406
407 /*
408  * The following functions are all no-ops on native ia32.
409  * Pages appear in an address space when the corresponding PTE is made
410  * ... unlike Fiasco-UX which needs these special tricks
411  */
412
// No-op on native ia32/amd64: writing the PTE alone makes the page
// visible; only Fiasco-UX needs extra work here.
IMPLEMENT inline
void
Mem_space::page_map (Address, Address, Address, unsigned)
{}
417
// No-op on native ia32/amd64: attribute changes take effect through
// the PTE itself; only Fiasco-UX needs extra work here.
IMPLEMENT inline
void
Mem_space::page_protect (Address, Address, unsigned)
{}
422
// No-op on native ia32/amd64: clearing the PTE alone removes the page;
// only Fiasco-UX needs extra work here.
IMPLEMENT inline
void
Mem_space::page_unmap (Address, Address)
{}
427
/**
 * Activate this address space on the current CPU if we are switching
 * away from a different space; same-space switches skip the (costly)
 * CR3 reload.
 *
 * @param from  The space we are switching away from.
 */
IMPLEMENT inline NEEDS["kmem.h", "logdefs.h"]
void
Mem_space::switchin_context(Mem_space *from)
{
  // FIXME: this optimization breaks SMP task deletion, an idle thread
  // may run on an already deleted page table
#if 0
  // never switch to kernel space (context of the idle thread)
  if (dir() == Kmem::dir())
    return;
#endif

  if (from != this)
    {
      CNT_ADDR_SPACE_SWITCH;
      make_current();
    }
}
446
/**
 * Copy the kernel part of the master page directory (everything above
 * User_max) into this space's directory, allocating intermediate
 * tables against our quota as needed.
 */
PROTECTED inline
void
Mem_space::sync_kernel()
{
  _dir->sync(Virt_addr(Mem_layout::User_max), Kmem::dir(),
             Virt_addr(Mem_layout::User_max),
             Virt_addr(-Mem_layout::User_max), Pdir::Super_level,
             Kmem_alloc::q_allocator(_quota));
}
456
457 // --------------------------------------------------------------------
458 IMPLEMENTATION [amd64]:
459
/**
 * Sign-extend an address to x86-64 canonical form.
 *
 * NOTE(review): canonical form with 48-bit virtual addresses is
 * defined by bit 47 (bits 63..47 must be equal), but this code tests
 * and extends from bit 48 — confirm whether Page_number values here
 * are scaled such that this is intentional, or whether this is off
 * by one.
 */
PUBLIC static inline
Page_number
Mem_space::canonize(Page_number v)
{
  if (v & Virt_addr(1UL << 48))
    v = v | Virt_addr(~0UL << 48);
  return v;
}
468
469 // --------------------------------------------------------------------
470 IMPLEMENTATION [ia32 || ux]:
471
/// Identity on ia32/ux: 32-bit addresses need no canonicalization.
PUBLIC static inline
Page_number
Mem_space::canonize(Page_number v)
{ return v; }