// kernel/fiasco/src/kern/ia32/mem_space-ia32.cpp
// Fiasco microkernel -- ia32/ux/amd64 implementation of Mem_space.
// (Header reconstructed; original lines were web-viewer extraction residue.)
INTERFACE [ia32 || ux || amd64]:

// Architecture-specific extension of Mem_space: page-table type, insert
// status codes, hardware page-attribute bits, and map_util parameters.
EXTENSION class Mem_space
{
public:
  typedef Pdir Dir_type;

  /** Return status of v_insert. */
  enum // Status
  {
    Insert_ok = 0,              ///< Mapping was added successfully.
    Insert_warn_exists,         ///< Mapping already existed
    Insert_warn_attrib_upgrade, ///< Mapping already existed, attribs upgrade
    Insert_err_nomem,           ///< Couldn't alloc new page table
    Insert_err_exists           ///< A mapping already exists at the target addr
  };

  /** Attribute masks for page mappings (values are hardware PTE bits,
      taken from Pt_entry). */
  enum Page_attrib
  {
    Page_no_attribs = 0,
    /// Page is writable.
    Page_writable = Pt_entry::Writable,
    /// Cacheable is the hardware default, hence no extra bits.
    Page_cacheable = 0,
    /// Page is noncacheable.
    Page_noncacheable = Pt_entry::Noncacheable | Pt_entry::Write_through,
    /// It's a user page.
    Page_user_accessible = Pt_entry::User,
    /// Page has been referenced (hardware "accessed" bit).
    Page_referenced = Pt_entry::Referenced,
    /// Page is dirty.
    Page_dirty = Pt_entry::Dirty,
    /// Both reference-tracking bits.
    Page_references = Page_referenced | Page_dirty,
    /// A mask which contains all mask bits.
    Page_all_attribs = Page_writable | Page_noncacheable |
                       Page_user_accessible | Page_referenced | Page_dirty,
  };

  // Mapping utilities

  enum                          // Definitions for map_util
  {
    Need_insert_tlb_flush = 0,  ///< ia32 needs no TLB flush on insert
    Map_page_size = Config::PAGE_SIZE,
    Page_shift = Config::PAGE_SHIFT,
    Map_superpage_size = Config::SUPERPAGE_SIZE,
    Map_max_address = Mem_layout::User_max,
    Whole_space = MWORD_BITS,
    Identity_map = 0,
  };


  // Hooks called when mappings change; no-ops on native ia32/amd64,
  // real work only on Fiasco-UX (see the implementation section).
  void  page_map        (Address phys, Address virt,
                         Address size, unsigned page_attribs);

  void  page_unmap      (Address virt, Address size);

  void  page_protect    (Address virt, Address size,
                         unsigned page_attribs);

protected:
  // DATA
  Dir_type *_dir;  ///< This space's page directory (0 if allocation failed).
};
65
66 //----------------------------------------------------------------------------
67 IMPLEMENTATION [ia32 || ux || amd64]:
68
69 #include <cstring>
70 #include <cstdio>
71 #include "cpu.h"
72 #include "kdb_ke.h"
73 #include "l4_types.h"
74 #include "mem_layout.h"
75 #include "paging.h"
76 #include "std_macros.h"
77
78
/// Per-CPU pointer to the Mem_space currently active on that CPU.
Per_cpu<Mem_space *> DEFINE_PER_CPU Mem_space::_current;
80
81
/**
 * Create a user address space.
 *
 * Allocates one page for the page directory, charged to quota @a q.
 * On allocation failure _dir stays 0 -- the destructor copes with that,
 * but callers must check before using the space.
 *
 * @param q            RAM quota to charge the directory page to.
 * @param sync_kernel  if true, copy the kernel mappings into the new
 *                     directory via initial_sync().
 */
PUBLIC
Mem_space::Mem_space (Ram_quota *q, bool sync_kernel = true)
  : _quota(q),
    _dir (0)
{
  void *b;
  if (EXPECT_FALSE(! (b = Mapped_allocator::allocator()
          ->q_alloc(_quota, Config::PAGE_SHIFT))))
    return;             // out of quota/memory: leave _dir == 0

  _dir = static_cast<Dir_type*>(b);
  _dir->clear();        // initialize to zero
  if (sync_kernel)
    initial_sync();
}
97
/**
 * Create the kernel Mem_space around an already existing page directory.
 *
 * Registers this object as the global kernel space and as the current
 * space of CPU 0 (the boot CPU).
 *
 * @param q     RAM quota (no allocation happens here).
 * @param pdir  pre-built page directory to adopt.
 */
PUBLIC
Mem_space::Mem_space(Ram_quota *q, Dir_type* pdir)
  : _quota(q), _dir (pdir)
{
  _kernel_space = this;
  _current.cpu(0) = this;
}
105
106
107 PUBLIC static inline
108 Mword
109 Mem_space::xlate_flush(unsigned char rights)
110 {
111   Mword a = Page_references;
112   if (rights & L4_fpage::RX)
113     a |= Page_all_attribs;
114   else if (rights & L4_fpage::W)
115     a |= Page_writable;
116   return a;
117 }
118
/**
 * Does revoking @a rights imply removing the mapping entirely?
 *
 * @return nonzero iff the RX right is being revoked (used as a boolean).
 */
PUBLIC static inline
Mword
Mem_space::is_full_flush(unsigned char rights)
{
  return rights & L4_fpage::RX;
}
125
126 PUBLIC static inline
127 unsigned char
128 Mem_space::xlate_flush_result(Mword attribs)
129 {
130   unsigned char r = 0;
131   if (attribs & Page_referenced)
132     r |= L4_fpage::RX;
133
134   if (attribs & Page_dirty)
135     r |= L4_fpage::W;
136
137   return r;
138 }
139
/// True if the CPU supports superpages (delegates to Cpu).
PUBLIC inline NEEDS["cpu.h"]
static bool
Mem_space::has_superpages()
{
  return Cpu::have_superpages();
}
146
147
/// Flush the whole TLB of the current CPU (the bool argument is unused
/// on this architecture).
PUBLIC static inline NEEDS["mem_unit.h"]
void
Mem_space::tlb_flush(bool = false)
{
  Mem_unit::tlb_flush();
}
154
/// Flush TLBs after changes to the given spaces; on this architecture
/// all arguments are ignored and the whole TLB is flushed.
PUBLIC static inline
void
Mem_space::tlb_flush_spaces(bool, Mem_space *, Mem_space *)
{
  tlb_flush();
}
161
162
/// Return the Mem_space currently active on @a cpu.
IMPLEMENT inline
Mem_space *
Mem_space::current_mem_space(unsigned cpu) /// XXX: do not fix, deprecated, remove!
{
  return _current.cpu(cpu);
}
169
170 PUBLIC inline
171 bool
172 Mem_space::set_attributes(Addr virt, unsigned page_attribs)
173 {
174   Pdir::Iter i = _dir->walk(virt);
175
176   if (!i.e->valid() || i.shift() != Config::PAGE_SHIFT)
177     return 0;
178
179   i.e->del_attr(Page::MAX_ATTRIBS);
180   i.e->add_attr(page_attribs);
181   return true;
182 }
183
184
/**
 * Release all page tables allocated for the user part of this address
 * space.  Page tables covering kernel space are shared and left alone.
 *
 * Called from the destructor before the directory page itself is freed.
 * (The previous comment described this as "the destructor" and mentioned
 * Space_index; both statements were stale.)
 */
PRIVATE
void
Mem_space::dir_shutdown()
{
  // free all page tables we have allocated for this address space
  // except the ones in kernel space which are always shared
  _dir->alloc_cast<Mem_space_q_alloc>()
    ->destroy(Virt_addr(0),
              Virt_addr(Kmem::mem_user_max), Pdir::Depth - 1,
              Mem_space_q_alloc(_quota, Mapped_allocator::allocator()));

}
201
/**
 * Insert a mapping of @a phys at @a virt into this space's page table.
 *
 * @param phys  physical frame address (must be size-aligned).
 * @param virt  virtual address (must be size-aligned).
 * @param size  exactly Config::PAGE_SIZE or Config::SUPERPAGE_SIZE.
 * @param page_attribs  attribute bits (Page_attrib) for the new/upgraded
 *        mapping.
 * @param upgrade_ignore_size  allow an attribute upgrade even when the
 *        existing entry's size or target frame differs.
 * @return one of the Status values (see the enum in the INTERFACE part).
 */
IMPLEMENT
Mem_space::Status
Mem_space::v_insert(Phys_addr phys, Vaddr virt, Vsize size,
                    unsigned page_attribs, bool upgrade_ignore_size)
{
  // insert page into page table

  // XXX should modify page table using compare-and-swap

  assert_kdb (size == Size(Config::PAGE_SIZE)
              || size == Size(Config::SUPERPAGE_SIZE));
  if (size == Size(Config::SUPERPAGE_SIZE))
    {
      assert (Cpu::have_superpages());
      assert (virt.offset(Size(Config::SUPERPAGE_SIZE)) == 0);
      assert (phys.offset(Size(Config::SUPERPAGE_SIZE)) == 0);
    }

  // pick page-table level, shift, and extra PTE bits by mapping size
  unsigned level = (size == Size(Config::SUPERPAGE_SIZE) ? (int)Pdir::Super_level : (int)Pdir::Depth);
  unsigned shift = (size == Size(Config::SUPERPAGE_SIZE) ? Config::SUPERPAGE_SHIFT : Config::PAGE_SHIFT);
  unsigned attrs = (size == Size(Config::SUPERPAGE_SIZE) ? (unsigned long)Pt_entry::Pse_bit : 0);

  // walk to the target level, allocating missing intermediate tables
  // from our quota
  Pdir::Iter i = _dir->alloc_cast<Mem_space_q_alloc>()
    ->walk(virt, level,
           Mem_space_q_alloc(_quota, Mapped_allocator::allocator()));

  // walk stopped above the requested level: page-table allocation failed
  if (EXPECT_FALSE(!i.e->valid() && i.shift() != shift))
    return Insert_err_nomem;

  // a different mapping (other size or other frame) already occupies
  // the slot and upgrades were not requested
  if (EXPECT_FALSE(!upgrade_ignore_size
        && i.e->valid() && (i.shift() != shift || i.addr() != phys.value())))
    return Insert_err_exists;

  if (i.e->valid())
    {
      // same mapping already present: attribute upgrade at most
      if (EXPECT_FALSE((i.e->raw() | page_attribs) == i.e->raw()))
        return Insert_warn_exists;      // nothing new to add

      i.e->add_attr(page_attribs);
      page_protect (Addr(virt).value(), Size(size).value(), i.e->raw() & Page_all_attribs);

      return Insert_warn_attrib_upgrade;
    }
  else
    {
      // fresh mapping: write the PTE and notify (no-op on native ia32)
      *i.e = Addr(phys).value() | Pt_entry::Valid | attrs | page_attribs;
      page_map (Addr(phys).value(), Addr(virt).value(), Size(size).value(), page_attribs);

      return Insert_ok;
    }
}
253
/**
 * Simple page-table lookup.
 *
 * @param virt Virtual address.  This address does not need to be page-aligned.
 * @return Physical address corresponding to virt.
 */
PUBLIC inline NEEDS ["paging.h"]
Address
Mem_space::virt_to_phys(Address virt) const
{
  return dir()->virt_to_phys(virt);
}
266
/**
 * Translate a kernel "pmem" virtual address to its physical address.
 *
 * No page-table walk is involved; the translation uses the static
 * kernel memory layout (Mem_layout).  (The previous comment calling
 * this a "page-table lookup" was misleading.)
 *
 * @param virt Kernel virtual address within the pmem mapping.
 * @return Physical address corresponding to virt.
 */
PUBLIC inline NEEDS ["mem_layout.h"]
Address
Mem_space::pmem_to_phys (Address virt) const
{
  return Mem_layout::pmem_to_phys(virt);
}
279
/**
 * Simple page-table lookup.
 *
 * This method is similar to Space_context's virt_to_phys().
 * The difference is that this version handles Sigma0's
 * address space with a special case:  For Sigma0, we do not
 * actually consult the page table -- it is meaningless because we
 * create new mappings for Sigma0 transparently; instead, we return the
 * logically-correct result of physical address == virtual address.
 *
 * NOTE(review): the special case described above is not visible in this
 * implementation -- it performs a plain page-table walk exactly like
 * virt_to_phys().  The function is virtual, so the Sigma0 handling
 * presumably lives in an override elsewhere; confirm and update the
 * description accordingly.
 *
 * @param a Virtual address.  This address does not need to be page-aligned.
 * @return Physical address corresponding to a.
 */
PUBLIC inline
virtual Address
Mem_space::virt_to_phys_s0(void *a) const
{
  return dir()->virt_to_phys((Address)a);
}
299
/**
 * Look up the mapping covering @a virt.
 *
 * @param virt               virtual address to look up.
 * @param[out] phys          frame address, masked to the mapping size
 *                           (only written if a valid mapping exists).
 * @param[out] size          size of the page-table slot covering virt;
 *                           written even when nothing is mapped there.
 * @param[out] page_attribs  attribute bits of the entry.
 * @return true iff a valid mapping exists at virt.
 */
IMPLEMENT
bool
Mem_space::v_lookup(Vaddr virt, Phys_addr *phys,
                    Size *size, unsigned *page_attribs)
{
  Pdir::Iter i = _dir->walk(virt);
  if (size) *size = Size(1UL << i.shift());

  if (!i.e->valid())
    return false;

  // mask off the offset bits below the mapping's size
  if (phys) *phys = Addr(i.e->addr() & (~0UL << i.shift()));
  if (page_attribs) *page_attribs = (i.e->raw() & Page_all_attribs);

  return true;
}
316
317 IMPLEMENT
318 unsigned long
319 Mem_space::v_delete(Vaddr virt, Vsize size,
320                     unsigned long page_attribs = Page_all_attribs)
321 {
322   unsigned ret;
323
324   // delete pages from page tables
325   assert (size == Size(Config::PAGE_SIZE) || size == Size(Config::SUPERPAGE_SIZE));
326
327   if (size == Size(Config::SUPERPAGE_SIZE))
328     {
329       assert (Cpu::have_superpages());
330       assert (!virt.offset(Size(Config::SUPERPAGE_SIZE)));
331     }
332
333   Pdir::Iter i = _dir->walk(virt);
334
335   if (EXPECT_FALSE (! i.e->valid()))
336     {
337       if (Config::conservative)
338         kdb_ke("v_delete did not find anything");
339
340       return 0;
341     }
342
343   assert (! (i.e->raw() & Pt_entry::global())); // Cannot unmap shared ptables
344
345   ret = i.e->raw() & page_attribs;
346
347   if (! (page_attribs & Page_user_accessible))
348     {
349       // downgrade PDE (superpage) rights
350       i.e->del_attr(page_attribs);
351       page_protect (Addr(virt).value(), Size(size).value(), i.e->raw() & Page_all_attribs);
352     }
353   else
354     {
355       // delete PDE (superpage)
356       *i.e = 0;
357       page_unmap (Addr(virt).value(), Size(size).value());
358     }
359
360   return ret;
361 }
362
/**
 * \brief Destructor: free all memory allocated for this Mem_space.
 *
 * Releases the user-level page tables (dir_shutdown()) and then returns
 * the directory page itself to the quota.  Does nothing if the
 * constructor's allocation failed (_dir == 0).
 * (The previous "\pre Runs after the destructor!" note was stale --
 * this IS the destructor.)
 */
PUBLIC
Mem_space::~Mem_space()
{
  if (_dir)
    {
      dir_shutdown();
      Mapped_allocator::allocator()->q_free(_quota, Config::PAGE_SHIFT, _dir);
    }
}
376
377
378 // --------------------------------------------------------------------
379 IMPLEMENTATION [ia32 || amd64]:
380
381 #include <cassert>
382 #include "l4_types.h"
383 #include "kmem.h"
384 #include "mem_unit.h"
385 #include "cpu_lock.h"
386 #include "lock_guard.h"
387 #include "logdefs.h"
388 #include "paging.h"
389
390 #include <cstring>
391 #include "config.h"
392 #include "kmem.h"
393
/// Return the kernel-virtual address of the page directory currently
/// loaded in the page-directory base register (CR3).
PRIVATE static inline NEEDS ["cpu.h", "kmem.h"]
Pdir *
Mem_space::current_pdir()
{
  return reinterpret_cast<Pdir*>(Kmem::phys_to_virt(Cpu::get_pdbr()));
}
400
/// Load this space's page directory into the PDBR (CR3) and record it
/// as the current space of the executing CPU.
IMPLEMENT inline NEEDS ["cpu.h", "kmem.h"]
void
Mem_space::make_current()
{
  Cpu::set_pdbr((Mem_layout::pmem_to_phys(_dir)));
  _current.cpu(current_cpu()) = this;
}
408
/// Physical address of this space's page directory.
PUBLIC inline NEEDS ["kmem.h"]
Address
Mem_space::phys_dir()
{
  return Mem_layout::pmem_to_phys(_dir);
}
415
416 /*
417  * The following functions are all no-ops on native ia32.
418  * Pages appear in an address space when the corresponding PTE is made
419  * ... unlike Fiasco-UX which needs these special tricks
420  */
421
/// No-op on native ia32/amd64: the MMU sees the PTE directly.
IMPLEMENT inline
void
Mem_space::page_map (Address, Address, Address, unsigned)
{}
426
/// No-op on native ia32/amd64: the MMU sees the PTE directly.
IMPLEMENT inline
void
Mem_space::page_protect (Address, Address, unsigned)
{}
431
/// No-op on native ia32/amd64: the MMU sees the PTE directly.
IMPLEMENT inline
void
Mem_space::page_unmap (Address, Address)
{}
436
/// Copy the superpage-level directory entry covering @a addr from the
/// global kernel directory into this space's directory, propagating a
/// kernel-mapping change.
IMPLEMENT inline NEEDS ["kmem.h"]
void Mem_space::kmem_update (void *addr)
{
  Pdir::Iter dir = _dir->walk(Addr::create((Address)addr), Pdir::Super_level);
  Pdir::Iter kdir = Kmem::dir()->walk(Addr::create((Address)addr), Pdir::Super_level);
  *dir.e = *kdir.e;
}
444
/// Switch the hardware address space to this Mem_space unless @a from
/// is already this space; counts the switch for statistics.
IMPLEMENT inline NEEDS["kmem.h", "logdefs.h", Mem_space::current_pdir]
void
Mem_space::switchin_context(Mem_space *from)
{
  // FIXME: this optimization breaks SMP task deletion, an idle thread
  // may run on an already deleted page table
#if 0
  // never switch to kernel space (context of the idle thread)
  if (dir() == Kmem::dir())
    return;
#endif

  if (from != this)
    {
      CNT_ADDR_SPACE_SWITCH;
      make_current();
    }
}
463
/// Copy the kernel part of the address space (everything from User_max
/// upward, at superpage-directory level) from the global kernel
/// directory into this freshly created directory.
PRIVATE inline NOEXPORT
void
Mem_space::initial_sync()
{
  _dir->alloc_cast<Mem_space_q_alloc>()
    ->sync(Virt_addr(Mem_layout::User_max), Kmem::dir(),
           Virt_addr(Mem_layout::User_max),
           Virt_addr(-Mem_layout::User_max), Pdir::Super_level,
           Mem_space_q_alloc(_quota, Mapped_allocator::allocator()));
}
474
475 // --------------------------------------------------------------------
476 IMPLEMENTATION [amd64]:
477
/**
 * Sign-extend a page number into amd64 canonical form.
 *
 * NOTE(review): amd64 canonical addresses replicate bit 47 of the
 * linear address into bits 63:48; this code tests and extends from
 * bit 48.  Whether that is correct depends on the unit of Page_number
 * (pages vs. bytes) and on Virt_addr's representation -- confirm
 * before changing anything.
 */
PUBLIC static inline
Page_number
Mem_space::canonize(Page_number v)
{
  if (v & Virt_addr(1UL << 48))
    v = v | Virt_addr(~0UL << 48);
  return v;
}
486
487 // --------------------------------------------------------------------
488 IMPLEMENTATION [ia32 || ux]:
489
/// 32-bit (ia32/ux) addresses are always canonical: identity.
PUBLIC static inline
Page_number
Mem_space::canonize(Page_number v)
{ return v; }