// kernel/fiasco/src/kern/ia32/dmar_space.cpp

INTERFACE [iommu]:

#include "task.h"
#include "ptab_base.h"
#include "bitmap.h"

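/*
 * Dmar_space -- a DMA address space backed by an Intel VT-d second-level
 * IO page table. Overview (inferred from the implementation below): each
 * space owns a page-table root (_dmarpt) and lazily allocates a domain ID
 * (_did) that tags its IOTLB entries in all IOMMU units.
 */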
class Dmar_space :
  public cxx::Dyn_castable<Dmar_space, Task>
{
private:
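  // VT-d hardware is not architecturally required to snoop CPU caches when
  // reading translation structures (Coherency bit in the capability
  // register), so updated entries are cleaned from the data cache.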
  template<typename T>
  static void clean_dcache(T *p)
  { Mem_unit::clean_dcache(p, p + 1); }

  class Dmar_ptr
  {
  public:
    struct Dmar_ptr_val
    {
      Unsigned64 v;
      Dmar_ptr_val() = default;
      Dmar_ptr_val(Unsigned64 v) : v(v) {}
      CXX_BITFIELD_MEMBER(0, 1, present, v);
    };
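    // Second-level PTE bits as used below: bit 0 = Read, bit 1 = Write,
    // bit 7 = page size (a large-page leaf at a non-final level).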

    Dmar_ptr_val *e;

  public:
    typedef Mem_space::Attr Attr;

    unsigned char level;
    Dmar_ptr() = default;
    Dmar_ptr(Unsigned64 *e, unsigned char l)
    : e(reinterpret_cast<Dmar_ptr_val*>(e)), level(l) {}

    bool is_valid() const { return e->present(); }
    bool is_leaf() const
    { return (level == Dmar_pt::Depth) || (e->v & (1 << 7)); }
    Unsigned64 next_level() const
    { return cxx::mask_lsb(e->v, Config::PAGE_SHIFT); }

    void set(Unsigned64 v);
    void clear() { set(0); }

    unsigned char page_order() const;
    Unsigned64 page_addr() const;
    Attr attribs() const
    {
      typedef L4_fpage::Rights R;

      auto raw = access_once(&e->v);

      R r = R::UR();
      if (raw & 2) r |= R::W();

      return Attr(r, Page::Type::Normal());
    }

    bool add_attribs(Page::Attr attr)
    {
      typedef L4_fpage::Rights R;

      if (attr.rights & R::W())
        {
          auto p = access_once(&e->v);
          auto o = p;
          p |= 2;
          if (o != p)
            {
              write_now(&e->v, p);
              clean_dcache(e);
              return true;
            }
        }
      return false;
    }

    void set_next_level(Unsigned64 phys)
    { set(phys | 3); }

    void write_back_if(bool) const {}
    static void write_back(void *, void *) {}

    L4_fpage::Rights access_flags() const
    {
      return L4_fpage::Rights(0);
    }

    void del_rights(L4_fpage::Rights r)
    {
      if (r & L4_fpage::Rights::W())
        {
          auto p = access_once(&e->v);
          auto o = p & ~(Unsigned64)2;
          if (o != p)
            {
              // Write back the entry with the write permission removed.
              write_now(&e->v, o);
              clean_dcache(e);
            }
        }
    }

    /*
     * WARNING: The VT-d documentation says that the super page bit
     * WARNING: is ignored in page table entries for 4k pages. However,
     * WARNING: this is not true. The super page bit must be zero.
     */
    void create_page(Phys_mem_addr addr, Page::Attr attr)
    {
      typedef L4_fpage::Rights R;

      assert(level <= Dmar_pt::Depth);
      Unsigned64 r = (level == Dmar_pt::Depth) ? 0 : (Unsigned64)(1 << 7);
      r |= 1; // Read
      if (attr.rights & R::W()) r |= 2;

      set(cxx::int_value<Phys_mem_addr>(addr) | r);
    }
  };

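  // Page-table geometry: four translation levels of 9 bits each above the
  // 4K page offset (bits 47..12), i.e. the VT-d 48-bit address width.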
  typedef Ptab::Tupel<Ptab::Traits<Unsigned64, 39, 9, true>,
                      Ptab::Traits<Unsigned64, 30, 9, true>,
                      Ptab::Traits<Unsigned64, 21, 9, true>,
                      Ptab::Traits<Unsigned64, 12, 9, true> >::List Dmar_traits;

  typedef Ptab::Shift<Dmar_traits, 12>::List Dmar_traits_vpn;
  typedef Ptab::Page_addr_wrap<Page_number, 12> Dmar_va_vpn;
  typedef Ptab::Base<Dmar_ptr, Dmar_traits_vpn, Dmar_va_vpn> Dmar_pt;

public:
  enum { Max_nr_did = 0x10000 };
  virtual void *debug_dir() const { return (void *)_dmarpt; }
  static void create_identity_map();
  static Dmar_pt *identity_map;

  virtual void tlb_flush(bool);
  virtual bool global() const { return true; }

private:
  Dmar_pt *_dmarpt;
  unsigned long _did;

  static bool _initialized;

  typedef Bitmap<Max_nr_did> Did_map;

  static Did_map *_free_dids;
  static unsigned _max_did;
};

// -----------------------------------------------------------
IMPLEMENTATION [iommu]:

#include "boot_alloc.h"
#include "intel_iommu.h"
#include "kmem_slab.h"
#include "warn.h"

Dmar_space::Dmar_pt *Dmar_space::identity_map;
bool Dmar_space::_initialized;
Dmar_space::Did_map *Dmar_space::_free_dids;
unsigned Dmar_space::_max_did;

PUBLIC
Page_number
Dmar_space::mem_space_map_max_address() const override
{
  return Page_number(1UL << (Intel::Io_mmu::hw_addr_width - Mem_space::Page_shift));
}

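/*
 * Return the physical root of the IO page table as seen by a unit with
 * the given address-width level (aw_level + 2 = number of translation
 * levels, matching the VT-d context-entry AW encoding). Units supporting
 * fewer levels than Dmar_pt get the lower-level table that initialize()
 * pre-allocates at entry 0.
 */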
PUBLIC static inline
Mword
Dmar_space::get_root(Dmar_pt *pt, unsigned aw_level)
{
  aw_level += 2;
  if (aw_level == Dmar_pt::Depth + 1)
    return Mem_layout::pmem_to_phys(pt);

  assert(aw_level <= Dmar_pt::Depth);

  auto i = pt->walk(Mem_space::V_pfn(0), Dmar_pt::Depth - aw_level);
  assert(i.is_valid());
  return i.next_level();
}

PUBLIC inline
Mword
Dmar_space::get_root(int aw_level) const
{
  return get_root(_dmarpt, aw_level);
}

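// Domain IDs (DIDs) tag IOTLB entries per address space; they are managed
// in a global bitmap shared by all IOMMU units.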
PRIVATE static
unsigned
Dmar_space::alloc_did()
{
  /* DID 0 may be reserved by the architecture, DID 1 is the identity map. */
  for (unsigned did = 2; did < _max_did; ++did)
    if (_free_dids->atomic_get_and_set(did) == false)
      return did;

  panic("DMAR: Out of DIDs");
}

PRIVATE static
void
Dmar_space::free_did(unsigned long did)
{
  if (_free_dids->atomic_get_and_clear(did) != true)
    panic("DMAR: Freeing free DID");
}

PUBLIC static
void
Dmar_space::init(unsigned max_did)
{
  add_page_size(Mem_space::Page_order(Config::PAGE_SHIFT));
  /* XXX CEH: Add additional page sizes based on CAP_REG[34:37] */

  _max_did = max_did;
  _free_dids = new Boot_object<Did_map>();
  _initialized = true;
}

PUBLIC inline
bool
Dmar_space::initialize()
{
  void *b;

  if (!_initialized)
    return false;

  b = Kmem_alloc::allocator()->q_alloc(ram_quota(), Config::PAGE_SHIFT);
  if (EXPECT_FALSE(!b))
    return false;

  _dmarpt = static_cast<Dmar_pt *>(b);
  _dmarpt->clear(false);

  /*
   * Make sure that the very first entry in a page table is valid and
   * not a super page. This is necessary if the hardware supports
   * fewer levels than the current software implementation.
   *
   * Force allocation of two levels in entry 0, so that get_root() works.
   */
  auto i = _dmarpt->walk(Mem_space::V_pfn(0), 2, false,
                         Kmem_alloc::q_allocator(ram_quota()));
  if (i.level != 2)
    {
      // Got a page-table entry with the wrong level. That happens in the
      // case of an out-of-memory situation. So free everything we already
      // allocated and fail.
      _dmarpt->destroy(Virt_addr(0UL), Virt_addr(~0UL), 0, Dmar_pt::Depth,
                       Kmem_alloc::q_allocator(ram_quota()));
      Kmem_alloc::allocator()->q_free(ram_quota(), Config::PAGE_SHIFT, _dmarpt);
      _dmarpt = 0;
      return false;
    }

  return true;
}

IMPLEMENT inline
void
Dmar_space::Dmar_ptr::set(Unsigned64 v)
{
  write_consistent(e, Dmar_ptr_val(v));
  clean_dcache(e);
}

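// Lazily allocate this space's DID on first use. Concurrent callers may
// race on the CAS; the loser frees its freshly allocated DID and adopts
// the winner's value.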
PUBLIC inline
unsigned long
Dmar_space::get_did()
{
  // XXX: possibly need a loop here
  if (_did == 0)
    {
      unsigned long ndid = alloc_did();
      if (!mp_cas(&_did, (unsigned long)0, ndid))
        free_did(ndid);
    }
  return _did;
}

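// Fallback for IOMMU units without hardware passthrough support: build a
// global 1:1 page table covering all conventional RAM, clipped to the
// unit's supported address width.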
IMPLEMENT
void
Dmar_space::create_identity_map()
{
  if (identity_map)
    return;

  WARN("At least one IOMMU does not support passthrough.\n");

  check (identity_map = Kmem_alloc::allocator()->alloc_array<Dmar_pt>(1));
  identity_map->clear(false);

  Unsigned64 max_phys = 0;
  for (auto const &m: Kip::k()->mem_descs_a())
    if (m.valid() && m.type() == Mem_desc::Conventional
        && !m.is_virtual() && m.end() > max_phys)
      max_phys = m.end();

  Unsigned64 epfn = min(1ULL << (Intel::Io_mmu::hw_addr_width - Config::PAGE_SHIFT),
                        (max_phys + Config::PAGE_SIZE - 1) >> Config::PAGE_SHIFT);

  printf("IOMMU: identity map 0 - 0x%llx (%lluGB)\n", epfn << Config::PAGE_SHIFT,
         (epfn << Config::PAGE_SHIFT) >> 30);
  // Map page frames [0, epfn); epfn is the exclusive end printed above.
  for (Unsigned64 pfn = 0; pfn < epfn; ++pfn)
    {
      auto i = identity_map->walk(Mem_space::V_pfn(pfn),
                                  Dmar_pt::Depth, false,
                                  Kmem_alloc::q_allocator(Ram_quota::root));
      if (i.page_order() != 12)
        panic("IOMMU: cannot allocate identity IO page table, OOM\n");

      i.set((pfn << Config::PAGE_SHIFT) | 3);
    }
}

PUBLIC inline
int
Dmar_space::resume_vcpu(Context *, Vcpu_state *, bool) override
{
  return -L4_err::EInval;
}

IMPLEMENT inline
unsigned char
Dmar_space::Dmar_ptr::page_order() const
{ return Dmar_space::Dmar_pt::page_order_for_level(level); }

IMPLEMENT inline
Unsigned64
Dmar_space::Dmar_ptr::page_addr() const
{
  unsigned char o = page_order();
  return cxx::mask_lsb(e->v, o);
}

IMPLEMENT
void
Dmar_space::tlb_flush(bool)
{
  if (_did)
    for (auto &mmu: Intel::Io_mmu::iommus)
      mmu.flush_iotlb(_did);
}

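// Generic Mem_space paging interface (v_lookup / v_insert / v_delete),
// implemented on the VT-d second-level page table.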
PUBLIC
bool
Dmar_space::v_lookup(Mem_space::Vaddr virt, Mem_space::Phys_addr *phys,
                     Mem_space::Page_order *order,
                     Mem_space::Attr *page_attribs) override
{
  auto i = _dmarpt->walk(virt);
  // XXX CEH: Check if this hack is still needed!
  if (order)
    *order = Mem_space::Page_order(i.page_order() > 30 ? 30 : i.page_order());

  if (!i.is_valid())
    return false;

  if (phys)
    *phys = Mem_space::Phys_addr(i.page_addr());

  if (page_attribs)
    *page_attribs = i.attribs();

  return true;
}

PUBLIC
Mem_space::Status
Dmar_space::v_insert(Mem_space::Phys_addr phys, Mem_space::Vaddr virt,
                     Mem_space::Page_order order,
                     Mem_space::Attr page_attribs) override
{
  assert (cxx::get_lsb(Mem_space::Phys_addr(phys), order) == 0);
  assert (cxx::get_lsb(Virt_addr(virt), order) == 0);

  // Pick the first level whose page size is no larger than the requested
  // mapping order.
  int level;
  for (level = 0; level < Dmar_pt::Depth; ++level)
    if (Mem_space::Page_order(Dmar_pt::page_order_for_level(level)) <= order)
      break;

  auto i = _dmarpt->walk(virt, level, false,
                         Kmem_alloc::q_allocator(ram_quota()));

  if (EXPECT_FALSE(!i.is_valid() && i.level != level))
    return Mem_space::Insert_err_nomem;

  if (EXPECT_FALSE(i.is_valid()
      && (i.level != level || Mem_space::Phys_addr(i.page_addr()) != phys)))
    return Mem_space::Insert_err_exists;

  if (i.is_valid())
    {
      if (EXPECT_FALSE(!i.add_attribs(page_attribs)))
        return Mem_space::Insert_warn_exists;

      return Mem_space::Insert_warn_attrib_upgrade;
    }
  else
    {
      i.create_page(phys, page_attribs);
      return Mem_space::Insert_ok;
    }
}

PUBLIC
L4_fpage::Rights
Dmar_space::v_delete(Mem_space::Vaddr virt, Mem_space::Page_order order,
                     L4_fpage::Rights page_attribs) override
{
  assert(cxx::get_lsb(Virt_addr(virt), order) == 0);

  auto i = _dmarpt->walk(virt);

  if (EXPECT_FALSE(!i.is_valid()))
    return L4_fpage::Rights(0);

  if (EXPECT_FALSE(Mem_space::Page_order(i.page_order()) != order))
    return L4_fpage::Rights(0);

  L4_fpage::Rights ret = i.access_flags();

  // Revoking R removes the whole mapping; otherwise only downgrade rights.
  if (!(page_attribs & L4_fpage::Rights::R()))
    i.del_rights(page_attribs);
  else
    i.clear();

  return ret;
}

PUBLIC
void
Dmar_space::v_set_access_flags(Mem_space::Vaddr, L4_fpage::Rights) override
{}

static Mem_space::Fit_size::Size_array __dmar_ps;

PUBLIC
Mem_space::Fit_size
Dmar_space::mem_space_fitting_sizes() const override
{ return Mem_space::Fit_size(__dmar_ps); }

PRIVATE static
void
Dmar_space::add_page_size(Mem_space::Page_order o)
{
  add_global_page_size(o);
  for (Mem_space::Page_order c = o; c < __dmar_ps.size(); ++c)
    __dmar_ps[c] = o;
}

PUBLIC
void *
Dmar_space::operator new (size_t size, void *p) throw()
{
  (void)size;
  assert (size == sizeof (Dmar_space));
  return p;
}

PUBLIC
void
Dmar_space::operator delete (void *ptr)
{
  Dmar_space *t = reinterpret_cast<Dmar_space *>(ptr);
  Kmem_slab_t<Dmar_space>::q_free(t->ram_quota(), ptr);
}

PUBLIC inline
Dmar_space::Dmar_space(Ram_quota *q)
: Dyn_castable_class(q, Caps::mem()),
  _dmarpt(0), _did(0)
{}

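// Unbind this space from every context entry (on all IOMMU units) that
// still points at our page-table root, flush the matching IOTLB entries,
// and release the DID. Used by destroy() and the destructor.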
PRIVATE
void
Dmar_space::remove_from_all_iommus()
{
  unsigned long did = access_once(&_did);
  if (!did)
    return;

  // someone else changed the did
  if (!mp_cas(&_did, did, 0ul))
    return;

  for (auto &mmu: Intel::Io_mmu::iommus)
    {
      // Scan all 256 buses and all 256 device/function combinations.
      for (unsigned bus = 0; bus < 256; ++bus)
        for (unsigned df = 0; df < 256; ++df)
          {
            auto entryp = mmu.get_context_entry(bus, df, false);
            if (!entryp)
              break; // complete bus is empty

            Intel::Io_mmu::Cte entry = access_once(entryp.unsafe_ptr());
            // different space bound, skip
            if (entry.slptptr() != get_root(mmu.aw()))
              continue;

            // when the CAS fails someone else already unbound this slot,
            // so ignore that case
            mmu.cas_context_entry(entryp, bus, df, entry, Intel::Io_mmu::Cte());
          }

      mmu.flush_iotlb(did);
    }

  free_did(did);
}

PUBLIC
void
Dmar_space::destroy(Kobject ***rl) override
{
  Task::destroy(rl);
  remove_from_all_iommus();
}

PUBLIC
Dmar_space::~Dmar_space()
{
  remove_from_all_iommus();

  if (_dmarpt)
    {
      _dmarpt->destroy(Virt_addr(0UL), Virt_addr(~0UL), 0, Dmar_pt::Depth,
                       Kmem_alloc::q_allocator(ram_quota()));
      Kmem_alloc::allocator()->q_free(ram_quota(), Config::PAGE_SHIFT, _dmarpt);
      _dmarpt = 0;
    }
}

namespace {

// Register Dmar_space as the factory for DMA-space kernel objects.
static inline void __attribute__((constructor)) FIASCO_INIT
register_factory()
{
  Kobject_iface::set_factory(L4_msg_tag::Label_dma_space,
                             &Task::generic_factory<Dmar_space>);
}

}