1 //---------------------------------------------------------------------------
// Accessors for the hardware access-permission (AP) bits of a page-table
// attribute word; the encodings are architecture-specific (see the armv5
// and armv6/armv7 IMPLEMENTATION sections below).
9 unsigned long get_ap() const;
10 void set_ap(unsigned long ap);
16 //---------------------------------------------------------------------------
17 INTERFACE[arm && armv5]:
// ARMv5-specific parts of Mem_page_attr (class body elided in this excerpt).
19 EXTENSION class Mem_page_attr
30 //---------------------------------------------------------------------------
31 INTERFACE[arm && (armv6 || armv7)]:
// ARMv6/v7-specific parts of Mem_page_attr (class body elided in this excerpt).
33 EXTENSION class Mem_page_attr
44 //---------------------------------------------------------------------------
45 INTERFACE[arm && !(mpcore || armca9)]:
47 EXTENSION class Mem_page_attr
50 // do not use Shared bit on non MP CPUs because this leads to uncached memory
51 // accesses most of the time!
55 EXTENSION class Page_table
// Uniprocessor: no extra TTBR attribute bits needed.
58 enum { Ttbr_bits = 0x0 };
61 //---------------------------------------------------------------------------
62 INTERFACE[arm && (mpcore || armca9)]:
64 EXTENSION class Mem_page_attr
67 // use shared bit on MP CPUs as we need cache coherency
68 enum { Shared = 0x400 };
71 //---------------------------------------------------------------------------
72 INTERFACE[arm && mpcore]:
74 EXTENSION class Page_table
// MPCore TTBR attribute bits for page-table walks.
// NOTE(review): 0xa presumably selects shareable + cacheable walks - verify
// against the MPCore TRM.
77 enum { Ttbr_bits = 0xa };
80 //---------------------------------------------------------------------------
81 INTERFACE[arm && armca9]:
83 EXTENSION class Page_table
// Cortex-A9 TTBR attribute bits, spelled out per field:
88 Ttbr_bits = (1 << 1) // S, Shareable bit
89 | (1 << 3) // RGN, Region bits, Outer WriteBackWriteAlloc
90 | (0 << 0) | (1 << 6) // IRGN, Inner region bits, WB-WA
96 //---------------------------------------------------------------------------
100 #include "per_cpu_data.h"
// Construct a Pte referring to the entry at 'pte'; the table level is OR-ed
// into the low bits of the page-table pointer (tables are aligned, so the
// low bits are free for this tag).
112 Pte(Page_table *pt, unsigned level, Mword *pte)
113 : _pt((unsigned long)pt | level), _pte(pte)
118 EXTENSION class Page_table
// Pt_base_mask: mask extracting the 2nd-level (coarse) table base from a PDE.
// Pde_type_coarse: PDE type bits marking a coarse page-table descriptor.
124 Pt_base_mask = 0xfffffc00,
125 Pde_type_coarse = 0x01,
// Page table currently installed on each CPU.
129 static Per_cpu<Page_table *> _current;
132 //---------------------------------------------------------------------------
133 IMPLEMENTATION [arm && vcache]:
// need_cache_clean(): whether PTE stores must be cleaned from the D-cache so
// the hardware table walker observes them. One variant per cache
// configuration; the return values are elided in this excerpt.
137 Pte::need_cache_clean()
142 //---------------------------------------------------------------------------
143 IMPLEMENTATION [arm && !vcache && !armca9]:
147 Pte::need_cache_clean()
152 //---------------------------------------------------------------------------
153 IMPLEMENTATION [arm && !vcache && armca9]:
157 Pte::need_cache_clean()
162 //---------------------------------------------------------------------------
163 IMPLEMENTATION [arm]:
168 #include "mem_unit.h"
170 #include "ram_quota.h"
// Wrap a raw attribute word (hardware AP bits plus cache bits).
172 PUBLIC inline explicit
173 Mem_page_attr::Mem_page_attr(unsigned long attr) : _a(attr)
// raw(): the unmodified attribute word (body elided in this excerpt).
178 Mem_page_attr::raw() const
// Replace cache-attribute bits: clear 'del', then set 'set'; both masks are
// restricted so only Page::Cache_mask bits can change.
183 Mem_page_attr::set_caching(unsigned long del, unsigned long set)
185 del &= Page::Cache_mask;
186 set &= Page::Cache_mask;
187 _a = (_a & ~del) | set;
// Abstract attributes = architecture-neutral AP bits plus cache bits.
190 PUBLIC inline NEEDS[Mem_page_attr::get_ap]
192 Mem_page_attr::get_abstract() const
193 { return get_ap() | (_a & Page::Cache_mask); }
195 PUBLIC inline NEEDS[Mem_page_attr::set_ap]
197 Mem_page_attr::set_abstract(unsigned long a)
199 _a = (_a & ~Page::Cache_mask) | (a & Page::Cache_mask);
// True iff every right requested in 'attr' is granted by the AP bits.
203 PUBLIC inline NEEDS[Mem_page_attr::get_ap]
205 Mem_page_attr::permits(unsigned long attr)
206 { return (get_ap() & attr) == attr; }
208 Per_cpu<Page_table *> DEFINE_PER_CPU Page_table::_current;
// Entry type is encoded in the low two bits of the raw PTE.
213 { return *_pte & 3; }
// phys(): mask off the in-page offset bits according to the mapping size;
// the enclosing switch statements are elided in this excerpt.
224 case 2: return *_pte & ~((1 << 20) - 1); // 1MB
225 default: return ~0UL;
230 case 2: return *_pte & ~((4 << 10) - 1);
231 default: return ~0UL;
233 default: return ~0UL;
// Physical address of 'virt': page-frame base plus the offset within the
// mapping (size() - 1 masks the offset bits).
239 Pte::phys(void *virt)
241 unsigned long p = phys();
242 return p | (((unsigned long)virt) & (size()-1));
// Table level of this entry, stored in the low bits of _pt (see the Pte
// constructor above).
248 { return (_pt & 3); }
// A superpage is a level-0 entry whose type bits equal 2 (1MB section).
257 Pte::superpage() const
258 { return !(_pt & 3) && ((*_pte & 3) == 2); }
// size(): mapping size by entry type; the enclosing switch statements are
// elided in this excerpt.
269 case 2: return 1 << 20; // 1MB
270 default: return 1 << 20;
275 case 1: return 64 << 10;
276 case 2: return 4 << 10;
277 case 3: return 1 << 10;
278 default: return 4 << 10;
// Store a raw PTE value; clean the D-cache line when requested or when the
// cache configuration requires it (see Pte::need_cache_clean).
285 PRIVATE inline NEEDS["mem_unit.h"]
287 Pte::__set(unsigned long v, bool write_back)
290 if (write_back || need_cache_clean())
291 Mem_unit::clean_dcache(_pte);
// Invalidate the entry by clearing the low two type bits (type 0 = invalid).
294 PUBLIC inline NEEDS[Pte::__set]
296 Pte::set_invalid(unsigned long val, bool write_back)
297 { __set(val & ~3, write_back); }
300 //-----------------------------------------------------------------------------
301 IMPLEMENTATION [arm && armv5]:
// Map hardware ARMv5 AP bits (bits 10-11 of the attribute word) to abstract
// rights via a lookup table.
305 Mem_page_attr::get_ap() const
307 static unsigned char const _map[4] = { 0x8, 0x4, 0x0, 0xc };
308 return ((unsigned long)_map[(_a >> 10) & 0x3]) << 8UL;
// Map abstract rights back to ARMv5 AP bits.
// NOTE(review): this table ({0x4,...}) is deliberately not the inverse of
// get_ap()'s ({0x8,...}) for index 0 - verify the intended asymmetry.
313 Mem_page_attr::set_ap(unsigned long ap)
315 static unsigned char const _map[4] = { 0x4, 0x4, 0x0, 0xc };
316 _a = (_a & ~0xc00) | (((unsigned long)_map[(ap >> 10) & 0x3]) << 8UL);
// Install a mapping of 'size' bytes at physical address 'phys' with the
// given attributes (error/branch context elided in this excerpt).
319 PUBLIC inline NEEDS[Pte::__set, Mem_page_attr::raw]
321 Pte::set(Address phys, unsigned long size, Mem_page_attr const &attr,
// Guard: this path expects exactly a 1MB section (elided path otherwise).
327 if (size != (1 << 20))
329 __set(phys | (attr.raw() & Page::MAX_ATTRIBS) | 2, write_back);
// 4KB small page: replicate the AP bits into all four subpage AP fields.
333 if (size != (4 << 10))
335 unsigned long ap = attr.raw() & 0xc00; ap |= ap >> 2; ap |= ap >> 4;
336 __set(phys | (attr.raw() & 0x0c) | ap | 2, write_back);
// Current attributes: AP bits (0xc00) and cache bits (0x00c) of the raw PTE.
342 PUBLIC inline NEEDS[Mem_page_attr::Mem_page_attr]
344 Pte::attr() const { return Mem_page_attr(*_pte & 0xc0c); }
// Update attributes in place; section vs. small-page branch context elided.
346 PUBLIC inline NEEDS["mem_unit.h"]
348 Pte::attr(Mem_page_attr const &attr, bool write_back)
353 __set((*_pte & ~0xc0c) | (attr.raw() & 0xc0c), write_back);
357 unsigned long ap = attr.raw() & 0xc00; ap |= ap >> 2; ap |= ap >> 4;
358 __set((*_pte & ~0xffc) | (attr.raw() & 0x0c) | ap, write_back);
// Make this page table active on the current CPU (ARMv5, no ASIDs): when
// switching tables, flush the virtually-indexed cache, flush the TLB, and
// load the page-directory base register via CP15.
365 void Page_table::activate()
367 Pte p = walk(this, 0, false, 0);
368 if (_current.cpu(current_cpu()) != this)
370 _current.cpu(current_cpu()) = this;
371 Mem_unit::flush_vcache();
373 "mcr p15, 0, r0, c8, c7, 0x00 \n" // TLB flush
374 "mcr p15, 0, %0, c2, c0 \n" // pdbr
376 "mrc p15, 0, r1, c2, c0 \n"
386 //-----------------------------------------------------------------------------
387 IMPLEMENTATION [arm && (armv6 || armv7)]:
// On ARMv6+, abstract rights map directly onto attribute bits: the User bit
// is kept as-is and the Write bit is inverted (hardware encodes read-only).
391 Mem_page_attr::get_ap() const
393 return (_a & User) | ((_a & Write) ^ Write);
396 IMPLEMENT inline NEEDS[Mem_page_attr::raw]
398 Mem_page_attr::set_ap(unsigned long ap)
// NOTE(review): the OR-ed 0x10 presumably sets the mandatory access bit for
// this descriptor layout - verify against the Page bit definitions.
400 _a = (_a & ~(User | Write)) | (ap & User) | ((ap & Write) ^ Write) | 0x10;
// Install a mapping; section entries keep their attribute bits shifted left
// by 6 relative to small-page entries.
403 PUBLIC inline NEEDS[Pte::__set]
405 Pte::set(Address phys, unsigned long size, Mem_page_attr const &attr,
// Guard: this path expects exactly a 1MB section (elided path otherwise).
411 if (size != (1 << 20))
414 unsigned long a = attr.raw() & 0x0c; // C & B
415 a |= ((attr.raw() & 0xff0) | Mem_page_attr::Shared) << 6;
416 __set(phys | a | 0x2, write_back);
420 if (size != (4 << 10))
422 __set(phys | (attr.raw() & Page::MAX_ATTRIBS) | 0x2 | Mem_page_attr::Shared, write_back);
// Read attributes back, undoing the section-entry bit shift.
427 PUBLIC inline NEEDS[Mem_page_attr::raw]
435 unsigned long a = *_pte & 0x0c; // C & B
436 a |= (*_pte >> 6) & 0xff0;
437 return Mem_page_attr(a);
441 return Mem_page_attr(*_pte & Page::MAX_ATTRIBS);
// Update attributes in place; section vs. small-page branch context elided.
445 PUBLIC inline NEEDS["mem_unit.h", Mem_page_attr::raw]
447 Pte::attr(Mem_page_attr const &attr, bool write_back)
452 __set((*_pte & ~Page::MAX_ATTRIBS)
453 | (attr.raw() & Page::MAX_ATTRIBS), write_back);
457 unsigned long a = attr.raw() & 0x0c;
458 a |= (attr.raw() & 0xff0) << 6;
459 __set((*_pte & ~0x3fcc) | a, write_back);
// Make this page table active with the given ASID (ARMv6/v7): flush the
// branch target buffer, drain the write buffer, then load TTBR (with the
// configured Ttbr_bits) and the context ID register via CP15.
466 void Page_table::activate(unsigned long asid)
468 Pte p = walk(this, 0, false, 0);
469 if (_current.cpu(current_cpu()) != this)
471 _current.cpu(current_cpu()) = this;
473 "mcr p15, 0, %2, c7, c5, 6 \n" // bt flush
474 "mcr p15, 0, r0, c7, c10, 4 \n"
475 "mcr p15, 0, %0, c2, c0 \n" // pdbr
476 "mcr p15, 0, %1, c13, c0, 1 \n"
478 "mrc p15, 0, r1, c2, c0 \n"
483 : "r" (p.phys(this) | Ttbr_bits), "r"(asid), "r" (0)
488 //-----------------------------------------------------------------------------
489 IMPLEMENTATION [arm && !mp]:
// Uniprocessor variant: resolve 'virt' with a software walk of the current
// page table.
493 Page_table::current_virt_to_phys(void *virt)
495 return current()->walk(virt, 0, false, 0).phys(virt);
498 //-----------------------------------------------------------------------------
499 IMPLEMENTATION [arm && mp]:
502 * This version for MP avoids calling Page_table::current() which calls
503 * current_cpu() which is not available on the boot-stack. That's why we use
504 * the following way of doing a virt->phys translation on the current page
// MP variant: use the CP15 VA-to-PA translation operation (c7,c8,0) and
// read the result from the PA register (c7,c4,0), re-attaching the in-page
// offset afterwards.
509 Page_table::current_virt_to_phys(void *virt)
512 Mword offset = (Mword)virt & ~Config::PAGE_MASK;
513 asm volatile("mcr p15,0,%1,c7,c8,0 \n"
514 "mrc p15,0,%0,c7,c4,0 \n"
516 : "r" ((Mword)virt & Config::PAGE_MASK));
517 return (phys & Config::PAGE_MASK) | offset;
521 //-----------------------------------------------------------------------------
522 IMPLEMENTATION [arm]:
// Page directories are 16KB; allocate/free them from the kernel allocator
// with order 14 (2^14 bytes).
528 void *Page_table::operator new(size_t s)
531 assert(s == 16*1024);
532 return alloc()->alloc(14); // 2^14 = 16K
536 void Page_table::operator delete(void *b)
538 alloc()->free(14, b);
// Clear all 4096 directory entries; clean them from the D-cache when the
// hardware walker does not snoop it.
543 Page_table::Page_table()
545 for( unsigned i = 0; i< 4096; ++i )
547 if (Pte::need_cache_clean())
548 Mem_unit::clean_dcache(raw, raw + 4096);
// Free all coarse (2nd-level) page tables covering [start, end); superpage
// entries have no 2nd-level table and are skipped. Coarse tables are 1KB
// (order 10).
552 void Page_table::free_page_tables(void *start, void *end)
555 for (unsigned i = (Address)start >> 20; i < ((Address)end >> 20); ++i)
557 Pte p(this, 0, raw + i);
558 if (p.valid() && !p.superpage())
560 void *pt = (void*)Mem_layout::phys_to_pmem(p.raw() & Pt_base_mask);
561 alloc()->free(10, pt);
// Page-directory index: one entry per 1MB of virtual address space.
567 static unsigned Page_table::pd_index( void const *const address )
569 return (Mword)address >> 20; // 1MB steps
// Coarse-table index: 256 entries of 4KB each.
573 static unsigned Page_table::pt_index( void const *const address )
575 return ((Mword)address >> 12) & 255; // 4KB steps for coarse pts
// Walk to the entry mapping 'va'. When a 4KB mapping is requested and no
// 2nd-level table exists yet, allocate a 1KB coarse table (order 10),
// install it in the directory, and make it visible to the hardware walker.
// Branch/error context is partially elided in this excerpt.
580 Page_table::walk(void *va, unsigned long size, bool write_back, Ram_quota *q)
582 unsigned const pd_idx = pd_index(va);
586 Pte pde(this, 0, raw + pd_idx);
590 if (size == (4 << 10))
595 pt = (Mword*)alloc()->alloc(10);
596 if (EXPECT_FALSE(!pt))
// Zero the fresh coarse table, then clean it from the D-cache if needed.
602 Mem::memset_mwords(pt, 0, 1024 >> 2);
604 if (write_back || Pte::need_cache_clean())
605 Mem_unit::clean_dcache(pt, (char*)pt + 1024);
607 raw[pd_idx] = current_virt_to_phys(pt) | Pde_type_coarse;
609 if (write_back || Pte::need_cache_clean())
610 Mem_unit::clean_dcache(raw + pd_idx);
// Superpages have no 2nd level; the elided branch returns the PDE itself.
615 else if (pde.superpage())
619 pt = (Mword *)Mem_layout::phys_to_pmem(pde.raw() & Pt_base_mask);
621 unsigned const pt_idx = pt_index(va);
623 return Pte(this, 1, pt + pt_idx);
// One-time MMU setup: program the CP15 domain access control register
// (domain 0 = client) and record the initial page table for CPU 0.
628 void Page_table::init(Page_table *current)
630 unsigned domains = 0x0001;
631 _current.cpu(0) = current;
634 "mcr p15, 0, %0, c3, c0 \n" // domains
// Copy the page-directory entries covering 'size' bytes from table 'o' at
// 'base' into this table at 'my_base'; caches/TLB are flushed when live
// destination entries get replaced.
641 void Page_table::copy_in(void *my_base, Page_table *o,
642 void *base, size_t size, unsigned long asid)
644 unsigned pd_idx = pd_index(my_base);
645 unsigned pd_idx_max = pd_index(my_base) + pd_index((void*)size);
646 unsigned o_pd_idx = pd_index(base);
647 bool need_flush = false;
649 //printf("copy_in: %03x-%03x from %03x\n", pd_idx, pd_idx_max, o_pd_idx);
// If any destination PDE is currently valid, the data cache is flushed
// (flow context partially elided in this excerpt).
653 for (unsigned i = pd_idx; i < pd_idx_max; ++i)
654 if (Pte(this, 0, raw + i).valid())
656 Mem_unit::flush_vdcache();
662 for (unsigned i = pd_idx; i < pd_idx_max; ++i, ++o_pd_idx)
663 raw[i] = o->raw[o_pd_idx];
665 if (Pte::need_cache_clean())
666 Mem_unit::clean_dcache(raw + pd_idx, raw + pd_idx_max);
// asid == ~0UL is used as the "no ASID / skip TLB flush" sentinel.
668 if (need_flush && (asid != ~0UL))
669 Mem_unit::dtlb_flush(asid);
// Remove the page-directory entries covering 'size' bytes at 'my_base';
// flushes the TLB for 'asid' when valid entries were removed
// (asid == ~0UL acts as the "no ASID" sentinel, as in copy_in).
675 Page_table::invalidate(void *my_base, unsigned size, unsigned long asid = ~0UL)
677 unsigned pd_idx = pd_index(my_base);
678 unsigned pd_idx_max = pd_index(my_base) + pd_index((void*)size);
679 bool need_flush = false;
681 //printf("invalidate: %03x-%03x\n", pd_idx, pd_idx_max);
// If any affected PDE is currently valid, the data cache is flushed
// (flow context partially elided in this excerpt).
685 for (unsigned i = pd_idx; i < pd_idx_max; ++i)
686 if (Pte(this, 0, raw + i).valid())
688 Mem_unit::flush_vdcache();
694 for (unsigned i = pd_idx; i < pd_idx_max; ++i)
697 // clean the caches if manipulating the current pt or in the case if phys.
699 if ((asid != ~0UL) || Pte::need_cache_clean())
700 Mem_unit::clean_dcache(raw + pd_idx, raw + pd_idx_max);
702 if (need_flush && (asid != ~0UL))
703 Mem_unit::tlb_flush(asid);
// Accessors for the per-CPU "current page table" pointer.
708 Page_table *Page_table::current(unsigned cpu)
709 { return _current.cpu(cpu); }
711 IMPLEMENT inline NEEDS["globals.h"]
712 Page_table *Page_table::current()
713 { return _current.cpu(current_cpu()); }
// A Page_table is its own directory; cast away const for the caller.
717 Page_table::dir() const
719 return const_cast<Page_table *>(this);