1 //---------------------------------------------------------------------------
9 unsigned long get_ap() const;
10 void set_ap(unsigned long ap);
16 //---------------------------------------------------------------------------
17 INTERFACE[arm && armv5]:
19 EXTENSION class Mem_page_attr
30 //---------------------------------------------------------------------------
31 INTERFACE[arm && (armv6 || armv7)]:
33 EXTENSION class Mem_page_attr
44 //---------------------------------------------------------------------------
45 INTERFACE[arm && !(mpcore || armca9)]:
47 EXTENSION class Mem_page_attr
50 // do not use the Shared bit on non-MP CPUs because this leads to uncached memory
51 // accesses most of the time!
55 EXTENSION class Page_table
58 enum { Ttbr_bits = 0x0 };
61 //---------------------------------------------------------------------------
62 INTERFACE[arm && (mpcore || armca9)]:
64 EXTENSION class Mem_page_attr
67 // use shared bit on MP CPUs as we need cache coherency
68 enum { Shared = 0x400 };
71 //---------------------------------------------------------------------------
72 INTERFACE[arm && mpcore]:
74 EXTENSION class Page_table
77 enum { Ttbr_bits = 0xa };
80 //---------------------------------------------------------------------------
81 INTERFACE[arm && armca9]:
83 EXTENSION class Page_table
88 Ttbr_bits = (1 << 1) // S, Shareable bit
89 | (1 << 3) // RGN, Region bits, Outer WriteBackWriteAlloc
90 | (0 << 0) | (1 << 6) // IRGN, Inner region bits, WB-WA
96 //---------------------------------------------------------------------------
100 #include "per_cpu_data.h"
112 Pte(Page_table *pt, unsigned level, Mword *pte)
113 : _pt((unsigned long)pt | level), _pte(pte)
118 EXTENSION class Page_table
124 Pt_base_mask = 0xfffffc00,
125 Pde_type_coarse = 0x01,
129 //---------------------------------------------------------------------------
130 IMPLEMENTATION [arm && vcache]:
134 Pte::need_cache_clean()
139 //---------------------------------------------------------------------------
140 IMPLEMENTATION [arm && !vcache && !armca9]:
144 Pte::need_cache_clean()
149 //---------------------------------------------------------------------------
150 IMPLEMENTATION [arm && !vcache && armca9]:
154 Pte::need_cache_clean()
159 //---------------------------------------------------------------------------
160 IMPLEMENTATION [arm]:
165 #include "mem_unit.h"
167 #include "ram_quota.h"
169 PUBLIC inline explicit
170 Mem_page_attr::Mem_page_attr(unsigned long attr) : _a(attr)
175 Mem_page_attr::raw() const
180 Mem_page_attr::set_caching(unsigned long del, unsigned long set)
182 del &= Page::Cache_mask;
183 set &= Page::Cache_mask;
184 _a = (_a & ~del) | set;
187 PUBLIC inline NEEDS[Mem_page_attr::get_ap]
189 Mem_page_attr::get_abstract() const
190 { return get_ap() | (_a & Page::Cache_mask); }
192 PUBLIC inline NEEDS[Mem_page_attr::set_ap]
194 Mem_page_attr::set_abstract(unsigned long a)
196 _a = (_a & ~Page::Cache_mask) | (a & Page::Cache_mask);
200 PUBLIC inline NEEDS[Mem_page_attr::get_ap]
202 Mem_page_attr::permits(unsigned long attr)
203 { return (get_ap() & attr) == attr; }
208 { return *_pte & 3; }
219 case 2: return *_pte & ~((1 << 20) - 1); // 1MB
220 default: return ~0UL;
225 case 2: return *_pte & ~((4 << 10) - 1);
226 default: return ~0UL;
228 default: return ~0UL;
234 Pte::phys(void *virt)
236 unsigned long p = phys();
237 return p | (((unsigned long)virt) & (size()-1));
243 { return (_pt & 3); }
252 Pte::superpage() const
253 { return !(_pt & 3) && ((*_pte & 3) == 2); }
264 case 2: return 1 << 20; // 1MB
265 default: return 1 << 20;
270 case 1: return 64 << 10;
271 case 2: return 4 << 10;
272 case 3: return 1 << 10;
273 default: return 4 << 10;
280 PRIVATE inline NEEDS["mem_unit.h"]
282 Pte::__set(unsigned long v, bool write_back)
285 if (write_back || need_cache_clean())
286 Mem_unit::clean_dcache(_pte);
289 PUBLIC inline NEEDS[Pte::__set]
291 Pte::set_invalid(unsigned long val, bool write_back)
292 { __set(val & ~3, write_back); }
295 //-----------------------------------------------------------------------------
296 IMPLEMENTATION [arm && armv5]:
300 Mem_page_attr::get_ap() const
302 static unsigned char const _map[4] = { 0x8, 0x4, 0x0, 0xc };
303 return ((unsigned long)_map[(_a >> 10) & 0x3]) << 8UL;
308 Mem_page_attr::set_ap(unsigned long ap)
310 static unsigned char const _map[4] = { 0x4, 0x4, 0x0, 0xc };
311 _a = (_a & ~0xc00) | (((unsigned long)_map[(ap >> 10) & 0x3]) << 8UL);
314 PUBLIC inline NEEDS[Pte::__set, Mem_page_attr::raw]
316 Pte::set(Address phys, unsigned long size, Mem_page_attr const &attr,
322 if (size != (1 << 20))
324 __set(phys | (attr.raw() & Page::MAX_ATTRIBS) | 2, write_back);
328 if (size != (4 << 10))
330 unsigned long ap = attr.raw() & 0xc00; ap |= ap >> 2; ap |= ap >> 4;
331 __set(phys | (attr.raw() & 0x0c) | ap | 2, write_back);
337 PUBLIC inline NEEDS[Mem_page_attr::Mem_page_attr]
339 Pte::attr() const { return Mem_page_attr(*_pte & 0xc0c); }
341 PUBLIC inline NEEDS["mem_unit.h"]
343 Pte::attr(Mem_page_attr const &attr, bool write_back)
348 __set((*_pte & ~0xc0c) | (attr.raw() & 0xc0c), write_back);
352 unsigned long ap = attr.raw() & 0xc00; ap |= ap >> 2; ap |= ap >> 4;
353 __set((*_pte & ~0xffc) | (attr.raw() & 0x0c) | ap, write_back);
360 void Page_table::activate()
362 Pte p = walk(this, 0, false, Ptab::Null_alloc(), 0);
363 Mem_unit::flush_vcache();
365 "mcr p15, 0, r0, c8, c7, 0 \n" // TLB flush
366 "mcr p15, 0, %0, c2, c0 \n" // pdbr
368 "mrc p15, 0, r1, c2, c0 \n"
377 //-----------------------------------------------------------------------------
378 IMPLEMENTATION [arm && (armv6 || armv7)]:
382 Mem_page_attr::get_ap() const
384 return (_a & User) | ((_a & Write) ^ Write);
387 IMPLEMENT inline NEEDS[Mem_page_attr::raw]
389 Mem_page_attr::set_ap(unsigned long ap)
391 _a = (_a & ~(User | Write)) | (ap & User) | ((ap & Write) ^ Write) | 0x10;
394 PUBLIC inline NEEDS[Pte::__set]
396 Pte::set(Address phys, unsigned long size, Mem_page_attr const &attr,
402 if (size != (1 << 20))
405 unsigned long a = attr.raw() & 0x0c; // C & B
406 a |= ((attr.raw() & 0xff0) | Mem_page_attr::Shared) << 6;
407 __set(phys | a | 0x2, write_back);
411 if (size != (4 << 10))
413 __set(phys | (attr.raw() & Page::MAX_ATTRIBS) | 0x2 | Mem_page_attr::Shared, write_back);
418 PUBLIC inline NEEDS[Mem_page_attr::raw]
426 unsigned long a = *_pte & 0x0c; // C & B
427 a |= (*_pte >> 6) & 0xff0;
428 return Mem_page_attr(a);
432 return Mem_page_attr(*_pte & Page::MAX_ATTRIBS);
436 PUBLIC inline NEEDS["mem_unit.h", Mem_page_attr::raw]
438 Pte::attr(Mem_page_attr const &attr, bool write_back)
443 __set((*_pte & ~Page::MAX_ATTRIBS)
444 | (attr.raw() & Page::MAX_ATTRIBS), write_back);
448 unsigned long a = attr.raw() & 0x0c;
449 a |= (attr.raw() & 0xff0) << 6;
450 __set((*_pte & ~0x3fcc) | a, write_back);
456 //-----------------------------------------------------------------------------
457 IMPLEMENTATION [armv6 || armca8]:
460 void Page_table::activate(unsigned long asid)
462 Pte p = walk(this, 0, false, Ptab::Null_alloc(), 0);
464 "mcr p15, 0, %2, c7, c5, 6 \n" // bt flush
465 "mcr p15, 0, r0, c7, c10, 4 \n" // dsb
466 "mcr p15, 0, %0, c2, c0 \n" // set TTBR
467 "mcr p15, 0, r0, c7, c10, 4 \n" // dsb
468 "mcr p15, 0, %1, c13, c0, 1 \n" // set new ASID value
469 "mcr p15, 0, r0, c7, c5, 4 \n" // isb
470 "mcr p15, 0, %2, c7, c5, 6 \n" // bt flush
471 "mrc p15, 0, r1, c2, c0 \n"
475 : "r" (p.phys(this) | Ttbr_bits), "r"(asid), "r" (0)
479 //-----------------------------------------------------------------------------
480 IMPLEMENTATION [armv7 && armca9]:
483 void Page_table::activate(unsigned long asid)
485 Pte p = walk(this, 0, false, Ptab::Null_alloc(), 0);
487 "mcr p15, 0, %2, c7, c5, 6 \n" // bt flush
489 "mcr p15, 0, %2, c13, c0, 1 \n" // change ASID to 0
491 "mcr p15, 0, %0, c2, c0 \n" // set TTBR
493 "mcr p15, 0, %1, c13, c0, 1 \n" // set new ASID value
495 "mcr p15, 0, %2, c7, c5, 6 \n" // bt flush
500 : "r" (p.phys(this) | Ttbr_bits), "r"(asid), "r" (0)
504 //-----------------------------------------------------------------------------
505 IMPLEMENTATION [arm && !mp]:
509 Page_table::current_virt_to_phys(void *virt)
511 return walk(virt, 0, false, Ptab::Null_alloc(), 0).phys(virt);
514 //-----------------------------------------------------------------------------
515 IMPLEMENTATION [arm && mp]:
518 * This version for MP avoids calling Page_table::current() which calls
519 * current_cpu() which is not available on the boot-stack. That's why we use
520 * the following way of doing a virt->phys translation on the current page
525 Page_table::current_virt_to_phys(void *virt)
528 Mword offset = (Mword)virt & ~Config::PAGE_MASK;
529 asm volatile("mcr p15,0,%1,c7,c8,0 \n"
530 "mrc p15,0,%0,c7,c4,0 \n"
532 : "r" ((Mword)virt & Config::PAGE_MASK));
533 return (phys & Config::PAGE_MASK) | offset;
537 //-----------------------------------------------------------------------------
538 IMPLEMENTATION [arm]:
542 #include "auto_quota.h"
546 void *Page_table::operator new(size_t s) throw()
549 assert(s == 16*1024);
550 return alloc()->alloc(14); // 2^14 = 16K
554 void Page_table::operator delete(void *b)
556 alloc()->free(14, b);
561 Page_table::Page_table()
563 Mem::memset_mwords(raw, 0, sizeof(raw) / sizeof(Mword));
564 if (Pte::need_cache_clean())
565 Mem_unit::clean_dcache(raw, (char *)raw + sizeof(raw));
569 PUBLIC template< typename ALLOC >
570 void Page_table::free_page_tables(void *start, void *end, ALLOC const &a)
572 for (unsigned i = (Address)start >> 20; i < ((Address)end >> 20); ++i)
574 Pte p(this, 0, raw + i);
575 if (p.valid() && !p.superpage())
577 void *pt = (void*)Mem_layout::phys_to_pmem(p.raw() & Pt_base_mask);
579 BUG_ON(pt == (void*)~0UL, "cannot get virtual (pmem) address for %lx (pte @ %p)\n",
580 p.raw() & Pt_base_mask, p._pte);
588 static unsigned Page_table::pd_index( void const *const address )
590 return (Mword)address >> 20; // 1MB steps
594 static unsigned Page_table::pt_index( void const *const address )
596 return ((Mword)address >> 12) & 255; // 4KB steps for coarse pts
599 PUBLIC template< typename Alloc >
600 inline NEEDS[<cassert>, "bug.h", Page_table::pd_index,
601 Page_table::current_virt_to_phys, Page_table::pt_index]
603 Page_table::walk(void *va, unsigned long size, bool write_back, Alloc const &q,
606 unsigned const pd_idx = pd_index(va);
610 Pte pde(this, 0, raw + pd_idx);
614 if (size == (4 << 10))
617 pt = (Mword*)q.alloc(1<<10);
618 if (EXPECT_FALSE(!pt))
621 Mem::memset_mwords(pt, 0, 1024 >> 2);
623 if (write_back || Pte::need_cache_clean())
624 Mem_unit::clean_dcache(pt, (char*)pt + 1024);
626 raw[pd_idx] = ldir->current_virt_to_phys(pt) | Pde_type_coarse;
628 if (write_back || Pte::need_cache_clean())
629 Mem_unit::clean_dcache(raw + pd_idx);
634 else if (pde.superpage())
638 pt = (Mword *)Mem_layout::phys_to_pmem(pde.raw() & Pt_base_mask);
640 BUG_ON(pt == (void*)~0UL, "could not get virtual address for %lx (from pte @%p)\n",
641 pde.raw(), pde._pte);
643 return Pte(this, 1, pt + pt_index(va));
648 void Page_table::init()
650 unsigned domains = 0x0001;
653 "mcr p15, 0, %0, c3, c0 \n" // domains
660 void Page_table::copy_in(void *my_base, Page_table *o,
661 void *base, size_t size, unsigned long asid)
663 unsigned pd_idx = pd_index(my_base);
664 unsigned pd_idx_max = pd_index(my_base) + pd_index((void*)size);
665 unsigned o_pd_idx = pd_index(base);
666 bool need_flush = false;
668 //printf("copy_in: %03x-%03x from %03x\n", pd_idx, pd_idx_max, o_pd_idx);
672 for (unsigned i = pd_idx; i < pd_idx_max; ++i)
673 if (Pte(this, 0, raw + i).valid())
675 Mem_unit::flush_vdcache();
681 for (unsigned i = pd_idx; i < pd_idx_max; ++i, ++o_pd_idx)
682 raw[i] = o->raw[o_pd_idx];
684 if (Pte::need_cache_clean())
685 Mem_unit::clean_dcache(raw + pd_idx, raw + pd_idx_max);
687 if (need_flush && (asid != ~0UL))
688 Mem_unit::dtlb_flush(asid);
694 Page_table::invalidate(void *my_base, unsigned size, unsigned long asid = ~0UL)
696 unsigned pd_idx = pd_index(my_base);
697 unsigned pd_idx_max = pd_index(my_base) + pd_index((void*)size);
698 bool need_flush = false;
700 //printf("invalidate: %03x-%03x\n", pd_idx, pd_idx_max);
704 for (unsigned i = pd_idx; i < pd_idx_max; ++i)
705 if (Pte(this, 0, raw + i).valid())
707 Mem_unit::flush_vdcache();
713 for (unsigned i = pd_idx; i < pd_idx_max; ++i)
716 // clean the caches if manipulating the current pt or in the case if phys.
718 if ((asid != ~0UL) || Pte::need_cache_clean())
719 Mem_unit::clean_dcache(raw + pd_idx, raw + pd_idx_max);
721 if (need_flush && (asid != ~0UL))
722 Mem_unit::tlb_flush(asid);
728 Page_table::dir() const
730 return const_cast<Page_table *>(this);