#include "mem_layout.h"

//---------------------------------------------------------------------------

#include "kmem_space.h"

// Kernel page-directory storage; defined in assembly / the linker script.
extern char kernel_page_directory[];

// Strongly typed integer wrappers (cxx_int) used by the bootstrap
// mapping code: a page order and a 32-bit virtual address shiftable
// by that order.
typedef cxx::int_type<unsigned, Order_t> Order;
typedef cxx::int_type_order<Unsigned32, Virt_addr_t, Order> Virt_addr;

  // Offset added to a link-time (virtual) kernel address to get the
  // address it occupies before the MMU is enabled.
  // assumes the kernel is linked at Map_base and loaded at
  // Sdram_phys_base -- TODO confirm against the linker script.
  Virt_ofs = Mem_layout::Sdram_phys_base - Mem_layout::Map_base,
//---------------------------------------------------------------------------
INTERFACE [arm && armv5]:

// ARMv5 has no ASID support; this variant has nothing to program.
inline void set_asid()

//---------------------------------------------------------------------------
INTERFACE [arm && (armv6 || armv7)]:

  // Run the kernel in ASID 0 by writing CONTEXTIDR (CP15 c13, c0, 1).
  asm volatile ("mcr p15, 0, %0, c13, c0, 1" : : "r" (0)); // ASID 0

//---------------------------------------------------------------------------
INTERFACE [arm && !arm_lpae]:

  // Short-descriptor format: TTBR0 is 32 bits, written with a single mcr
  // (CP15 c2, c0).
  asm volatile("mcr p15, 0, %[pdir], c2, c0" // TTBR0
      : : [pdir] "r" (pdir));

//---------------------------------------------------------------------------
INTERFACE [arm && arm_lpae]:

  // LPAE: TTBR0 is 64 bits wide, so the two-register mcrr form is needed.
  asm volatile("mcrr p15, 0, %[pdir], %[null], c2" // TTBR0
//---------------------------------------------------------------------------
IMPLEMENTATION [arm && arm1176_cache_alias_fix]:

// ARM1176 cache-alias workaround: if the cache type register (CP15
// c0, c0, 1) reports an aliasing cache geometry (bit 23 or bit 11
// set), set bit 6 of the auxiliary control register (CP15 c1, c0, 1)
// to restrict the cache so virtual aliases cannot occur.
do_arm_1176_cache_alias_workaround()
  // Read the cache type register.
  asm volatile ("mrc p15, 0, %0, c0, c0, 1 \n" : "=r" (v));
  if (v & ((1 << 23) | (1 << 11)))
    // Read-modify-write the auxiliary control register, setting bit 6.
    asm volatile ("mrc p15, 0, r0, c1, c0, 1 \n"
                  "orr r0, r0, #(1 << 6) \n"
                  "mcr p15, 0, r0, c1, c0, 1 \n"

//---------------------------------------------------------------------------
IMPLEMENTATION [arm && !arm1176_cache_alias_fix]:

// No-op variant for configurations that do not need the ARM1176 fix.
static void do_arm_1176_cache_alias_workaround() {}
104 //---------------------------------------------------------------------------
105 IMPLEMENTATION [arm && arm_lpae]:
107 #include <cxx/cxx_int>
109 extern char kernel_lpae_dir[];
111 namespace Bootstrap {
112 typedef cxx::int_type_order<Unsigned64, Phys_addr_t, Order> Phys_addr;
113 inline Order map_page_order() { return Order(21); }
115 inline Phys_addr pt_entry(Phys_addr pa, bool cache, bool local)
117 Phys_addr res = cxx::mask_lsb(pa, map_page_order()) | Phys_addr(1); // this is a block
120 res |= Phys_addr(1 << 11); // nG flag
125 res |= Phys_addr(1 << 10); // AF
126 res |= Phys_addr(3 << 8); // Inner sharable
130 inline Phys_addr init_paging(void *const page_dir)
132 Phys_addr *const lpae = reinterpret_cast<Phys_addr*>(kernel_lpae_dir + Virt_ofs);
134 for (unsigned i = 0; i < 4; ++i)
135 lpae[i] = Phys_addr(((Address)page_dir + 0x1000 * i) | 3);;
137 asm volatile ("mcr p15, 0, %0, c10, c2, 0 \n" // MAIR0
138 : : "r"(Page::Mair0_bits));
140 return Phys_addr((Mword)lpae);
//---------------------------------------------------------------------------
IMPLEMENTATION [arm && !arm_lpae]:

#include <cxx/cxx_int>

namespace Bootstrap {
typedef cxx::int_type_order<Unsigned32, Phys_addr_t, Order> Phys_addr;
// Short-descriptor sections cover 1 MB (order 20).
inline Order map_page_order() { return Order(20); }

// Build a 1 MB section descriptor for 'pa'; cacheability and
// global/local attributes come from the Page:: section constants.
inline Phys_addr pt_entry(Phys_addr pa, bool cache, bool local)
  return cxx::mask_lsb(pa, map_page_order())
    | Phys_addr(cache ? Page::Section_cachable : Page::Section_no_cache)
    | Phys_addr(local ? Page::Section_local : Page::Section_global);

// Without LPAE there is no extra first-level table: the page
// directory itself is what goes into TTBR0.
// NOTE(review): presumably 'page_dir' is already a physical address
// here -- confirm against the caller in bootstrap_main.
inline Phys_addr init_paging(void *const page_dir)
  return Phys_addr((Mword)page_dir);
//---------------------------------------------------------------------------
IMPLEMENTATION [arm]:

namespace Bootstrap {

// Size of one bootstrap mapping (1 MB section / 2 MB LPAE block),
// as a physical and as a virtual address increment.
inline Phys_addr map_page_size_phys() { return Phys_addr(1) << map_page_order(); }
inline Virt_addr map_page_size() { return Virt_addr(1) << map_page_order(); }

// Store the entry mapping 'va' -> 'pa' into page directory 'pd'.
map_memory(void volatile *pd, Virt_addr va, Phys_addr pa,
           bool cache, bool local)
  Phys_addr *const p = (Phys_addr*)pd;
  p[cxx::int_value<Virt_addr>(va >> map_page_order())]
    = pt_entry(pa, cache, local);

// Build the mappings needed to survive turning the MMU on: the kernel
// RAM mapped at its virtual link address, plus an identity mapping of
// the same RAM so the currently executing (physically addressed) code
// keeps working across the switch.
create_initial_mappings(void *const page_dir)
  typedef Bootstrap::Phys_addr Phys_addr;
  typedef Bootstrap::Virt_addr Virt_addr;

  // map sdram linear from 0xf0000000
  // (4 MB starting at Sdram_phys_base mapped at Map_base, global)
  for (va = Virt_addr(Mem_layout::Map_base), pa = Phys_addr(Mem_layout::Sdram_phys_base);
       va < Virt_addr(Mem_layout::Map_base + (4 << 20));
       va += Bootstrap::map_page_size(), pa += Bootstrap::map_page_size_phys())
    Bootstrap::map_memory(page_dir, va, pa, true, false);

  // identity-map the same 4 MB of RAM; marked 'local' (non-global) so
  // it can be dropped per-ASID later
  for (va = Virt_addr(Mem_layout::Sdram_phys_base);
       va < Virt_addr(Mem_layout::Sdram_phys_base + (4 << 20));
       va += Bootstrap::map_page_size())
    Bootstrap::map_memory(page_dir, va, Phys_addr(cxx::int_value<Virt_addr>(va)), true, true);

// The first 4MB of phys memory are always mapped to Map_base
Mem_layout::add_pmem(Mem_layout::Sdram_phys_base, Mem_layout::Map_base,
218 ".section .text.init,#alloc,#execinstr \n"
221 " ldr sp, __init_data \n"
222 " bl bootstrap_main \n"
#include "globalconfig.h"

// BSS boundaries of the bootstrapper and the kernel proper, provided
// by the linker script.
extern char bootstrap_bss_start[];
extern char bootstrap_bss_end[];
extern char __bss_start[];
extern char __bss_end[];

// Kernel entry point, reached once paging is on; long_call because it
// lives at the distant virtual link address.
extern "C" void _start_kernel(void) __attribute__((long_call));

// C entry point of the bootstrapper: set up the initial page tables,
// program the CP15 translation state, and enable the MMU.
extern "C" void bootstrap_main()
  // Physical location of the kernel page directory (MMU still off).
  void *const page_dir = kernel_page_directory + Bootstrap::Virt_ofs;

  // Value destined for TTBR0.
  // NOTE(review): 'tbbr' is presumably a typo for 'ttbr' -- harmless,
  // but worth renaming together with its use below.
  Unsigned32 tbbr = cxx::int_value<Bootstrap::Phys_addr>(Bootstrap::init_paging(page_dir))

  Bootstrap::create_initial_mappings(page_dir);

  unsigned domains = 0x55555555; // client for all domains
  unsigned control = Config::Cache_enabled
                     ? Cpu::Cp15_c1_cache_enabled : Cpu::Cp15_c1_cache_disabled;

  // Clean the caches before translation is switched on.
  Mmu<Bootstrap::Cache_flush_area, true>::flush_cache();

  Bootstrap::do_arm_1176_cache_alias_workaround();
  Bootstrap::set_asid();

  asm volatile("mcr p15, 0, %[ttbcr], c2, c0, 2" // TTBCR
      : : [ttbcr] "r" (Page::Ttbcr_bits));

  // Invalidate the entire unified TLB.
  asm volatile("mcr p15, 0, %[null], c8, c7, 0" // TLBIALL

  asm volatile("mcr p15, 0, %[doms], c3, c0" // domains
      : : [doms] "r" (domains));

  Bootstrap::set_ttbr(tbbr | Page::Ttbr_bits);

  // Writing the control register flips the MMU (and, per 'control',
  // the caches) on.
  asm volatile("mcr p15, 0, %[control], c1, c0" // control
      : : [control] "r" (control));

  Bootstrap::add_initial_pmem();