/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
13 #include <asm/control.h>
14 #include <asm/setup.h>
15 #include <asm/setup_mmu.h>
16 #include <asm/sysregs.h>
17 #include <jailhouse/paging.h>
18 #include <jailhouse/printk.h>
/*
 * Two identity mappings need to be created for enabling the MMU: one for the
 * code and one for the stack.
 * There should not currently be any conflict with the existing mappings, but
 * we still make sure not to override anything by using the 'conflict' flag.
 */
/* Boundaries of the id-mapped trampoline section, provided by the linker. */
extern unsigned long trampoline_start, trampoline_end;

/* When disabling Jailhouse, we will need to restore the Linux stub */
static unsigned long saved_vectors = 0;
37 static int set_id_map(int i, unsigned long address, unsigned long size)
39 if (i >= ARRAY_SIZE(id_maps))
42 /* The trampoline code should be contained in one page. */
43 if ((address & PAGE_MASK) != ((address + size - 1) & PAGE_MASK)) {
44 printk("FATAL: Unable to IDmap more than one page at at time.\n");
48 id_maps[i].addr = address;
49 id_maps[i].conflict = false;
50 id_maps[i].flags = PAGE_DEFAULT_FLAGS;
55 static void create_id_maps(void)
60 for (i = 0; i < ARRAY_SIZE(id_maps); i++) {
61 conflict = (paging_virt2phys(&hv_paging_structs,
62 id_maps[i].addr, PAGE_PRESENT_FLAGS) !=
66 * TODO: Get the flags, and update them if they are
67 * insufficient. Save the current flags in id_maps.
68 * This extraction should be implemented in the core.
71 paging_create(&hv_paging_structs, id_maps[i].addr,
72 PAGE_SIZE, id_maps[i].addr, id_maps[i].flags,
75 id_maps[i].conflict = conflict;
79 static void destroy_id_maps(void)
83 for (i = 0; i < ARRAY_SIZE(id_maps); i++) {
84 if (id_maps[i].conflict) {
85 /* TODO: Switch back to the original flags */
87 paging_destroy(&hv_paging_structs, id_maps[i].addr,
88 PAGE_SIZE, PAGING_NON_COHERENT);
94 * This code is put in the id-mapped `.trampoline' section, allowing to enable
95 * and disable the MMU in a readable and portable fashion.
96 * This process makes the following function quite fragile: cpu_switch_phys2virt
97 * attempts to translate LR and SP using a call to the virtual address of
99 * Those two registers are thus supposed to be left intact by the whole MMU
100 * setup. The stack is all the same usable, since it is id-mapped as well.
102 static void __attribute__((naked)) __attribute__((section(".trampoline")))
103 setup_mmu_el2(struct per_cpu *cpu_data, phys2virt_t phys2virt, u64 ttbr)
106 | (TCR_RGN_WB_WA << TCR_IRGN0_SHIFT)
107 | (TCR_RGN_WB_WA << TCR_ORGN0_SHIFT)
108 | (TCR_INNER_SHAREABLE << TCR_SH0_SHIFT)
110 u32 sctlr_el1, sctlr_el2;
112 /* Ensure that MMU is disabled. */
113 arm_read_sysreg(SCTLR_EL2, sctlr_el2);
114 if (sctlr_el2 & SCTLR_M_BIT)
118 * This setup code is always preceded by a complete cache flush, so
119 * there is already a few memory barriers between the page table writes
123 arm_write_sysreg(HMAIR0, DEFAULT_HMAIR0);
124 arm_write_sysreg(HMAIR1, DEFAULT_HMAIR1);
125 arm_write_sysreg(TTBR0_EL2, ttbr);
126 arm_write_sysreg(TCR_EL2, tcr);
129 * Flush HYP TLB. It should only be necessary if a previous hypervisor
132 arm_write_sysreg(TLBIALLH, 1);
136 * We need coherency with the kernel in order to use the setup
137 * spinlocks: only enable the caches if they are enabled at EL1.
139 arm_read_sysreg(SCTLR_EL1, sctlr_el1);
140 sctlr_el1 &= (SCTLR_I_BIT | SCTLR_C_BIT);
142 /* Enable stage-1 translation */
143 arm_read_sysreg(SCTLR_EL2, sctlr_el2);
144 sctlr_el2 |= SCTLR_M_BIT | sctlr_el1;
145 arm_write_sysreg(SCTLR_EL2, sctlr_el2);
149 * Inlined epilogue that returns to switch_exception_level.
150 * Must not touch anything else than the stack
152 cpu_switch_phys2virt(phys2virt);
154 /* Not reached (cannot be a while(1), it confuses the compiler) */
159 * Shutdown the MMU and returns to EL1 with the kernel context stored in `regs'
161 static void __attribute__((naked)) __attribute__((section(".trampoline")))
162 shutdown_el2(struct registers *regs, unsigned long vectors)
166 /* Disable stage-1 translation, caches must be cleaned. */
167 arm_read_sysreg(SCTLR_EL2, sctlr_el2);
168 sctlr_el2 &= ~(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT);
169 arm_write_sysreg(SCTLR_EL2, sctlr_el2);
172 /* Clean the MMU registers */
173 arm_write_sysreg(HMAIR0, 0);
174 arm_write_sysreg(HMAIR1, 0);
175 arm_write_sysreg(TTBR0_EL2, 0);
176 arm_write_sysreg(TCR_EL2, 0);
179 /* Reset the vectors as late as possible */
180 arm_write_sysreg(HVBAR, vectors);
185 static void check_mmu_map(unsigned long virt_addr, unsigned long phys_addr)
187 unsigned long phys_base;
190 arm_write_sysreg(ATS1HR, virt_addr);
192 arm_read_sysreg(PAR_EL1, par);
193 phys_base = (unsigned long)(par & PAR_PA_MASK);
194 if ((par & PAR_F_BIT) || (phys_base != phys_addr)) {
195 printk("VA->PA check failed, expected %x, got %x\n",
196 phys_addr, phys_base);
202 * Jumping to EL2 in the same C code represents an interesting challenge, since
203 * it will switch from virtual addresses to physical ones, and then back to
204 * virtual after setting up the EL2 MMU.
205 * To this end, the setup_mmu and cpu_switch_el2 functions are naked and must
206 * handle the stack themselves.
208 int switch_exception_level(struct per_cpu *cpu_data)
210 extern unsigned long bootstrap_vectors;
211 extern unsigned long hyp_vectors;
213 /* Save the virtual address of the phys2virt function for later */
214 phys2virt_t phys2virt = paging_phys2hvirt;
215 virt2phys_t virt2phys = paging_hvirt2phys;
216 unsigned long phys_bootstrap = virt2phys(&bootstrap_vectors);
217 struct per_cpu *phys_cpu_data = (struct per_cpu *)virt2phys(cpu_data);
218 unsigned long trampoline_phys = virt2phys((void *)&trampoline_start);
219 unsigned long trampoline_size = &trampoline_end - &trampoline_start;
220 unsigned long stack_virt = (unsigned long)cpu_data->stack;
221 unsigned long stack_phys = virt2phys((void *)stack_virt);
224 /* Check the paging structures as well as the MMU initialisation */
225 unsigned long jailhouse_base_phys =
226 paging_virt2phys(&hv_paging_structs, JAILHOUSE_BASE,
230 * The hypervisor stub allows to fetch its current vector base by doing
231 * an HVC with r0 = -1. They will need to be restored when disabling
234 if (saved_vectors == 0)
235 saved_vectors = hvc(-1);
238 * paging struct won't be easily accessible when initializing el2, only
239 * the CPU datas will be readable at their physical address
241 ttbr_el2 = (u64)virt2phys(hv_paging_structs.root_table) & TTBR_MASK;
244 * Mirror the mmu setup code, so that we are able to jump to the virtual
245 * address after enabling it.
246 * Those regions must fit on one page.
249 if (set_id_map(0, trampoline_phys, trampoline_size) != 0)
251 if (set_id_map(1, stack_phys, PAGE_SIZE) != 0)
256 * Before doing anything hairy, we need to sync the caches with memory:
257 * they will be off at EL2. From this point forward and until the caches
258 * are re-enabled, we cannot write anything critical to memory.
260 arch_cpu_dcaches_flush(CACHES_CLEAN);
262 cpu_switch_el2(phys_bootstrap, virt2phys);
264 * At this point, we are at EL2, and we work with physical addresses.
265 * The MMU needs to be initialised and execution must go back to virtual
266 * addresses before returning, or else we are pretty much doomed.
269 setup_mmu_el2(phys_cpu_data, phys2virt, ttbr_el2);
272 check_mmu_map(JAILHOUSE_BASE, jailhouse_base_phys);
274 /* Set the new vectors once we're back to a sane, virtual state */
275 arm_write_sysreg(HVBAR, &hyp_vectors);
277 /* Remove the identity mapping */
283 void __attribute__((noreturn)) arch_shutdown_mmu(struct per_cpu *cpu_data)
285 static DEFINE_SPINLOCK(map_lock);
287 virt2phys_t virt2phys = paging_hvirt2phys;
288 void *stack_virt = cpu_data->stack;
289 unsigned long stack_phys = virt2phys((void *)stack_virt);
290 unsigned long trampoline_phys = virt2phys((void *)&trampoline_start);
291 struct registers *regs_phys =
292 (struct registers *)virt2phys(guest_regs(cpu_data));
294 /* Jump to the identity-mapped trampoline page before shutting down */
295 void (*shutdown_fun_phys)(struct registers*, unsigned long);
296 shutdown_fun_phys = (void*)virt2phys(shutdown_el2);
299 * No need to check for size or overlapping here, it has already be
300 * done, and the paging structures will soon be deleted. However, the
301 * cells' CPUs may execute this concurrently.
303 spin_lock(&map_lock);
304 paging_create(&hv_paging_structs, stack_phys, PAGE_SIZE, stack_phys,
305 PAGE_DEFAULT_FLAGS, PAGING_NON_COHERENT);
306 paging_create(&hv_paging_structs, trampoline_phys, PAGE_SIZE,
307 trampoline_phys, PAGE_DEFAULT_FLAGS,
308 PAGING_NON_COHERENT);
309 spin_unlock(&map_lock);
311 arch_cpu_dcaches_flush(CACHES_CLEAN);
315 * - disable the MMU whilst inside the trampoline page
316 * - reset the vectors
319 shutdown_fun_phys(regs_phys, saved_vectors);
321 __builtin_unreachable();
324 int arch_map_device(void *paddr, void *vaddr, unsigned long size)
326 return paging_create(&hv_paging_structs, (unsigned long)paddr, size,
327 (unsigned long)vaddr,
328 PAGE_DEFAULT_FLAGS | S1_PTE_FLAG_DEVICE,
329 PAGING_NON_COHERENT);
332 int arch_unmap_device(void *vaddr, unsigned long size)
334 return paging_destroy(&hv_paging_structs, (unsigned long)vaddr, size,
335 PAGING_NON_COHERENT);