/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
13 #include <asm/control.h>
14 #include <asm/irqchip.h>
15 #include <asm/percpu.h>
16 #include <asm/setup.h>
17 #include <asm/sysregs.h>
18 #include <jailhouse/control.h>
19 #include <jailhouse/paging.h>
20 #include <jailhouse/string.h>
22 unsigned int cache_line_size;
24 static int arch_check_features(void)
29 arm_read_sysreg(ID_PFR1_EL1, pfr1);
34 arm_read_sysreg(CTR_EL0, ctr);
35 /* Extract the minimal cache line size */
36 cache_line_size = 4 << (ctr >> 16 & 0xf);
41 int arch_init_early(void)
45 if ((err = arch_check_features()) != 0)
48 return arch_mmu_cell_init(&root_cell);
51 int arch_cpu_init(struct per_cpu *cpu_data)
54 unsigned long hcr = HCR_VM_BIT | HCR_IMO_BIT | HCR_FMO_BIT
55 | HCR_TSC_BIT | HCR_TAC_BIT;
57 cpu_data->psci_mbox.entry = 0;
58 cpu_data->virt_id = cpu_data->cpu_id;
61 * Copy the registers to restore from the linux stack here, because we
62 * won't be able to access it later
64 memcpy(&cpu_data->linux_reg, (void *)cpu_data->linux_sp, NUM_ENTRY_REGS
65 * sizeof(unsigned long));
67 err = switch_exception_level(cpu_data);
72 * Save pointer in the thread local storage
73 * Must be done early in order to handle aborts and errors in the setup
76 arm_write_sysreg(TPIDR_EL2, cpu_data);
78 /* Setup guest traps */
79 arm_write_sysreg(HCR, hcr);
81 err = arch_mmu_cpu_cell_init(cpu_data);
89 err = irqchip_cpu_init(cpu_data);
94 int arch_init_late(void)
98 /* Setup the SPI bitmap */
99 irqchip_cell_init(&root_cell);
101 /* Platform-specific SMP operations */
102 register_smp_ops(&root_cell);
104 err = root_cell.arch.smp->init(&root_cell);
108 return map_root_memory_regions();
111 void __attribute__((noreturn)) arch_cpu_activate_vmm(struct per_cpu *cpu_data)
113 /* Return to the kernel */
114 cpu_prepare_return_el1(cpu_data, 0);
117 /* Reset the hypervisor stack */
120 * We don't care about clobbering the other registers from now
121 * on. Must be in sync with arch_entry.
123 "ldm %1, {r0 - r12}\n\t"
125 * After this, the kernel won't be able to access the hypervisor
130 : "r" (cpu_data->stack + PERCPU_STACK_END),
131 "r" (cpu_data->linux_reg));
133 __builtin_unreachable();
136 void arch_shutdown_self(struct per_cpu *cpu_data)
138 irqchip_cpu_shutdown(cpu_data);
141 arm_write_sysreg(HCR, 0);
142 arm_write_sysreg(TPIDR_EL2, 0);
143 arm_write_sysreg(VTCR_EL2, 0);
145 /* Remove stage-2 mappings */
146 arch_cpu_tlb_flush(cpu_data);
148 /* TLB flush needs the cell's VMID */
150 arm_write_sysreg(VTTBR_EL2, 0);
153 arch_shutdown_mmu(cpu_data);
156 void arch_cpu_restore(struct per_cpu *cpu_data, int return_code)
158 struct registers *ctx = guest_regs(cpu_data);
161 * If we haven't reached switch_exception_level yet, there is nothing to
168 * Otherwise, attempt do disable the MMU and return to EL1 using the
169 * arch_shutdown path. cpu_return will fill the banked registers and the
170 * guest regs structure (stored at the beginning of the stack) to
173 cpu_prepare_return_el1(cpu_data, return_code);
175 memcpy(&ctx->usr, &cpu_data->linux_reg,
176 NUM_ENTRY_REGS * sizeof(unsigned long));
178 arch_shutdown_self(cpu_data);