/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
13 #include <asm/control.h>
14 #include <asm/irqchip.h>
15 #include <asm/percpu.h>
16 #include <asm/setup.h>
17 #include <asm/sysregs.h>
18 #include <jailhouse/control.h>
19 #include <jailhouse/paging.h>
20 #include <jailhouse/string.h>
22 unsigned int cache_line_size;
24 static int arch_check_features(void)
29 arm_read_sysreg(ID_PFR1_EL1, pfr1);
34 arm_read_sysreg(CTR_EL0, ctr);
35 /* Extract the minimal cache line size */
36 cache_line_size = 4 << (ctr >> 16 & 0xf);
41 int arch_init_early(void)
45 if ((err = arch_check_features()) != 0)
48 return arch_mmu_cell_init(&root_cell);
51 int arch_cpu_init(struct per_cpu *cpu_data)
54 unsigned long hcr = HCR_VM_BIT | HCR_IMO_BIT | HCR_FMO_BIT
55 | HCR_TSC_BIT | HCR_TAC_BIT;
57 cpu_data->psci_mbox.entry = 0;
58 cpu_data->virt_id = cpu_data->cpu_id;
61 * Copy the registers to restore from the linux stack here, because we
62 * won't be able to access it later
64 memcpy(&cpu_data->linux_reg, (void *)cpu_data->linux_sp, NUM_ENTRY_REGS
65 * sizeof(unsigned long));
67 err = switch_exception_level(cpu_data);
72 * Save pointer in the thread local storage
73 * Must be done early in order to handle aborts and errors in the setup
76 arm_write_sysreg(TPIDR_EL2, cpu_data);
78 /* Setup guest traps */
79 arm_write_sysreg(HCR, hcr);
81 err = arch_mmu_cpu_cell_init(cpu_data);
89 err = irqchip_cpu_init(cpu_data);
94 int arch_init_late(void)
98 /* Setup the SPI bitmap */
99 err = irqchip_cell_init(&root_cell);
103 /* Platform-specific SMP operations */
104 register_smp_ops(&root_cell);
106 err = root_cell.arch.smp->init(&root_cell);
110 return map_root_memory_regions();
113 void __attribute__((noreturn)) arch_cpu_activate_vmm(struct per_cpu *cpu_data)
115 /* Return to the kernel */
116 cpu_prepare_return_el1(cpu_data, 0);
119 /* Reset the hypervisor stack */
122 * We don't care about clobbering the other registers from now
123 * on. Must be in sync with arch_entry.
125 "ldm %1, {r0 - r12}\n\t"
127 * After this, the kernel won't be able to access the hypervisor
132 : "r" (cpu_data->stack + sizeof(cpu_data->stack)),
133 "r" (cpu_data->linux_reg));
135 __builtin_unreachable();
138 void arch_shutdown_self(struct per_cpu *cpu_data)
140 irqchip_cpu_shutdown(cpu_data);
143 arm_write_sysreg(HCR, 0);
144 arm_write_sysreg(TPIDR_EL2, 0);
145 arm_write_sysreg(VTCR_EL2, 0);
147 /* Remove stage-2 mappings */
148 arch_cpu_tlb_flush(cpu_data);
150 /* TLB flush needs the cell's VMID */
152 arm_write_sysreg(VTTBR_EL2, 0);
155 arch_shutdown_mmu(cpu_data);
158 void arch_cpu_restore(struct per_cpu *cpu_data, int return_code)
160 struct registers *ctx = guest_regs(cpu_data);
163 * If we haven't reached switch_exception_level yet, there is nothing to
170 * Otherwise, attempt do disable the MMU and return to EL1 using the
171 * arch_shutdown path. cpu_return will fill the banked registers and the
172 * guest regs structure (stored at the beginning of the stack) to
175 cpu_prepare_return_el1(cpu_data, return_code);
177 memcpy(&ctx->usr, &cpu_data->linux_reg,
178 NUM_ENTRY_REGS * sizeof(unsigned long));
180 arch_shutdown_self(cpu_data);