/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 *
 * Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
13 #include <asm/control.h>
14 #include <asm/irqchip.h>
15 #include <asm/percpu.h>
16 #include <asm/setup.h>
17 #include <asm/sysregs.h>
18 #include <jailhouse/control.h>
19 #include <jailhouse/paging.h>
20 #include <jailhouse/processor.h>
21 #include <jailhouse/string.h>
/* Minimal data cache line size in bytes; derived from CTR_EL0 in
 * arch_check_features() below. */
23 unsigned int cache_line_size;
/*
 * Probe CPU feature registers needed by the hypervisor at early init.
 *
 * NOTE(review): this chunk is a lossy extract -- the original file's line
 * numbers are fused into each line and the braces, local declarations and
 * return statement are not visible. Code left byte-identical.
 */
25 static int arch_check_features(void)
/* Read ID_PFR1 (Processor Feature Register 1); presumably checked for
 * virtualization (HYP mode) support -- the consuming test is not visible
 * in this extract, confirm against the full source. */
30 arm_read_sysreg(ID_PFR1_EL1, pfr1);
35 arm_read_sysreg(CTR_EL0, ctr);
36 /* Extract the minimal cache line size */
/* CTR_EL0 bits [19:16] are DminLine, the log2 of the smallest D-cache
 * line in words; 4 << DminLine converts that to bytes (per ARM ARM). */
37 cache_line_size = 4 << (ctr >> 16 & 0xf);
/*
 * Early architecture initialization: verify required CPU features, then
 * set up the root cell's stage-2 MMU state.
 *
 * Returns 0 on success, a negative error code otherwise.
 * NOTE(review): lossy extract -- the early-return line after the feature
 * check is not visible here. Code left byte-identical.
 */
42 int arch_init_early(void)
46 if ((err = arch_check_features()) != 0)
49 return arch_mmu_cell_init(&root_cell);
/*
 * Per-CPU hypervisor initialization, entered from Linux: save the Linux
 * context, switch exception level, install guest traps and set up the
 * per-CPU MMU and interrupt-controller state.
 *
 * NOTE(review): lossy extract -- error-check/return lines between the
 * calls are missing. Code left byte-identical.
 */
52 int arch_cpu_init(struct per_cpu *cpu_data)
/* Guest trap configuration -- assumed from the bit names, confirm
 * against the HCR definition: VM = enable stage-2 translation,
 * IMO/FMO = route IRQ/FIQ to the hypervisor, TSC = trap SMC,
 * TAC = trap auxiliary control register accesses. */
55 unsigned long hcr = HCR_VM_BIT | HCR_IMO_BIT | HCR_FMO_BIT
56 | HCR_TSC_BIT | HCR_TAC_BIT;
/* No pending PSCI entry point; the virtual CPU id mirrors the
 * physical one for the root cell. */
58 cpu_data->psci_mbox.entry = 0;
59 cpu_data->virt_id = cpu_data->cpu_id;
60 cpu_data->mpidr = phys_processor_id();
63 * Copy the registers to restore from the linux stack here, because we
64 * won't be able to access it later
66 memcpy(&cpu_data->linux_reg, (void *)cpu_data->linux_sp, NUM_ENTRY_REGS
67 * sizeof(unsigned long));
69 err = switch_exception_level(cpu_data);
74 * Save pointer in the thread local storage
75 * Must be done early in order to handle aborts and errors in the setup
78 arm_write_sysreg(TPIDR_EL2, cpu_data);
80 /* Setup guest traps */
81 arm_write_sysreg(HCR, hcr);
/* Per-CPU stage-2 MMU state, then the per-CPU interrupt controller. */
83 err = arch_mmu_cpu_cell_init(cpu_data);
91 err = irqchip_cpu_init(cpu_data);
/*
 * Late architecture initialization for the root cell: interrupt
 * controller, platform SMP operations, and the final mapping of the
 * root cell's memory regions.
 *
 * NOTE(review): lossy extract -- the error-return lines between calls
 * are not visible. Code left byte-identical.
 */
96 int arch_init_late(void)
100 /* Setup the SPI bitmap */
101 err = irqchip_cell_init(&root_cell);
105 /* Platform-specific SMP operations */
106 register_smp_ops(&root_cell);
108 err = root_cell.arch.smp->init(&root_cell);
/* Map the root cell's memory regions into its stage-2 tables. */
112 return map_root_memory_regions();
/*
 * Final step of CPU initialization: return to the kernel at EL1 with
 * the saved Linux register context while the hypervisor stays resident
 * behind the new stage-2 translation. Never returns to the caller.
 *
 * NOTE(review): lossy extract -- the surrounding asm statement lines of
 * the fragment below are missing. Code left byte-identical.
 */
115 void __attribute__((noreturn)) arch_cpu_activate_vmm(struct per_cpu *cpu_data)
117 /* Return to the kernel */
118 cpu_prepare_return_el1(cpu_data, 0);
121 /* Reset the hypervisor stack */
124 * We don't care about clobbering the other registers from now
125 * on. Must be in sync with arch_entry.
/* Reload r0-r12 from the saved Linux entry registers (%1). */
127 "ldm %1, {r0 - r12}\n\t"
129 * After this, the kernel won't be able to access the hypervisor
/* Asm inputs: %0 = top of the per-CPU hypervisor stack,
 * %1 = the Linux registers saved in arch_cpu_init(). */
134 : "r" (cpu_data->stack + sizeof(cpu_data->stack)),
135 "r" (cpu_data->linux_reg));
137 __builtin_unreachable();
/*
 * Tear down the hypervisor state on this CPU: shut down the per-CPU
 * interrupt controller, clear the trap/TLS/stage-2 control registers,
 * flush the stage-2 TLB entries and dismantle the hypervisor MMU.
 *
 * The statement order is deliberate; see the VMID note below.
 */
140 void arch_shutdown_self(struct per_cpu *cpu_data)
142 irqchip_cpu_shutdown(cpu_data);
/* Disable guest traps, clear the per-CPU TLS pointer and the stage-2
 * translation control register. */
145 arm_write_sysreg(HCR, 0);
146 arm_write_sysreg(TPIDR_EL2, 0);
147 arm_write_sysreg(VTCR_EL2, 0);
149 /* Remove stage-2 mappings */
150 arch_cpu_tlb_flush(cpu_data);
152 /* TLB flush needs the cell's VMID */
/* Only clear VTTBR after the VMID-tagged flush above has run. */
154 arm_write_sysreg(VTTBR_EL2, 0);
157 arch_shutdown_mmu(cpu_data);
160 void arch_cpu_restore(struct per_cpu *cpu_data, int return_code)
162 struct registers *ctx = guest_regs(cpu_data);
165 * If we haven't reached switch_exception_level yet, there is nothing to
172 * Otherwise, attempt do disable the MMU and return to EL1 using the
173 * arch_shutdown path. cpu_return will fill the banked registers and the
174 * guest regs structure (stored at the beginning of the stack) to
177 cpu_prepare_return_el1(cpu_data, return_code);
179 memcpy(&ctx->usr, &cpu_data->linux_reg,
180 NUM_ENTRY_REGS * sizeof(unsigned long));
182 arch_shutdown_self(cpu_data);