/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <jailhouse/control.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>
#include <asm/control.h>
#include <asm/irqchip.h>
#include <asm/platform.h>
#include <asm/processor.h>
#include <asm/sysregs.h>
#include <asm/traps.h>

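/*
 * Wipe the guest (EL1 and usr) register context so that the CPU looks
 * freshly reset to the cell that is about to run on it.
 */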
static void arch_reset_el1(struct registers *regs)
{
	u32 sctlr;

	/* Wipe all banked and usr regs */
	memset(regs, 0, sizeof(struct registers));

	arm_write_banked_reg(SP_usr, 0);
	arm_write_banked_reg(SP_svc, 0);
	arm_write_banked_reg(SP_abt, 0);
	arm_write_banked_reg(SP_und, 0);
	arm_write_banked_reg(SP_svc, 0);
	arm_write_banked_reg(SP_irq, 0);
	arm_write_banked_reg(SP_fiq, 0);
	arm_write_banked_reg(LR_svc, 0);
	arm_write_banked_reg(LR_abt, 0);
	arm_write_banked_reg(LR_und, 0);
	arm_write_banked_reg(LR_svc, 0);
	arm_write_banked_reg(LR_irq, 0);
	arm_write_banked_reg(LR_fiq, 0);
	arm_write_banked_reg(R8_fiq, 0);
	arm_write_banked_reg(R9_fiq, 0);
	arm_write_banked_reg(R10_fiq, 0);
	arm_write_banked_reg(R11_fiq, 0);
	arm_write_banked_reg(R12_fiq, 0);
	arm_write_banked_reg(SPSR_svc, 0);
	arm_write_banked_reg(SPSR_abt, 0);
	arm_write_banked_reg(SPSR_und, 0);
	arm_write_banked_reg(SPSR_svc, 0);
	arm_write_banked_reg(SPSR_irq, 0);
	arm_write_banked_reg(SPSR_fiq, 0);

	/* Wipe the system registers */
	arm_read_sysreg(SCTLR_EL1, sctlr);
	sctlr = sctlr & ~SCTLR_MASK;
	arm_write_sysreg(SCTLR_EL1, sctlr);
	arm_write_sysreg(CPACR_EL1, 0);
	arm_write_sysreg(CONTEXTIDR_EL1, 0);
	arm_write_sysreg(PAR_EL1, 0);
	arm_write_sysreg(TTBR0_EL1, 0);
	arm_write_sysreg(TTBR1_EL1, 0);
	arm_write_sysreg(CSSELR_EL1, 0);

	arm_write_sysreg(CNTKCTL_EL1, 0);
	arm_write_sysreg(CNTP_CTL_EL0, 0);
	arm_write_sysreg(CNTP_CVAL_EL0, 0);
	arm_write_sysreg(CNTV_CTL_EL0, 0);
	arm_write_sysreg(CNTV_CVAL_EL0, 0);

	/* AArch32 specific */
	arm_write_sysreg(TTBCR, 0);
	arm_write_sysreg(DACR, 0);
	arm_write_sysreg(VBAR, 0);
	arm_write_sysreg(DFSR, 0);
	arm_write_sysreg(DFAR, 0);
	arm_write_sysreg(IFSR, 0);
	arm_write_sysreg(IFAR, 0);
	arm_write_sysreg(ADFSR, 0);
	arm_write_sysreg(AIFSR, 0);
	arm_write_sysreg(MAIR0, 0);
	arm_write_sysreg(MAIR1, 0);
	arm_write_sysreg(AMAIR0, 0);
	arm_write_sysreg(AMAIR1, 0);
	arm_write_sysreg(TPIDRURW, 0);
	arm_write_sysreg(TPIDRURO, 0);
	arm_write_sysreg(TPIDRPRW, 0);
}

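/*
 * Reset the calling CPU on behalf of its cell: flush the cell's caches,
 * restore an empty EL1 context, then spin until the cell's SMP code provides
 * a reset address and resume the guest there.
 */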
void arch_reset_self(struct per_cpu *cpu_data)
{
	int err;
	unsigned long reset_address;
	struct cell *cell = cpu_data->cell;
	struct registers *regs = guest_regs(cpu_data);
	bool is_shutdown = cpu_data->shutdown;

	err = arch_mmu_cpu_cell_init(cpu_data);
	if (err)
		printk("MMU setup failed\n");

	/*
	 * On the first CPU to reach this, write all cell data back to memory
	 * so the cell can be started with caches disabled.
	 * On all CPUs, invalidate the instruction caches so that any newly
	 * written instructions are picked up.
	 */
	arch_cell_caches_flush(cell);

	/*
	 * We come from the IRQ handler, but we won't return there, so the IPI
	 * is deactivated here.
	 */
	irqchip_eoi_irq(SGI_CPU_OFF, true);

	if (is_shutdown) {
#ifndef CONFIG_MACH_VEXPRESS
		if (cell != &root_cell) {
			irqchip_cpu_shutdown(cpu_data);

			smc(PSCI_CPU_OFF, 0, 0, 0);
			smc(PSCI_CPU_OFF_V0_1_UBOOT, 0, 0, 0);
			panic_printk("FATAL: PSCI_CPU_OFF failed\n");
			panic_stop();
		}
#endif
		/* arch_shutdown_self resets the GIC on all remaining CPUs. */
	} else {
		err = irqchip_cpu_reset(cpu_data);
		if (err)
			printk("IRQ setup failed\n");
	}

	/* Wait for the driver to call cpu_up */
	if (cell == &root_cell || is_shutdown)
		reset_address = arch_smp_spin(cpu_data, root_cell.arch.smp);
	else
		reset_address = arch_smp_spin(cpu_data, cell->arch.smp);

	/* Set the new MPIDR */
	arm_write_sysreg(VMPIDR_EL2, cpu_data->virt_id | MPIDR_MP_BIT);

	/* Restore an empty context */
	arch_reset_el1(regs);

	arm_write_banked_reg(ELR_hyp, reset_address);
	arm_write_banked_reg(SPSR_hyp, RESET_PSR);

	if (is_shutdown)
		/* Won't return here. */
		arch_shutdown_self(cpu_data);

	vmreturn(regs);
}

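/*
 * Park the calling CPU in the PSCI emulation until it is resumed, then flush
 * its TLBs if a configuration change happened while it was stopped.
 */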
static void arch_suspend_self(struct per_cpu *cpu_data)
{
	psci_suspend(cpu_data);

	if (cpu_data->flush_vcpu_caches)
		arch_cpu_tlb_flush(cpu_data);
}

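/* Dump the guest PC and usr registers of an unhandled exit. */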
static void arch_dump_exit(struct registers *regs, const char *reason)
{
	unsigned int n;
	u32 pc;

	arm_read_banked_reg(ELR_hyp, pc);
	panic_printk("Unhandled HYP %s exit at 0x%x\n", reason, pc);
	for (n = 0; n < NUM_USR_REGS; n++)
		panic_printk("r%d:%s 0x%08lx%s", n, n < 10 ? " " : "",
			     regs->usr[n], n % 4 == 3 ? "\n" : " ");
}

static void arch_dump_abt(bool is_data)
{
	unsigned long hxfar;
	u32 esr;

	arm_read_sysreg(ESR_EL2, esr);
	if (is_data)
		arm_read_sysreg(HDFAR, hxfar);
	else
		arm_read_sysreg(HIFAR, hxfar);

	panic_printk("Physical address: 0x%08lx ESR: 0x%08x\n", hxfar, esr);
}

struct registers* arch_handle_exit(struct per_cpu *cpu_data,
				   struct registers *regs)
{
	cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;

	switch (regs->exit_reason) {
	case EXIT_REASON_IRQ:
		irqchip_handle_irq(cpu_data);
		break;
	case EXIT_REASON_TRAP:
		arch_handle_trap(cpu_data, regs);
		break;

	case EXIT_REASON_UNDEF:
		arch_dump_exit(regs, "undef");
		panic_stop();
	case EXIT_REASON_DABT:
		arch_dump_exit(regs, "data abort");
		arch_dump_abt(true);
		panic_stop();
	case EXIT_REASON_PABT:
		arch_dump_exit(regs, "prefetch abort");
		arch_dump_abt(false);
		panic_stop();
	case EXIT_REASON_HVC:
		arch_dump_exit(regs, "hvc");
		panic_stop();
	case EXIT_REASON_FIQ:
		arch_dump_exit(regs, "fiq");
		panic_stop();
	default:
		arch_dump_exit(regs, "unknown");
		panic_stop();
	}

	if (cpu_data->shutdown)
		/* Won't return here. */
		arch_shutdown_self(cpu_data);

	return regs;
}

/* CPU must be stopped */
void arch_resume_cpu(unsigned int cpu_id)
{
	/*
	 * Simply get out of the spin loop by returning to handle_sgi.
	 * If the CPU is being reset, it has already left the PSCI idle loop.
	 */
	if (psci_cpu_stopped(cpu_id))
		psci_resume(cpu_id);
}

/* CPU must be stopped */
void arch_park_cpu(unsigned int cpu_id)
{
	struct per_cpu *cpu_data = per_cpu(cpu_id);

	/*
	 * Reset always follows park_cpu, so we just need to make sure that the
	 * CPU is stopped and that the cell's caches get flushed on that reset.
	 */
	if (psci_wait_cpu_stopped(cpu_id) != 0)
		printk("ERROR: CPU%d is supposed to be stopped\n", cpu_id);

	cpu_data->cell->arch.needs_flush = true;
}

/* CPU must be stopped */
void arch_reset_cpu(unsigned int cpu_id)
{
	unsigned long cpu_data = (unsigned long)per_cpu(cpu_id);

	if (psci_cpu_on(cpu_id, (unsigned long)arch_reset_self, cpu_data))
		printk("ERROR: unable to reset CPU%d (was running)\n", cpu_id);
}

void arch_suspend_cpu(unsigned int cpu_id)
{
	struct sgi sgi;

	if (psci_cpu_stopped(cpu_id))
		return;

	sgi.routing_mode = 0;
	sgi.aff1 = 0;
	sgi.aff2 = 0;
	sgi.aff3 = 0;
	sgi.targets = 1 << cpu_id;
	sgi.id = SGI_CPU_OFF;

	irqchip_send_sgi(&sgi);

	psci_wait_cpu_stopped(cpu_id);
}

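/* Handle the management SGIs exchanged between hypervisor CPUs. */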
void arch_handle_sgi(struct per_cpu *cpu_data, u32 irqn)
{
	cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;

	switch (irqn) {
	case SGI_INJECT:
		irqchip_inject_pending(cpu_data);
		break;
	case SGI_CPU_OFF:
		arch_suspend_self(cpu_data);
		break;
	default:
		printk("WARN: unknown SGI received %d\n", irqn);
	}
}

unsigned int arm_cpu_virt2phys(struct cell *cell, unsigned int virt_id)
{
	unsigned int cpu;

	for_each_cpu(cpu, cell->cpu_set) {
		if (per_cpu(cpu)->virt_id == virt_id)
			return cpu;
	}

	return -1;
}

/*
 * Handle the maintenance interrupt; all other IRQs are injected into the
 * cell. Returns true when the IRQ has been handled by the hypervisor.
 */
bool arch_handle_phys_irq(struct per_cpu *cpu_data, u32 irqn)
{
	if (irqn == MAINTENANCE_IRQ) {
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MAINTENANCE]++;

		irqchip_inject_pending(cpu_data);

		return true;
	}

	cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_VIRQ]++;

	irqchip_set_pending(cpu_data, irqn, true);

	return false;
}

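/*
 * Architectural setup of a new cell: stage-2 MMU, virtual CPU ids, irqchip
 * state and the SMP operations used to start the cell's CPUs.
 */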
int arch_cell_create(struct cell *cell)
{
	int err;
	unsigned int cpu;
	unsigned int virt_id = 0;

	err = arch_mmu_cell_init(cell);
	if (err)
		return err;

	/*
	 * Generate a virtual CPU id according to the position of each CPU in
	 * the cell's CPU set.
	 */
	for_each_cpu(cpu, cell->cpu_set) {
		per_cpu(cpu)->virt_id = virt_id;
		virt_id++;
	}
	cell->arch.last_virt_id = virt_id - 1;

	err = irqchip_cell_init(cell);
	if (err) {
		arch_mmu_cell_destroy(cell);
		return err;
	}
	irqchip_root_cell_shrink(cell);

	register_smp_ops(cell);

	return 0;
}

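/* Return the cell's CPUs to the root cell and free its architectural state. */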
void arch_cell_destroy(struct cell *cell)
{
	unsigned int cpu;
	struct per_cpu *percpu;

	for_each_cpu(cpu, cell->cpu_set) {
		percpu = per_cpu(cpu);
		/* Re-assign the physical IDs for the root cell */
		percpu->virt_id = percpu->cpu_id;
		arch_reset_cpu(cpu);
	}

	irqchip_cell_exit(cell);

	arch_mmu_cell_destroy(cell);
}

/* Note: only supports synchronous flushing as triggered by config_commit! */
void arch_flush_cell_vcpu_caches(struct cell *cell)
{
	unsigned int cpu;

	for_each_cpu(cpu, cell->cpu_set)
		if (cpu == this_cpu_id())
			arch_cpu_tlb_flush(per_cpu(cpu));
		else
			per_cpu(cpu)->flush_vcpu_caches = true;
}

void arch_config_commit(struct cell *cell_added_removed)
{
}

void __attribute__((noreturn)) arch_panic_stop(void)
{
	psci_cpu_off(this_cpu_data());
	__builtin_unreachable();
}

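/*
 * Like arch_panic_stop(), but clear panic_in_progress first when we are the
 * CPU that triggered the panic.
 */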
void arch_panic_park(void)
{
	/* Won't return to panic_park */
	if (phys_processor_id() == panic_cpu)
		panic_in_progress = 0;

	psci_cpu_off(this_cpu_data());
	__builtin_unreachable();
}

/*
 * This handler is only used for cells, not for the root. The core already
 * issued a cpu_suspend. arch_reset_cpu will cause arch_reset_self to be
 * called on that CPU, which will in turn call arch_shutdown_self.
 */
void arch_shutdown_cpu(unsigned int cpu_id)
{
	struct per_cpu *cpu_data = per_cpu(cpu_id);

	cpu_data->virt_id = cpu_id;
	cpu_data->shutdown = true;

	if (psci_wait_cpu_stopped(cpu_id))
		printk("FATAL: unable to stop CPU%d\n", cpu_id);

	arch_reset_cpu(cpu_id);
}

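/*
 * Final shutdown: detach the remaining cells from the irqchip and mark every
 * root-cell CPU for shutdown, so that its next exit tears down the hypervisor
 * on that CPU.
 */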
void arch_shutdown(void)
{
	unsigned int cpu;
	struct cell *cell = root_cell.next;

	/* Re-route each SPI to CPU0 */
	for (; cell != NULL; cell = cell->next)
		irqchip_cell_exit(cell);

	/*
	 * Let the exit handler call reset_self to let the core finish its
	 * shutdown function and release its lock.
	 */
	for_each_cpu(cpu, root_cell.cpu_set)
		per_cpu(cpu)->shutdown = true;
}