push {r0 - r12}
ldr r1, =__page_pool
- mov r2, #1
- lsl r2, #PERCPU_SIZE_SHIFT
+ mov r4, #1
+ lsl r4, #PERCPU_SIZE_SHIFT
/*
* percpu data = pool + cpuid * shift
* TODO: handle aff1 and aff2
*/
- mla r1, r2, r0, r1
- add r2, r1, #PERCPU_LINUX_SP
+ mla r1, r4, r0, r1
+ add r4, r1, #PERCPU_LINUX_SP
- /* Save SP, LR, CPSR */
- str sp, [r2], #4
- str lr, [r2], #4
+ /*
+ * Save SP, LR, CPSR
+ * r4 is used so that they can be easily retrieved on failure.
+ */
+ str sp, [r4], #4
+ str lr, [r4], #4
mrs r3, cpsr
- str r3, [r2]
+ str r3, [r4]
mov sp, r1
add sp, #PERCPU_STACK_END
+ /*
+ * Keep some space for a struct registers, in case setup fails and needs
+ * to return to the driver through the arch_shutdown_self path.
+ */
+ sub sp, #((NUM_USR_REGS + 1) * 4)
/* Call entry(cpuid, struct per_cpu*) */
- b entry
+ bl entry
+
+ /*
+ * entry only returns here when there is an error before setting up EL2
+ */
+ ldr r3, [r4], #-4
+ msr spsr, r3
+ ldr lr, [r4], #-4
+ ldr sp, [r4]
+
+ /* Keep the return value in r0 */
+ pop {r1}
+ pop {r1 - r12}
+ subs pc, lr, #0
.globl bootstrap_vectors
.align 5
#ifndef __ASSEMBLY__
+#include <jailhouse/string.h>
+
static inline void __attribute__((always_inline))
-cpu_return_el1(struct per_cpu *cpu_data)
+cpu_return_el1(struct per_cpu *cpu_data, bool panic)
{
- /* Return value */
- cpu_data->linux_reg[0] = 0;
-
- asm volatile(
- /* Reset the hypervisor stack */
- "mov sp, %4\n"
+ /*
+ * Return value
+ * FIXME: there is no way, currently, to communicate the precise error
+ * number from the core. An `EDISASTER' would be appropriate here.
+ */
+ cpu_data->linux_reg[0] = (panic ? -EIO : 0);
+ asm volatile (
"msr sp_svc, %0\n"
"msr elr_hyp, %1\n"
"msr spsr_hyp, %2\n"
+ :
+ : "r" (cpu_data->linux_sp + (NUM_ENTRY_REGS * sizeof(unsigned long))),
+ "r" (cpu_data->linux_ret),
+ "r" (cpu_data->linux_flags));
+
+ if (panic) {
+ /* A panicking return needs to shutdown EL2 before the ERET. */
+ struct registers *ctx = guest_regs(cpu_data);
+ memcpy(&ctx->usr, &cpu_data->linux_reg,
+ NUM_ENTRY_REGS * sizeof(unsigned long));
+ return;
+ }
+
+ asm volatile(
+ /* Reset the hypervisor stack */
+ "mov sp, %0\n"
/*
* We don't care about clobbering the other registers from now on. Must
* be in sync with arch_entry.
*/
- "ldm %3, {r0 - r12}\n"
+ "ldm %1, {r0 - r12}\n"
/* After this, the kernel won't be able to access the hypervisor code */
"eret\n"
:
- : "r" (cpu_data->linux_sp + (NUM_ENTRY_REGS * sizeof(unsigned long))),
- "r" (cpu_data->linux_ret),
- "r" (cpu_data->linux_flags),
- "r" (cpu_data->linux_reg),
- "r" (cpu_data->stack + PERCPU_STACK_END)
- :);
+ : "r" (cpu_data->stack + PERCPU_STACK_END),
+ "r" (cpu_data->linux_reg));
}
int switch_exception_level(struct per_cpu *cpu_data);
void irqchip_cpu_shutdown(struct per_cpu *cpu_data)
{
+ /*
+ * The GIC backend must take care of only resetting the hyp interface if
+ * it has been initialised: this function may be executed during the
+ * setup phase.
+ */
if (irqchip.cpu_reset)
irqchip.cpu_reset(cpu_data, true);
}
void arch_cpu_activate_vmm(struct per_cpu *cpu_data)
{
/* Return to the kernel */
- cpu_return_el1(cpu_data);
+ cpu_return_el1(cpu_data, false);
while (1);
}
void arch_cpu_restore(struct per_cpu *cpu_data)
{
+ /*
+ * If we haven't reached switch_exception_level yet, there is nothing to
+ * clean up.
+ */
+ if (!is_el2())
+ return;
+
+ /*
+ * Otherwise, attempt to disable the MMU and return to EL1 using the
+ * arch_shutdown path. cpu_return will fill the banked registers and the
+ * guest regs structure (stored at the beginning of the stack) to
+ * prepare the ERET.
+ */
+ cpu_return_el1(cpu_data, true);
+
+ arch_shutdown_self(cpu_data);
}