#include <jailhouse/string.h>
static inline void __attribute__((always_inline))
-cpu_return_el1(struct per_cpu *cpu_data, bool panic)
+cpu_prepare_return_el1(struct per_cpu *cpu_data, int return_code)
{
- /*
- * Return value
- * FIXME: there is no way, currently, to communicate the precise error
- * number from the core. A `EDISASTER' would be appropriate here.
- */
- cpu_data->linux_reg[0] = (panic ? -EIO : 0);
+ cpu_data->linux_reg[0] = return_code;
asm volatile (
"msr sp_svc, %0\n"
: "r" (cpu_data->linux_sp + (NUM_ENTRY_REGS * sizeof(unsigned long))),
"r" (cpu_data->linux_ret),
"r" (cpu_data->linux_flags));
-
- if (panic) {
- /* A panicking return needs to shutdown EL2 before the ERET. */
- struct registers *ctx = guest_regs(cpu_data);
- memcpy(&ctx->usr, &cpu_data->linux_reg,
- NUM_ENTRY_REGS * sizeof(unsigned long));
- return;
- }
-
- asm volatile(
- /* Reset the hypervisor stack */
- "mov sp, %0\n"
- /*
- * We don't care about clobbering the other registers from now on. Must
- * be in sync with arch_entry.
- */
- "ldm %1, {r0 - r12}\n"
- /* After this, the kernel won't be able to access the hypervisor code */
- "eret\n"
- :
- : "r" (cpu_data->stack + PERCPU_STACK_END),
- "r" (cpu_data->linux_reg));
}
int switch_exception_level(struct per_cpu *cpu_data);
/*
 * Finish hypervisor activation on this CPU: restore the Linux context
 * saved by arch_entry and return to EL1 with a success code (0) in r0.
 * This function never returns to its caller.
 */
void arch_cpu_activate_vmm(struct per_cpu *cpu_data)
{
	/* Return to the kernel */
	cpu_prepare_return_el1(cpu_data, 0);

	asm volatile(
		/* Reset the hypervisor stack */
		"mov	sp, %0\n"
		/*
		 * We don't care about clobbering the other registers from now
		 * on. Must be in sync with arch_entry.
		 */
		"ldm	%1, {r0 - r12}\n"
		/*
		 * After this, the kernel won't be able to access the
		 * hypervisor code.
		 */
		"eret\n"
		:
		: "r" (cpu_data->stack + PERCPU_STACK_END),
		  "r" (cpu_data->linux_reg));

	/* The ERET above leaves EL2 for good; control cannot reach here. */
	__builtin_unreachable();
}
void arch_shutdown_self(struct per_cpu *cpu_data)
void arch_cpu_restore(struct per_cpu *cpu_data, int return_code)
{
+ struct registers *ctx = guest_regs(cpu_data);
+
/*
* If we haven't reached switch_exception_level yet, there is nothing to
* clean up.
/*
* Otherwise, attempt do disable the MMU and return to EL1 using the
* arch_shutdown path. cpu_return will fill the banked registers and the
- * guest regs structure (stored at the begginning of the stack) to
+ * guest regs structure (stored at the beginning of the stack) to
* prepare the ERET.
*/
- cpu_return_el1(cpu_data, true);
+ cpu_prepare_return_el1(cpu_data, return_code);
+
+ memcpy(&ctx->usr, &cpu_data->linux_reg,
+ NUM_ENTRY_REGS * sizeof(unsigned long));
arch_shutdown_self(cpu_data);
}