#include <asm/sysregs.h>
#include <jailhouse/control.h>
#include <jailhouse/paging.h>
+#include <jailhouse/processor.h>
#include <jailhouse/string.h>
unsigned int cache_line_size;
cpu_data->psci_mbox.entry = 0;
cpu_data->virt_id = cpu_data->cpu_id;
+ cpu_data->mpidr = phys_processor_id();
/*
* Copy the registers to restore from the linux stack here, because we
int err;
/* Setup the SPI bitmap */
- irqchip_cell_init(&root_cell);
+ err = irqchip_cell_init(&root_cell);
+ if (err)
+ return err;
/* Platform-specific SMP operations */
register_smp_ops(&root_cell);
return map_root_memory_regions();
}
-void arch_cpu_activate_vmm(struct per_cpu *cpu_data)
+void __attribute__((noreturn)) arch_cpu_activate_vmm(struct per_cpu *cpu_data)
{
/* Return to the kernel */
- cpu_return_el1(cpu_data, false);
-
- while (1);
+ cpu_prepare_return_el1(cpu_data, 0);
+
+ asm volatile(
+ /* Reset the hypervisor stack */
+ "mov sp, %0\n\t"
+ /*
+ * We don't care about clobbering the other registers from now
+ * on. Must be in sync with arch_entry.
+ */
+ "ldm %1, {r0 - r12}\n\t"
+ /*
+ * After this, the kernel won't be able to access the hypervisor
+ * code.
+ */
+ "eret\n\t"
+ :
+ : "r" (cpu_data->stack + sizeof(cpu_data->stack)),
+ "r" (cpu_data->linux_reg));
+
+ __builtin_unreachable();
}
void arch_shutdown_self(struct per_cpu *cpu_data)
/*
 * Tear down this CPU's hypervisor state and hand control back to EL1 with
 * the given return code.
 *
 * NOTE(review): this block is unresolved patch residue — the '+'/'-'
 * markers below mix old and new code, and the early-exit path after the
 * first comment appears elided from this view. Only comments were changed
 * here; resolve the markers against the original patch before building.
 */
void arch_cpu_restore(struct per_cpu *cpu_data, int return_code)
{
+ struct registers *ctx = guest_regs(cpu_data);
+
/*
 * If we haven't reached switch_exception_level yet, there is nothing to
 * clean up.
/*
 * Otherwise, attempt to disable the MMU and return to EL1 using the
 * arch_shutdown path. cpu_return will fill the banked registers and the
- * guest regs structure (stored at the begginning of the stack) to
+ * guest regs structure (stored at the beginning of the stack) to
 * prepare the ERET.
 */
- cpu_return_el1(cpu_data, true);
+ cpu_prepare_return_el1(cpu_data, return_code);
+
+ /* Restore Linux's entry registers into the guest context for the ERET. */
+ memcpy(&ctx->usr, &cpu_data->linux_reg,
+ NUM_ENTRY_REGS * sizeof(unsigned long));
arch_shutdown_self(cpu_data);
}