#include <asm/control.h>
#include <asm/irqchip.h>
#include <asm/percpu.h>
-#include <asm/platform.h>
#include <asm/setup.h>
#include <asm/sysregs.h>
#include <jailhouse/control.h>
-#include <jailhouse/entry.h>
#include <jailhouse/paging.h>
+#include <jailhouse/processor.h>
#include <jailhouse/string.h>
+/*
+ * Smallest data cache line size in bytes, derived from CTR in
+ * arch_check_features().
+ */
+unsigned int cache_line_size;
+
+/*
+ * Check that the CPU provides the virtualization extensions and record the
+ * minimal cache line size reported by CTR.
+ */
 static int arch_check_features(void)
 {
 	u32 pfr1;
+	u32 ctr;
+
 	arm_read_sysreg(ID_PFR1_EL1, pfr1);
 	if (!PFR1_VIRT(pfr1))
 		return -ENODEV;
+	arm_read_sysreg(CTR_EL0, ctr);
+	/*
+	 * Extract the minimal cache line size: CTR.DminLine (bits [19:16])
+	 * holds log2 of the number of 4-byte words in the smallest data cache
+	 * line, so the size in bytes is 4 << DminLine.
+	 */
+	cache_line_size = 4 << (ctr >> 16 & 0xf);
+
 	return 0;
 }
if ((err = arch_check_features()) != 0)
return err;
- err = arch_mmu_cell_init(&root_cell);
- if (err)
- return err;
-
- err = arch_map_device(UART_BASE_PHYS, UART_BASE_VIRT, PAGE_SIZE);
-
- return err;
+ return arch_mmu_cell_init(&root_cell);
}
int arch_cpu_init(struct per_cpu *cpu_data)
{
int err = 0;
- unsigned long hcr = HCR_VM_BIT | HCR_IMO_BIT | HCR_FMO_BIT;
+ unsigned long hcr = HCR_VM_BIT | HCR_IMO_BIT | HCR_FMO_BIT
+ | HCR_TSC_BIT | HCR_TAC_BIT;
cpu_data->psci_mbox.entry = 0;
+ cpu_data->virt_id = cpu_data->cpu_id;
+ cpu_data->mpidr = phys_processor_id();
/*
* Copy the registers to restore from the linux stack here, because we
+/*
+ * Late root-cell initialization: set up the SPI bitmap and the
+ * platform-specific SMP operations before mapping the root memory regions.
+ */
 int arch_init_late(void)
 {
+	int err;
+
+	/* Setup the SPI bitmap */
+	err = irqchip_cell_init(&root_cell);
+	if (err)
+		return err;
+
+	/* Platform-specific SMP operations */
+	register_smp_ops(&root_cell);
+
+	err = root_cell.arch.smp->init(&root_cell);
+	if (err)
+		return err;
+
 	return map_root_memory_regions();
 }
+/*
+ * Complete hypervisor activation on this CPU: reset the hypervisor stack,
+ * restore the saved Linux entry registers and eret back to the EL1 kernel.
+ * Does not return (hence the noreturn attribute and __builtin_unreachable).
+ */
-void arch_cpu_activate_vmm(struct per_cpu *cpu_data)
+void __attribute__((noreturn)) arch_cpu_activate_vmm(struct per_cpu *cpu_data)
 {
 	/* Return to the kernel */
-	cpu_return_el1(cpu_data);
-
-	while (1);
+	cpu_prepare_return_el1(cpu_data, 0);
+
+	asm volatile(
+		/* Reset the hypervisor stack */
+		"mov sp, %0\n\t"
+		/*
+		 * We don't care about clobbering the other registers from now
+		 * on. Must be in sync with arch_entry.
+		 */
+		"ldm %1, {r0 - r12}\n\t"
+		/*
+		 * After this, the kernel won't be able to access the hypervisor
+		 * code.
+		 */
+		"eret\n\t"
+		:
+		: "r" (cpu_data->stack + sizeof(cpu_data->stack)),
+		  "r" (cpu_data->linux_reg));
+
+	__builtin_unreachable();
 }
-void arch_cpu_restore(struct per_cpu *cpu_data)
+/*
+ * Tear down the hypervisor on this CPU: shut down the per-CPU irqchip state,
+ * clear the EL2 control registers, flush the stage-2 TLBs and drop back to
+ * EL1 via arch_shutdown_mmu().
+ */
+void arch_shutdown_self(struct per_cpu *cpu_data)
 {
+	irqchip_cpu_shutdown(cpu_data);
+
+	/* Free the guest */
+	arm_write_sysreg(HCR, 0);
+	arm_write_sysreg(TPIDR_EL2, 0);
+	arm_write_sysreg(VTCR_EL2, 0);
+
+	/* Remove stage-2 mappings */
+	arch_cpu_tlb_flush(cpu_data);
+
+	/* TLB flush needs the cell's VMID, so only clear VTTBR afterwards */
+	isb();
+	arm_write_sysreg(VTTBR_EL2, 0);
+
+	/* Return to EL1 */
+	arch_shutdown_mmu(cpu_data);
 }
-// catch missing symbols
-void arch_suspend_cpu(unsigned int cpu_id) {}
-void arch_resume_cpu(unsigned int cpu_id) {}
-void arch_reset_cpu(unsigned int cpu_id) {}
-void arch_park_cpu(unsigned int cpu_id) {}
-void arch_shutdown_cpu(unsigned int cpu_id) {}
-int arch_cell_create(struct cell *new_cell)
-{ return -ENOSYS; }
-void arch_flush_cell_vcpu_caches(struct cell *cell) {}
-void arch_cell_destroy(struct cell *new_cell) {}
-void arch_config_commit(struct cell *cell_added_removed) {}
-void arch_shutdown(void) {}
-void arch_panic_stop(void) {__builtin_unreachable();}
-void arch_panic_park(void) {}
+/*
+ * Restore the Linux context on this CPU, handing return_code back to the
+ * kernel. NOTE(review): presumably used on the error/rollback path — confirm
+ * against callers.
+ */
+void arch_cpu_restore(struct per_cpu *cpu_data, int return_code)
+{
+	struct registers *ctx = guest_regs(cpu_data);
+
+	/*
+	 * If we haven't reached switch_exception_level yet, there is nothing to
+	 * clean up.
+	 */
+	if (!is_el2())
+		return;
+
+	/*
+	 * Otherwise, attempt to disable the MMU and return to EL1 using the
+	 * arch_shutdown path. cpu_prepare_return_el1 will fill the banked
+	 * registers and the guest regs structure (stored at the beginning of
+	 * the stack) to prepare the ERET.
+	 */
+	cpu_prepare_return_el1(cpu_data, return_code);
+
+	memcpy(&ctx->usr, &cpu_data->linux_reg,
+	       NUM_ENTRY_REGS * sizeof(unsigned long));
+
+	arch_shutdown_self(cpu_data);
+}