Merge remote-tracking branch 'kiszka/master'
[jailhouse.git] / hypervisor / arch / arm / setup.c
index 24ea1f8cb08417fcdc13de3eaeb343428a145702..db5fef187a079655e203a647a1820d6ba53e6933 100644 (file)
 #include <asm/control.h>
 #include <asm/irqchip.h>
 #include <asm/percpu.h>
-#include <asm/platform.h>
 #include <asm/setup.h>
 #include <asm/sysregs.h>
 #include <jailhouse/control.h>
-#include <jailhouse/entry.h>
 #include <jailhouse/paging.h>
+#include <jailhouse/processor.h>
 #include <jailhouse/string.h>
 
 unsigned int cache_line_size;
@@ -47,22 +46,18 @@ int arch_init_early(void)
        if ((err = arch_check_features()) != 0)
                return err;
 
-       err = arch_mmu_cell_init(&root_cell);
-       if (err)
-               return err;
-
-       err = arch_map_device(UART_BASE_PHYS, UART_BASE_VIRT, PAGE_SIZE);
-
-       return err;
+       return arch_mmu_cell_init(&root_cell);
 }
 
 int arch_cpu_init(struct per_cpu *cpu_data)
 {
        int err = 0;
-       unsigned long hcr = HCR_VM_BIT | HCR_IMO_BIT | HCR_FMO_BIT;
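+       /* TSC/TAC: additionally trap guest SMC instructions and ACTLR accesses */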
+       unsigned long hcr = HCR_VM_BIT | HCR_IMO_BIT | HCR_FMO_BIT
+                         | HCR_TSC_BIT | HCR_TAC_BIT;
 
        cpu_data->psci_mbox.entry = 0;
        cpu_data->virt_id = cpu_data->cpu_id;
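+       /* Cache the physical MPIDR, used to look CPUs up later (e.g. for PSCI) */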
+       cpu_data->mpidr = phys_processor_id();
 
        /*
         * Copy the registers to restore from the linux stack here, because we
@@ -85,10 +80,6 @@ int arch_cpu_init(struct per_cpu *cpu_data)
        /* Set up guest traps */
        arm_write_sysreg(HCR, hcr);
 
-       err = arch_spin_init();
-       if (err)
-               return err;
-
        err = arch_mmu_cpu_cell_init(cpu_data);
        if (err)
                return err;
@@ -104,21 +95,89 @@ int arch_cpu_init(struct per_cpu *cpu_data)
 
 int arch_init_late(void)
 {
+       int err;
+
+       /* Set up the SPI bitmap */
+       err = irqchip_cell_init(&root_cell);
+       if (err)
+               return err;
+
+       /* Platform-specific SMP operations */
+       register_smp_ops(&root_cell);
+
+       err = root_cell.arch.smp->init(&root_cell);
+       if (err)
+               return err;
+
        return map_root_memory_regions();
 }
 
-void arch_cpu_activate_vmm(struct per_cpu *cpu_data)
+void __attribute__((noreturn)) arch_cpu_activate_vmm(struct per_cpu *cpu_data)
 {
        /* Return to the kernel */
-       cpu_return_el1(cpu_data);
-
-       while (1);
+       cpu_prepare_return_el1(cpu_data, 0);
+
+       asm volatile(
+               /* Reset the hypervisor stack */
+               "mov    sp, %0\n\t"
+               /*
+                * We don't care about clobbering the other registers from now
+                * on. Must be in sync with arch_entry.
+                */
+               "ldm    %1, {r0 - r12}\n\t"
+               /*
+                * After this, the kernel won't be able to access the hypervisor
+                * code.
+                */
+               "eret\n\t"
+               :
+               : "r" (cpu_data->stack + sizeof(cpu_data->stack)),
+                 "r" (cpu_data->linux_reg));
+
+       __builtin_unreachable();
 }
 
-void arch_cpu_restore(struct per_cpu *cpu_data)
+void arch_shutdown_self(struct per_cpu *cpu_data)
 {
+       irqchip_cpu_shutdown(cpu_data);
+
+       /* Detach from the guest: clear traps, per-CPU pointer, stage-2 control */
+       arm_write_sysreg(HCR, 0);
+       arm_write_sysreg(TPIDR_EL2, 0);
+       arm_write_sysreg(VTCR_EL2, 0);
+
+       /* Remove stage-2 mappings */
+       arch_cpu_tlb_flush(cpu_data);
+
+       /* The TLB flush needs the cell's VMID, so clear VTTBR only afterwards */
+       isb();
+       arm_write_sysreg(VTTBR_EL2, 0);
+
+       /* Return to EL1 */
+       arch_shutdown_mmu(cpu_data);
 }
 
-// catch missing symbols
-void arch_shutdown_cpu(unsigned int cpu_id) {}
-void arch_shutdown(void) {}
+void arch_cpu_restore(struct per_cpu *cpu_data, int return_code)
+{
+       struct registers *ctx = guest_regs(cpu_data);
+
+       /*
+        * If we haven't reached switch_exception_level yet, there is nothing to
+        * clean up.
+        */
+       if (!is_el2())
+               return;
+
+       /*
+        * Otherwise, attempt to disable the MMU and return to EL1 using the
+        * arch_shutdown path. cpu_prepare_return_el1 fills the banked
+        * registers and the guest regs structure (stored at the beginning of
+        * the stack) to prepare the ERET.
+        */
+       cpu_prepare_return_el1(cpu_data, return_code);
+
+       memcpy(&ctx->usr, &cpu_data->linux_reg,
+              NUM_ENTRY_REGS * sizeof(unsigned long));
+
+       arch_shutdown_self(cpu_data);
+}
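
Note (not part of the patch): with HCR_TSC_BIT set, a guest SMC instruction no
longer reaches the secure monitor directly; it traps to EL2 with HSR.EC == 0x13,
so the hypervisor can filter or emulate it, e.g. for PSCI. A minimal sketch of
such a dispatcher; every name below is a hypothetical stand-in, not Jailhouse's
actual trap code:

	#define HSR_EC_SHIFT		26
	#define HSR_EC_SMC		0x13	/* trapped SMC instruction */

	#define PSCI_NOT_SUPPORTED	(-1L)

	/* Hypothetical stand-in for the hypervisor's PSCI emulation */
	static long emulate_psci(unsigned long fn, unsigned long arg0,
				 unsigned long arg1, unsigned long arg2)
	{
		(void)fn; (void)arg0; (void)arg1; (void)arg2;
		return PSCI_NOT_SUPPORTED;
	}

	/* Hypothetical dispatcher: regs[0..3] mirror the guest's r0-r3 */
	static int handle_trap(unsigned long hsr, unsigned long *regs)
	{
		switch (hsr >> HSR_EC_SHIFT) {
		case HSR_EC_SMC:
			regs[0] = emulate_psci(regs[0], regs[1],
					       regs[2], regs[3]);
			return 0;	/* handled: skip the trapped SMC */
		default:
			return -1;	/* not ours */
		}
	}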
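
Note (not part of the patch): cpu_data->mpidr caches phys_processor_id()
because guests identify CPUs by MPIDR (PSCI CPU_ON does), and the hypervisor
must map that value back to one of its per-CPU structures. A self-contained
sketch, assuming a flat per-CPU array; cpus and cpu_by_mpidr are hypothetical
names:

	#include <stddef.h>

	#define MPIDR_AFF_MASK	0x00ffffffUL	/* Aff2:Aff1:Aff0 */

	struct per_cpu {
		unsigned int cpu_id;
		unsigned int virt_id;
		unsigned long mpidr;	/* cached from phys_processor_id() */
	};

	static struct per_cpu cpus[4];	/* one entry per physical CPU */

	/* Find the per-CPU structure matching an MPIDR's affinity fields */
	static struct per_cpu *cpu_by_mpidr(unsigned long mpidr)
	{
		unsigned int i;

		for (i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
			if ((cpus[i].mpidr & MPIDR_AFF_MASK) ==
			    (mpidr & MPIDR_AFF_MASK))
				return &cpus[i];
		return NULL;
	}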