]> rtime.felk.cvut.cz Git - jailhouse.git/blobdiff - hypervisor/arch/arm/setup.c
Merge remote-tracking branch 'kiszka/master'
[jailhouse.git] / hypervisor / arch / arm / setup.c
index de49990ad75925364821d89ec5460761f013f2dd..db5fef187a079655e203a647a1820d6ba53e6933 100644 (file)
  * the COPYING file in the top-level directory.
  */
 
-#include <jailhouse/entry.h>
+#include <asm/control.h>
+#include <asm/irqchip.h>
+#include <asm/percpu.h>
+#include <asm/setup.h>
+#include <asm/sysregs.h>
+#include <jailhouse/control.h>
+#include <jailhouse/paging.h>
+#include <jailhouse/processor.h>
+#include <jailhouse/string.h>
+
+unsigned int cache_line_size;
+
+static int arch_check_features(void)
+{
+       u32 pfr1;       /* ID_PFR1: CPU feature register read below */
+       u32 ctr;        /* CTR: cache type register read below */
+
+       arm_read_sysreg(ID_PFR1_EL1, pfr1);
+
+       /* The virtualization extensions are mandatory to run the hypervisor */
+       if (!PFR1_VIRT(pfr1))
+               return -ENODEV;
+
+       arm_read_sysreg(CTR_EL0, ctr);
+       /* Extract the minimal cache line size from CTR bits [19:16]
+        * (DminLine, log2 of the line size in words, hence the 4-byte base) */
+       cache_line_size = 4 << (ctr >> 16 & 0xf);
+
+       return 0;
+}
 
 int arch_init_early(void)
 {
-       return -ENOSYS;
+       int err = 0;
+
+       /* Refuse to start on CPUs lacking the required features
+        * (see arch_check_features above) */
+       if ((err = arch_check_features()) != 0)
+               return err;
+
+       return arch_mmu_cell_init(&root_cell);
 }
 
 int arch_cpu_init(struct per_cpu *cpu_data)
 {
-       return -ENOSYS;
+       int err = 0;
+       /*
+        * HYP configuration: enable stage-2 translation (VM), route IRQs and
+        * FIQs to HYP mode (IMO/FMO), trap SMC instructions (TSC) and ACTLR
+        * accesses (TAC).
+        */
+       unsigned long hcr = HCR_VM_BIT | HCR_IMO_BIT | HCR_FMO_BIT
+                         | HCR_TSC_BIT | HCR_TAC_BIT;
+
+       cpu_data->psci_mbox.entry = 0;  /* clear any stale PSCI mailbox entry */
+       cpu_data->virt_id = cpu_data->cpu_id;
+       cpu_data->mpidr = phys_processor_id();
+
+       /*
+        * Copy the registers to restore from the linux stack here, because we
+        * won't be able to access it later
+        */
+       memcpy(&cpu_data->linux_reg, (void *)cpu_data->linux_sp, NUM_ENTRY_REGS
+                       * sizeof(unsigned long));
+
+       err = switch_exception_level(cpu_data);
+       if (err)
+               return err;
+
+       /*
+        * Save pointer in the thread local storage
+        * Must be done early in order to handle aborts and errors in the setup
+        * code.
+        */
+       arm_write_sysreg(TPIDR_EL2, cpu_data);
+
+       /* Setup guest traps (see hcr bit description above) */
+       arm_write_sysreg(HCR, hcr);
+
+       err = arch_mmu_cpu_cell_init(cpu_data);
+       if (err)
+               return err;
+
+       err = irqchip_init();
+       if (err)
+               return err;
+
+       err = irqchip_cpu_init(cpu_data);
+
+       return err;
 }
 
 int arch_init_late(void)
 {
-       return -ENOSYS;
+       int err;
+
+       /* Setup the SPI bitmap */
+       err = irqchip_cell_init(&root_cell);
+       if (err)
+               return err;
+
+       /* Register the platform-specific SMP operations for the root cell */
+       register_smp_ops(&root_cell);
+
+       err = root_cell.arch.smp->init(&root_cell);
+       if (err)
+               return err;
+
+       /* Finally, map all memory regions of the root cell */
+       return map_root_memory_regions();
 }
 
-void arch_cpu_activate_vmm(struct per_cpu *cpu_data)
+void __attribute__((noreturn)) arch_cpu_activate_vmm(struct per_cpu *cpu_data)
 {
-       while (1);
+       /* Prepare the EL1 return context for re-entering the kernel (code 0) */
+       cpu_prepare_return_el1(cpu_data, 0);
+
+       asm volatile(
+               /* Reset the hypervisor stack to its top */
+               "mov    sp, %0\n\t"
+               /*
+                * We don't care about clobbering the other registers from now
+                * on; restore the r0-r12 saved at entry. Must be in sync with
+                * arch_entry.
+                */
+               "ldm    %1, {r0 - r12}\n\t"
+               /*
+                * After this, the kernel won't be able to access the hypervisor
+                * code.
+                */
+               "eret\n\t"
+               :
+               : "r" (cpu_data->stack + sizeof(cpu_data->stack)),
+                 "r" (cpu_data->linux_reg));
+
+       __builtin_unreachable();
 }
 
-void arch_cpu_restore(struct per_cpu *cpu_data)
+void arch_shutdown_self(struct per_cpu *cpu_data)
 {
+       irqchip_cpu_shutdown(cpu_data);
+
+       /* Free the guest: disable traps and clear the per-cpu/VTCR state */
+       arm_write_sysreg(HCR, 0);
+       arm_write_sysreg(TPIDR_EL2, 0);
+       arm_write_sysreg(VTCR_EL2, 0);
+
+       /* Remove stage-2 mappings */
+       arch_cpu_tlb_flush(cpu_data);
+
+       /* The TLB flush above needs the cell's VMID, so VTTBR is only
+        * cleared afterwards */
+       isb();
+       arm_write_sysreg(VTTBR_EL2, 0);
+
+       /* Return to EL1 */
+       arch_shutdown_mmu(cpu_data);
 }
 
-// catch missing symbols
-#include <jailhouse/printk.h>
-#include <jailhouse/processor.h>
-#include <jailhouse/control.h>
-#include <jailhouse/string.h>
-#include <jailhouse/paging.h>
-void arch_dbg_write_init(void) {}
-int phys_processor_id(void) { return 0; }
-void arch_suspend_cpu(unsigned int cpu_id) {}
-void arch_resume_cpu(unsigned int cpu_id) {}
-void arch_reset_cpu(unsigned int cpu_id) {}
-void arch_park_cpu(unsigned int cpu_id) {}
-void arch_shutdown_cpu(unsigned int cpu_id) {}
-int arch_cell_create(struct cell *new_cell)
-{ return -ENOSYS; }
-int arch_map_memory_region(struct cell *cell,
-                          const struct jailhouse_memory *mem)
-{ return -ENOSYS; }
-int arch_unmap_memory_region(struct cell *cell,
-                            const struct jailhouse_memory *mem)
-{ return -ENOSYS; }
-void arch_flush_cell_vcpu_caches(struct cell *cell) {}
-void arch_cell_destroy(struct cell *new_cell) {}
-void arch_config_commit(struct cell *cell_added_removed) {}
-void *memcpy(void *dest, const void *src, unsigned long n) { return NULL; }
-void arch_dbg_write(const char *msg) {}
-void arch_shutdown(void) {}
-unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
-                                    unsigned long gphys, unsigned long flags)
-{ return INVALID_PHYS_ADDR; }
-void arch_paging_init(void) { }
-
-const struct paging arm_paging[1];
-
-void arch_panic_stop(void) {__builtin_unreachable();}
-void arch_panic_park(void) {}
+void arch_cpu_restore(struct per_cpu *cpu_data, int return_code)
+{
+       struct registers *ctx = guest_regs(cpu_data);
+
+       /*
+        * If we haven't reached switch_exception_level yet, there is nothing to
+        * clean up.
+        */
+       if (!is_el2())
+               return;
+
+       /*
+        * Otherwise, attempt to disable the MMU and return to EL1 using the
+        * arch_shutdown path. cpu_prepare_return_el1 will fill the banked
+        * registers and the guest regs structure (stored at the beginning of
+        * the stack) to prepare the ERET.
+        */
+       cpu_prepare_return_el1(cpu_data, return_code);
+
+       memcpy(&ctx->usr, &cpu_data->linux_reg,
+              NUM_ENTRY_REGS * sizeof(unsigned long));
+
+       arch_shutdown_self(cpu_data);
+}