arm: implement hypervisor shutdown
author    Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
          Mon, 21 Jul 2014 09:13:12 +0000 (10:13 +0100)
committer Jan Kiszka <jan.kiszka@siemens.com>
          Fri, 19 Dec 2014 10:04:08 +0000 (11:04 +0100)
When an HV_DISABLE hypercall is issued on all root CPUs by the driver,
the core `shutdown' function executes the following operations:
- suspend all non-root cells (all their CPUs are taken to hyp idle mode),
- call arch_shutdown_cpu for each of those CPUs,
- call arch_shutdown.
Once the master CPU (the first to take the shutdown lock) has done this,
the other root CPUs don't perform any further operation.
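As a rough sketch (illustrative only, not part of this patch; the real
logic lives in the core's hypervisor/control.c and its suspend/shutdown
machinery, and the helper names below are assumptions):

  /* Hedged sketch of the core shutdown flow described above. */
  static void core_shutdown_sketch(struct per_cpu *cpu_data)
  {
          static bool master_done;        /* covered by the shutdown lock */
          struct cell *cell;
          unsigned int cpu;

          if (master_done)
                  return;                 /* later root CPUs do nothing */

          for (cell = root_cell.next; cell; cell = cell->next) {
                  cell_suspend(cell, cpu_data);   /* CPUs -> hyp idle */
                  for_each_cpu(cpu, cell->cpu_set)
                          arch_shutdown_cpu(cpu);
          }
          arch_shutdown();
          master_done = true;
  }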

This patch lets arch_shutdown and arch_shutdown_cpu set a boolean that
each CPU checks right before returning to EL1: on the cells' CPUs,
arch_shutdown_cpu triggers a return to arch_reset_self, which cleans up
EL1 and EL2. On the root CPUs, the exit handler checks this boolean and
calls the shutdown function.

Once inside arch_shutdown_self, the principle is the same as for the
hypervisor initialisation:
- create identity mappings of the trampoline page and the stack,
- jump to the physical address of the shutdown function,
- disable the MMU,
- reset the vectors,
- return to EL1.

This patch does not yet handle hosts that use PSCI: secondary CPUs will
need to issue a final SMC in order to park themselves at EL3, since the
hypervisor will no longer be there to emulate the wakeup call.
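
For illustration only, such a parking call could look like the snippet
below on an AArch32 host with PSCI 0.2 firmware (0x84000002 is the
standard CPU_OFF function ID; this sketch is an assumption and not part
of this patch):

  /* Hypothetical: have a secondary CPU park itself in EL3 firmware. */
  static inline void park_cpu_at_el3(void)
  {
          register unsigned long r0 asm("r0") = 0x84000002; /* CPU_OFF */

          /*
           * Trap directly to the EL3 monitor; no hypervisor is left to
           * intercept the SMC. Does not return on success.
           */
          asm volatile ("smc #0" : "+r" (r0) : : "memory");
  }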

Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
[Jan: moved arch_shutdown_cpu & arch_shutdown to control.c]
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
hypervisor/arch/arm/control.c
hypervisor/arch/arm/include/asm/control.h
hypervisor/arch/arm/include/asm/percpu.h
hypervisor/arch/arm/mmu_hyp.c
hypervisor/arch/arm/setup.c

diff --git a/hypervisor/arch/arm/control.c b/hypervisor/arch/arm/control.c
index 71df67ba8252cf5481df47d9497d995882ab48c2..58e7dc48a6cbc4f6d74ed80ada852e4249cb31a6 100644
@@ -90,12 +90,14 @@ static void arch_reset_el1(struct registers *regs)
 
 void arch_reset_self(struct per_cpu *cpu_data)
 {
-       int err;
+       int err = 0;
        unsigned long reset_address;
        struct cell *cell = cpu_data->cell;
        struct registers *regs = guest_regs(cpu_data);
+       bool is_shutdown = cpu_data->shutdown;
 
-       err = arch_mmu_cpu_cell_init(cpu_data);
+       if (!is_shutdown)
+               err = arch_mmu_cpu_cell_init(cpu_data);
        if (err)
                printk("MMU setup failed\n");
        /*
@@ -112,12 +114,15 @@ void arch_reset_self(struct per_cpu *cpu_data)
         */
        irqchip_eoi_irq(SGI_CPU_OFF, true);
 
-       err = irqchip_cpu_reset(cpu_data);
-       if (err)
-               printk("IRQ setup failed\n");
+       /* irqchip_cpu_shutdown already resets the GIC on all CPUs. */
+       if (!is_shutdown) {
+               err = irqchip_cpu_reset(cpu_data);
+               if (err)
+                       printk("IRQ setup failed\n");
+       }
 
        /* Wait for the driver to call cpu_up */
-       if (cell == &root_cell)
+       if (cell == &root_cell || is_shutdown)
                reset_address = arch_smp_spin(cpu_data, root_cell.arch.smp);
        else
                reset_address = arch_smp_spin(cpu_data, cell->arch.smp);
@@ -131,6 +136,10 @@ void arch_reset_self(struct per_cpu *cpu_data)
        arm_write_banked_reg(ELR_hyp, reset_address);
        arm_write_banked_reg(SPSR_hyp, RESET_PSR);
 
+       if (is_shutdown)
+               /* Won't return here. */
+               arch_shutdown_self(cpu_data);
+
        vmreturn(regs);
 }
 
@@ -197,6 +206,10 @@ struct registers* arch_handle_exit(struct per_cpu *cpu_data,
                panic_stop();
        }
 
+       if (cpu_data->shutdown)
+               /* Won't return here. */
+               arch_shutdown_self(cpu_data);
+
        return regs;
 }
 
@@ -354,3 +367,38 @@ void arch_panic_park(void)
        psci_cpu_off(this_cpu_data());
        __builtin_unreachable();
 }
+
+/*
+ * This handler is only used for cells, not for the root. The core already
+ * issued a cpu_suspend. arch_reset_cpu will cause arch_reset_self to be
+ * called on that CPU, which will in turn call arch_shutdown_self.
+ */
+void arch_shutdown_cpu(unsigned int cpu_id)
+{
+       struct per_cpu *cpu_data = per_cpu(cpu_id);
+
+       cpu_data->virt_id = cpu_id;
+       cpu_data->shutdown = true;
+
+       if (psci_wait_cpu_stopped(cpu_id))
+               printk("FATAL: unable to stop CPU%d\n", cpu_id);
+
+       arch_reset_cpu(cpu_id);
+}
+
+void arch_shutdown(void)
+{
+       unsigned int cpu;
+       struct cell *cell = root_cell.next;
+
+       /* Re-route each SPI to CPU0 */
+       for (; cell != NULL; cell = cell->next)
+               irqchip_cell_exit(cell);
+
+       /*
+        * Let the exit handler call arch_shutdown_self, so that the core
+        * can finish its shutdown function and release its lock first.
+        */
+       for_each_cpu(cpu, root_cell.cpu_set)
+               per_cpu(cpu)->shutdown = true;
+}
diff --git a/hypervisor/arch/arm/include/asm/control.h b/hypervisor/arch/arm/include/asm/control.h
index f1842ffb65631459c796a3541f9be736b6776c8e..7bff77fe53f3e9d3a647a0ba98fd2c2cf1c9f62a 100644
@@ -36,8 +36,10 @@ void arch_handle_trap(struct per_cpu *cpu_data, struct registers *guest_regs);
 struct registers* arch_handle_exit(struct per_cpu *cpu_data,
                                   struct registers *regs);
 void arch_reset_self(struct per_cpu *cpu_data);
+void arch_shutdown_self(struct per_cpu *cpu_data);
 
 void __attribute__((noreturn)) vmreturn(struct registers *guest_regs);
+void __attribute__((noreturn)) arch_shutdown_mmu(struct per_cpu *cpu_data);
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/hypervisor/arch/arm/include/asm/percpu.h b/hypervisor/arch/arm/include/asm/percpu.h
index 1a6ae7c09be44a6e0003b3573f82520a4032711c..81fa18015d072256aa886cf42c3862e3d9c186b8 100644
@@ -62,6 +62,7 @@ struct per_cpu {
 
        bool flush_vcpu_caches;
        int shutdown_state;
+       bool shutdown;
        bool failed;
 } __attribute__((aligned(PAGE_SIZE)));
 
diff --git a/hypervisor/arch/arm/mmu_hyp.c b/hypervisor/arch/arm/mmu_hyp.c
index c35be330dba8cd4ba163057c5065531a77c25de6..e9e0dc491d6649e40d1ae4b4d3566c80ff123194 100644
@@ -155,6 +155,33 @@ setup_mmu_el2(struct per_cpu *cpu_data, phys2virt_t phys2virt, u64 ttbr)
        asm volatile("b .\n");
 }
 
+/*
+ * Shut down the MMU and return to EL1 with the kernel context stored in `regs'
+ */
+static void __attribute__((naked)) __attribute__((section(".trampoline")))
+shutdown_el2(struct registers *regs, unsigned long vectors)
+{
+       u32 sctlr_el2;
+
+       /* Disable stage-1 translation, caches must be cleaned. */
+       arm_read_sysreg(SCTLR_EL2, sctlr_el2);
+       sctlr_el2 &= ~(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT);
+       arm_write_sysreg(SCTLR_EL2, sctlr_el2);
+       isb();
+
+       /* Clean the MMU registers */
+       arm_write_sysreg(HMAIR0, 0);
+       arm_write_sysreg(HMAIR1, 0);
+       arm_write_sysreg(TTBR0_EL2, 0);
+       arm_write_sysreg(TCR_EL2, 0);
+       isb();
+
+       /* Reset the vectors as late as possible */
+       arm_write_sysreg(HVBAR, vectors);
+
+       vmreturn(regs);
+}
+
 static void check_mmu_map(unsigned long virt_addr, unsigned long phys_addr)
 {
        unsigned long phys_base;
@@ -253,6 +280,47 @@ int switch_exception_level(struct per_cpu *cpu_data)
        return 0;
 }
 
+void __attribute__((noreturn)) arch_shutdown_mmu(struct per_cpu *cpu_data)
+{
+       static DEFINE_SPINLOCK(map_lock);
+
+       virt2phys_t virt2phys = paging_hvirt2phys;
+       void *stack_virt = cpu_data->stack;
+       unsigned long stack_phys = virt2phys((void *)stack_virt);
+       unsigned long trampoline_phys = virt2phys((void *)&trampoline_start);
+       struct registers *regs_phys =
+                       (struct registers *)virt2phys(guest_regs(cpu_data));
+
+       /* Jump to the identity-mapped trampoline page before shutting down */
+       void (*shutdown_fun_phys)(struct registers*, unsigned long);
+       shutdown_fun_phys = (void*)virt2phys(shutdown_el2);
+
+       /*
+        * No need to check for size or overlapping here, it has already
+        * been done, and the paging structures will soon be deleted.
+        * However, the cells' CPUs may execute this concurrently.
+        */
+       spin_lock(&map_lock);
+       paging_create(&hv_paging_structs, stack_phys, PAGE_SIZE, stack_phys,
+                     PAGE_DEFAULT_FLAGS, PAGING_NON_COHERENT);
+       paging_create(&hv_paging_structs, trampoline_phys, PAGE_SIZE,
+                     trampoline_phys, PAGE_DEFAULT_FLAGS,
+                     PAGING_NON_COHERENT);
+       spin_unlock(&map_lock);
+
+       arch_cpu_dcaches_flush(CACHES_CLEAN);
+
+       /*
+        * Final shutdown:
+        * - disable the MMU whilst inside the trampoline page
+        * - reset the vectors
+        * - return to EL1
+        */
+       shutdown_fun_phys(regs_phys, saved_vectors);
+
+       __builtin_unreachable();
+}
+
 int arch_map_device(void *paddr, void *vaddr, unsigned long size)
 {
        return paging_create(&hv_paging_structs, (unsigned long)paddr, size,
diff --git a/hypervisor/arch/arm/setup.c b/hypervisor/arch/arm/setup.c
index 279b7037adf44a961f1eba97396b918e50cf3186..83fdac827ae06e038a346ad191ee77fc4b9004af 100644
@@ -124,10 +124,26 @@ void arch_cpu_activate_vmm(struct per_cpu *cpu_data)
        while (1);
 }
 
-void arch_cpu_restore(struct per_cpu *cpu_data)
+void arch_shutdown_self(struct per_cpu *cpu_data)
 {
+       irqchip_cpu_shutdown(cpu_data);
+
+       /* Free the guest */
+       arm_write_sysreg(HCR, 0);
+       arm_write_sysreg(TPIDR_EL2, 0);
+       arm_write_sysreg(VTCR_EL2, 0);
+
+       /* Remove stage-2 mappings */
+       arch_cpu_tlb_flush(cpu_data);
+
+       /* TLB flush needs the cell's VMID */
+       isb();
+       arm_write_sysreg(VTTBR_EL2, 0);
+
+       /* Return to EL1 */
+       arch_shutdown_mmu(cpu_data);
 }
 
-// catch missing symbols
-void arch_shutdown_cpu(unsigned int cpu_id) {}
-void arch_shutdown(void) {}
+void arch_cpu_restore(struct per_cpu *cpu_data)
+{
+}