void arch_reset_self(struct per_cpu *cpu_data)
{
- int err;
+ int err = 0;
 unsigned long reset_address;
 struct cell *cell = cpu_data->cell;
 struct registers *regs = guest_regs(cpu_data);
+ /* Latch the flag once: arch_shutdown() sets it from another CPU. */
+ bool is_shutdown = cpu_data->shutdown;
- err = arch_mmu_cpu_cell_init(cpu_data);
+ /* On shutdown the MMU is being torn down, so skip re-initialisation. */
+ if (!is_shutdown)
+ err = arch_mmu_cpu_cell_init(cpu_data);
 if (err)
 printk("MMU setup failed\n");
 /*
  * NOTE(review): original comment text lost in this hunk — presumably it
  * explained why the CPU-off SGI is EOI'd here instead of in the IRQ
  * handler; confirm against the patch this was extracted from.
  */
 irqchip_eoi_irq(SGI_CPU_OFF, true);
- err = irqchip_cpu_reset(cpu_data);
- if (err)
- printk("IRQ setup failed\n");
+ /* irqchip_cpu_shutdown already resets the GIC on all CPUs. */
+ if (!is_shutdown) {
+ err = irqchip_cpu_reset(cpu_data);
+ if (err)
+ printk("IRQ setup failed\n");
+ }
 /* Wait for the driver to call cpu_up */
- if (cell == &root_cell)
+ /* During shutdown every CPU parks in the root cell's spin table. */
+ if (cell == &root_cell || is_shutdown)
 reset_address = arch_smp_spin(cpu_data, root_cell.arch.smp);
 else
 reset_address = arch_smp_spin(cpu_data, cell->arch.smp);
 arm_write_banked_reg(ELR_hyp, reset_address);
 arm_write_banked_reg(SPSR_hyp, RESET_PSR);
+ if (is_shutdown)
+ /* Won't return here. */
+ arch_shutdown_self(cpu_data);
+
 vmreturn(regs);
}
panic_stop();
}
+ if (cpu_data->shutdown)
+ /* Won't return here. */
+ arch_shutdown_self(cpu_data);
+
return regs;
}
psci_cpu_off(this_cpu_data());
__builtin_unreachable();
}
+
+/*
+ * This handler is only used for cells, not for the root. The core already
+ * issued a cpu_suspend. arch_reset_cpu will cause arch_reset_self to be
+ * called on that CPU, which will in turn call arch_shutdown_self.
+ */
+void arch_shutdown_cpu(unsigned int cpu_id)
+{
+ struct per_cpu *cpu_data = per_cpu(cpu_id);
+
+ /*
+  * NOTE(review): presumably restores the identity virtual-to-physical
+  * CPU id mapping used outside any cell — confirm against per_cpu docs.
+  */
+ cpu_data->virt_id = cpu_id;
+ /* Makes arch_reset_self take the shutdown path on that CPU. */
+ cpu_data->shutdown = true;
+
+ if (psci_wait_cpu_stopped(cpu_id))
+ printk("FATAL: unable to stop CPU%d\n", cpu_id);
+
+ arch_reset_cpu(cpu_id);
+}
+
+/* Tear down all cells' IRQ routing and flag every root-cell CPU to stop. */
+void arch_shutdown(void)
+{
+ unsigned int cpu;
+ /* Start past the root cell: only non-root cells are exited here. */
+ struct cell *cell = root_cell.next;
+
+ /* Re-route each SPI to CPU0 */
+ for (; cell != NULL; cell = cell->next)
+ irqchip_cell_exit(cell);
+
+ /*
+ * Let the exit handler call reset_self to let the core finish its
+ * shutdown function and release its lock.
+ */
+ for_each_cpu(cpu, root_cell.cpu_set)
+ per_cpu(cpu)->shutdown = true;
+}
struct registers* arch_handle_exit(struct per_cpu *cpu_data,
struct registers *regs);
void arch_reset_self(struct per_cpu *cpu_data);
+void arch_shutdown_self(struct per_cpu *cpu_data);
void __attribute__((noreturn)) vmreturn(struct registers *guest_regs);
+void __attribute__((noreturn)) arch_shutdown_mmu(struct per_cpu *cpu_data);
#endif /* !__ASSEMBLY__ */
bool flush_vcpu_caches;
int shutdown_state;
+ bool shutdown;
bool failed;
} __attribute__((aligned(PAGE_SIZE)));
asm volatile("b .\n");
}
+/*
+ * Shutdown the MMU and returns to EL1 with the kernel context stored in `regs'
+ *
+ * Runs from the identity-mapped .trampoline section so the code stays
+ * reachable while stage-1 translation is switched off below.
+ *
+ * NOTE(review): a `naked` function with a C body and a local variable is
+ * not guaranteed to work on ARM GCC (naked bodies should be pure asm, and
+ * the compiler may allocate `sctlr_el2` on a stack it never set up) —
+ * confirm the target compiler accepts this.
+ */
+static void __attribute__((naked)) __attribute__((section(".trampoline")))
+shutdown_el2(struct registers *regs, unsigned long vectors)
+{
+ u32 sctlr_el2;
+
+ /* Disable stage-1 translation, caches must be cleaned. */
+ arm_read_sysreg(SCTLR_EL2, sctlr_el2);
+ sctlr_el2 &= ~(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT);
+ arm_write_sysreg(SCTLR_EL2, sctlr_el2);
+ isb();
+
+ /* Clean the MMU registers */
+ arm_write_sysreg(HMAIR0, 0);
+ arm_write_sysreg(HMAIR1, 0);
+ arm_write_sysreg(TTBR0_EL2, 0);
+ arm_write_sysreg(TCR_EL2, 0);
+ isb();
+
+ /* Reset the vectors as late as possible */
+ arm_write_sysreg(HVBAR, vectors);
+
+ vmreturn(regs);
+}
+
static void check_mmu_map(unsigned long virt_addr, unsigned long phys_addr)
{
unsigned long phys_base;
return 0;
}
+void __attribute__((noreturn)) arch_shutdown_mmu(struct per_cpu *cpu_data)
+{
+ static DEFINE_SPINLOCK(map_lock);
+
+ /*
+  * Compute the physical addresses of everything the trampoline touches:
+  * once the MMU is off, only identity-mapped physical addresses work.
+  */
+ virt2phys_t virt2phys = paging_hvirt2phys;
+ void *stack_virt = cpu_data->stack;
+ unsigned long stack_phys = virt2phys((void *)stack_virt);
+ unsigned long trampoline_phys = virt2phys((void *)&trampoline_start);
+ struct registers *regs_phys =
+ (struct registers *)virt2phys(guest_regs(cpu_data));
+
+ /* Jump to the identity-mapped trampoline page before shutting down */
+ void (*shutdown_fun_phys)(struct registers*, unsigned long);
+ shutdown_fun_phys = (void*)virt2phys(shutdown_el2);
+
+ /*
+ * No need to check for size or overlapping here, it has already been
+ * done, and the paging structures will soon be deleted. However, the
+ * cells' CPUs may execute this concurrently.
+ */
+ spin_lock(&map_lock);
+ paging_create(&hv_paging_structs, stack_phys, PAGE_SIZE, stack_phys,
+ PAGE_DEFAULT_FLAGS, PAGING_NON_COHERENT);
+ paging_create(&hv_paging_structs, trampoline_phys, PAGE_SIZE,
+ trampoline_phys, PAGE_DEFAULT_FLAGS,
+ PAGING_NON_COHERENT);
+ spin_unlock(&map_lock);
+
+ /* Push dirty lines to memory so data survives the cache-off switch. */
+ arch_cpu_dcaches_flush(CACHES_CLEAN);
+
+ /*
+ * Final shutdown:
+ * - disable the MMU whilst inside the trampoline page
+ * - reset the vectors
+ * - return to EL1
+ */
+ shutdown_fun_phys(regs_phys, saved_vectors);
+
+ __builtin_unreachable();
+}
+
int arch_map_device(void *paddr, void *vaddr, unsigned long size)
{
return paging_create(&hv_paging_structs, (unsigned long)paddr, size,
while (1);
}
-void arch_cpu_restore(struct per_cpu *cpu_data)
+/*
+ * Final per-CPU teardown: detach this CPU from the GIC, clear the EL2
+ * virtualization state, then drop the hypervisor MMU and return to EL1.
+ */
+void arch_shutdown_self(struct per_cpu *cpu_data)
{
+ irqchip_cpu_shutdown(cpu_data);
+
+ /* Free the guest */
+ arm_write_sysreg(HCR, 0);
+ arm_write_sysreg(TPIDR_EL2, 0);
+ arm_write_sysreg(VTCR_EL2, 0);
+
+ /* Remove stage-2 mappings */
+ arch_cpu_tlb_flush(cpu_data);
+
+ /* TLB flush needs the cell's VMID, so clear VTTBR only afterwards. */
+ isb();
+ arm_write_sysreg(VTTBR_EL2, 0);
+
+ /* Return to EL1 */
+ arch_shutdown_mmu(cpu_data);
}
-// catch missing symbols
-void arch_shutdown_cpu(unsigned int cpu_id) {}
-void arch_shutdown(void) {}
+/*
+ * NOTE(review): intentionally empty — apparently ARM has no per-CPU state
+ * to restore on this path yet; kept so generic code can call it. Confirm.
+ */
+void arch_cpu_restore(struct per_cpu *cpu_data)
+{
+}