arm_write_sysreg(TPIDRPRW, 0);
}
-static void arch_reset_self(struct per_cpu *cpu_data)
+void arch_reset_self(struct per_cpu *cpu_data)
{
int err;
unsigned long reset_address;
unsigned long arch_cpu_spin(void);
struct registers* arch_handle_exit(struct per_cpu *cpu_data,
struct registers *regs);
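+/* Called by the PSCI emulation to reset a CPU on CPU_OFF */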
+void arch_reset_self(struct per_cpu *cpu_data);
void __attribute__((noreturn)) vmreturn(struct registers *guest_regs);
/* The mbox will be accessed with an ldrd, which requires alignment */
__attribute__((aligned(8))) struct psci_mbox psci_mbox;
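+ /* Mailbox used to emulate PSCI calls issued by the guest */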
+ struct psci_mbox guest_mbox;
bool flush_vcpu_caches;
int shutdown_state;
#define wfi() asm volatile("wfi\n")
#define sev() asm volatile("sev\n")
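+/*
+ * Issue an SMC to the secure monitor. Under the AAPCS, the variadic
+ * arguments are passed in r1-r3, matching the SMC calling convention.
+ */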
+unsigned int smc(unsigned int r0, ...);
+
static inline void cpu_relax(void)
{
asm volatile("" : : : "memory");
#define PSCI_NOT_PRESENT (-7)
#define PSCI_DISABLED (-8)
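+/*
+ * PSCI uses the SMCCC fast-call encoding: function IDs start with 0x84
+ * (SMC32) or 0xc4 (SMC64), and both prefixes have bits 31 and 26 set.
+ */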
+#define IS_PSCI_FN(hvc) ((((hvc) >> 24) & 0x84) == 0x84)
+
#define PSCI_INVALID_ADDRESS 0xffffffff
#ifndef __ASSEMBLY__
long psci_resume(unsigned int target);
long psci_try_resume(unsigned int cpu_id);
+long psci_dispatch(struct per_cpu *cpu_data, struct trap_context *ctx);
+
+int psci_cell_init(struct cell *cell);
+unsigned long psci_emulate_spin(struct per_cpu *cpu_data);
+
#endif /* !__ASSEMBLY__ */
#endif /* _JAILHOUSE_ASM_PSCI_H */
*/
#include <asm/control.h>
+#include <asm/percpu.h>
#include <asm/psci.h>
#include <asm/traps.h>
+#include <jailhouse/control.h>
void _psci_cpu_off(struct psci_mbox *);
long _psci_cpu_on(struct psci_mbox *, unsigned long, unsigned long);
return -EBUSY;
}
+
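+/*
+ * Emulate CPU_ON: store the entry point and context ID in the target
+ * CPU's guest mailbox and wake it out of psci_emulate_spin.
+ */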
+static long psci_emulate_cpu_on(struct per_cpu *cpu_data,
+ struct trap_context *ctx)
+{
+ unsigned int target = ctx->regs[1];
+ unsigned int cpu;
+ struct psci_mbox *mbox;
+
+ cpu = arm_cpu_virt2phys(cpu_data->cell, target);
+ if (cpu == -1)
+ /* Virtual id not in the cell's CPU set */
+ return PSCI_DENIED;
+
+ mbox = &(per_cpu(cpu)->guest_mbox);
+ mbox->entry = ctx->regs[2];
+ mbox->context = ctx->regs[3];
+
+ return psci_resume(cpu);
+}
+
+/* Returns the secondary entry address set by the guest */
+unsigned long psci_emulate_spin(struct per_cpu *cpu_data)
+{
+ struct psci_mbox *mbox = &(cpu_data->guest_mbox);
+
+ mbox->entry = 0;
+
+ /* Wait for psci_emulate_cpu_on or a trapped MMIO write to the mbox */
+ while (mbox->entry == 0)
+ psci_suspend(cpu_data);
+
+ return mbox->entry;
+}
+
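+/* Clear the guest mailboxes of all CPUs assigned to the cell */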
+int psci_cell_init(struct cell *cell)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, cell->cpu_set) {
+ per_cpu(cpu)->guest_mbox.entry = 0;
+ per_cpu(cpu)->guest_mbox.context = 0;
+ }
+
+ return 0;
+}
+
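+/* Handle PSCI calls trapped through either SMC or HVC */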
+long psci_dispatch(struct per_cpu *cpu_data, struct trap_context *ctx)
+{
+ u32 function_id = ctx->regs[0];
+
+ switch (function_id) {
+ case PSCI_VERSION:
+ /* Major[31:16], minor[15:0]: report v0.2 */
+ return 2;
+
+ case PSCI_CPU_OFF:
+ /*
+ * arch_reset_self() will take care of calling
+ * psci_emulate_spin().
+ */
+ arch_reset_self(cpu_data);
+
+ /* Not reached */
+ return 0;
+
+ case PSCI_CPU_ON_32:
+ return psci_emulate_cpu_on(cpu_data, ctx);
+
+ default:
+ return PSCI_NOT_SUPPORTED;
+ }
+}
#include <asm/head.h>
#include <asm/psci.h>
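+ /* The security extension is required to assemble the smc instruction */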
+ .arch_extension sec
+ .globl smc
+ /*
+ * Since we trap all SMC instructions, those that are not PSCI calls
+ * may need to be forwarded to the secure monitor. The shutdown code
+ * will also have to issue a real PSCI_CPU_OFF call on secondary CPUs.
+ */
+smc:
+ smc #0
+ bx lr
+
.global _psci_cpu_off
/* r0: struct psci_mbox* */
_psci_cpu_off:
int arch_cpu_init(struct per_cpu *cpu_data)
{
int err = 0;
- unsigned long hcr = HCR_VM_BIT | HCR_IMO_BIT | HCR_FMO_BIT;
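+ /* HCR_TSC_BIT traps guest SMC instructions into HYP mode */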
+ unsigned long hcr = HCR_VM_BIT | HCR_IMO_BIT | HCR_FMO_BIT
+ | HCR_TSC_BIT;
cpu_data->psci_mbox.entry = 0;
cpu_data->virt_id = cpu_data->cpu_id;
#include <asm/control.h>
#include <asm/gic_common.h>
#include <asm/platform.h>
+#include <asm/psci.h>
#include <asm/traps.h>
#include <asm/sysregs.h>
#include <jailhouse/printk.h>
panic_printk("\n");
}
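+
+/*
+ * A trapped SMC is either a PSCI call, which is emulated, or a real
+ * secure monitor call, which is re-issued on the guest's behalf.
+ */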
+static int arch_handle_smc(struct per_cpu *cpu_data, struct trap_context *ctx)
+{
+ unsigned long *regs = ctx->regs;
+
+ if (IS_PSCI_FN(regs[0]))
+ regs[0] = psci_dispatch(cpu_data, ctx);
+ else
+ regs[0] = smc(regs[0], regs[1], regs[2], regs[3]);
+
+ arch_skip_instruction(ctx);
+
+ return TRAP_HANDLED;
+}
+
static int arch_handle_hvc(struct per_cpu *cpu_data, struct trap_context *ctx)
{
unsigned long *regs = ctx->regs;
- regs[0] = hypercall(regs[0], regs[1], regs[2]);
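+ /* PSCI function IDs have bit 31 set, so they cannot collide with
+ * hypercall codes */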
+ if (IS_PSCI_FN(regs[0]))
+ regs[0] = psci_dispatch(cpu_data, ctx);
+ else
+ regs[0] = hypercall(regs[0], regs[1], regs[2]);
return TRAP_HANDLED;
}
{
[ESR_EC_CP15_64] = arch_handle_cp15_64,
[ESR_EC_HVC] = arch_handle_hvc,
+ [ESR_EC_SMC] = arch_handle_smc,
[ESR_EC_DABT] = arch_handle_dabt,
};