*/
#include <asm/control.h>
+#include <asm/gic_common.h>
+#include <asm/platform.h>
+#include <asm/psci.h>
#include <asm/traps.h>
#include <asm/sysregs.h>
#include <jailhouse/printk.h>
ctx->cpsr = cpsr;
}
-static void arch_skip_instruction(struct trap_context *ctx)
+void arch_skip_instruction(struct trap_context *ctx)
{
u32 instruction_length = ESR_IL(ctx->esr);
arch_advance_itstate(ctx);
}
-static void access_cell_reg(struct trap_context *ctx, u8 reg,
- unsigned long *val, bool is_read)
+void access_cell_reg(struct trap_context *ctx, u8 reg, unsigned long *val,
+ bool is_read)
{
unsigned long mode = ctx->cpsr & PSR_MODE_MASK;
}
}
-static int arch_handle_hvc(struct per_cpu *cpu_data, struct trap_context *ctx)
+/*
+ * Dump the guest state (pc, cpsr, esr and the general-purpose registers
+ * r0-r14) to the panic console for post-mortem analysis of a fatal trap.
+ */
+static void dump_guest_regs(struct trap_context *ctx)
+{
+	u8 reg;
+	unsigned long reg_val;
+
+	panic_printk("pc=0x%08x cpsr=0x%08x esr=0x%08x\n", ctx->pc, ctx->cpsr,
+		     ctx->esr);
+	for (reg = 0; reg < 15; reg++) {
+		/*
+		 * Fetch the value via access_cell_reg() so banked registers
+		 * are resolved for the guest's current mode.
+		 */
+		access_cell_reg(ctx, reg, &reg_val, true);
+		panic_printk("r%d=0x%08x ", reg, reg_val);
+		/* Print four registers per line */
+		if ((reg + 1) % 4 == 0)
+			panic_printk("\n");
+	}
+	panic_printk("\n");
+}
+
+/*
+ * Handle a trapped SMC instruction issued by the guest. PSCI function IDs
+ * (32-bit or U-Boot-style encoding) in r0 are routed to the hypervisor's
+ * PSCI emulation; any other call is passed through via smc() — presumably
+ * to the platform's secure monitor. The result is written back to the
+ * guest's r0. Always returns TRAP_HANDLED.
+ */
+static int arch_handle_smc(struct trap_context *ctx)
+{
+	unsigned long *regs = ctx->regs;
+
+	if (IS_PSCI_32(regs[0]) || IS_PSCI_UBOOT(regs[0]))
+		regs[0] = psci_dispatch(ctx);
+	else
+		regs[0] = smc(regs[0], regs[1], regs[2], regs[3]);
+
+	/* The SMC must not be re-executed: step the guest PC past it */
+	arch_skip_instruction(ctx);
+
+	return TRAP_HANDLED;
+}
+
+/*
+ * Handle a hypercall (HVC) from the guest. PSCI function IDs in r0 are
+ * diverted to the PSCI emulation (guests may issue PSCI via HVC); every
+ * other value is treated as a Jailhouse hypercall. The return value is
+ * placed in the guest's r0. Always returns TRAP_HANDLED.
+ */
+static int arch_handle_hvc(struct trap_context *ctx)
{
unsigned long *regs = ctx->regs;
-	regs[0] = hypercall(regs[0], regs[1], regs[2]);
+	if (IS_PSCI_32(regs[0]) || IS_PSCI_UBOOT(regs[0]))
+		regs[0] = psci_dispatch(ctx);
+	else
+		regs[0] = hypercall(regs[0], regs[1], regs[2]);
return TRAP_HANDLED;
}
+/*
+ * Handle a trapped 32-bit CP15 access (MCR/MRC). Only ACTLR is emulated:
+ * reads return the real ACTLR value, writes are silently discarded so the
+ * guest cannot disable cache coherency. All other CP15 accesses are
+ * reported as TRAP_UNHANDLED.
+ */
+static int arch_handle_cp15_32(struct trap_context *ctx)
+{
+	/* Decode the MCR/MRC operands from the ESR ISS field */
+	u32 opc2 = ctx->esr >> 17 & 0x7;
+	u32 opc1 = ctx->esr >> 14 & 0x7;
+	u32 crn = ctx->esr >> 10 & 0xf;
+	u32 rt = ctx->esr >> 5 & 0xf;
+	u32 crm = ctx->esr >> 1 & 0xf;
+	u32 read = ctx->esr & 1;
+
+	/* ACTLR: p15, 0, <Rt>, c1, c0, 1 */
+	if (opc1 == 0 && crn == 1 && crm == 0 && opc2 == 1) {
+		/* Do not let the guest disable coherency by writing ACTLR... */
+		if (read) {
+			unsigned long val;
+			arm_read_sysreg(ACTLR_EL1, val);
+			access_cell_reg(ctx, rt, &val, false);
+		}
+		arch_skip_instruction(ctx);
+
+		return TRAP_HANDLED;
+	}
+
+	return TRAP_UNHANDLED;
+}
+
+/*
+ * Handle a trapped 64-bit CP15 access (MCRR/MRRC). With a GICv3, guest
+ * writes to ICC_SGI1R (SGI generation, opc1 == 0, CRm == c12) are
+ * intercepted and forwarded to the GIC emulation; everything else is
+ * reported as TRAP_UNHANDLED.
+ */
+static int arch_handle_cp15_64(struct trap_context *ctx)
+{
+	unsigned long rt_val, rt2_val;
+	/* Decode the MCRR/MRRC operands from the ESR ISS field */
+	u32 opc1 = ctx->esr >> 16 & 0x7;
+	u32 rt2 = ctx->esr >> 10 & 0xf;
+	u32 rt = ctx->esr >> 5 & 0xf;
+	u32 crm = ctx->esr >> 1 & 0xf;
+	u32 read = ctx->esr & 1;
+
+	/* On a write, fetch both 32-bit halves from the guest registers */
+	if (!read) {
+		access_cell_reg(ctx, rt, &rt_val, true);
+		access_cell_reg(ctx, rt2, &rt2_val, true);
+	}
+
+#ifdef CONFIG_ARM_GIC_V3
+	/* Trapped ICC_SGI1R write */
+	if (!read && opc1 == 0 && crm == 12) {
+		arch_skip_instruction(ctx);
+		gicv3_handle_sgir_write((u64)rt2_val << 32 | rt_val);
+		return TRAP_HANDLED;
+	}
+#else
+	/*
+	 * Suppress `unused variable' warnings; a (void) cast does the job
+	 * without tripping clang's -Wself-assign like `crm = crm;' would.
+	 */
+	(void)crm;
+	(void)opc1;
+#endif
+
+	return TRAP_UNHANDLED;
+}
+
+/*
+ * Dispatch table indexed by the ESR exception class (EC) field.
+ * Entries left NULL fall through to the fatal-trap path in
+ * arch_handle_trap(). arch_handle_dabt is defined elsewhere in this file.
+ */
static const trap_handler trap_handlers[38] =
{
+	[ESR_EC_CP15_32] = arch_handle_cp15_32,
+	[ESR_EC_CP15_64] = arch_handle_cp15_64,
[ESR_EC_HVC] = arch_handle_hvc,
+	[ESR_EC_SMC] = arch_handle_smc,
+	[ESR_EC_DABT] = arch_handle_dabt,
};
void arch_handle_trap(struct per_cpu *cpu_data, struct registers *guest_regs)
}
if (trap_handlers[exception_class])
- ret = trap_handlers[exception_class](cpu_data, &ctx);
+ ret = trap_handlers[exception_class](&ctx);
- if (ret != TRAP_HANDLED) {
- panic_printk("CPU%d: Unhandled HYP trap, syndrome 0x%x\n",
- cpu_data->cpu_id, ctx.esr);
- while(1);
+ switch (ret) {
+ case TRAP_UNHANDLED:
+ case TRAP_FORBIDDEN:
+ panic_printk("FATAL: %s (exception class 0x%02x)\n",
+ (ret == TRAP_UNHANDLED ? "unhandled trap" :
+ "forbidden access"),
+ exception_class);
+ dump_guest_regs(&ctx);
+ panic_park();
}
restore_context: