In GICv3, IPIs are sent by writing the system register ICC_SGIR.
This patch moderates those writes by injecting the IPIs into the
appropriate cells, and issues a hypervisor IPI to let the cell's CPUs
fill their list registers.
Since there shouldn't be many cases where Jailhouse needs to emulate
system register accesses, this patch keeps it simple, by calling directly
the GICv3 function from the trap handler, without abstracting it through
irqchip.
However, this change adds an ungraceful ifdef, since the GICv2 and v3
headers are mutually exclusive for the moment.
In GICv2, the SGIR register is 32-bit and will be handled directly in the
gic-common.c code, using an MMIO trap of the distributor accesses.
Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
void arch_handle_sgi(struct per_cpu *cpu_data, u32 irqn)
{
-
+ /*
+  * Dispatch a hypervisor-generated SGI. SGI_INJECT is sent by
+  * gicv3_handle_sgir_write() from another CPU and asks this CPU to
+  * move its pending IRQs into its own list registers.
+  */
+ switch (irqn) {
+ case SGI_INJECT:
+ irqchip_inject_pending(cpu_data);
+ break;
+ }
}
void arch_handle_exit(struct per_cpu *cpu_data, struct registers *regs)
#include <asm/irqchip.h>
#include <asm/platform.h>
#include <asm/setup.h>
+#include <asm/traps.h>
/*
* This implementation assumes that the kernel driver already initialised most
return 0;
}
+/*
+ * Moderate a trapped guest write to the GICv3 SGI generation register:
+ * mark the requested SGI pending on every targeted CPU of the cell,
+ * then send a hypervisor SGI_INJECT so those CPUs fill their own list
+ * registers (see arch_handle_sgi). Always returns TRAP_HANDLED.
+ */
+int gicv3_handle_sgir_write(struct per_cpu *cpu_data, u64 sgir)
+{
+ struct sgi sgi;
+ struct cell *cell = cpu_data->cell;
+ unsigned int cpu;
+ unsigned long this_cpu = cpu_data->cpu_id;
+ /* IRM bit: 0 = use the target list, 1 = all CPUs except the sender */
+ unsigned long routing_mode = !!(sgir & ICC_SGIR_ROUTING_BIT);
+ unsigned long targets = sgir & ICC_SGIR_TARGET_MASK;
+ u32 irq = sgir >> ICC_SGIR_IRQN_SHIFT & 0xf;
+
+ /* FIXME: clusters are not supported yet. */
+ sgi.targets = 0;
+ sgi.routing_mode = routing_mode;
+ sgi.aff1 = sgir >> ICC_SGIR_AFF1_SHIFT & 0xff;
+ sgi.aff2 = sgir >> ICC_SGIR_AFF2_SHIFT & 0xff;
+ sgi.aff3 = sgir >> ICC_SGIR_AFF3_SHIFT & 0xff;
+ /* Physical SGI carries SGI_INJECT; the guest's irq is set pending below */
+ sgi.id = SGI_INJECT;
+
+ for_each_cpu_except(cpu, cell->cpu_set, this_cpu) {
+ if (routing_mode == 0 && !test_bit(cpu, &targets))
+ continue;
+ else if (routing_mode == 1 && cpu == this_cpu)
+ continue;
+
+ irqchip_set_pending(per_cpu(cpu), irq, false);
+ sgi.targets |= (1 << cpu);
+ }
+
+ /* Let the other CPUs inject their SGIs */
+ gic_send_sgi(&sgi);
+
+ return TRAP_HANDLED;
+}
+
/*
* Handle the maintenance interrupt, the rest is injected into the cell.
* Return true when the IRQ has been handled by the hyp.
#include <asm/cell.h>
#include <asm/percpu.h>
+#define SGI_INJECT 0
+
#ifndef __ASSEMBLY__
int arch_mmu_cell_init(struct cell *cell);
}
}
+struct per_cpu;
+int gicv3_handle_sgir_write(struct per_cpu *cpu_data, u64 sgir);
+
#endif /* __ASSEMBLY__ */
#endif /* _JAILHOUSE_ASM_GIC_V3_H */
enum trap_return {
TRAP_HANDLED = 1,
TRAP_UNHANDLED = 0,
+ /* NOTE(review): presumably "deny the access" — no user visible here, confirm */
+ TRAP_FORBIDDEN = -1,
};
struct trap_context {
*/
#include <asm/control.h>
+#include <asm/gic_common.h>
+#include <asm/platform.h>
#include <asm/traps.h>
#include <asm/sysregs.h>
#include <jailhouse/printk.h>
return TRAP_HANDLED;
}
+/*
+ * Decode a trapped 64-bit CP15 access (MCRR/MRRC). Currently only used
+ * to catch guest writes to ICC_SGI1R (CRm 12, opc1 0) when GICv3 is
+ * configured; all other accesses are left unhandled.
+ */
+static int arch_handle_cp15_64(struct per_cpu *cpu_data, struct trap_context *ctx)
+{
+ unsigned long rt_val, rt2_val;
+ /* assumes ESR/HSR ISS layout for 64-bit CP15 traps — TODO confirm masks */
+ u32 opc1 = ctx->esr >> 16 & 0x7;
+ u32 rt2 = ctx->esr >> 10 & 0xf;
+ u32 rt = ctx->esr >> 5 & 0xf;
+ u32 crm = ctx->esr >> 1 & 0xf;
+ u32 read = ctx->esr & 1;
+
+ /* Fetch the two guest registers forming the 64-bit written value */
+ if (!read) {
+ access_cell_reg(ctx, rt, &rt_val, true);
+ access_cell_reg(ctx, rt2, &rt2_val, true);
+ }
+
+#ifdef CONFIG_ARM_GIC_V3
+ /* Trapped ICC_SGI1R write */
+ if (!read && opc1 == 0 && crm == 12) {
+ arch_skip_instruction(ctx);
+ return gicv3_handle_sgir_write(cpu_data,
+ (u64)rt2_val << 32 | rt_val);
+ }
+#else
+ /* Avoid `unused' warning... */
+ crm = crm;
+ opc1 = opc1;
+#endif
+
+ return TRAP_UNHANDLED;
+}
+
static const trap_handler trap_handlers[38] =
{
+ /* indexed by the exception class (EC) taken from the ESR */
+ [ESR_EC_CP15_64] = arch_handle_cp15_64,
[ESR_EC_HVC] = arch_handle_hvc,
};