* the COPYING file in the top-level directory.
*/
+#include <jailhouse/cell.h>
#include <jailhouse/control.h>
#include <jailhouse/mmio.h>
-#include <asm/cell.h>
+#include <jailhouse/printk.h>
#include <asm/control.h>
#include <asm/gic_common.h>
#include <asm/irqchip.h>
static DEFINE_SPINLOCK(dist_lock);
/* The GIC interface numbering does not necessarily match the logical map */
-u8 target_cpu_map[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+static u8 target_cpu_map[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
/*
* Most of the GIC distributor writes only reconfigure the IRQs corresponding to
* the bits of the written value, by using separate `set' and `clear' registers.
* Such registers can be handled by setting the `is_poke' boolean, which allows
- * to simply restrict the access->val with the cell configuration mask.
+ * to simply restrict the mmio->value with the cell configuration mask.
* Others, such as the priority registers, will need to be read and written back
* with a restricted value, by using the distributor lock.
*/
-static int restrict_bitmask_access(struct per_cpu *cpu_data,
- struct mmio_access *access,
- unsigned int reg_index,
- unsigned int bits_per_irq,
- bool is_poke)
+static enum mmio_result
+restrict_bitmask_access(struct mmio_access *mmio, unsigned int reg_index,
+ unsigned int bits_per_irq, bool is_poke)
{
- unsigned int spi;
+ struct cell *cell = this_cell();
+ unsigned int irq;
unsigned long access_mask = 0;
/*
* In order to avoid division, the number of bits per irq is limited
* to powers of 2 for the moment.
*/
unsigned long irqs_per_reg = 32 >> ffsl(bits_per_irq);
- unsigned long spi_bits = (1 << bits_per_irq) - 1;
+ unsigned long irq_bits = (1 << bits_per_irq) - 1;
/* First, extract the first interrupt affected by this access */
unsigned int first_irq = reg_index * irqs_per_reg;
- /* For SGIs or PPIs, let the caller do the mmio access */
- if (!is_spi(first_irq))
- return TRAP_UNHANDLED;
+ for (irq = first_irq; irq < first_irq + irqs_per_reg; irq++) {
+ unsigned int bit_nr = (irq - first_irq) * bits_per_irq;
- /* For SPIs, compare against the cell config mask */
- first_irq -= 32;
- for (spi = first_irq; spi < first_irq + irqs_per_reg; spi++) {
- unsigned int bit_nr = (spi - first_irq) * bits_per_irq;
- if (spi_in_cell(cpu_data->cell, spi))
- access_mask |= spi_bits << bit_nr;
+ if ((is_spi(irq) && spi_in_cell(cell, irq - 32)) ||
+ irq == SGI_INJECT || irq == SGI_CPU_OFF ||
+ irq == MAINTENANCE_IRQ)
+ access_mask |= irq_bits << bit_nr;
}
- if (!access->is_write) {
+ if (!mmio->is_write) {
/* Restrict the read value */
- arch_mmio_access(access);
- access->val &= access_mask;
- return TRAP_HANDLED;
+ mmio_perform_access(gicd_base, mmio);
+ mmio->value &= access_mask;
+ return MMIO_HANDLED;
}
if (!is_poke) {
/*
* Modify the existing value of this register by first reading
- * it into access->val
+ * it into mmio->value
* Relies on a spinlock since we need two mmio accesses.
*/
- unsigned long access_val = access->val;
+ unsigned long access_val = mmio->value;
spin_lock(&dist_lock);
- access->is_write = false;
- arch_mmio_access(access);
- access->is_write = true;
+ mmio->is_write = false;
+ mmio_perform_access(gicd_base, mmio);
+ mmio->is_write = true;
/* Clear 0 bits */
- access->val &= ~(access_mask & ~access_val);
- access->val |= access_val;
- arch_mmio_access(access);
+ mmio->value &= ~(access_mask & ~access_val);
+ mmio->value |= access_val;
+ mmio_perform_access(gicd_base, mmio);
spin_unlock(&dist_lock);
-
- return TRAP_HANDLED;
} else {
- access->val &= access_mask;
- /* Do the access */
- return TRAP_UNHANDLED;
+ mmio->value &= access_mask;
+ mmio_perform_access(gicd_base, mmio);
}
+ return MMIO_HANDLED;
}
/*
* GICv3 uses a 64bit register IROUTER for each IRQ
*/
-static int handle_irq_route(struct per_cpu *cpu_data,
- struct mmio_access *access, unsigned int irq)
+static enum mmio_result handle_irq_route(struct mmio_access *mmio,
+ unsigned int irq)
{
- struct cell *cell = cpu_data->cell;
+ struct cell *cell = this_cell();
unsigned int cpu;
/* Ignore aff3 on AArch32 (return 0) */
- if (access->size == 4 && (access->addr % 8))
- return TRAP_HANDLED;
+ if (mmio->size == 4 && (mmio->address % 8))
+ return MMIO_HANDLED;
/* SGIs and PPIs are res0 */
if (!is_spi(irq))
- return TRAP_HANDLED;
+ return MMIO_HANDLED;
/*
 * Ignore accesses to SPIs that do not belong to the cell. This isn't
 * an error: guests may write all their
 * registers at initialisation
*/
if (!spi_in_cell(cell, irq - 32))
- return TRAP_HANDLED;
+ return MMIO_HANDLED;
/* Translate the virtual cpu id into the physical one */
- if (access->is_write) {
- access->val = arm_cpu_virt2phys(cell, access->val);
- if (access->val == -1) {
+ if (mmio->is_write) {
+ mmio->value = arm_cpu_virt2phys(cell, mmio->value);
+ if (mmio->value == -1) {
printk("Attempt to route IRQ%d outside of cell\n", irq);
- return TRAP_FORBIDDEN;
+ return MMIO_ERROR;
}
- /* And do the access */
- return TRAP_UNHANDLED;
+ mmio_perform_access(gicd_base, mmio);
} else {
cpu = mmio_read32(gicd_base + GICD_IROUTER + 8 * irq);
- access->val = arm_cpu_phys2virt(cpu);
- return TRAP_HANDLED;
+ mmio->value = arm_cpu_phys2virt(cpu);
}
+ return MMIO_HANDLED;
}
/*
* GICv2 uses 8bit values for each IRQ in the ITARGETRs registers
*/
-static int handle_irq_target(struct per_cpu *cpu_data,
- struct mmio_access *access, unsigned int reg)
+static enum mmio_result handle_irq_target(struct mmio_access *mmio,
+ unsigned int reg)
{
/*
* ITARGETSR contain one byte per IRQ, so the first one affected by this
* access corresponds to the reg index
*/
+ struct cell *cell = this_cell();
unsigned int i, cpu;
unsigned int spi = reg - 32;
unsigned int offset;
/*
 * Let the guest freely access its SGIs and PPIs, which may be used to
* fill its CPU interface map.
*/
- if (!is_spi(reg))
- return TRAP_UNHANDLED;
+ if (!is_spi(reg)) {
+ mmio_perform_access(gicd_base, mmio);
+ return MMIO_HANDLED;
+ }
/*
- * The registers are byte-accessible, extend the access to a word if
- * necessary.
+ * The registers are byte-accessible, but we always do word accesses.
*/
offset = spi % 4;
- access->val <<= 8 * offset;
- access->size = 4;
+ mmio->address &= ~0x3;
+ mmio->value <<= 8 * offset;
+ mmio->size = 4;
spi -= offset;
for (i = 0; i < 4; i++, spi++) {
- if (spi_in_cell(cpu_data->cell, spi))
+ if (spi_in_cell(cell, spi))
access_mask |= 0xff << (8 * i);
else
continue;
- if (!access->is_write)
+ if (!mmio->is_write)
continue;
- targets = (access->val >> (8 * i)) & 0xff;
+ targets = (mmio->value >> (8 * i)) & 0xff;
/* Check that the targeted interface belongs to the cell */
for (cpu = 0; cpu < 8; cpu++) {
if (!(targets & target_cpu_map[cpu]))
continue;
- if (per_cpu(cpu)->cell == cpu_data->cell)
+ if (per_cpu(cpu)->cell == cell)
continue;
printk("Attempt to route SPI%d outside of cell\n", spi);
- return TRAP_FORBIDDEN;
+ return MMIO_ERROR;
}
}
- if (access->is_write) {
+ if (mmio->is_write) {
spin_lock(&dist_lock);
u32 itargetsr =
mmio_read32(gicd_base + GICD_ITARGETSR + reg + offset);
- access->val &= access_mask;
+ mmio->value &= access_mask;
/* Combine with external SPIs */
- access->val |= (itargetsr & ~access_mask);
+ mmio->value |= (itargetsr & ~access_mask);
/* And do the access */
- arch_mmio_access(access);
+ mmio_perform_access(gicd_base, mmio);
spin_unlock(&dist_lock);
} else {
- arch_mmio_access(access);
- access->val &= access_mask;
+ mmio_perform_access(gicd_base, mmio);
+ mmio->value &= access_mask;
}
- return TRAP_HANDLED;
+ return MMIO_HANDLED;
}
-static int handle_sgir_access(struct per_cpu *cpu_data,
- struct mmio_access *access)
+static enum mmio_result handle_sgir_access(struct mmio_access *mmio)
{
struct sgi sgi;
- unsigned long val = access->val;
+ unsigned long val = mmio->value;
- if (!access->is_write)
- return TRAP_HANDLED;
+ if (!mmio->is_write)
+ return MMIO_HANDLED;
sgi.targets = (val >> 16) & 0xff;
sgi.routing_mode = (val >> 24) & 0x3;
sgi.aff3 = 0;
sgi.id = val & 0xf;
- return gic_handle_sgir_write(cpu_data, &sgi, false);
+ gic_handle_sgir_write(&sgi, false);
+ return MMIO_HANDLED;
}
/*
return 0;
}
-int gic_handle_sgir_write(struct per_cpu *cpu_data, struct sgi *sgi,
- bool virt_input)
+void gic_handle_sgir_write(struct sgi *sgi, bool virt_input)
{
+ struct per_cpu *cpu_data = this_cpu_data();
unsigned int cpu;
unsigned long targets;
unsigned int this_cpu = cpu_data->cpu_id;
if (sgi->routing_mode == 0 && !is_target)
continue;
- irqchip_set_pending(per_cpu(cpu), sgi->id, false);
+ irqchip_set_pending(per_cpu(cpu), sgi->id);
sgi->targets |= (1 << cpu);
}
/* Let the other CPUS inject their SGIs */
sgi->id = SGI_INJECT;
irqchip_send_sgi(sgi);
-
- return TRAP_HANDLED;
}
-int gic_handle_dist_access(struct per_cpu *cpu_data,
- struct mmio_access *access)
+enum mmio_result gic_handle_dist_access(void *arg, struct mmio_access *mmio)
{
- int ret;
- unsigned long reg = access->addr - (unsigned long)gicd_base;
+ unsigned long reg = mmio->address;
+ enum mmio_result ret;
switch (reg) {
case REG_RANGE(GICD_IROUTER, 1024, 8):
- ret = handle_irq_route(cpu_data, access,
- (reg - GICD_IROUTER) / 8);
+ ret = handle_irq_route(mmio, (reg - GICD_IROUTER) / 8);
break;
case REG_RANGE(GICD_ITARGETSR, 1024, 1):
- ret = handle_irq_target(cpu_data, access, reg - GICD_ITARGETSR);
+ ret = handle_irq_target(mmio, reg - GICD_ITARGETSR);
break;
case REG_RANGE(GICD_ICENABLER, 32, 4):
case REG_RANGE(GICD_ISPENDR, 32, 4):
case REG_RANGE(GICD_ICACTIVER, 32, 4):
case REG_RANGE(GICD_ISACTIVER, 32, 4):
- ret = restrict_bitmask_access(cpu_data, access,
- (reg & 0x7f) / 4, 1, true);
+ ret = restrict_bitmask_access(mmio, (reg & 0x7f) / 4, 1, true);
break;
case REG_RANGE(GICD_IGROUPR, 32, 4):
- ret = restrict_bitmask_access(cpu_data, access,
- (reg & 0x7f) / 4, 1, false);
+ ret = restrict_bitmask_access(mmio, (reg & 0x7f) / 4, 1, false);
break;
case REG_RANGE(GICD_ICFGR, 64, 4):
- ret = restrict_bitmask_access(cpu_data, access,
- (reg & 0xff) / 4, 2, false);
+ ret = restrict_bitmask_access(mmio, (reg & 0xff) / 4, 2, false);
break;
case REG_RANGE(GICD_IPRIORITYR, 255, 4):
- ret = restrict_bitmask_access(cpu_data, access,
- (reg & 0x3ff) / 4, 8, false);
+ ret = restrict_bitmask_access(mmio, (reg & 0x3ff) / 4, 8,
+ false);
break;
case GICD_SGIR:
- ret = handle_sgir_access(cpu_data, access);
+ ret = handle_sgir_access(mmio);
break;
case GICD_CTLR:
case REG_RANGE(GICD_PIDR4, 4, 4):
case REG_RANGE(GICD_CIDR0, 4, 4):
/* Allow read access, ignore write */
- ret = (access->is_write ? TRAP_HANDLED : TRAP_UNHANDLED);
- break;
-
+ if (!mmio->is_write)
+ mmio_perform_access(gicd_base, mmio);
+ /* fall through */
default:
/* Ignore access. */
- ret = TRAP_HANDLED;
- }
-
- /* The sub-handlers return TRAP_UNHANDLED to allow the access */
- if (ret == TRAP_UNHANDLED) {
- arch_mmio_access(access);
- ret = TRAP_HANDLED;
+ ret = MMIO_HANDLED;
}
return ret;