arm_write_sysreg(ICC_DIR_EL1, i);
}
+ /* Ensure all IPIs and the maintenance PPI are enabled. */
+ mmio_write32(gicr + GICR_ISENABLER,
+ 0x0000ffff | (1 << MAINTENANCE_IRQ));
+
/*
- * Disable all PPIs, ensure IPIs are enabled.
- * On shutdown, the root cell expects to find all its PPIs still enabled
- * when returning to the driver.
+ * Disable PPIs, except for the maintenance interrupt.
+ * On shutdown, the root cell expects to find all its PPIs still
+ * enabled - except for the maintenance interrupt we used.
*/
- if (!root_shutdown)
- mmio_write32(gicr + GICR_ICENABLER, 0xffff0000);
- mmio_write32(gicr + GICR_ISENABLER, 0x0000ffff);
+ mmio_write32(gicr + GICR_ICENABLER,
+ root_shutdown ? 1 << MAINTENANCE_IRQ :
+ 0xffff0000 & ~(1 << MAINTENANCE_IRQ));
if (root_shutdown) {
/* Restore the root config */
return -ENODEV;
}
- /* Ensure all IPIs are enabled */
- mmio_write32(redist_base + GICR_SGI_BASE + GICR_ISENABLER, 0x0000ffff);
+ /* Ensure all IPIs and the maintenance PPI are enabled. */
+ mmio_write32(redist_base + GICR_SGI_BASE + GICR_ISENABLER,
+ 0x0000ffff | (1 << MAINTENANCE_IRQ));
/*
* Set EOIMode to 1
gic_handle_sgir_write(&sgi, true);
}
+/*
+ * Handle a guest access to a distributor GICD_IROUTER<n> register.
+ * GICv3 uses a 64bit register IROUTER for each IRQ
+ *
+ * Returns MMIO_HANDLED when the access was emulated or deliberately
+ * ignored, MMIO_ERROR when a write targets a CPU outside the cell.
+ */
+enum mmio_result gic_handle_irq_route(struct mmio_access *mmio,
+ unsigned int irq)
+{
+ struct cell *cell = this_cell();
+ unsigned int cpu;
+
+ /*
+ * Ignore aff3 on AArch32 (return 0): a 32bit access to the upper
+ * word of the 64bit IROUTER register is not emulated.
+ * NOTE(review): assumes mmio->value was pre-zeroed by the MMIO
+ * dispatcher so reads of the upper word yield 0 — confirm at caller.
+ */
+ if (mmio->size == 4 && (mmio->address % 8))
+ return MMIO_HANDLED;
+
+ /* SGIs and PPIs are res0 */
+ if (!is_spi(irq))
+ return MMIO_HANDLED;
+
+ /*
+ * Ignore accesses to SPIs that do not belong to the cell. This isn't
+ * forbidden, because the guest driver may simply iterate over all
+ * registers at initialisation
+ */
+ if (!spi_in_cell(cell, irq - 32))
+ return MMIO_HANDLED;
+
+ /* Translate the virtual cpu id into the physical one */
+ if (mmio->is_write) {
+ mmio->value = arm_cpu_virt2phys(cell, mmio->value);
+ if (mmio->value == -1) {
+ /* The requested CPU is not assigned to this cell */
+ printk("Attempt to route IRQ%d outside of cell\n", irq);
+ return MMIO_ERROR;
+ }
+ /* Forward the translated routing value to the hardware */
+ mmio_perform_access(gicd_base, mmio);
+ } else {
+ /* Read back the physical route and present the virtual id */
+ cpu = mmio_read32(gicd_base + GICD_IROUTER + 8 * irq);
+ mmio->value = arm_cpu_phys2virt(cpu);
+ }
+ return MMIO_HANDLED;
+}
+
static void gic_eoi_irq(u32 irq_id, bool deactivate)
{
arm_write_sysreg(ICC_EOIR1_EL1, irq_id);
arm_write_sysreg(ICC_DIR_EL1, irq_id);
}
-static int gic_inject_irq(struct per_cpu *cpu_data, struct pending_irq *irq)
+static int gic_inject_irq(struct per_cpu *cpu_data, u16 irq_id)
{
int i;
int free_lr = -1;
* A strict phys->virt id mapping is used for SPIs, so this test
* should be sufficient.
*/
- if ((u32)lr == irq->virt_id)
- return -EINVAL;
+ if ((u32)lr == irq_id)
+ return -EEXIST;
}
- if (free_lr == -1) {
- u32 hcr;
- /*
- * All list registers are in use, trigger a maintenance
- * interrupt once they are available again.
- */
- arm_read_sysreg(ICH_HCR_EL2, hcr);
- hcr |= ICH_HCR_UIE;
- arm_write_sysreg(ICH_HCR_EL2, hcr);
-
+ if (free_lr == -1)
+ /* All list registers are in use */
return -EBUSY;
- }
- lr = irq->virt_id;
+ lr = irq_id;
/* Only group 1 interrupts */
lr |= ICH_LR_GROUP_BIT;
lr |= ICH_LR_PENDING;
- if (irq->hw) {
+ if (!is_sgi(irq_id)) {
lr |= ICH_LR_HW_BIT;
- lr |= (u64)irq->type.irq << ICH_LR_PHYS_ID_SHIFT;
+ lr |= (u64)irq_id << ICH_LR_PHYS_ID_SHIFT;
}
gic_write_lr(free_lr, lr);
return 0;
}
+/*
+ * Enable or disable the maintenance interrupt by toggling
+ * ICH_HCR_EL2.UIE, so the hypervisor gets notified once list
+ * registers become available again (e.g. after an injection
+ * attempt failed with -EBUSY because all LRs were in use).
+ */
+static void gicv3_enable_maint_irq(bool enable)
+{
+ u32 hcr;
+
+ arm_read_sysreg(ICH_HCR_EL2, hcr);
+ if (enable)
+ hcr |= ICH_HCR_UIE;
+ else
+ hcr &= ~ICH_HCR_UIE;
+ arm_write_sysreg(ICH_HCR_EL2, hcr);
+}
+
unsigned int irqchip_mmio_count_regions(struct cell *cell)
{
return 2;
}
-struct irqchip_ops gic_irqchip = {
+struct irqchip_ops irqchip = {
.init = gic_init,
.cpu_init = gic_cpu_init,
.cpu_reset = gic_cpu_reset,
.send_sgi = gic_send_sgi,
.handle_irq = gic_handle_irq,
.inject_irq = gic_inject_irq,
+ .enable_maint_irq = gicv3_enable_maint_irq,
.eoi_irq = gic_eoi_irq,
};