mmio_write32(gicc_base + GICC_DIR, i);
}
- /* Disable PPIs if necessary */
- if (!root_shutdown)
- mmio_write32(gicd_base + GICD_ICENABLER, 0xffff0000);
- /* Ensure IPIs are enabled */
- mmio_write32(gicd_base + GICD_ISENABLER, 0x0000ffff);
+ /* Ensure all IPIs and the maintenance PPI are enabled */
+ mmio_write32(gicd_base + GICD_ISENABLER,
+ 0x0000ffff | (1 << MAINTENANCE_IRQ));
+
+ /*
+ * Disable PPIs, except for the maintenance interrupt.
+ * On shutdown, the root cell expects to find all its PPIs still
+ * enabled - except for the maintenance interrupt we used.
+ */
+ mmio_write32(gicd_base + GICD_ICENABLER,
+ root_shutdown ? 1 << MAINTENANCE_IRQ :
+ 0xffff0000 & ~(1 << MAINTENANCE_IRQ));
if (is_shutdown)
mmio_write32(gich_base + GICH_HCR, 0);
u32 vtr, vmcr;
u32 cell_gicc_ctlr, cell_gicc_pmr;
- /* Ensure all IPIs are enabled */
- mmio_write32(gicd_base + GICD_ISENABLER, 0x0000ffff);
+ /* Ensure all IPIs and the maintenance PPI are enabled. */
+ mmio_write32(gicd_base + GICD_ISENABLER,
+ 0x0000ffff | (1 << MAINTENANCE_IRQ));
cell_gicc_ctlr = mmio_read32(gicc_base + GICC_CTLR);
cell_gicc_pmr = mmio_read32(gicc_base + GICC_PMR);
{
int err;
- /*
- * target_cpu_map has not been populated by all available CPUs when the
- * setup code initialises the root cell. It is assumed that the kernel
- * already has configured all its SPIs anyway, and that it will redirect
- * them when unplugging a CPU.
- */
- if (cell != &root_cell)
- gic_target_spis(cell, cell);
-
/*
* Let the guest access the virtual CPU interface instead of the
* physical one.
{
paging_destroy(&cell->arch.mm, (unsigned long)gicc_base, gicc_size,
PAGING_NON_COHERENT);
- /* Reset interrupt routing of the cell's spis */
- gic_target_spis(cell, &root_cell);
+}
+
+/*
+ * Retarget irq_id to a CPU inside the given cell if its current target
+ * set does not intersect the cell's CPU set.
+ */
+static void gic_adjust_irq_target(struct cell *cell, u16 irq_id)
+{
+	/*
+	 * GICD_ITARGETSR packs the 8-bit target masks of 4 interrupts into
+	 * each 32-bit register; mask the low 2 bits to address the register
+	 * that contains irq_id's byte.
+	 */
+	void *itargetsr = gicd_base + GICD_ITARGETSR + (irq_id & ~0x3);
+	u32 targets = mmio_read32(itargetsr);
+	/*
+	 * Each interrupt occupies one byte, so the shift is the byte index
+	 * times 8 - not just irq_id % 4, which would shift by 0-3 bits and
+	 * touch the wrong field for all but the first IRQ in the register.
+	 */
+	unsigned int shift = (irq_id % 4) * 8;
+
+	/* Nothing to do if a current target CPU already belongs to cell. */
+	if (gic_targets_in_cell(cell, (u8)(targets >> shift)))
+		return;
+
+	/* Redirect the interrupt to the first CPU of the cell. */
+	targets &= ~(0xff << shift);
+	targets |= target_cpu_map[first_cpu(cell->cpu_set)] << shift;
+
+	mmio_write32(itargetsr, targets);
+}
static int gic_send_sgi(struct sgi *sgi)
return 0;
}
-static int gic_inject_irq(struct per_cpu *cpu_data, struct pending_irq *irq)
+static int gic_inject_irq(struct per_cpu *cpu_data, u16 irq_id)
{
int i;
int first_free = -1;
/* Check that there is no overlapping */
lr = gic_read_lr(i);
- if ((lr & GICH_LR_VIRT_ID_MASK) == irq->virt_id)
- return -EINVAL;
+ if ((lr & GICH_LR_VIRT_ID_MASK) == irq_id)
+ return -EEXIST;
}
- if (first_free == -1) {
- /* Enable maintenance IRQ */
- u32 hcr;
- hcr = mmio_read32(gich_base + GICH_HCR);
- hcr |= GICH_HCR_UIE;
- mmio_write32(gich_base + GICH_HCR, hcr);
-
+ if (first_free == -1)
return -EBUSY;
- }
/* Inject group 0 interrupt (seen as IRQ by the guest) */
- lr = irq->virt_id;
+ lr = irq_id;
lr |= GICH_LR_PENDING_BIT;
- if (!is_sgi(irq->virt_id)) {
+ if (!is_sgi(irq_id)) {
lr |= GICH_LR_HW_BIT;
- lr |= irq->type.irq << GICH_LR_PHYS_ID_SHIFT;
- } else {
- lr |= irq->type.sgi.cpuid << GICH_LR_CPUID_SHIFT;
+ lr |= (u32)irq_id << GICH_LR_PHYS_ID_SHIFT;
}
gic_write_lr(first_free, lr);
return 0;
}
+/* Set or clear GICH_HCR.UIE, the source of the maintenance IRQ. */
+static void gic_enable_maint_irq(bool enable)
+{
+	u32 hcr = mmio_read32(gich_base + GICH_HCR);
+
+	hcr = enable ? hcr | GICH_HCR_UIE : hcr & ~GICH_HCR_UIE;
+	mmio_write32(gich_base + GICH_HCR, hcr);
+}
+
+/*
+ * Handle a guest access to the IRQ routing registers. GICv2 has no such
+ * registers (routing is done via GICD_ITARGETSR), so the access is simply
+ * swallowed; mmio and irq are unused here.
+ */
+enum mmio_result gic_handle_irq_route(struct mmio_access *mmio,
+				      unsigned int irq)
+{
+	/* doesn't exist in v2 - ignore access */
+	return MMIO_HANDLED;
+}
+
/*
 * Number of MMIO regions this irqchip registers per cell.
 * NOTE(review): the v2 driver uses a single trapped region - presumably the
 * distributor; confirm against the caller that registers the regions.
 */
unsigned int irqchip_mmio_count_regions(struct cell *cell)
{
return 1;
}
-struct irqchip_ops gic_irqchip = {
+/* GICv2 backend for the generic irqchip interface. */
+struct irqchip_ops irqchip = {
.init = gic_init,
.cpu_init = gic_cpu_init,
.cpu_reset = gic_cpu_reset,
.cell_init = gic_cell_init,
.cell_exit = gic_cell_exit,
+	/* Retargets an IRQ into the cell's CPU set via GICD_ITARGETSR. */
+	.adjust_irq_target = gic_adjust_irq_target,
.send_sgi = gic_send_sgi,
.handle_irq = gic_handle_irq,
.inject_irq = gic_inject_irq,
+	/* Enables/disables the maintenance IRQ source (GICH_HCR_UIE). */
+	.enable_maint_irq = gic_enable_maint_irq,
.eoi_irq = gic_eoi_irq,
};