arm: move the handle_irq_route function to the GICv3 module

diff --git a/hypervisor/arch/arm/gic-v3.c b/hypervisor/arch/arm/gic-v3.c
index 1c008e34d7f7ef9b9aa97b2596aead9e793bc3a0..e7d5e6612b393fae604267aa2bd1eff6cc698ec4 100644
--- a/hypervisor/arch/arm/gic-v3.c
+++ b/hypervisor/arch/arm/gic-v3.c
@@ -92,14 +92,18 @@ static int gic_cpu_reset(struct per_cpu *cpu_data, bool is_shutdown)
                        arm_write_sysreg(ICC_DIR_EL1, i);
        }
 
+       /* Ensure all IPIs and the maintenance PPI are enabled. */
+       mmio_write32(gicr + GICR_ISENABLER,
+                    0x0000ffff | (1 << MAINTENANCE_IRQ));
+
        /*
-        * Disable all PPIs, ensure IPIs are enabled.
-        * On shutdown, the root cell expects to find all its PPIs still enabled
-        * when returning to the driver.
+        * Disable PPIs, except for the maintenance interrupt.
+        * On shutdown, the root cell expects to find all its PPIs still
+        * enabled - except for the maintenance interrupt we used.
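+        * (GICR_ICENABLER has write-one-to-disable semantics, so set bits
+        * in the mask below disable exactly those PPIs.)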
         */
-       if (!root_shutdown)
-               mmio_write32(gicr + GICR_ICENABLER, 0xffff0000);
-       mmio_write32(gicr + GICR_ISENABLER, 0x0000ffff);
+       mmio_write32(gicr + GICR_ICENABLER,
+                    root_shutdown ? 1 << MAINTENANCE_IRQ :
+                                    0xffff0000 & ~(1 << MAINTENANCE_IRQ));
 
        if (root_shutdown) {
                /* Restore the root config */
@@ -152,8 +156,9 @@ static int gic_cpu_init(struct per_cpu *cpu_data)
                return -ENODEV;
        }
 
-       /* Ensure all IPIs are enabled */
-       mmio_write32(redist_base + GICR_SGI_BASE + GICR_ISENABLER, 0x0000ffff);
+       /* Ensure all IPIs and the maintenance PPI are enabled. */
+       mmio_write32(redist_base + GICR_SGI_BASE + GICR_ISENABLER,
+                    0x0000ffff | (1 << MAINTENANCE_IRQ));
 
        /*
         * Set EOIMode to 1
@@ -213,9 +218,69 @@ static void gic_route_spis(struct cell *config_cell, struct cell *dest_cell)
        }
 }
 
+static enum mmio_result gic_handle_redist_access(void *arg,
+                                                struct mmio_access *mmio)
+{
+       struct cell *cell = this_cell();
+       unsigned int cpu;
+       unsigned int virt_id;
+       void *virt_redist = 0;
+       void *phys_redist = 0;
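+       /* A redistributor spans two 64K frames (RD + SGI); GICv4 adds two
+        * more for virtual LPIs, doubling the per-CPU stride. */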
+       unsigned int redist_size = (gic_version == 4) ? 0x40000 : 0x20000;
+       void *address = (void *)(mmio->address + (unsigned long)gicr_base);
+
+       /*
+        * The redistributor accessed by the cell is not the one stored in
+        * its cpu_data, but the one associated with its virtual id. We
+        * therefore need to translate the redistributor address first.
+        */
+       for_each_cpu(cpu, cell->cpu_set) {
+               virt_id = arm_cpu_phys2virt(cpu);
+               virt_redist = per_cpu(virt_id)->gicr_base;
+               if (address >= virt_redist && address < virt_redist
+                               + redist_size) {
+                       phys_redist = per_cpu(cpu)->gicr_base;
+                       break;
+               }
+       }
+
+       if (phys_redist == NULL)
+               return MMIO_ERROR;
+
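+       /* Rebase the access to an offset within the matching redistributor */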
+       mmio->address = address - virt_redist;
+
+       /* Emulate reads of the ID register; all other accesses are allowed. */
+       if (!mmio->is_write) {
+               switch (mmio->address) {
+               case GICR_TYPER:
+                       if (virt_id == cell->arch.last_virt_id)
+                               mmio->value = GICR_TYPER_Last;
+                       else
+                               mmio->value = 0;
+                       /* AArch64 can use a writeq for this register */
+                       if (mmio->size == 8)
+                               mmio->value |= (u64)virt_id << 32;
+
+                       return MMIO_HANDLED;
+               case GICR_TYPER + 4:
+                       /* Upper bits contain the affinity */
+                       mmio->value = virt_id;
+                       return MMIO_HANDLED;
+               }
+       }
+       mmio_perform_access(phys_redist, mmio);
+       return MMIO_HANDLED;
+}
+
 static int gic_cell_init(struct cell *cell)
 {
        gic_route_spis(cell, cell);
+
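+       /* Trap all cell accesses to the distributor and redistributor regions */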
+       mmio_region_register(cell, (unsigned long)gicd_base, gicd_size,
+                            gic_handle_dist_access, NULL);
+       mmio_region_register(cell, (unsigned long)gicr_base, gicr_size,
+                            gic_handle_redist_access, NULL);
+
        return 0;
 }
 
@@ -257,7 +322,7 @@ static int gic_send_sgi(struct sgi *sgi)
        return 0;
 }
 
-int gicv3_handle_sgir_write(struct per_cpu *cpu_data, u64 sgir)
+void gicv3_handle_sgir_write(u64 sgir)
 {
        struct sgi sgi;
        unsigned long routing_mode = !!(sgir & ICC_SGIR_ROUTING_BIT);
@@ -270,7 +335,47 @@ int gicv3_handle_sgir_write(struct per_cpu *cpu_data, u64 sgir)
        sgi.aff3 = sgir >> ICC_SGIR_AFF3_SHIFT & 0xff;
        sgi.id = sgir >> ICC_SGIR_IRQN_SHIFT & 0xf;
 
-       return gic_handle_sgir_write(cpu_data, &sgi, true);
+       gic_handle_sgir_write(&sgi, true);
+}
+
+/*
+ * GICv3 uses one 64-bit IROUTER register per IRQ.
+ */
+enum mmio_result gic_handle_irq_route(struct mmio_access *mmio,
+                                     unsigned int irq)
+{
+       struct cell *cell = this_cell();
+       unsigned int cpu;
+
+       /* Ignore aff3 on AArch32 (return 0) */
+       if (mmio->size == 4 && (mmio->address % 8))
+               return MMIO_HANDLED;
+
+       /* SGIs and PPIs are res0 */
+       if (!is_spi(irq))
+               return MMIO_HANDLED;
+
+       /*
+        * Ignore accesses to SPIs that do not belong to the cell. This isn't
+        * forbidden, because the guest driver may simply iterate over all
+        * registers during initialisation.
+        */
+       if (!spi_in_cell(cell, irq - 32))
+               return MMIO_HANDLED;
+
+       /* Translate the virtual cpu id into the physical one */
+       if (mmio->is_write) {
+               mmio->value = arm_cpu_virt2phys(cell, mmio->value);
+               if (mmio->value == -1) {
+                       printk("Attempt to route IRQ%d outside of cell\n", irq);
+                       return MMIO_ERROR;
+               }
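+               /* Forward the translated route to the physical distributor */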
+               mmio_perform_access(gicd_base, mmio);
+       } else {
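+               /* Reads return the virtual id of the currently routed CPU */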
+               cpu = mmio_read32(gicd_base + GICD_IROUTER + 8 * irq);
+               mmio->value = arm_cpu_phys2virt(cpu);
+       }
+       return MMIO_HANDLED;
 }
 
 static void gic_eoi_irq(u32 irq_id, bool deactivate)
@@ -280,7 +385,7 @@ static void gic_eoi_irq(u32 irq_id, bool deactivate)
                arm_write_sysreg(ICC_DIR_EL1, irq_id);
 }
 
-static int gic_inject_irq(struct per_cpu *cpu_data, struct pending_irq *irq)
+static int gic_inject_irq(struct per_cpu *cpu_data, u16 irq_id)
 {
        int i;
        int free_lr = -1;
@@ -306,32 +411,21 @@ static int gic_inject_irq(struct per_cpu *cpu_data, struct pending_irq *irq)
                 * A strict phys->virt id mapping is used for SPIs, so this test
                 * should be sufficient.
                 */
-               if ((u32)lr == irq->virt_id)
-                       return -EINVAL;
+               if ((u32)lr == irq_id)
+                       return -EEXIST;
        }
 
-       if (free_lr == -1) {
-               u32 hcr;
-               /*
-                * All list registers are in use, trigger a maintenance
-                * interrupt once they are available again.
-                */
-               arm_read_sysreg(ICH_HCR_EL2, hcr);
-               hcr |= ICH_HCR_UIE;
-               arm_write_sysreg(ICH_HCR_EL2, hcr);
-
+       if (free_lr == -1)
+               /* All list registers are in use */
                return -EBUSY;
-       }
 
-       lr = irq->virt_id;
+       lr = irq_id;
        /* Only group 1 interrupts */
        lr |= ICH_LR_GROUP_BIT;
        lr |= ICH_LR_PENDING;
-       if (irq->hw) {
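+       /*
+        * Inject SPIs and PPIs as hardware interrupts, so that the guest's
+        * deactivation also deactivates the physical IRQ. SGIs stay virtual.
+        */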
+       if (!is_sgi(irq_id)) {
                lr |= ICH_LR_HW_BIT;
-               lr |= (u64)irq->type.irq << ICH_LR_PHYS_ID_SHIFT;
-       } else if (irq->type.sgi.maintenance) {
-               lr |= ICH_LR_SGI_EOI;
+               lr |= (u64)irq_id << ICH_LR_PHYS_ID_SHIFT;
        }
 
        gic_write_lr(free_lr, lr);
@@ -339,82 +433,24 @@ static int gic_inject_irq(struct per_cpu *cpu_data, struct pending_irq *irq)
        return 0;
 }
 
-static int gic_handle_redist_access(struct per_cpu *cpu_data,
-                                   struct mmio_access *mmio)
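+/*
+ * With ICH_HCR_EL2.UIE set, the GIC raises the maintenance interrupt once
+ * at most one list register entry remains valid (underflow).
+ */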
+static void gicv3_enable_maint_irq(bool enable)
 {
-       unsigned int cpu;
-       unsigned int reg;
-       int ret = TRAP_UNHANDLED;
-       unsigned int virt_id;
-       void *virt_redist = 0;
-       void *phys_redist = 0;
-       unsigned int redist_size = (gic_version == 4) ? 0x40000 : 0x20000;
-       void *address = (void *)mmio->address;
+       u32 hcr;
 
-       /*
-        * The redistributor accessed by the cell is not the one stored in these
-        * cpu_datas, but the one associated to its virtual id. So we first
-        * need to translate the redistributor address.
-        */
-       for_each_cpu(cpu, cpu_data->cell->cpu_set) {
-               virt_id = arm_cpu_phys2virt(cpu);
-               virt_redist = per_cpu(virt_id)->gicr_base;
-               if (address >= virt_redist && address < virt_redist
-                               + redist_size) {
-                       phys_redist = per_cpu(cpu)->gicr_base;
-                       break;
-               }
-       }
-
-       if (phys_redist == NULL)
-               return TRAP_FORBIDDEN;
-
-       reg = address - virt_redist;
-       mmio->address = (unsigned long)phys_redist + reg;
-
-       /* Change the ID register, all other accesses are allowed. */
-       if (!mmio->is_write) {
-               switch (reg) {
-               case GICR_TYPER:
-                       if (virt_id == cpu_data->cell->arch.last_virt_id)
-                               mmio->value = GICR_TYPER_Last;
-                       else
-                               mmio->value = 0;
-                       /* AArch64 can use a writeq for this register */
-                       if (mmio->size == 8)
-                               mmio->value |= (u64)virt_id << 32;
-
-                       ret = TRAP_HANDLED;
-                       break;
-               case GICR_TYPER + 4:
-                       /* Upper bits contain the affinity */
-                       mmio->value = virt_id;
-                       ret = TRAP_HANDLED;
-                       break;
-               }
-       }
-       if (ret == TRAP_HANDLED)
-               return ret;
-
-       arm_mmio_perform_access(mmio);
-       return TRAP_HANDLED;
+       arm_read_sysreg(ICH_HCR_EL2, hcr);
+       if (enable)
+               hcr |= ICH_HCR_UIE;
+       else
+               hcr &= ~ICH_HCR_UIE;
+       arm_write_sysreg(ICH_HCR_EL2, hcr);
 }
 
-static int gic_mmio_access(struct per_cpu *cpu_data,
-                          struct mmio_access *mmio)
+unsigned int irqchip_mmio_count_regions(struct cell *cell)
 {
-       void *address = (void *)mmio->address;
-
-       if (address >= gicd_base && address < gicd_base + gicd_size)
-               return gic_handle_dist_access(cpu_data, mmio);
-
-       if (address >= gicr_base && address < gicr_base + gicr_size)
-               return gic_handle_redist_access(cpu_data, mmio);
-
-       return TRAP_UNHANDLED;
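+       /* One region for the distributor, one covering all redistributors */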
+       return 2;
 }
 
-struct irqchip_ops gic_irqchip = {
+struct irqchip_ops irqchip = {
        .init = gic_init,
        .cpu_init = gic_cpu_init,
        .cpu_reset = gic_cpu_reset,
@@ -423,6 +459,6 @@ struct irqchip_ops gic_irqchip = {
        .send_sgi = gic_send_sgi,
        .handle_irq = gic_handle_irq,
        .inject_irq = gic_inject_irq,
+       .enable_maint_irq = gicv3_enable_maint_irq,
        .eoi_irq = gic_eoi_irq,
-       .mmio_access = gic_mmio_access,
 };