Merge remote-tracking branch 'kiszka/master'

diff --git a/hypervisor/arch/arm/gic-v3.c b/hypervisor/arch/arm/gic-v3.c
index aaff5f1baf03800c47b77463ad87e0f0f97b4d39..25e560dea92547c1cf0aee883fe932a844b1e6f5 100644
--- a/hypervisor/arch/arm/gic-v3.c
+++ b/hypervisor/arch/arm/gic-v3.c
@@ -33,35 +33,56 @@ static unsigned int gic_num_lr;
 static unsigned int gic_num_priority_bits;
 static u32 gic_version;
 
+extern void *gicd_base;
+extern unsigned int gicd_size;
 static void *gicr_base;
 static unsigned int gicr_size;
 
 static int gic_init(void)
 {
-       int err;
+       /* TODO: need to validate more? */
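+       /*
+        * The hypervisor relies on GICv3 affinity routing, so bail out if
+        * it is not enabled for the non-secure state (GICD_CTLR.ARE_NS).
+        */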
+       if (!(mmio_read32(gicd_base + GICD_CTLR) & GICD_CTLR_ARE_NS))
+               return trace_error(-EIO);
 
        /* FIXME: parse a dt */
        gicr_base = GICR_BASE;
        gicr_size = GICR_SIZE;
 
        /* Let the per-cpu code access the redistributors */
-       err = arch_map_device(gicr_base, gicr_base, gicr_size);
+       return arch_map_device(gicr_base, gicr_base, gicr_size);
+}
+
+static void gic_clear_pending_irqs(void)
+{
+       unsigned int n;
 
-       return err;
+       /* Clear list registers. */
+       for (n = 0; n < gic_num_lr; n++)
+               gic_write_lr(n, 0);
+
+       /* Clear active priority bits */
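+       /*
+        * The number of implemented ICH_AP1Rn_EL2 registers depends on the
+        * priority bits reported in ICH_VTR_EL2: 5 bits imply AP1R0 only,
+        * 6 bits AP1R0-1, and 7 bits all four registers.
+        */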
+       if (gic_num_priority_bits >= 5)
+               arm_write_sysreg(ICH_AP1R0_EL2, 0);
+       if (gic_num_priority_bits >= 6)
+               arm_write_sysreg(ICH_AP1R1_EL2, 0);
+       if (gic_num_priority_bits > 6) {
+               arm_write_sysreg(ICH_AP1R2_EL2, 0);
+               arm_write_sysreg(ICH_AP1R3_EL2, 0);
+       }
 }
 
-static int gic_cpu_reset(struct per_cpu *cpu_data)
+static int gic_cpu_reset(struct per_cpu *cpu_data, bool is_shutdown)
 {
        unsigned int i;
        void *gicr = cpu_data->gicr_base;
        unsigned long active;
+       bool root_shutdown = is_shutdown && (cpu_data->cell == &root_cell);
+       u32 ich_vmcr;
 
        if (gicr == 0)
                return -ENODEV;
 
-       /* Clear list registers */
-       for (i = 0; i < gic_num_lr; i++)
-               gic_write_lr(i, 0);
+       gic_clear_pending_irqs();
 
        gicr += GICR_SGI_BASE;
        active = mmio_read32(gicr + GICR_ICACTIVER);
@@ -71,22 +92,34 @@ static int gic_cpu_reset(struct per_cpu *cpu_data)
                        arm_write_sysreg(ICC_DIR_EL1, i);
        }
 
-       /* Disable all PPIs, ensure IPIs are enabled */
-       mmio_write32(gicr + GICR_ICENABLER, 0xffff0000);
-       mmio_write32(gicr + GICR_ISENABLER, 0x0000ffff);
+       /* Ensure all IPIs and the maintenance PPI are enabled. */
+       mmio_write32(gicr + GICR_ISENABLER,
+                    0x0000ffff | (1 << MAINTENANCE_IRQ));
 
-       /* Clear active priority bits */
-       if (gic_num_priority_bits >= 5)
-               arm_write_sysreg(ICH_AP1R0_EL2, 0);
-       if (gic_num_priority_bits >= 6)
-               arm_write_sysreg(ICH_AP1R1_EL2, 0);
-       if (gic_num_priority_bits > 6) {
-               arm_write_sysreg(ICH_AP1R2_EL2, 0);
-               arm_write_sysreg(ICH_AP1R3_EL2, 0);
+       /*
+        * Disable PPIs, except for the maintenance interrupt.
+        * On shutdown, the root cell expects to find all its PPIs still
+        * enabled - except for the maintenance interrupt we used.
+        */
+       mmio_write32(gicr + GICR_ICENABLER,
+                    root_shutdown ? 1 << MAINTENANCE_IRQ :
+                                    0xffff0000 & ~(1 << MAINTENANCE_IRQ));
+
+       if (root_shutdown) {
+               /* Restore the root config */
+               arm_read_sysreg(ICH_VMCR_EL2, ich_vmcr);
+
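+               /*
+                * ICH_VMCR_EL2.VEOIM reflects the EOImode the root cell
+                * chose for itself; if it ran with EOImode=0, revert the
+                * EOImode=1 setting that gic_cpu_init applied.
+                */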
+               if (!(ich_vmcr & ICH_VMCR_VEOIM)) {
+                       u32 icc_ctlr;
+                       arm_read_sysreg(ICC_CTLR_EL1, icc_ctlr);
+                       icc_ctlr &= ~ICC_CTLR_EOImode;
+                       arm_write_sysreg(ICC_CTLR_EL1, icc_ctlr);
+               }
+
+               arm_write_sysreg(ICH_HCR_EL2, 0);
        }
 
        arm_write_sysreg(ICH_VMCR_EL2, 0);
-       arm_write_sysreg(ICH_HCR_EL2, ICH_HCR_EN);
 
        return 0;
 }
@@ -123,8 +156,9 @@ static int gic_cpu_init(struct per_cpu *cpu_data)
                return -ENODEV;
        }
 
-       /* Ensure all IPIs are enabled */
-       mmio_write32(redist_base + GICR_SGI_BASE + GICR_ISENABLER, 0x0000ffff);
+       /* Ensure all IPIs and the maintenance PPI are enabled. */
+       mmio_write32(redist_base + GICR_SGI_BASE + GICR_ISENABLER,
+                    0x0000ffff | (1 << MAINTENANCE_IRQ));
 
        /*
         * Set EOIMode to 1
@@ -148,6 +182,13 @@ static int gic_cpu_init(struct per_cpu *cpu_data)
        gic_num_lr = (ich_vtr & 0xf) + 1;
        gic_num_priority_bits = (ich_vtr >> 29) + 1;
 
+       /*
+        * Clear pending virtual IRQs in case anything is left from previous
+        * use. Physically pending IRQs will be forwarded to Linux once we
+        * enable interrupts for the hypervisor.
+        */
+       gic_clear_pending_irqs();
+
        ich_vmcr = (cell_icc_pmr & ICC_PMR_MASK) << ICH_VMCR_VPMR_SHIFT;
        if (cell_icc_igrpen1 & ICC_IGRPEN1_EN)
                ich_vmcr |= ICH_VMCR_VENG1;
@@ -161,6 +202,79 @@ static int gic_cpu_init(struct per_cpu *cpu_data)
        return 0;
 }
 
+static void gic_adjust_irq_target(struct cell *cell, u16 irq_id)
+{
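+       /*
+        * If the IRQ is currently routed to a CPU outside the cell,
+        * redirect it to the first CPU of the cell.
+        */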
+       void *irouter = gicd_base + GICD_IROUTER + 8 * irq_id;
+       u32 route = mmio_read32(irouter);
+
+       if (!cell_owns_cpu(cell, route))
+               mmio_write32(irouter, first_cpu(cell->cpu_set));
+}
+
+static enum mmio_result gic_handle_redist_access(void *arg,
+                                                struct mmio_access *mmio)
+{
+       struct cell *cell = this_cell();
+       unsigned int cpu;
+       unsigned int virt_id;
+       void *virt_redist = 0;
+       void *phys_redist = 0;
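+       /* GICv4 redistributors have two extra 64K frames for VLPIs */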
+       unsigned int redist_size = (gic_version == 4) ? 0x40000 : 0x20000;
+       void *address = (void *)(mmio->address + (unsigned long)gicr_base);
+
+       /*
+        * The redistributor accessed by the cell is not the one stored in the
+        * per-cpu data, but the one associated with its virtual id. So we
+        * first need to translate the redistributor address.
+        */
+       for_each_cpu(cpu, cell->cpu_set) {
+               virt_id = arm_cpu_phys2virt(cpu);
+               virt_redist = per_cpu(virt_id)->gicr_base;
+               if (address >= virt_redist &&
+                   address < virt_redist + redist_size) {
+                       phys_redist = per_cpu(cpu)->gicr_base;
+                       break;
+               }
+       }
+
+       if (phys_redist == NULL)
+               return MMIO_ERROR;
+
+       mmio->address = address - virt_redist;
+
+       /* Emulate the ID registers; pass all other accesses through. */
+       if (!mmio->is_write) {
+               switch (mmio->address) {
+               case GICR_TYPER:
+                       if (virt_id == cell->arch.last_virt_id)
+                               mmio->value = GICR_TYPER_Last;
+                       else
+                               mmio->value = 0;
+                       /* AArch64 can use a readq for this register */
+                       if (mmio->size == 8)
+                               mmio->value |= (u64)virt_id << 32;
+
+                       return MMIO_HANDLED;
+               case GICR_TYPER + 4:
+                       /* Upper bits contain the affinity */
+                       mmio->value = virt_id;
+                       return MMIO_HANDLED;
+               }
+       }
+       mmio_perform_access(phys_redist, mmio);
+       return MMIO_HANDLED;
+}
+
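+/*
+ * Trap all cell accesses to the distributor and redistributor regions, so
+ * that they can be moderated by gic_handle_dist_access() and
+ * gic_handle_redist_access().
+ */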
+static int gic_cell_init(struct cell *cell)
+{
+       mmio_region_register(cell, (unsigned long)gicd_base, gicd_size,
+                            gic_handle_dist_access, NULL);
+       mmio_region_register(cell, (unsigned long)gicr_base, gicr_size,
+                            gic_handle_redist_access, NULL);
+
+       return 0;
+}
+
 static int gic_send_sgi(struct sgi *sgi)
 {
        u64 val;
@@ -193,56 +307,60 @@ static int gic_send_sgi(struct sgi *sgi)
        return 0;
 }
 
-int gicv3_handle_sgir_write(struct per_cpu *cpu_data, u64 sgir)
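+/*
+ * Emulate a guest write to ICC_SGI1R_EL1: extract the target list,
+ * affinity fields and SGI number, then let the common code forward or
+ * inject the SGI.
+ */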
+void gicv3_handle_sgir_write(u64 sgir)
 {
        struct sgi sgi;
-       struct cell *cell = cpu_data->cell;
-       unsigned int cpu, virt_id;
-       unsigned long this_cpu = cpu_data->cpu_id;
        unsigned long routing_mode = !!(sgir & ICC_SGIR_ROUTING_BIT);
-       unsigned long targets = sgir & ICC_SGIR_TARGET_MASK;
-       u32 irq = sgir >> ICC_SGIR_IRQN_SHIFT & 0xf;
 
        /* FIXME: clusters are not supported yet. */
-       sgi.targets = 0;
+       sgi.targets = sgir & ICC_SGIR_TARGET_MASK;
        sgi.routing_mode = routing_mode;
        sgi.aff1 = sgir >> ICC_SGIR_AFF1_SHIFT & 0xff;
        sgi.aff2 = sgir >> ICC_SGIR_AFF2_SHIFT & 0xff;
        sgi.aff3 = sgir >> ICC_SGIR_AFF3_SHIFT & 0xff;
-       sgi.id = SGI_INJECT;
-
-       for_each_cpu_except(cpu, cell->cpu_set, this_cpu) {
-               virt_id = arm_cpu_phys2virt(cpu);
-
-               if (routing_mode == 0 && !test_bit(virt_id, &targets))
-                       continue;
-               else if (routing_mode == 1 && cpu == this_cpu)
-                       continue;
-
-               irqchip_set_pending(per_cpu(cpu), irq, false);
-               sgi.targets |= (1 << cpu);
-       }
+       sgi.id = sgir >> ICC_SGIR_IRQN_SHIFT & 0xf;
 
-       /* Let the other CPUS inject their SGIs */
-       gic_send_sgi(&sgi);
-
-       return TRAP_HANDLED;
+       gic_handle_sgir_write(&sgi, true);
 }
 
 /*
- * Handle the maintenance interrupt, the rest is injected into the cell.
- * Return true when the IRQ has been handled by the hyp.
+ * GICv3 uses one 64-bit IROUTER register per IRQ.
  */
-static bool arch_handle_phys_irq(struct per_cpu *cpu_data, u32 irqn)
+enum mmio_result gic_handle_irq_route(struct mmio_access *mmio,
+                                     unsigned int irq)
 {
-       if (irqn == MAINTENANCE_IRQ) {
-               irqchip_inject_pending(cpu_data);
-               return true;
-       }
+       struct cell *cell = this_cell();
+       unsigned int cpu;
 
-       irqchip_set_pending(cpu_data, irqn, true);
+       /* Ignore aff3 on AArch32 (return 0) */
+       if (mmio->size == 4 && (mmio->address % 8))
+               return MMIO_HANDLED;
 
-       return false;
+       /* SGIs and PPIs are res0 */
+       if (!is_spi(irq))
+               return MMIO_HANDLED;
+
+       /*
+        * Ignore accesses to SPIs that do not belong to the cell. This isn't
+        * forbidden, because the guest driver may simply iterate over all
+        * registers at initialisation.
+        */
+       if (!irqchip_irq_in_cell(cell, irq))
+               return MMIO_HANDLED;
+
+       /* Translate the virtual cpu id into the physical one */
+       if (mmio->is_write) {
+               mmio->value = arm_cpu_virt2phys(cell, mmio->value);
+               if (mmio->value == -1) {
+                       printk("Attempt to route IRQ%d outside of cell\n", irq);
+                       return MMIO_ERROR;
+               }
+               mmio_perform_access(gicd_base, mmio);
+       } else {
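+               /* Reads return the virtual id of the current target CPU */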
+               cpu = mmio_read32(gicd_base + GICD_IROUTER + 8 * irq);
+               mmio->value = arm_cpu_phys2virt(cpu);
+       }
+       return MMIO_HANDLED;
 }
 
 static void gic_eoi_irq(u32 irq_id, bool deactivate)
@@ -252,37 +370,7 @@ static void gic_eoi_irq(u32 irq_id, bool deactivate)
                arm_write_sysreg(ICC_DIR_EL1, irq_id);
 }
 
-static void gic_handle_irq(struct per_cpu *cpu_data)
-{
-       bool handled = false;
-       u32 irq_id;
-
-       while (1) {
-               /* Read ICC_IAR1: set 'active' state */
-               arm_read_sysreg(ICC_IAR1_EL1, irq_id);
-
-               if (irq_id == 0x3ff) /* Spurious IRQ */
-                       break;
-
-               /* Handle IRQ */
-               if (is_sgi(irq_id)) {
-                       arch_handle_sgi(cpu_data, irq_id);
-                       handled = true;
-               } else {
-                       handled = arch_handle_phys_irq(cpu_data, irq_id);
-               }
-
-               /*
-                * Write ICC_EOIR1: drop priority, but stay active if handled is
-                * false.
-                * This allows to not be re-interrupted by a level-triggered
-                * interrupt that needs handling in the guest (e.g. timer)
-                */
-               gic_eoi_irq(irq_id, handled);
-       }
-}
-
-static int gic_inject_irq(struct per_cpu *cpu_data, struct pending_irq *irq)
+static int gic_inject_irq(struct per_cpu *cpu_data, u16 irq_id)
 {
        int i;
        int free_lr = -1;
@@ -308,32 +396,21 @@ static int gic_inject_irq(struct per_cpu *cpu_data, struct pending_irq *irq)
                 * A strict phys->virt id mapping is used for SPIs, so this test
                 * should be sufficient.
                 */
-               if ((u32)lr == irq->virt_id)
-                       return -EINVAL;
+               if ((u32)lr == irq_id)
+                       return -EEXIST;
        }
 
-       if (free_lr == -1) {
-               u32 hcr;
-               /*
-                * All list registers are in use, trigger a maintenance
-                * interrupt once they are available again.
-                */
-               arm_read_sysreg(ICH_HCR_EL2, hcr);
-               hcr |= ICH_HCR_UIE;
-               arm_write_sysreg(ICH_HCR_EL2, hcr);
-
+       if (free_lr == -1)
+               /* All list registers are in use */
                return -EBUSY;
-       }
 
-       lr = irq->virt_id;
+       lr = irq_id;
        /* Only group 1 interrupts */
        lr |= ICH_LR_GROUP_BIT;
        lr |= ICH_LR_PENDING;
-       if (irq->hw) {
+       if (!is_sgi(irq_id)) {
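+               /*
+                * Hardware interrupt: a guest EOI will also deactivate the
+                * physical interrupt with this ID.
+                */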
                lr |= ICH_LR_HW_BIT;
-               lr |= (u64)irq->type.irq << ICH_LR_PHYS_ID_SHIFT;
-       } else if (irq->type.sgi.maintenance) {
-               lr |= ICH_LR_SGI_EOI;
+               lr |= (u64)irq_id << ICH_LR_PHYS_ID_SHIFT;
        }
 
        gic_write_lr(free_lr, lr);
@@ -341,85 +418,32 @@ static int gic_inject_irq(struct per_cpu *cpu_data, struct pending_irq *irq)
        return 0;
 }
 
-static int gic_handle_redist_access(struct per_cpu *cpu_data,
-                                   struct mmio_access *access)
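+/*
+ * Toggle ICH_HCR_EL2.UIE: when set, a maintenance interrupt fires as soon
+ * as no more than one list register is still in use, signaling that
+ * pending IRQs can be injected again.
+ */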
+static void gicv3_enable_maint_irq(bool enable)
 {
-       unsigned int cpu;
-       unsigned int reg;
-       int ret = TRAP_UNHANDLED;
-       unsigned int virt_id;
-       void *virt_redist = 0;
-       void *phys_redist = 0;
-       unsigned int redist_size = (gic_version == 4) ? 0x40000 : 0x20000;
-       void *address = (void *)access->addr;
-
-       /*
-        * The redistributor accessed by the cell is not the one stored in these
-        * cpu_datas, but the one associated to its virtual id. So we first
-        * need to translate the redistributor address.
-        */
-       for_each_cpu(cpu, cpu_data->cell->cpu_set) {
-               virt_id = arm_cpu_phys2virt(cpu);
-               virt_redist = per_cpu(virt_id)->gicr_base;
-               if (address >= virt_redist && address < virt_redist
-                               + redist_size) {
-                       phys_redist = per_cpu(cpu)->gicr_base;
-                       break;
-               }
-       }
+       u32 hcr;
 
-       if (phys_redist == NULL)
-               return TRAP_FORBIDDEN;
-
-       reg = address - virt_redist;
-       access->addr = (unsigned long)phys_redist + reg;
-
-       /* Change the ID register, all other accesses are allowed. */
-       if (!access->is_write) {
-               switch (reg) {
-               case GICR_TYPER:
-                       if (virt_id == cpu_data->cell->arch.last_virt_id)
-                               access->val = GICR_TYPER_Last;
-                       else
-                               access->val = 0;
-                       /* AArch64 can use a writeq for this register */
-                       if (access->size == 8)
-                               access->val |= (u64)virt_id << 32;
-
-                       ret = TRAP_HANDLED;
-                       break;
-               case GICR_TYPER + 4:
-                       /* Upper bits contain the affinity */
-                       access->val = virt_id;
-                       ret = TRAP_HANDLED;
-                       break;
-               }
-       }
-       if (ret == TRAP_HANDLED)
-               return ret;
-
-       arch_mmio_access(access);
-       return TRAP_HANDLED;
+       arm_read_sysreg(ICH_HCR_EL2, hcr);
+       if (enable)
+               hcr |= ICH_HCR_UIE;
+       else
+               hcr &= ~ICH_HCR_UIE;
+       arm_write_sysreg(ICH_HCR_EL2, hcr);
 }
 
-static int gic_mmio_access(struct per_cpu *cpu_data,
-                          struct mmio_access *access)
+unsigned int irqchip_mmio_count_regions(struct cell *cell)
 {
-       void *address = (void *)access->addr;
-
-       if (address >= gicr_base && address < gicr_base + gicr_size)
-               return gic_handle_redist_access(cpu_data, access);
-
-       return TRAP_UNHANDLED;
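+       /* Distributor and redistributor regions (see gic_cell_init) */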
+       return 2;
 }
 
-struct irqchip_ops gic_irqchip = {
+struct irqchip_ops irqchip = {
        .init = gic_init,
        .cpu_init = gic_cpu_init,
        .cpu_reset = gic_cpu_reset,
+       .cell_init = gic_cell_init,
+       .adjust_irq_target = gic_adjust_irq_target,
        .send_sgi = gic_send_sgi,
        .handle_irq = gic_handle_irq,
        .inject_irq = gic_inject_irq,
+       .enable_maint_irq = gicv3_enable_maint_irq,
        .eoi_irq = gic_eoi_irq,
-       .mmio_access = gic_mmio_access,
 };