/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <jailhouse/control.h>
#include <jailhouse/mmio.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/types.h>
#include <asm/control.h>
#include <asm/gic_common.h>
#include <asm/irqchip.h>
#include <asm/platform.h>
#include <asm/setup.h>
/*
 * This implementation assumes that the kernel driver already initialised most
 * of the GIC.
 * There is almost no instruction barrier, since IRQs are always disabled in the
 * hyp, and ERET serves as the context synchronization event.
 */
/* Number of implemented List Registers, read from ICH_VTR_EL2 at cpu init. */
static unsigned int gic_num_lr;

/* Base address and size of the redistributor region (GICR_BASE/GICR_SIZE). */
static void *gicr_base;
static unsigned int gicr_size;
36 static int gic_init(void)
40 /* FIXME: parse a dt */
41 gicr_base = GICR_BASE;
42 gicr_size = GICR_SIZE;
44 /* Let the per-cpu code access the redistributors */
45 err = arch_map_device(gicr_base, gicr_base, gicr_size);
50 static int gic_cpu_init(struct per_cpu *cpu_data)
55 u32 cell_icc_ctlr, cell_icc_pmr, cell_icc_igrpen1;
58 void *redist_base = gicr_base;
60 /* Find redistributor */
62 pidr = mmio_read32(redist_base + GICR_PIDR2);
63 gic_version = GICR_PIDR2_ARCH(pidr);
64 if (gic_version != 3 && gic_version != 4)
67 typer = mmio_read64(redist_base + GICR_TYPER);
68 if ((typer >> 32) == cpu_data->cpu_id) {
69 cpu_data->gicr_base = redist_base;
73 redist_base += 0x20000;
75 redist_base += 0x20000;
76 } while (!(typer & GICR_TYPER_Last));
78 if (cpu_data->gicr_base == 0) {
79 printk("GIC: No redist found for CPU%d\n", cpu_data->cpu_id);
83 /* Ensure all IPIs are enabled */
84 mmio_write32(redist_base + GICR_SGI_BASE + GICR_ISENABLER, 0x0000ffff);
88 * This allow to drop the priority of level-triggered interrupts without
89 * deactivating them, and thus ensure that they won't be immediately
90 * re-triggered. (e.g. timer)
91 * They can then be injected into the guest using the LR.HW bit, and
92 * will be deactivated once the guest does an EOI after handling the
95 arm_read_sysreg(ICC_CTLR_EL1, cell_icc_ctlr);
96 arm_write_sysreg(ICC_CTLR_EL1, ICC_CTLR_EOImode);
98 arm_read_sysreg(ICC_PMR_EL1, cell_icc_pmr);
99 arm_write_sysreg(ICC_PMR_EL1, ICC_PMR_DEFAULT);
101 arm_read_sysreg(ICC_IGRPEN1_EL1, cell_icc_igrpen1);
102 arm_write_sysreg(ICC_IGRPEN1_EL1, ICC_IGRPEN1_EN);
104 arm_read_sysreg(ICH_VTR_EL2, ich_vtr);
105 gic_num_lr = (ich_vtr & 0xf) + 1;
107 ich_vmcr = (cell_icc_pmr & ICC_PMR_MASK) << ICH_VMCR_VPMR_SHIFT;
108 if (cell_icc_igrpen1 & ICC_IGRPEN1_EN)
109 ich_vmcr |= ICH_VMCR_VENG1;
110 if (cell_icc_ctlr & ICC_CTLR_EOImode)
111 ich_vmcr |= ICH_VMCR_VEOIM;
112 arm_write_sysreg(ICH_VMCR_EL2, ich_vmcr);
114 /* After this, the cells access the virtual interface of the GIC. */
115 arm_write_sysreg(ICH_HCR_EL2, ICH_HCR_EN);
120 static int gic_send_sgi(struct sgi *sgi)
123 u16 targets = sgi->targets;
125 if (!is_sgi(sgi->id))
128 if (sgi->routing_mode == 2)
129 targets = 1 << phys_processor_id();
131 val = (u64)sgi->aff3 << ICC_SGIR_AFF3_SHIFT
132 | (u64)sgi->aff2 << ICC_SGIR_AFF2_SHIFT
133 | sgi->aff1 << ICC_SGIR_AFF1_SHIFT
134 | (targets & ICC_SGIR_TARGET_MASK)
135 | (sgi->id & 0xf) << ICC_SGIR_IRQN_SHIFT;
137 if (sgi->routing_mode == 1)
138 val |= ICC_SGIR_ROUTING_BIT;
141 * Ensure the targets see our modifications to their per-cpu
146 arm_write_sysreg(ICC_SGI1R_EL1, val);
153 * Handle the maintenance interrupt, the rest is injected into the cell.
154 * Return true when the IRQ has been handled by the hyp.
156 static bool arch_handle_phys_irq(struct per_cpu *cpu_data, u32 irqn)
158 if (irqn == MAINTENANCE_IRQ) {
159 irqchip_inject_pending(cpu_data);
163 irqchip_set_pending(cpu_data, irqn, true);
168 static void gic_handle_irq(struct per_cpu *cpu_data)
170 bool handled = false;
174 /* Read ICC_IAR1: set 'active' state */
175 arm_read_sysreg(ICC_IAR1_EL1, irq_id);
177 if (irq_id == 0x3ff) /* Spurious IRQ */
181 if (is_sgi(irq_id)) {
182 arch_handle_sgi(cpu_data, irq_id);
185 handled = arch_handle_phys_irq(cpu_data, irq_id);
189 * Write ICC_EOIR1: drop priority, but stay active if handled is
191 * This allows to not be re-interrupted by a level-triggered
192 * interrupt that needs handling in the guest (e.g. timer)
194 arm_write_sysreg(ICC_EOIR1_EL1, irq_id);
195 /* Deactivate if necessary */
197 arm_write_sysreg(ICC_DIR_EL1, irq_id);
201 static int gic_inject_irq(struct per_cpu *cpu_data, struct pending_irq *irq)
208 arm_read_sysreg(ICH_ELSR_EL2, elsr);
209 for (i = 0; i < gic_num_lr; i++) {
210 if ((elsr >> i) & 1) {
211 /* Entry is invalid, candidate for injection */
218 * Entry is in use, check that it doesn't match the one we want
224 * A strict phys->virt id mapping is used for SPIs, so this test
225 * should be sufficient.
227 if ((u32)lr == irq->virt_id)
234 * All list registers are in use, trigger a maintenance
235 * interrupt once they are available again.
237 arm_read_sysreg(ICH_HCR_EL2, hcr);
239 arm_write_sysreg(ICH_HCR_EL2, hcr);
245 /* Only group 1 interrupts */
246 lr |= ICH_LR_GROUP_BIT;
247 lr |= ICH_LR_PENDING;
250 lr |= (u64)irq->type.irq << ICH_LR_PHYS_ID_SHIFT;
251 } else if (irq->type.sgi.maintenance) {
252 lr |= ICH_LR_SGI_EOI;
255 gic_write_lr(free_lr, lr);
260 struct irqchip_ops gic_irqchip = {
262 .cpu_init = gic_cpu_init,
263 .send_sgi = gic_send_sgi,
264 .handle_irq = gic_handle_irq,
265 .inject_irq = gic_inject_irq,