/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/control.h>
#include <jailhouse/mmio.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/types.h>
#include <asm/control.h>
#include <asm/gic_common.h>
#include <asm/irqchip.h>
#include <asm/platform.h>
#include <asm/setup.h>
#include <asm/traps.h>

/*
 * This implementation assumes that the kernel driver already initialised most
 * of the GIC.
 * There are almost no instruction barriers, since IRQs are always disabled in
 * the hyp, and ERET serves as the context synchronization event.
 */

static unsigned int gic_num_lr;
static unsigned int gic_num_priority_bits;

static void *gicr_base;
static unsigned int gicr_size;

static int gic_init(void)
{
        int err;

        /* FIXME: parse a dt */
        gicr_base = GICR_BASE;
        gicr_size = GICR_SIZE;

        /* Let the per-cpu code access the redistributors */
        err = arch_map_device(gicr_base, gicr_base, gicr_size);

        return err;
}

static int gic_cpu_reset(struct per_cpu *cpu_data)
{
        unsigned int i;
        void *gicr = cpu_data->gicr_base;
        unsigned long active;

        if (gicr == 0)
                return -ENODEV;

        /* Clear list registers */
        for (i = 0; i < gic_num_lr; i++)
                gic_write_lr(i, 0);
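
        /*
         * GICR_ICACTIVER reports the active state of the 32 private
         * interrupts (SGIs 0-15, PPIs 16-31). With EOImode=1 (set in
         * gic_cpu_init), writing an ID to ICC_DIR_EL1 deactivates it without
         * requiring a prior priority drop.
         */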
        gicr += GICR_SGI_BASE;
        active = mmio_read32(gicr + GICR_ICACTIVER);
        /* Deactivate all active PPIs */
        for (i = 16; i < 32; i++) {
                if (test_bit(i, &active))
                        arm_write_sysreg(ICC_DIR_EL1, i);
        }
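
        /*
         * The enable registers use write-1 semantics: bits set in
         * GICR_ICENABLER disable the corresponding interrupt, bits set in
         * GICR_ISENABLER enable it, and zero bits are ignored.
         */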
        /* Disable all PPIs, ensure IPIs are enabled */
        mmio_write32(gicr + GICR_ICENABLER, 0xffff0000);
        mmio_write32(gicr + GICR_ISENABLER, 0x0000ffff);

        /* Clear active priority bits */
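        /*
         * ICH_AP1R1_EL2 is only implemented with at least 6 bits of
         * priority, ICH_AP1R2/3_EL2 with 7, hence the staged clears below.
         */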
        if (gic_num_priority_bits >= 5)
                arm_write_sysreg(ICH_AP1R0_EL2, 0);
        if (gic_num_priority_bits >= 6)
                arm_write_sysreg(ICH_AP1R1_EL2, 0);
        if (gic_num_priority_bits > 6) {
                arm_write_sysreg(ICH_AP1R2_EL2, 0);
                arm_write_sysreg(ICH_AP1R3_EL2, 0);
        }

        arm_write_sysreg(ICH_VMCR_EL2, 0);
        arm_write_sysreg(ICH_HCR_EL2, ICH_HCR_EN);

        return 0;
}

static int gic_cpu_init(struct per_cpu *cpu_data)
{
        u64 typer;
        u32 pidr;
        u32 gic_version;
        u32 cell_icc_ctlr, cell_icc_pmr, cell_icc_igrpen1;
        u32 ich_vtr;
        u32 ich_vmcr;
        void *redist_base = gicr_base;

        /* Find redistributor */
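        /*
         * Each redistributor owns two contiguous 64KB frames (RD_base and
         * SGI_base), four when GICv4 adds the VLPI frames, hence the 0x20000
         * stride. GICR_TYPER[63:32] holds the affinity of the CPU attached
         * to this redistributor, which is assumed to match the linear
         * cpu_id.
         */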
        do {
                pidr = mmio_read32(redist_base + GICR_PIDR2);
                gic_version = GICR_PIDR2_ARCH(pidr);
                if (gic_version != 3 && gic_version != 4)
                        break;

                typer = mmio_read64(redist_base + GICR_TYPER);
                if ((typer >> 32) == cpu_data->cpu_id) {
                        cpu_data->gicr_base = redist_base;
                        break;
                }

                redist_base += 0x20000;
                if (gic_version == 4)
                        redist_base += 0x20000;
        } while (!(typer & GICR_TYPER_Last));

        if (cpu_data->gicr_base == 0) {
                printk("GIC: No redist found for CPU%d\n", cpu_data->cpu_id);
                return -ENODEV;
        }

        /* Ensure all IPIs are enabled */
        mmio_write32(redist_base + GICR_SGI_BASE + GICR_ISENABLER, 0x0000ffff);

        /*
         * Set EOImode to 1.
         * This allows the hyp to drop the priority of level-triggered
         * interrupts without deactivating them, which ensures that they
         * won't immediately be re-triggered (e.g. the timer).
         * They can then be injected into the guest using the LR.HW bit, and
         * will be deactivated once the guest does an EOI after handling the
         * interrupt source.
         */
        arm_read_sysreg(ICC_CTLR_EL1, cell_icc_ctlr);
        arm_write_sysreg(ICC_CTLR_EL1, ICC_CTLR_EOImode);

        arm_read_sysreg(ICC_PMR_EL1, cell_icc_pmr);
        arm_write_sysreg(ICC_PMR_EL1, ICC_PMR_DEFAULT);

        arm_read_sysreg(ICC_IGRPEN1_EL1, cell_icc_igrpen1);
        arm_write_sysreg(ICC_IGRPEN1_EL1, ICC_IGRPEN1_EN);
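
        /*
         * ICH_VTR_EL2 describes the virtual interface: ListRegs (bits [4:0])
         * is the number of list registers minus one, PRIbits (bits [31:29])
         * the number of priority bits minus one.
         */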
        arm_read_sysreg(ICH_VTR_EL2, ich_vtr);
        gic_num_lr = (ich_vtr & 0xf) + 1;
        gic_num_priority_bits = (ich_vtr >> 29) + 1;
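
        /*
         * Restore the cell's saved ICC configuration into ICH_VMCR_EL2, so
         * that its reads of the virtual CPU interface (PMR, group enable,
         * EOImode) return what it had set up before the hyp took over.
         */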
        ich_vmcr = (cell_icc_pmr & ICC_PMR_MASK) << ICH_VMCR_VPMR_SHIFT;
        if (cell_icc_igrpen1 & ICC_IGRPEN1_EN)
                ich_vmcr |= ICH_VMCR_VENG1;
        if (cell_icc_ctlr & ICC_CTLR_EOImode)
                ich_vmcr |= ICH_VMCR_VEOIM;
        arm_write_sysreg(ICH_VMCR_EL2, ich_vmcr);

        /* After this, the cells access the virtual interface of the GIC. */
        arm_write_sysreg(ICH_HCR_EL2, ICH_HCR_EN);

        return 0;
}

static int gic_send_sgi(struct sgi *sgi)
{
        u64 val;
        u16 targets = sgi->targets;

        if (!is_sgi(sgi->id))
                return -EINVAL;

        if (sgi->routing_mode == 2)
                targets = 1 << phys_processor_id();
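
        /*
         * Routing mode 0 sends the SGI to the CPUs listed in targets, mode 1
         * (the IRM bit below) broadcasts it to everyone but the sender, and
         * mode 2 targets the local CPU only.
         */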
        val = (u64)sgi->aff3 << ICC_SGIR_AFF3_SHIFT
            | (u64)sgi->aff2 << ICC_SGIR_AFF2_SHIFT
            | sgi->aff1 << ICC_SGIR_AFF1_SHIFT
            | (targets & ICC_SGIR_TARGET_MASK)
            | (sgi->id & 0xf) << ICC_SGIR_IRQN_SHIFT;

        if (sgi->routing_mode == 1)
                val |= ICC_SGIR_ROUTING_BIT;

        /*
         * Ensure the targets see our modifications to their per-cpu
         * structures.
         */
        dsb(ish);

        arm_write_sysreg(ICC_SGI1R_EL1, val);
        isb();

        return 0;
}

int gicv3_handle_sgir_write(struct per_cpu *cpu_data, u64 sgir)
{
        struct sgi sgi;
        struct cell *cell = cpu_data->cell;
        unsigned int cpu;
        unsigned long this_cpu = cpu_data->cpu_id;
        unsigned long routing_mode = !!(sgir & ICC_SGIR_ROUTING_BIT);
        unsigned long targets = sgir & ICC_SGIR_TARGET_MASK;
        u32 irq = sgir >> ICC_SGIR_IRQN_SHIFT & 0xf;

        /* FIXME: clusters are not supported yet. */
        sgi.targets = 0;
        sgi.routing_mode = routing_mode;
        sgi.aff1 = sgir >> ICC_SGIR_AFF1_SHIFT & 0xff;
        sgi.aff2 = sgir >> ICC_SGIR_AFF2_SHIFT & 0xff;
        sgi.aff3 = sgir >> ICC_SGIR_AFF3_SHIFT & 0xff;
        sgi.id = SGI_INJECT;
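
        /*
         * The cell's SGI is not forwarded directly: the ID is marked pending
         * in each target CPU's per-cpu structure, and the hyp-owned
         * SGI_INJECT IPI then asks those CPUs to inject it into themselves.
         */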
        for_each_cpu_except(cpu, cell->cpu_set, this_cpu) {
                if (routing_mode == 0 && !test_bit(cpu, &targets))
                        continue;
                else if (routing_mode == 1 && cpu == this_cpu)
                        continue;

                irqchip_set_pending(per_cpu(cpu), irq, false);
                sgi.targets |= (1 << cpu);
        }

        /* Let the other CPUs inject their SGIs */
        gic_send_sgi(&sgi);

        return TRAP_HANDLED;
}

/*
 * Handle the maintenance interrupt; everything else is injected into the
 * cell. Returns true when the IRQ has been handled by the hyp.
 */
static bool arch_handle_phys_irq(struct per_cpu *cpu_data, u32 irqn)
{
        if (irqn == MAINTENANCE_IRQ) {
                irqchip_inject_pending(cpu_data);
                return true;
        }

        irqchip_set_pending(cpu_data, irqn, true);

        return false;
}

static void gic_eoi_irq(u32 irq_id, bool deactivate)
{
        arm_write_sysreg(ICC_EOIR1_EL1, irq_id);
        if (deactivate)
                arm_write_sysreg(ICC_DIR_EL1, irq_id);
}

static void gic_handle_irq(struct per_cpu *cpu_data)
{
        bool handled = false;
        u32 irq_id;

        while (1) {
                /* Read ICC_IAR1: set 'active' state */
                arm_read_sysreg(ICC_IAR1_EL1, irq_id);
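
                /* INTID 1023 is the architectural "no pending IRQ" value */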
                if (irq_id == 0x3ff) /* Spurious IRQ */
                        break;

                /* Handle IRQ */
                if (is_sgi(irq_id)) {
                        arch_handle_sgi(cpu_data, irq_id);
                        handled = true;
                } else {
                        handled = arch_handle_phys_irq(cpu_data, irq_id);
                }

                /*
                 * Write ICC_EOIR1: drop priority, but stay active if handled
                 * is false.
                 * This avoids being re-interrupted by a level-triggered
                 * interrupt that needs handling in the guest (e.g. the
                 * timer).
                 */
                gic_eoi_irq(irq_id, handled);
        }
}

static int gic_inject_irq(struct per_cpu *cpu_data, struct pending_irq *irq)
{
        unsigned int i;
        int free_lr = -1;
        u32 elsr;
        u64 lr;

        arm_read_sysreg(ICH_ELSR_EL2, elsr);
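
        /*
         * ICH_ELSR_EL2 has one status bit per list register: a set bit means
         * the LR holds no valid interrupt and can be reused.
         */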
        for (i = 0; i < gic_num_lr; i++) {
                if ((elsr >> i) & 1) {
                        /* Entry is invalid, candidate for injection */
                        if (free_lr == -1)
                                free_lr = i;
                        continue;
                }

                /*
                 * Entry is in use; check that it doesn't match the one we
                 * want to inject.
                 */
                lr = gic_read_lr(i);

                /*
                 * A strict phys->virt ID mapping is used for SPIs, so this
                 * test should be sufficient.
                 */
                if ((u32)lr == irq->virt_id)
                        return -EINVAL;
        }
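
        /*
         * ICH_HCR_EL2.UIE raises the maintenance interrupt once no more than
         * one list register remains in use; the pending interrupts are then
         * re-injected from the maintenance handler (arch_handle_phys_irq).
         */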
        if (free_lr == -1) {
                u32 hcr;

                /*
                 * All list registers are in use, trigger a maintenance
                 * interrupt once they are available again.
                 */
                arm_read_sysreg(ICH_HCR_EL2, hcr);
                hcr |= ICH_HCR_UIE;
                arm_write_sysreg(ICH_HCR_EL2, hcr);

                return -EBUSY;
        }
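
        /*
         * Compose the list register: the low word holds the virtual INTID.
         * For hardware interrupts, the physical INTID goes into the pINTID
         * field, so that the cell's EOI deactivates the physical interrupt
         * directly.
         */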
        lr = irq->virt_id;
        /* Only group 1 interrupts */
        lr |= ICH_LR_GROUP_BIT;
        lr |= ICH_LR_PENDING;
        if (irq->hw) {
                lr |= ICH_LR_HW_BIT;
                lr |= (u64)irq->type.irq << ICH_LR_PHYS_ID_SHIFT;
        } else if (irq->type.sgi.maintenance) {
                lr |= ICH_LR_SGI_EOI;
        }

        gic_write_lr(free_lr, lr);

        return 0;
}

struct irqchip_ops gic_irqchip = {
        .init = gic_init,
        .cpu_init = gic_cpu_init,
        .cpu_reset = gic_cpu_reset,
        .send_sgi = gic_send_sgi,
        .handle_irq = gic_handle_irq,
        .inject_irq = gic_inject_irq,
        .eoi_irq = gic_eoi_irq,
};