/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/control.h>
#include <jailhouse/mmio.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/types.h>
#include <asm/control.h>
#include <asm/gic_common.h>
#include <asm/irqchip.h>
#include <asm/platform.h>
#include <asm/setup.h>
#include <asm/traps.h>

/*
 * This implementation assumes that the kernel driver already initialised most
 * of the GIC.
 * There are almost no instruction barriers, since IRQs are always disabled in
 * the hyp and ERET serves as the context synchronization event.
 */

static unsigned int gic_num_lr;
static unsigned int gic_num_priority_bits;
static u32 gic_version;

extern void *gicd_base;
extern unsigned int gicd_size;
static void *gicr_base;
static unsigned int gicr_size;
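
/*
 * Probed once at hypervisor entry: Linux must have left the distributor with
 * affinity routing enabled (GICD_CTLR.ARE_NS), since this driver relies on
 * GICv3 affinity routing throughout.
 */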
static int gic_init(void)
{
	/* TODO: need to validate more? */
	if (!(mmio_read32(gicd_base + GICD_CTLR) & GICD_CTLR_ARE_NS))
		return trace_error(-EIO);

	/* FIXME: parse a dt */
	gicr_base = GICR_BASE;
	gicr_size = GICR_SIZE;

	/* Let the per-cpu code access the redistributors */
	return arch_map_device(gicr_base, gicr_base, gicr_size);
}
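
/*
 * Reset the virtual CPU interface: zero every list register and the active
 * priority registers. Per the GICv3 architecture, ICH_AP1R1_EL2 is only
 * implemented with 6 or more priority bits, ICH_AP1R2/3_EL2 only with 7,
 * hence the staged checks.
 */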
static void gic_clear_pending_irqs(void)
{
	unsigned int n;

	/* Clear list registers. */
	for (n = 0; n < gic_num_lr; n++)
		gic_write_lr(n, 0);

	/* Clear active priority bits */
	if (gic_num_priority_bits >= 5)
		arm_write_sysreg(ICH_AP1R0_EL2, 0);
	if (gic_num_priority_bits >= 6)
		arm_write_sysreg(ICH_AP1R1_EL2, 0);
	if (gic_num_priority_bits > 6) {
		arm_write_sysreg(ICH_AP1R2_EL2, 0);
		arm_write_sysreg(ICH_AP1R3_EL2, 0);
	}
}
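
/*
 * Return a CPU's interrupt state to what its next owner expects: drop pending
 * virtual IRQs, deactivate PPIs left active and re-enable the SGIs. On root
 * cell shutdown, additionally restore the EOI mode Linux was using and turn
 * the virtual interface off.
 */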
static int gic_cpu_reset(struct per_cpu *cpu_data, bool is_shutdown)
{
	unsigned int i;
	void *gicr = cpu_data->gicr_base;
	unsigned long active;
	bool root_shutdown = is_shutdown && (cpu_data->cell == &root_cell);
	u32 ich_vmcr;

	if (gicr == 0)
		return -ENODEV;

	gic_clear_pending_irqs();

	gicr += GICR_SGI_BASE;
	active = mmio_read32(gicr + GICR_ICACTIVER);
	/* Deactivate all active PPIs */
	for (i = 16; i < 32; i++) {
		if (test_bit(i, &active))
			arm_write_sysreg(ICC_DIR_EL1, i);
	}

	/* Ensure all IPIs and the maintenance PPI are enabled. */
	mmio_write32(gicr + GICR_ISENABLER,
		     0x0000ffff | (1 << MAINTENANCE_IRQ));

	/*
	 * Disable PPIs, except for the maintenance interrupt.
	 * On shutdown, the root cell expects to find all its PPIs still
	 * enabled - except for the maintenance interrupt we used.
	 */
	mmio_write32(gicr + GICR_ICENABLER,
		     root_shutdown ? 1 << MAINTENANCE_IRQ :
				     0xffff0000 & ~(1 << MAINTENANCE_IRQ));

	if (root_shutdown) {
		/* Restore the root config */
		arm_read_sysreg(ICH_VMCR_EL2, ich_vmcr);

		if (!(ich_vmcr & ICH_VMCR_VEOIM)) {
			u32 icc_ctlr;

			arm_read_sysreg(ICC_CTLR_EL1, icc_ctlr);
			icc_ctlr &= ~ICC_CTLR_EOImode;
			arm_write_sysreg(ICC_CTLR_EL1, icc_ctlr);
		}

		arm_write_sysreg(ICH_HCR_EL2, 0);
	}

	arm_write_sysreg(ICH_VMCR_EL2, 0);

	return 0;
}
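
/*
 * Per-CPU setup: locate this CPU's redistributor by matching the affinity
 * value in the upper word of GICR_TYPER, then route all interrupt handling
 * through the virtual CPU interface. Redistributor frames are 128KiB apart
 * on GICv3 and 256KiB on GICv4, which adds two pages for VLPIs.
 */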
static int gic_cpu_init(struct per_cpu *cpu_data)
{
	u64 typer;
	u32 pidr;
	u32 cell_icc_ctlr, cell_icc_pmr, cell_icc_igrpen1;
	u32 ich_vtr;
	u32 ich_vmcr;
	void *redist_base = gicr_base;

	/* Find redistributor */
	do {
		pidr = mmio_read32(redist_base + GICR_PIDR2);
		gic_version = GICR_PIDR2_ARCH(pidr);
		if (gic_version != 3 && gic_version != 4)
			break;

		typer = mmio_read64(redist_base + GICR_TYPER);
		if ((typer >> 32) == cpu_data->cpu_id) {
			cpu_data->gicr_base = redist_base;
			break;
		}

		redist_base += 0x20000;
		if (gic_version == 4)
			redist_base += 0x20000;
	} while (!(typer & GICR_TYPER_Last));

	if (cpu_data->gicr_base == 0) {
		printk("GIC: No redist found for CPU%d\n", cpu_data->cpu_id);
		return -ENODEV;
	}

	/* Ensure all IPIs and the maintenance PPI are enabled. */
	mmio_write32(redist_base + GICR_SGI_BASE + GICR_ISENABLER,
		     0x0000ffff | (1 << MAINTENANCE_IRQ));

	/*
	 * Set EOIMode to 1.
	 * This allows dropping the priority of level-triggered interrupts
	 * without deactivating them, and thus ensures that they won't be
	 * immediately re-triggered (e.g. the timer). They can then be
	 * injected into the guest using the LR.HW bit, and will be
	 * deactivated once the guest does an EOI after handling the
	 * interrupt source.
	 */
	arm_read_sysreg(ICC_CTLR_EL1, cell_icc_ctlr);
	arm_write_sysreg(ICC_CTLR_EL1, ICC_CTLR_EOImode);

	arm_read_sysreg(ICC_PMR_EL1, cell_icc_pmr);
	arm_write_sysreg(ICC_PMR_EL1, ICC_PMR_DEFAULT);

	arm_read_sysreg(ICC_IGRPEN1_EL1, cell_icc_igrpen1);
	arm_write_sysreg(ICC_IGRPEN1_EL1, ICC_IGRPEN1_EN);

	arm_read_sysreg(ICH_VTR_EL2, ich_vtr);
	gic_num_lr = (ich_vtr & 0xf) + 1;
	gic_num_priority_bits = (ich_vtr >> 29) + 1;

	/*
	 * Clear pending virtual IRQs in case anything is left from previous
	 * use. Physically pending IRQs will be forwarded to Linux once we
	 * enable interrupts for the hypervisor.
	 */
	gic_clear_pending_irqs();

	ich_vmcr = (cell_icc_pmr & ICC_PMR_MASK) << ICH_VMCR_VPMR_SHIFT;
	if (cell_icc_igrpen1 & ICC_IGRPEN1_EN)
		ich_vmcr |= ICH_VMCR_VENG1;
	if (cell_icc_ctlr & ICC_CTLR_EOImode)
		ich_vmcr |= ICH_VMCR_VEOIM;
	arm_write_sysreg(ICH_VMCR_EL2, ich_vmcr);

	/* After this, the cells access the virtual interface of the GIC. */
	arm_write_sysreg(ICH_HCR_EL2, ICH_HCR_EN);

	return 0;
}
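
/*
 * Called when an SPI changes hands: if it currently routes to a CPU the cell
 * doesn't own, point its IROUTER entry at the cell's first CPU instead.
 */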
static void gic_adjust_irq_target(struct cell *cell, u16 irq_id)
{
	/* IROUTER registers are 64 bits wide */
	void *irouter = gicd_base + GICD_IROUTER + 8 * irq_id;
	u32 route = mmio_read32(irouter);

	if (!cell_owns_cpu(cell, route))
		mmio_write32(irouter, first_cpu(cell->cpu_set));
}
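
/*
 * MMIO handler for the redistributor region. Cells address redistributors by
 * virtual CPU id, so accesses are first translated to the matching physical
 * frame; GICR_TYPER reads are emulated so that each cell sees its own
 * processor numbers and its own "last redistributor" marker.
 */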
static enum mmio_result gic_handle_redist_access(void *arg,
						 struct mmio_access *mmio)
{
	struct cell *cell = this_cell();
	unsigned int cpu;
	unsigned int virt_id;
	void *virt_redist = 0;
	void *phys_redist = 0;
	unsigned int redist_size = (gic_version == 4) ? 0x40000 : 0x20000;
	void *address = (void *)(mmio->address + (unsigned long)gicr_base);

	/*
	 * The redistributor accessed by the cell is not the one stored in
	 * these cpu_datas, but the one associated with its virtual id. So we
	 * first need to translate the redistributor address.
	 */
	for_each_cpu(cpu, cell->cpu_set) {
		virt_id = arm_cpu_phys2virt(cpu);
		virt_redist = per_cpu(virt_id)->gicr_base;
		if (address >= virt_redist &&
		    address < virt_redist + redist_size) {
			phys_redist = per_cpu(cpu)->gicr_base;
			break;
		}
	}

	if (phys_redist == NULL)
		return MMIO_ERROR;

	mmio->address = address - virt_redist;

	/* Change the ID register, all other accesses are allowed. */
	if (!mmio->is_write) {
		switch (mmio->address) {
		case GICR_TYPER:
			if (virt_id == cell->arch.last_virt_id)
				mmio->value = GICR_TYPER_Last;
			else
				mmio->value = 0;
			/* AArch64 can use a single 64-bit read here */
			if (mmio->size == 8)
				mmio->value |= (u64)virt_id << 32;
			return MMIO_HANDLED;
		case GICR_TYPER + 4:
			/* Upper bits contain the affinity */
			mmio->value = virt_id;
			return MMIO_HANDLED;
		}
	}
	mmio_perform_access(phys_redist, mmio);

	return MMIO_HANDLED;
}
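
/*
 * Register the two MMIO trap regions of the GIC, distributor and
 * redistributors, for each new cell; this matches the count returned by
 * irqchip_mmio_count_regions() below.
 */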
static int gic_cell_init(struct cell *cell)
{
	mmio_region_register(cell, (unsigned long)gicd_base, gicd_size,
			     gic_handle_dist_access, NULL);
	mmio_region_register(cell, (unsigned long)gicr_base, gicr_size,
			     gic_handle_redist_access, NULL);

	return 0;
}
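
/*
 * Send a group 1 SGI by composing an ICC_SGI1R_EL1 value. Routing mode 1
 * sets the IRM bit (broadcast to all CPUs but the sender), mode 2 targets
 * only the local CPU, and mode 0 uses the caller-provided target list.
 */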
static int gic_send_sgi(struct sgi *sgi)
{
	u64 val;
	u16 targets = sgi->targets;

	if (!is_sgi(sgi->id))
		return -EINVAL;

	if (sgi->routing_mode == 2)
		targets = 1 << phys_processor_id();

	val = (u64)sgi->aff3 << ICC_SGIR_AFF3_SHIFT
	    | (u64)sgi->aff2 << ICC_SGIR_AFF2_SHIFT
	    | sgi->aff1 << ICC_SGIR_AFF1_SHIFT
	    | (targets & ICC_SGIR_TARGET_MASK)
	    | (sgi->id & 0xf) << ICC_SGIR_IRQN_SHIFT;

	if (sgi->routing_mode == 1)
		val |= ICC_SGIR_ROUTING_BIT;

	/*
	 * Ensure the targets see our modifications to their per-cpu
	 * structures.
	 */
	dsb(ish);

	arm_write_sysreg(ICC_SGI1R_EL1, val);

	return 0;
}
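
/*
 * Trap handler for a cell's writes to ICC_SGI1R_EL1: unpack the register
 * into a struct sgi and hand it to the common SGI code for target
 * translation and delivery.
 */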
void gicv3_handle_sgir_write(u64 sgir)
{
	struct sgi sgi;
	unsigned long routing_mode = !!(sgir & ICC_SGIR_ROUTING_BIT);

	/* FIXME: clusters are not supported yet. */
	sgi.targets = sgir & ICC_SGIR_TARGET_MASK;
	sgi.routing_mode = routing_mode;
	sgi.aff1 = sgir >> ICC_SGIR_AFF1_SHIFT & 0xff;
	sgi.aff2 = sgir >> ICC_SGIR_AFF2_SHIFT & 0xff;
	sgi.aff3 = sgir >> ICC_SGIR_AFF3_SHIFT & 0xff;
	sgi.id = sgir >> ICC_SGIR_IRQN_SHIFT & 0xf;

	gic_handle_sgir_write(&sgi, true);
}

/*
 * GICv3 uses one 64-bit IROUTER register per IRQ. AArch32 guests access it
 * as two 32-bit halves.
 */
enum mmio_result gic_handle_irq_route(struct mmio_access *mmio,
				      unsigned int irq)
{
	struct cell *cell = this_cell();
	unsigned int cpu;

	/* Ignore aff3 on AArch32 (return 0) */
	if (mmio->size == 4 && (mmio->address % 8))
		return MMIO_HANDLED;

	/* SGIs and PPIs are res0 */
	if (irq < 32)
		return MMIO_HANDLED;

	/*
	 * Ignore accesses to SPIs that do not belong to the cell. This isn't
	 * forbidden, because the guest driver may simply iterate over all
	 * registers at initialisation.
	 */
	if (!irqchip_irq_in_cell(cell, irq))
		return MMIO_HANDLED;

	/* Translate the virtual cpu id into the physical one */
	if (mmio->is_write) {
		mmio->value = arm_cpu_virt2phys(cell, mmio->value);
		if (mmio->value == -1) {
			printk("Attempt to route IRQ%d outside of cell\n", irq);
			return MMIO_ERROR;
		}
		mmio_perform_access(gicd_base, mmio);
	} else {
		cpu = mmio_read32(gicd_base + GICD_IROUTER + 8 * irq);
		mmio->value = arm_cpu_phys2virt(cpu);
	}

	return MMIO_HANDLED;
}
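
/*
 * With EOIMode set to 1, ICC_EOIR1_EL1 only drops the running priority; the
 * write to ICC_DIR_EL1 performs the deactivation, and is skipped when the
 * cell is supposed to deactivate the interrupt itself.
 */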
static void gic_eoi_irq(u32 irq_id, bool deactivate)
{
	arm_write_sysreg(ICC_EOIR1_EL1, irq_id);
	if (deactivate)
		arm_write_sysreg(ICC_DIR_EL1, irq_id);
}
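
/*
 * Queue an IRQ in a free list register of the current CPU's virtual
 * interface. Fails if the IRQ is already listed or if no list register is
 * free. Non-SGI interrupts get the HW bit, so the guest's EOI also
 * deactivates the physical interrupt.
 */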
static int gic_inject_irq(struct per_cpu *cpu_data, u16 irq_id)
{
	int i;
	int free_lr = -1;
	u32 elsr;
	u64 lr;

	arm_read_sysreg(ICH_ELSR_EL2, elsr);
	for (i = 0; i < gic_num_lr; i++) {
		if ((elsr >> i) & 1) {
			/* Entry is invalid, candidate for injection */
			if (free_lr == -1)
				free_lr = i;
			continue;
		}

		/*
		 * Entry is in use, check that it doesn't match the one we
		 * want to inject.
		 */
		lr = gic_read_lr(i);

		/*
		 * A strict phys->virt id mapping is used for SPIs, so this
		 * test should be sufficient.
		 */
		if ((u32)lr == irq_id)
			return -EEXIST;
	}

	if (free_lr == -1)
		/* All list registers are in use */
		return -EBUSY;

	lr = irq_id;
	/* Only group 1 interrupts */
	lr |= ICH_LR_GROUP_BIT;
	lr |= ICH_LR_PENDING;
	if (!is_sgi(irq_id)) {
		lr |= ICH_LR_HW_BIT;
		lr |= (u64)irq_id << ICH_LR_PHYS_ID_SHIFT;
	}

	gic_write_lr(free_lr, lr);

	return 0;
}
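
/*
 * Toggle the underflow maintenance interrupt (the UIE bit of ICH_HCR_EL2),
 * which fires when at most one list register holds a valid entry, so that
 * the hypervisor gets a chance to inject interrupts that did not fit before.
 */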
static void gicv3_enable_maint_irq(bool enable)
{
	u32 hcr;

	arm_read_sysreg(ICH_HCR_EL2, hcr);
	if (enable)
		hcr |= ICH_HCR_UIE;
	else
		hcr &= ~ICH_HCR_UIE;
	arm_write_sysreg(ICH_HCR_EL2, hcr);
}

unsigned int irqchip_mmio_count_regions(struct cell *cell)
{
	/* Distributor plus redistributor region, see gic_cell_init() */
	return 2;
}

struct irqchip_ops irqchip = {
	.init = gic_init,
	.cpu_init = gic_cpu_init,
	.cpu_reset = gic_cpu_reset,
	.cell_init = gic_cell_init,
	.adjust_irq_target = gic_adjust_irq_target,
	.send_sgi = gic_send_sgi,
	.handle_irq = gic_handle_irq,
	.inject_irq = gic_inject_irq,
	.enable_maint_irq = gicv3_enable_maint_irq,
	.eoi_irq = gic_eoi_irq,
};