2 * Jailhouse, a Linux-based partitioning hypervisor
4 * Copyright (c) ARM Limited, 2014
7 * Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
13 #include <jailhouse/control.h>
14 #include <jailhouse/mmio.h>
15 #include <asm/gic_common.h>
16 #include <asm/irqchip.h>
17 #include <asm/platform.h>
18 #include <asm/setup.h>
/* Number of GICH list registers, probed from GICH_VTR in gic_cpu_init(). */
20 static unsigned int gic_num_lr;
/* Distributor base/size, defined in the shared GIC code (gic_common). */
22 extern void *gicd_base;
23 extern unsigned int gicd_size;
/* CPU interface (GICC) MMIO window size, taken from platform constants. */
25 unsigned int gicc_size;
/* Virtual interface control (GICH) MMIO window size. */
28 unsigned int gich_size;
/*
 * One-time driver setup: record the platform's GICC/GICH/GICV addresses
 * and map the GICC and GICH register windows into the hypervisor.
 *
 * Returns 0 on success or a negative error from arch_map_device().
 * NOTE(review): lines are elided in this view — an `int err;` declaration
 * and the error check between the two arch_map_device() calls are
 * presumably present in the full source; confirm against the original.
 */
30 static int gic_init(void)
/* FIXME in original: addresses should come from the device tree, not
 * from compile-time platform constants. */
34 /* FIXME: parse device tree */
35 gicc_base = GICC_BASE;
36 gicc_size = GICC_SIZE;
37 gich_base = GICH_BASE;
38 gich_size = GICH_SIZE;
39 gicv_base = GICV_BASE;
/* Identity-map the CPU interface for hypervisor access. */
41 err = arch_map_device(gicc_base, gicc_base, gicc_size);
/* Identity-map the virtual interface control registers. */
45 err = arch_map_device(gich_base, gich_base, gich_size);
/*
 * Drop all virtual interrupt state on this CPU: empty every GICH list
 * register and clear the active-priority bits, so no stale vIRQ survives
 * a cell reset or a previous hypervisor session.
 * NOTE(review): the statement inside the loop (presumably a
 * gic_write_lr(n, 0) or similar) is elided from this view.
 */
50 static void gic_clear_pending_irqs(void)
54 /* Clear list registers. */
55 for (n = 0; n < gic_num_lr; n++)
58 /* Clear active priority bits. */
59 mmio_write32(gich_base + GICH_APR, 0);
/*
 * Return this CPU's GIC state to a sane baseline when its cell is reset
 * or the hypervisor shuts down.
 *
 * @cpu_data:    per-CPU state of the CPU being reset.
 * @is_shutdown: true when called on hypervisor shutdown rather than a
 *               plain cell reset.
 *
 * On root-cell shutdown the CPU interface is additionally restored from
 * the virtual state (GICH_VMCR) so Linux resumes with the GICC
 * configuration its guest-visible GICV had.
 * NOTE(review): several lines (declarations, braces, return) are elided
 * from this view.
 */
62 static int gic_cpu_reset(struct per_cpu *cpu_data, bool is_shutdown)
/* Root shutdown needs special-casing: the root cell expects its own
 * PPI/CTLR state back, not the freshly-reset defaults. */
65 bool root_shutdown = is_shutdown && (cpu_data->cell == &root_cell);
68 u32 gicc_ctlr, gicc_pmr;
/* Flush any vIRQs still sitting in the list registers. */
70 gic_clear_pending_irqs();
72 /* Deactivate all PPIs */
/* PPIs occupy interrupt IDs 16..31; writing the ID to GICC_DIR
 * deactivates it (EOImode split priority-drop/deactivate in use). */
73 active = mmio_read32(gicd_base + GICD_ISACTIVER);
74 for (i = 16; i < 32; i++) {
75 if (test_bit(i, (unsigned long *)&active))
76 mmio_write32(gicc_base + GICC_DIR, i);
79 /* Ensure all IPIs and the maintenance PPI are enabled */
/* SGIs are IDs 0..15 (mask 0x0000ffff). */
80 mmio_write32(gicd_base + GICD_ISENABLER,
81 0x0000ffff | (1 << MAINTENANCE_IRQ));
84 * Disable PPIs, except for the maintenance interrupt.
85 * On shutdown, the root cell expects to find all its PPIs still
86 * enabled - except for the maintenance interrupt we used.
88 mmio_write32(gicd_base + GICD_ICENABLER,
89 root_shutdown ? 1 << MAINTENANCE_IRQ :
90 0xffff0000 & ~(1 << MAINTENANCE_IRQ));
/* Disable the virtual interface control. */
93 mmio_write32(gich_base + GICH_HCR, 0);
/* On root shutdown, translate the virtual CPU interface state back
 * into physical GICC_CTLR/GICC_PMR values for the resuming kernel.
 * NOTE(review): gicc_ctlr's initial value is set on an elided line. */
96 gich_vmcr = mmio_read32(gich_base + GICH_VMCR);
/* VMCR stores only the upper 5 priority bits; shift back to the
 * 8-bit GICC_PMR field position. */
98 gicc_pmr = (gich_vmcr >> GICH_VMCR_PMR_SHIFT) << GICV_PMR_SHIFT;
/* Virtual group 0 maps onto physical group 1 (see gic_cpu_init). */
100 if (gich_vmcr & GICH_VMCR_EN0)
101 gicc_ctlr |= GICC_CTLR_GRPEN1;
102 if (gich_vmcr & GICH_VMCR_EOImode)
103 gicc_ctlr |= GICC_CTLR_EOImode;
105 mmio_write32(gicc_base + GICC_CTLR, gicc_ctlr);
106 mmio_write32(gicc_base + GICC_PMR, gicc_pmr);
/* Reset the virtual machine control register.
 * NOTE(review): gich_vmcr is presumably zeroed on an elided line in the
 * non-shutdown path before this write — confirm against the original. */
110 mmio_write32(gich_base + GICH_VMCR, gich_vmcr);
/*
 * Per-CPU GIC takeover at hypervisor entry: save the cell's (Linux's)
 * CPU-interface configuration, switch GICC to hypervisor control, mirror
 * the saved state into the virtual interface (GICH_VMCR), and enable
 * virtualization (GICH_HCR_EN).
 *
 * @cpu_data: per-CPU state of the CPU being initialised.
 * NOTE(review): declarations, braces and the return statement are elided
 * from this view.
 */
115 static int gic_cpu_init(struct per_cpu *cpu_data)
118 u32 cell_gicc_ctlr, cell_gicc_pmr;
120 /* Ensure all IPIs and the maintenance PPI are enabled. */
121 mmio_write32(gicd_base + GICD_ISENABLER,
122 0x0000ffff | (1 << MAINTENANCE_IRQ));
/* Snapshot the cell's CPU-interface state before we overwrite it;
 * it is replayed into GICH_VMCR below. */
124 cell_gicc_ctlr = mmio_read32(gicc_base + GICC_CTLR);
125 cell_gicc_pmr = mmio_read32(gicc_base + GICC_PMR);
/* Hypervisor runs with group-1 enabled and split EOI (priority drop
 * via GICC_EOIR, explicit deactivate via GICC_DIR). */
127 mmio_write32(gicc_base + GICC_CTLR,
128 GICC_CTLR_GRPEN1 | GICC_CTLR_EOImode);
129 mmio_write32(gicc_base + GICC_PMR, GICC_PMR_DEFAULT);
/* GICH_VTR[5:0] encodes (number of list registers - 1). */
131 vtr = mmio_read32(gich_base + GICH_VTR);
132 gic_num_lr = (vtr & 0x3f) + 1;
134 /* VMCR only contains 5 bits of priority */
135 vmcr = (cell_gicc_pmr >> GICV_PMR_SHIFT) << GICH_VMCR_PMR_SHIFT;
137 * All virtual interrupts are group 0 in this driver since the GICV
138 * layout seen by the guest corresponds to GICC without security
140 * - A read from GICV_IAR doesn't acknowledge group 1 interrupts
141 * (GICV_AIAR does it, but the guest never attempts to accesses it)
142 * - A write to GICV_CTLR.GRP0EN corresponds to the GICC_CTLR.GRP1EN bit
143 * Since the guest's driver thinks that it is accessing a GIC with
144 * security extensions, a write to GPR1EN will enable group 0
146 * - Group 0 interrupts are presented as virtual IRQs (FIQEn = 0)
148 if (cell_gicc_ctlr & GICC_CTLR_GRPEN1)
149 vmcr |= GICH_VMCR_EN0;
150 if (cell_gicc_ctlr & GICC_CTLR_EOImode)
151 vmcr |= GICH_VMCR_EOImode;
153 mmio_write32(gich_base + GICH_VMCR, vmcr);
/* Turn on the virtual CPU interface for this core. */
154 mmio_write32(gich_base + GICH_HCR, GICH_HCR_EN);
157 * Clear pending virtual IRQs in case anything is left from previous
158 * use. Physically pending IRQs will be forwarded to Linux once we
159 * enable interrupts for the hypervisor.
161 gic_clear_pending_irqs();
163 /* Register ourselves into the CPU itf map */
164 gic_probe_cpu_id(cpu_data->cpu_id);
/*
 * Complete an interrupt on the physical CPU interface: drop its priority
 * via GICC_EOIR and, when requested, deactivate it via GICC_DIR (the two
 * steps are split because the hypervisor runs with GICC_CTLR_EOImode).
 *
 * @irq_id:     ID of the interrupt to complete.
 * @deactivate: also write GICC_DIR to deactivate the interrupt.
 * NOTE(review): the `if (deactivate)` guard before the GICC_DIR write is
 * on an elided line in this view.
 */
169 static void gic_eoi_irq(u32 irq_id, bool deactivate)
172 * The GIC doesn't seem to care about the CPUID value written to EOIR,
173 * which is rather convenient...
175 mmio_write32(gicc_base + GICC_EOIR, irq_id);
177 mmio_write32(gicc_base + GICC_DIR, irq_id);
/*
 * Per-cell GIC setup: route the cell's SPIs to it, map the virtual CPU
 * interface (GICV) into the cell's stage-2 tables at the physical GICC
 * address, and trap the distributor region so GICD accesses can be
 * filtered in gic_handle_dist_access().
 *
 * @cell: the cell being created.
 * Returns 0 on success or a negative error from paging_create() /
 * mmio_region_register(); the error-check and return lines are elided
 * from this view.
 */
180 static int gic_cell_init(struct cell *cell)
185 * target_cpu_map has not been populated by all available CPUs when the
186 * setup code initialises the root cell. It is assumed that the kernel
187 * already has configured all its SPIs anyway, and that it will redirect
188 * them when unplugging a CPU.
190 if (cell != &root_cell)
191 gic_target_spis(cell, cell);
194 * Let the guest access the virtual CPU interface instead of the
197 * WARN: some SoCs (EXYNOS4) use a modified GIC which doesn't have any
198 * banked CPU interface, so we should map per-CPU physical addresses
200 * As for now, none of them seem to have virtualization extensions.
/* Guest-physical GICC address is backed by the GICV page, read-write,
 * device memory; gicc_size is used for both windows. */
202 err = paging_create(&cell->arch.mm, (unsigned long)gicv_base,
203 gicc_size, (unsigned long)gicc_base,
204 (PTE_FLAG_VALID | PTE_ACCESS_FLAG |
205 S2_PTE_ACCESS_RW | S2_PTE_FLAG_DEVICE),
206 PAGING_NON_COHERENT);
/* All distributor accesses from the cell are trapped and emulated. */
210 mmio_region_register(cell, (unsigned long)gicd_base, gicd_size,
211 gic_handle_dist_access, NULL);
/*
 * Per-cell teardown: undo gic_cell_init() by removing the cell's GICV
 * stage-2 mapping and handing its SPIs back to the root cell.
 *
 * @cell: the cell being destroyed.
 */
215 static void gic_cell_exit(struct cell *cell)
/* Unmap the virtual CPU interface window created in gic_cell_init().
 * NOTE(review): destruction is keyed on gicc_base here while creation
 * used gicv_base as the guest address — confirm against the full
 * original whether that asymmetry is intentional. */
217 paging_destroy(&cell->arch.mm, (unsigned long)gicc_base, gicc_size,
218 PAGING_NON_COHERENT);
219 /* Reset interrupt routing of the cell's spis */
220 gic_target_spis(cell, &root_cell);
/*
 * Send a software-generated interrupt by composing and writing the
 * distributor's GICD_SGIR register.
 *
 * @sgi: routing_mode (2 bits, filter field), targets (8-bit CPU target
 *       list) and id (must be a valid SGI, i.e. rejected by is_sgi()
 *       otherwise).
 * NOTE(review): the early-return on !is_sgi(), the OR-in of sgi->id and
 * the final return are on elided lines in this view.
 */
223 static int gic_send_sgi(struct sgi *sgi)
227 if (!is_sgi(sgi->id))
/* GICD_SGIR layout: TargetListFilter at bit 24, CPUTargetList at
 * bit 16, SGIINTID in the low bits. */
230 val = (sgi->routing_mode & 0x3) << 24
231 | (sgi->targets & 0xff) << 16
234 mmio_write32(gicd_base + GICD_SGIR, val);
/*
 * Inject a virtual interrupt into the current cell by placing it into a
 * free GICH list register.
 *
 * @cpu_data: per-CPU state (unused in the visible lines).
 * @irq_id:   interrupt ID to inject; hardware-linked for non-SGIs.
 *
 * Scans GICH_ELSR0/1 for an empty list register while also checking
 * occupied LRs so the same IRQ is not queued twice. The error returns
 * (no free LR / already pending), the LR read in the occupied-entry
 * path, and the base `lr` value are on elided lines in this view.
 */
239 static int gic_inject_irq(struct per_cpu *cpu_data, u16 irq_id)
244 unsigned long elsr[2];
/* ELSR bits set = LR empty; two 32-bit registers cover up to 64 LRs. */
246 elsr[0] = mmio_read32(gich_base + GICH_ELSR0);
247 elsr[1] = mmio_read32(gich_base + GICH_ELSR1);
248 for (i = 0; i < gic_num_lr; i++) {
249 if (test_bit(i, elsr)) {
250 /* Entry is available */
/* Remember the first free slot but keep scanning for duplicates. */
251 if (first_free == -1)
256 /* Check that there is no overlapping */
258 if ((lr & GICH_LR_VIRT_ID_MASK) == irq_id)
262 if (first_free == -1)
265 /* Inject group 0 interrupt (seen as IRQ by the guest) */
267 lr |= GICH_LR_PENDING_BIT;
/* For hardware interrupts, link the virtual IRQ to the physical one
 * so the guest's EOI deactivates it; SGIs have no physical line. */
269 if (!is_sgi(irq_id)) {
270 lr |= GICH_LR_HW_BIT;
271 lr |= (u32)irq_id << GICH_LR_PHYS_ID_SHIFT;
274 gic_write_lr(first_free, lr);
/*
 * Toggle the "underflow" maintenance interrupt (GICH_HCR_UIE), used to
 * get notified when list registers drain so queued vIRQs can be
 * re-injected.
 *
 * @enable: request the maintenance interrupt on (true) or off (false).
 * NOTE(review): the branch that sets GICH_HCR_UIE when enable is true is
 * on an elided line; only the clearing path is visible here.
 */
279 static void gic_enable_maint_irq(bool enable)
/* Read-modify-write: preserve the other HCR bits (e.g. GICH_HCR_EN). */
283 hcr = mmio_read32(gich_base + GICH_HCR);
287 hcr &= ~GICH_HCR_UIE;
288 mmio_write32(gich_base + GICH_HCR, hcr);
291 unsigned int irqchip_mmio_count_regions(struct cell *cell)
296 struct irqchip_ops gic_irqchip = {
298 .cpu_init = gic_cpu_init,
299 .cpu_reset = gic_cpu_reset,
300 .cell_init = gic_cell_init,
301 .cell_exit = gic_cell_exit,
303 .send_sgi = gic_send_sgi,
304 .handle_irq = gic_handle_irq,
305 .inject_irq = gic_inject_irq,
306 .enable_maint_irq = gic_enable_maint_irq,
307 .eoi_irq = gic_eoi_irq,