/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/control.h>
#include <jailhouse/mmio.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/types.h>
#include <asm/control.h>
#include <asm/gic_common.h>
#include <asm/irqchip.h>
#include <asm/platform.h>
#include <asm/setup.h>
#include <asm/traps.h>

/*
 * This implementation assumes that the kernel driver already initialised
 * most of the GIC.
 * There are almost no instruction barriers here, since IRQs are always
 * disabled in the hyp and ERET serves as the context synchronisation event.
 */

static unsigned int gic_num_lr;
static unsigned int gic_num_priority_bits;
static u32 gic_version;

extern void *gicd_base;
extern unsigned int gicd_size;
static void *gicr_base;
static unsigned int gicr_size;

static int gic_init(void)
{
	int err;

	/* FIXME: parse a device tree instead of the static platform values */
	gicr_base = GICR_BASE;
	gicr_size = GICR_SIZE;

	/* Let the per-cpu code access the redistributors */
	err = arch_map_device(gicr_base, gicr_base, gicr_size);

	return err;
}

static int gic_cpu_reset(struct per_cpu *cpu_data, bool is_shutdown)
{
	unsigned int i;
	void *gicr = cpu_data->gicr_base;
	unsigned long active;
	bool root_shutdown = is_shutdown && (cpu_data->cell == &root_cell);
	u32 ich_vmcr;

	if (gicr == 0)
		return -ENODEV;

	/* Clear list registers */
	for (i = 0; i < gic_num_lr; i++)
		gic_write_lr(i, 0);

	gicr += GICR_SGI_BASE;
	active = mmio_read32(gicr + GICR_ICACTIVER);
	/* Deactivate all active PPIs */
	for (i = 16; i < 32; i++) {
		if (test_bit(i, &active))
			arm_write_sysreg(ICC_DIR_EL1, i);
	}
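
	/*
	 * Writing an interrupt ID to ICC_DIR_EL1, as above, deactivates it
	 * directly. This relies on EOImode being set to 1 in gic_cpu_init(),
	 * which separates the priority drop (ICC_EOIR1_EL1) from the
	 * deactivation (ICC_DIR_EL1).
	 */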

	/*
	 * Disable all PPIs, ensure IPIs are enabled.
	 * On shutdown, the root cell expects to find all its PPIs still
	 * enabled when returning to the driver, so leave them untouched.
	 */
	if (!root_shutdown)
		mmio_write32(gicr + GICR_ICENABLER, 0xffff0000);
	mmio_write32(gicr + GICR_ISENABLER, 0x0000ffff);
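
	/*
	 * GICR_ICENABLER and GICR_ISENABLER have write-1-to-clear and
	 * write-1-to-set semantics, so the two writes above disable the
	 * PPIs (bits 16-31) and enable the SGIs (bits 0-15) without a
	 * read-modify-write sequence.
	 */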

	/* Clear active priority bits */
	if (gic_num_priority_bits >= 5)
		arm_write_sysreg(ICH_AP1R0_EL2, 0);
	if (gic_num_priority_bits >= 6)
		arm_write_sysreg(ICH_AP1R1_EL2, 0);
	if (gic_num_priority_bits > 6) {
		arm_write_sysreg(ICH_AP1R2_EL2, 0);
		arm_write_sysreg(ICH_AP1R3_EL2, 0);
	}
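
	/*
	 * One active priority register covers 32 preemption levels, i.e.
	 * 5 priority bits. Implementations with 6 bits add ICH_AP1R1_EL2,
	 * and those with 7 bits use all four registers.
	 */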

	if (root_shutdown) {
		/* Restore the root config */
		arm_read_sysreg(ICH_VMCR_EL2, ich_vmcr);

		if (!(ich_vmcr & ICH_VMCR_VEOIM)) {
			u32 icc_ctlr;

			arm_read_sysreg(ICC_CTLR_EL1, icc_ctlr);
			icc_ctlr &= ~ICC_CTLR_EOImode;
			arm_write_sysreg(ICC_CTLR_EL1, icc_ctlr);
		}

		arm_write_sysreg(ICH_HCR_EL2, 0);
	}

	arm_write_sysreg(ICH_VMCR_EL2, 0);

	return 0;
}

static int gic_cpu_init(struct per_cpu *cpu_data)
{
	u64 typer;
	u32 pidr;
	u32 cell_icc_ctlr, cell_icc_pmr, cell_icc_igrpen1;
	u32 ich_vtr;
	u32 ich_vmcr;
	void *redist_base = gicr_base;

	/* Find this CPU's redistributor */
	do {
		pidr = mmio_read32(redist_base + GICR_PIDR2);
		gic_version = GICR_PIDR2_ARCH(pidr);
		if (gic_version != 3 && gic_version != 4)
			break;

		typer = mmio_read64(redist_base + GICR_TYPER);
		if ((typer >> 32) == cpu_data->cpu_id) {
			cpu_data->gicr_base = redist_base;
			break;
		}

		redist_base += 0x20000;
		if (gic_version == 4)
			redist_base += 0x20000;
	} while (!(typer & GICR_TYPER_Last));
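
	/*
	 * Each redistributor has two 64 KiB frames (RD_base and SGI_base);
	 * GICv4 adds two more for virtual LPIs, hence the doubled stride.
	 * Bits [63:32] of GICR_TYPER contain the CPU's affinity value, which
	 * the search above compares against cpu_id, assuming a linear
	 * mapping between the two. The Last bit flags the final
	 * redistributor in the region.
	 */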

	if (cpu_data->gicr_base == 0) {
		printk("GIC: No redist found for CPU%d\n", cpu_data->cpu_id);
		return -ENODEV;
	}

	/* Ensure all IPIs are enabled */
	mmio_write32(redist_base + GICR_SGI_BASE + GICR_ISENABLER, 0x0000ffff);

	/*
	 * Set EOImode to 1.
	 * This allows dropping the priority of level-triggered interrupts
	 * without deactivating them, and thus ensures that they won't be
	 * immediately re-triggered (e.g. the timer). They can then be
	 * injected into the guest using the LR.HW bit, and will be
	 * deactivated once the guest does an EOI after handling the
	 * interrupt.
	 */
	arm_read_sysreg(ICC_CTLR_EL1, cell_icc_ctlr);
	arm_write_sysreg(ICC_CTLR_EL1, ICC_CTLR_EOImode);

	arm_read_sysreg(ICC_PMR_EL1, cell_icc_pmr);
	arm_write_sysreg(ICC_PMR_EL1, ICC_PMR_DEFAULT);

	arm_read_sysreg(ICC_IGRPEN1_EL1, cell_icc_igrpen1);
	arm_write_sysreg(ICC_IGRPEN1_EL1, ICC_IGRPEN1_EN);

	arm_read_sysreg(ICH_VTR_EL2, ich_vtr);
	gic_num_lr = (ich_vtr & 0xf) + 1;
	gic_num_priority_bits = (ich_vtr >> 29) + 1;
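
	/*
	 * ICH_VTR_EL2: bits [4:0] hold the number of implemented list
	 * registers minus one, bits [31:29] the number of virtual priority
	 * bits minus one.
	 */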

	ich_vmcr = (cell_icc_pmr & ICC_PMR_MASK) << ICH_VMCR_VPMR_SHIFT;
	if (cell_icc_igrpen1 & ICC_IGRPEN1_EN)
		ich_vmcr |= ICH_VMCR_VENG1;
	if (cell_icc_ctlr & ICC_CTLR_EOImode)
		ich_vmcr |= ICH_VMCR_VEOIM;
	arm_write_sysreg(ICH_VMCR_EL2, ich_vmcr);

	/* After this, the cells access the virtual interface of the GIC. */
	arm_write_sysreg(ICH_HCR_EL2, ICH_HCR_EN);

	return 0;
}

static void gic_route_spis(struct cell *config_cell, struct cell *dest_cell)
{
	int i;
	void *irouter = gicd_base + GICD_IROUTER;
	unsigned int first_cpu;

	/* Use the core functions to retrieve the first physical id */
	for_each_cpu(first_cpu, dest_cell->cpu_set)
		break;

	for (i = 0; i < 64; i++, irouter += 8) {
		if (spi_in_cell(config_cell, i))
			mmio_write64(irouter, first_cpu);
	}
}
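
/*
 * gic_route_spis() writes the logical CPU id as the routing affinity,
 * which assumes that CPU ids and MPIDR affinity values coincide on the
 * supported platforms. Only the first 64 SPIs are covered.
 */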

static void gic_cell_init(struct cell *cell)
{
	gic_route_spis(cell, cell);
}

static void gic_cell_exit(struct cell *cell)
{
	/* Reset the interrupt routing of the cell's SPIs */
	gic_route_spis(cell, &root_cell);
}

static int gic_send_sgi(struct sgi *sgi)
{
	u64 val;
	u16 targets = sgi->targets;

	if (!is_sgi(sgi->id))
		return -EINVAL;

	if (sgi->routing_mode == 2)
		targets = 1 << phys_processor_id();

	val = (u64)sgi->aff3 << ICC_SGIR_AFF3_SHIFT
	    | (u64)sgi->aff2 << ICC_SGIR_AFF2_SHIFT
	    | sgi->aff1 << ICC_SGIR_AFF1_SHIFT
	    | (targets & ICC_SGIR_TARGET_MASK)
	    | (sgi->id & 0xf) << ICC_SGIR_IRQN_SHIFT;

	if (sgi->routing_mode == 1)
		val |= ICC_SGIR_ROUTING_BIT;

	/*
	 * Ensure the targets see our modifications to their per-cpu
	 * structures before they take the SGI.
	 */
	dsb(ish);

	arm_write_sysreg(ICC_SGI1R_EL1, val);
	isb();

	return 0;
}
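
/*
 * For reference, the ICC_SGI1R_EL1 layout assembled above: TargetList
 * [15:0], Aff1 [23:16], INTID [27:24], Aff2 [39:32], IRM [40] and Aff3
 * [55:48]. Setting IRM (the routing bit) broadcasts the SGI to all PEs
 * except the sender.
 */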

int gicv3_handle_sgir_write(struct per_cpu *cpu_data, u64 sgir)
{
	struct sgi sgi;
	unsigned long routing_mode = !!(sgir & ICC_SGIR_ROUTING_BIT);

	/* FIXME: clusters are not supported yet. */
	sgi.targets = sgir & ICC_SGIR_TARGET_MASK;
	sgi.routing_mode = routing_mode;
	sgi.aff1 = sgir >> ICC_SGIR_AFF1_SHIFT & 0xff;
	sgi.aff2 = sgir >> ICC_SGIR_AFF2_SHIFT & 0xff;
	sgi.aff3 = sgir >> ICC_SGIR_AFF3_SHIFT & 0xff;
	sgi.id = sgir >> ICC_SGIR_IRQN_SHIFT & 0xf;

	return gic_handle_sgir_write(cpu_data, &sgi, true);
}
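
/*
 * The 'true' argument marks the decoded target ids as virtual; the shared
 * gic_handle_sgir_write() is expected to translate them to physical CPU
 * ids before the SGI is actually sent.
 */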

static void gic_eoi_irq(u32 irq_id, bool deactivate)
{
	arm_write_sysreg(ICC_EOIR1_EL1, irq_id);
	if (deactivate)
		arm_write_sysreg(ICC_DIR_EL1, irq_id);
}
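
/*
 * With EOImode set to 1, the ICC_EOIR1_EL1 write only drops the running
 * priority; the interrupt stays active until the optional ICC_DIR_EL1
 * write. Callers skip the deactivation when the interrupt is forwarded to
 * a cell with LR.HW set, so that the guest's own EOI deactivates it.
 */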

static int gic_inject_irq(struct per_cpu *cpu_data, struct pending_irq *irq)
{
	int i;
	int free_lr = -1;
	u32 elsr;
	u64 lr;

	arm_read_sysreg(ICH_ELSR_EL2, elsr);
	for (i = 0; i < gic_num_lr; i++) {
		if ((elsr >> i) & 1) {
			/* Entry is invalid, candidate for injection */
			if (free_lr == -1)
				free_lr = i;
			continue;
		}

		/*
		 * Entry is in use, check that it doesn't match the one we
		 * want to inject.
		 */
		lr = gic_read_lr(i);

		/*
		 * A strict phys->virt id mapping is used for SPIs, so this
		 * test should be sufficient.
		 */
		if ((u32)lr == irq->virt_id)
			return -EINVAL;
	}

	if (free_lr == -1) {
		u32 hcr;

		/*
		 * All list registers are in use, trigger a maintenance
		 * interrupt once they are available again.
		 */
		arm_read_sysreg(ICH_HCR_EL2, hcr);
		hcr |= ICH_HCR_UIE;
		arm_write_sysreg(ICH_HCR_EL2, hcr);

		return -EBUSY;
	}

	lr = irq->virt_id;
	/* Only group 1 interrupts */
	lr |= ICH_LR_GROUP_BIT;
	lr |= ICH_LR_PENDING;
	if (irq->hw) {
		lr |= ICH_LR_HW_BIT;
		lr |= (u64)irq->type.irq << ICH_LR_PHYS_ID_SHIFT;
	} else if (irq->type.sgi.maintenance) {
		lr |= ICH_LR_SGI_EOI;
	}

	gic_write_lr(free_lr, lr);

	return 0;
}
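
/*
 * List register layout used above: vINTID in [31:0], pINTID in [44:32]
 * (only meaningful with the HW bit, 61), EOI maintenance request in bit
 * 41 when HW is clear, group in bit 60 and state in [63:62], where 01
 * means pending.
 */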

static int gic_handle_redist_access(struct per_cpu *cpu_data,
				    struct mmio_access *access)
{
	unsigned int cpu;
	unsigned int reg;
	int ret = TRAP_UNHANDLED;
	unsigned int virt_id;
	void *virt_redist = 0;
	void *phys_redist = 0;
	unsigned int redist_size = (gic_version == 4) ? 0x40000 : 0x20000;
	void *address = (void *)access->addr;

	/*
	 * The redistributor accessed by the cell is not the one stored in
	 * these cpu_datas, but the one associated with its virtual id. So
	 * we first need to translate the redistributor address.
	 */
	for_each_cpu(cpu, cpu_data->cell->cpu_set) {
		virt_id = arm_cpu_phys2virt(cpu);
		virt_redist = per_cpu(virt_id)->gicr_base;
		if (address >= virt_redist &&
		    address < virt_redist + redist_size) {
			phys_redist = per_cpu(cpu)->gicr_base;
			break;
		}
	}

	if (phys_redist == NULL)
		return TRAP_FORBIDDEN;

	reg = address - virt_redist;
	access->addr = (unsigned long)phys_redist + reg;

	/* Emulate the ID registers; all other accesses are allowed. */
	if (!access->is_write) {
		switch (reg) {
		case GICR_TYPER:
			if (virt_id == cpu_data->cell->arch.last_virt_id)
				access->val = GICR_TYPER_Last;
			else
				access->val = 0;
			/* AArch64 can use a 64-bit read for this register */
			if (access->size == 8)
				access->val |= (u64)virt_id << 32;

			ret = TRAP_HANDLED;
			break;
		case GICR_TYPER + 4:
			/* The upper word contains the affinity */
			access->val = virt_id;
			ret = TRAP_HANDLED;
			break;
		}
	}

	if (ret == TRAP_HANDLED)
		return ret;

	arch_mmio_access(access);

	return TRAP_HANDLED;
}
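
/*
 * Example: when a cell's virtual CPU 0 runs on physical CPU 2, an access
 * to the cell's first virtual redistributor is forwarded to CPU 2's
 * physical one, while reads of GICR_TYPER are emulated so that the cell
 * sees its own virtual CPU numbering and Last flag.
 */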

static int gic_mmio_access(struct per_cpu *cpu_data,
			   struct mmio_access *access)
{
	void *address = (void *)access->addr;

	if (address >= gicd_base && address < gicd_base + gicd_size)
		return gic_handle_dist_access(cpu_data, access);

	if (address >= gicr_base && address < gicr_base + gicr_size)
		return gic_handle_redist_access(cpu_data, access);

	return TRAP_UNHANDLED;
}

struct irqchip_ops gic_irqchip = {
	.init = gic_init,
	.cpu_init = gic_cpu_init,
	.cpu_reset = gic_cpu_reset,
	.cell_init = gic_cell_init,
	.cell_exit = gic_cell_exit,
	.send_sgi = gic_send_sgi,
	.handle_irq = gic_handle_irq,
	.inject_irq = gic_inject_irq,
	.eoi_irq = gic_eoi_irq,
	.mmio_access = gic_mmio_access,
};