/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <jailhouse/cell.h>
#include <jailhouse/control.h>
#include <jailhouse/mmio.h>
#include <asm/control.h>
#include <asm/gic_common.h>
#include <asm/irqchip.h>
#include <asm/percpu.h>
#include <asm/platform.h>
#include <asm/spinlock.h>
#include <asm/traps.h>
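/*
 * REG_RANGE expands to a GCC case-range label (`low ... high') covering n
 * registers of the given byte size, so that a whole register bank can be
 * matched by a single case in the distributor switch below.
 */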
#define REG_RANGE(base, n, size)		\
		(base) ... ((base) + (n - 1) * (size))
extern void *gicd_base;
extern unsigned int gicd_size;
static DEFINE_SPINLOCK(dist_lock);
/* The GIC interface numbering does not necessarily match the logical map */
static u8 target_cpu_map[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
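/*
 * Worked example (assumed GICv2 topology): if logical CPU 2 reads 0x04040404
 * from its banked GICD_ITARGETSR0, gic_probe_cpu_id() stores 0x04, i.e. bit 2
 * of an 8-bit targets field addresses that CPU's interface.
 */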
/*
 * Most of the GIC distributor writes only reconfigure the IRQs corresponding
 * to the bits of the written value, by using separate `set' and `clear'
 * registers. Such registers can be handled by setting the `is_poke' boolean,
 * which allows the handler to simply restrict mmio->value with the cell
 * configuration mask. Others, such as the priority registers, need to be
 * read and written back with a restricted value, under the distributor lock.
 */
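/*
 * Worked example (assuming a cell that owns only SPI 33): a guest write of
 * 0xffffffff to GICD_ISENABLER1 is a poke, so the value is masked down to
 * bit 1 and only IRQ 33 gets enabled; a write to its GICD_IPRIORITYR byte
 * instead goes through the read-modify-write path under dist_lock.
 */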
static enum mmio_result
restrict_bitmask_access(struct mmio_access *mmio, unsigned int reg_index,
			unsigned int bits_per_irq, bool is_poke)
{
	struct cell *cell = this_cell();
	unsigned int spi;
	unsigned long access_mask = 0;
	/*
	 * In order to avoid division, the number of bits per irq is limited
	 * to powers of 2 for the moment.
	 */
	unsigned long irqs_per_reg = 32 >> ffsl(bits_per_irq);
	unsigned long spi_bits = (1 << bits_per_irq) - 1;
	/* First, extract the first interrupt affected by this access */
	unsigned int first_irq = reg_index * irqs_per_reg;
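	/*
	 * Example: for GICD_ICFGR accesses, bits_per_irq = 2, hence
	 * irqs_per_reg = 32 >> ffsl(2) = 16 and spi_bits = 0x3.
	 */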
	/* For SGIs or PPIs, let the caller do the mmio access */
	if (!is_spi(first_irq)) {
		arm_mmio_perform_access((unsigned long)gicd_base, mmio);
		return MMIO_HANDLED;
	}
	/* For SPIs, compare against the cell config mask */
	first_irq -= 32;
	for (spi = first_irq; spi < first_irq + irqs_per_reg; spi++) {
		unsigned int bit_nr = (spi - first_irq) * bits_per_irq;

		if (spi_in_cell(cell, spi))
			access_mask |= spi_bits << bit_nr;
	}
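	/*
	 * access_mask now selects exactly the bit fields of the SPIs owned by
	 * this cell; all other fields must be preserved on write and read
	 * back as zero.
	 */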
	if (!mmio->is_write) {
		/* Restrict the read value */
		arm_mmio_perform_access((unsigned long)gicd_base, mmio);
		mmio->value &= access_mask;
		return MMIO_HANDLED;
	}
	if (!is_poke) {
		/*
		 * Modify the existing value of this register by first reading
		 * it into mmio->value.
		 * Relies on a spinlock since we need two mmio accesses.
		 */
		unsigned long access_val = mmio->value;

		spin_lock(&dist_lock);

		mmio->is_write = false;
		arm_mmio_perform_access((unsigned long)gicd_base, mmio);
		mmio->is_write = true;

		/* Clear the cell's bits written as 0, set those written as 1,
		 * and leave the other cells' fields untouched. */
		mmio->value &= ~(access_mask & ~access_val);
		mmio->value |= access_val & access_mask;
		arm_mmio_perform_access((unsigned long)gicd_base, mmio);

		spin_unlock(&dist_lock);
	} else {
		mmio->value &= access_mask;
		arm_mmio_perform_access((unsigned long)gicd_base, mmio);
	}

	return MMIO_HANDLED;
}
/*
 * GICv3 uses one 64bit IROUTER register per IRQ
 */
static enum mmio_result handle_irq_route(struct mmio_access *mmio,
					 unsigned int irq)
{
	struct cell *cell = this_cell();
	unsigned int cpu;
	/* Ignore aff3 on AArch32 (return 0) */
	if (mmio->size == 4 && (mmio->address % 8))
		return MMIO_HANDLED;
	/* SGIs and PPIs are res0 */
	if (!is_spi(irq))
		return MMIO_HANDLED;
	/*
	 * Ignore accesses to SPIs that do not belong to the cell. This isn't
	 * forbidden, because the guest driver may simply iterate over all
	 * registers at initialisation.
	 */
	if (!spi_in_cell(cell, irq - 32))
		return MMIO_HANDLED;
	/* Translate the virtual cpu id into the physical one */
	if (mmio->is_write) {
		mmio->value = arm_cpu_virt2phys(cell, mmio->value);
		if (mmio->value == -1) {
			printk("Attempt to route IRQ%d outside of cell\n",
			       irq);
			return MMIO_ERROR;
		}
		arm_mmio_perform_access((unsigned long)gicd_base, mmio);
	} else {
		cpu = mmio_read32(gicd_base + GICD_IROUTER + 8 * irq);
		mmio->value = arm_cpu_phys2virt(cpu);
	}

	return MMIO_HANDLED;
}
/*
 * GICv2 uses 8bit values for each IRQ in the ITARGETSR registers
 */
static enum mmio_result handle_irq_target(struct mmio_access *mmio,
					  unsigned int reg)
{
	/*
	 * ITARGETSR registers contain one byte per IRQ, so the first IRQ
	 * affected by this access corresponds to the reg index.
	 */
	struct cell *cell = this_cell();
	unsigned int i, cpu;
	unsigned int spi = reg - 32;
	unsigned int offset;
	u32 access_mask = 0;
	u8 targets;
	/*
	 * Let the guest freely access its SGIs and PPIs, which may be used to
	 * fill its CPU interface map.
	 */
	if (!is_spi(reg)) {
		arm_mmio_perform_access((unsigned long)gicd_base, mmio);
		return MMIO_HANDLED;
	}
	/*
	 * The registers are byte-accessible, extend the access to a word if
	 * necessary, and align the address.
	 */
	offset = spi % 4;
	mmio->address &= ~0x3;
	mmio->value <<= 8 * offset;
	mmio->size = 4;
	spi -= offset;
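	/*
	 * Example: a one-byte write for SPI 34 hits offset 2 within its
	 * ITARGETSR word, so the value is shifted left by 16 and the access
	 * is widened to the aligned 32-bit register.
	 */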
	for (i = 0; i < 4; i++, spi++) {
		if (spi_in_cell(cell, spi))
			access_mask |= 0xff << (8 * i);
		else
			continue;

		if (!mmio->is_write)
			continue;

		targets = (mmio->value >> (8 * i)) & 0xff;

		/* Check that the targeted interface belongs to the cell */
		for (cpu = 0; cpu < 8; cpu++) {
			if (!(targets & target_cpu_map[cpu]))
				continue;

			if (per_cpu(cpu)->cell == cell)
				continue;

			printk("Attempt to route SPI%d outside of cell\n",
			       spi);
			return MMIO_ERROR;
		}
	}
	if (mmio->is_write) {
		u32 itargetsr;

		spin_lock(&dist_lock);
		/* Read the word-aligned register this byte belongs to */
		itargetsr =
			mmio_read32(gicd_base + GICD_ITARGETSR + reg - offset);
		mmio->value &= access_mask;
		/* Combine with external SPIs */
		mmio->value |= (itargetsr & ~access_mask);
		/* And do the access */
		arm_mmio_perform_access((unsigned long)gicd_base, mmio);
		spin_unlock(&dist_lock);
	} else {
		arm_mmio_perform_access((unsigned long)gicd_base, mmio);
		mmio->value &= access_mask;
	}

	return MMIO_HANDLED;
}
static enum mmio_result handle_sgir_access(struct mmio_access *mmio)
{
	struct sgi sgi;
	unsigned long val = mmio->value;

	if (!mmio->is_write)
		return MMIO_HANDLED;

	sgi.targets = (val >> 16) & 0xff;
	sgi.routing_mode = (val >> 24) & 0x3;
	sgi.aff1 = 0;
	sgi.aff2 = 0;
	sgi.aff3 = 0;
	sgi.id = val & 0xf;

	gic_handle_sgir_write(&sgi, false);
	return MMIO_HANDLED;
}
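/*
 * GICv2 GICD_SGIR layout as decoded above (the SGI ID decode is assumed in
 * this reconstruction): bits [25:24] carry the routing mode
 * (TargetListFilter), bits [23:16] the CPU targets and bits [3:0] the SGI ID.
 * A write of 0x00030001, for instance, requests SGI 1 for CPU interfaces 0
 * and 1.
 */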
/*
 * Get the CPU interface ID for this cpu. It can be discovered by reading
 * the banked value of the PPI and IPI TARGET registers.
 * Patch 2bb3135 in Linux explains why the probe may need to scan the first 8
 * registers: some early implementations returned 0 for the first ITARGETSR
 * registers.
 * Since those didn't have virtualization extensions, we can safely ignore
 * that case.
 */
int gic_probe_cpu_id(unsigned int cpu)
{
	if (cpu >= ARRAY_SIZE(target_cpu_map))
		return -EINVAL;

	target_cpu_map[cpu] = mmio_read32(gicd_base + GICD_ITARGETSR);

	if (target_cpu_map[cpu] == 0)
		return -ENODEV;

	return 0;
}
void gic_handle_sgir_write(struct sgi *sgi, bool virt_input)
{
	struct per_cpu *cpu_data = this_cpu_data();
	unsigned int cpu;
	unsigned long targets;
	unsigned int this_cpu = cpu_data->cpu_id;
	struct cell *cell = cpu_data->cell;
	bool is_target = false;

	cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_VSGI]++;

	targets = sgi->targets;
	sgi->targets = 0;
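	/*
	 * sgi->targets is rebuilt below with the physical CPUs that pass the
	 * filter, so only interfaces belonging to this cell receive the
	 * injected SGI.
	 */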
	/* Filter the targets */
	for_each_cpu_except(cpu, cell->cpu_set, this_cpu) {
		/*
		 * When using a cpu map to target the different CPUs (GICv2),
		 * they are independent from the physical CPU IDs, so there is
		 * no need to translate them to the hypervisor's virtual IDs.
		 */
		if (virt_input)
			is_target = !!test_bit(arm_cpu_phys2virt(cpu),
					       &targets);
		else
			is_target = !!(targets & target_cpu_map[cpu]);

		if (sgi->routing_mode == 0 && !is_target)
			continue;

		irqchip_set_pending(per_cpu(cpu), sgi->id, false);
		sgi->targets |= (1 << cpu);
	}
	/* Let the other CPUs inject their SGIs */
	sgi->id = SGI_INJECT;
	irqchip_send_sgi(sgi);
}
enum mmio_result gic_handle_dist_access(void *arg, struct mmio_access *mmio)
{
	unsigned long reg = mmio->address;
	enum mmio_result ret;

	switch (reg) {
	case REG_RANGE(GICD_IROUTER, 1024, 8):
		ret = handle_irq_route(mmio, (reg - GICD_IROUTER) / 8);
		break;

	case REG_RANGE(GICD_ITARGETSR, 1024, 1):
		ret = handle_irq_target(mmio, reg - GICD_ITARGETSR);
		break;

	case REG_RANGE(GICD_ICENABLER, 32, 4):
	case REG_RANGE(GICD_ISENABLER, 32, 4):
	case REG_RANGE(GICD_ICPENDR, 32, 4):
	case REG_RANGE(GICD_ISPENDR, 32, 4):
	case REG_RANGE(GICD_ICACTIVER, 32, 4):
	case REG_RANGE(GICD_ISACTIVER, 32, 4):
		/* Poke registers: one bit per IRQ, mask and forward */
		ret = restrict_bitmask_access(mmio, (reg & 0x7f) / 4, 1, true);
		break;

	case REG_RANGE(GICD_IGROUPR, 32, 4):
		ret = restrict_bitmask_access(mmio, (reg & 0x7f) / 4, 1, false);
		break;

	case REG_RANGE(GICD_ICFGR, 64, 4):
		ret = restrict_bitmask_access(mmio, (reg & 0xff) / 4, 2, false);
		break;

	case REG_RANGE(GICD_IPRIORITYR, 255, 4):
		ret = restrict_bitmask_access(mmio, (reg & 0x3ff) / 4, 8,
					      false);
		break;

	case GICD_SGIR:
		ret = handle_sgir_access(mmio);
		break;

	case REG_RANGE(GICD_PIDR0, 4, 4):
	case REG_RANGE(GICD_PIDR4, 4, 4):
	case REG_RANGE(GICD_CIDR0, 4, 4):
		/* Allow read access, ignore write */
		if (!mmio->is_write)
			arm_mmio_perform_access((unsigned long)gicd_base,
						mmio);
		ret = MMIO_HANDLED;
		break;

	default:
		/* Ignore accesses to all other registers */
		ret = MMIO_HANDLED;
	}

	return ret;
}
void gic_handle_irq(struct per_cpu *cpu_data)
{
	bool handled = false;
	u32 irq_id;

	while (1) {
		/* Read IAR1: set 'active' state */
		irq_id = gic_read_iar();

		if (irq_id == 0x3ff) /* Spurious IRQ */
			break;
		/* Handle IRQ */
		if (is_sgi(irq_id)) {
			arch_handle_sgi(cpu_data, irq_id);
			handled = true;
		} else {
			handled = arch_handle_phys_irq(cpu_data, irq_id);
		}
		/*
		 * Write EOIR1: drop priority, but stay active if handled is
		 * false.
		 * This avoids being re-interrupted by a level-triggered
		 * interrupt that still needs handling in the guest
		 * (e.g. timer).
		 */
		irqchip_eoi_irq(irq_id, handled);
	}
}
void gic_target_spis(struct cell *config_cell, struct cell *dest_cell)
{
	unsigned int i, first_cpu, cpu_itf;
	unsigned int shift = 0;
	void *itargetsr = gicd_base + GICD_ITARGETSR;
	u32 targets;
	u32 mask = 0;
	u32 bits = 0;

	/* Always route to the first logical CPU on reset */
	for_each_cpu(first_cpu, dest_cell->cpu_set)
		break;

	cpu_itf = target_cpu_map[first_cpu];
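	/*
	 * Example (assumed config): if SPIs 33 and 34 belong to config_cell
	 * and cpu_itf is 0x02, the loop below accumulates mask = 0x00ffff00
	 * and bits = 0x00020200 before patching the first SPI register.
	 */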
	/* ITARGETSR0-7 contain the PPIs and SGIs, and are read-only. */
	itargetsr += 4 * 8;
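	/* itargetsr now points at GICD_ITARGETSR8, the first register
	 * covering SPIs (IRQs 32-35) */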
	for (i = 0; i < 64; i++, shift = (shift + 8) % 32) {
		if (spi_in_cell(config_cell, i)) {
			mask |= (0xff << shift);
			bits |= (cpu_itf << shift);
		}
		/* ITARGETSRs hold 4 IRQs per register */
		if ((i + 1) % 4 == 0) {
			targets = mmio_read32(itargetsr);
			targets &= ~mask;
			targets |= bits;
			mmio_write32(itargetsr, targets);

			itargetsr += 4;
			mask = 0;
			bits = 0;
		}
	}
}