/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <jailhouse/control.h>
#include <jailhouse/mmio.h>
#include <jailhouse/printk.h>
#include <asm/control.h>
#include <asm/gic_common.h>
#include <asm/irqchip.h>
#include <asm/percpu.h>
#include <asm/platform.h>
#include <asm/spinlock.h>
#include <asm/traps.h>
/*
 * Expand to a GCC case range covering `n' consecutive registers of `size'
 * bytes each, starting at offset `base'. Used in gic_handle_dist_access().
 */
#define REG_RANGE(base, n, size)		\
		(base) ... ((base) + (n - 1) * (size))
27 extern void *gicd_base;
28 extern unsigned int gicd_size;
30 static DEFINE_SPINLOCK(dist_lock);
32 /* The GIC interface numbering does not necessarily match the logical map */
33 u8 target_cpu_map[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
36 * Most of the GIC distributor writes only reconfigure the IRQs corresponding to
37 * the bits of the written value, by using separate `set' and `clear' registers.
38 * Such registers can be handled by setting the `is_poke' boolean, which allows
39 * to simply restrict the access->val with the cell configuration mask.
40 * Others, such as the priority registers, will need to be read and written back
41 * with a restricted value, by using the distributor lock.
43 static int restrict_bitmask_access(struct per_cpu *cpu_data,
44 struct mmio_access *access,
45 unsigned int reg_index,
46 unsigned int bits_per_irq,
50 unsigned long access_mask = 0;
52 * In order to avoid division, the number of bits per irq is limited
53 * to powers of 2 for the moment.
55 unsigned long irqs_per_reg = 32 >> ffsl(bits_per_irq);
56 unsigned long spi_bits = (1 << bits_per_irq) - 1;
57 /* First, extract the first interrupt affected by this access */
58 unsigned int first_irq = reg_index * irqs_per_reg;
60 /* For SGIs or PPIs, let the caller do the mmio access */
61 if (!is_spi(first_irq))
62 return TRAP_UNHANDLED;
64 /* For SPIs, compare against the cell config mask */
66 for (spi = first_irq; spi < first_irq + irqs_per_reg; spi++) {
67 unsigned int bit_nr = (spi - first_irq) * bits_per_irq;
68 if (spi_in_cell(cpu_data->cell, spi))
69 access_mask |= spi_bits << bit_nr;
72 if (!access->is_write) {
73 /* Restrict the read value */
74 arch_mmio_access(access);
75 access->val &= access_mask;
81 * Modify the existing value of this register by first reading
83 * Relies on a spinlock since we need two mmio accesses.
85 unsigned long access_val = access->val;
87 spin_lock(&dist_lock);
89 access->is_write = false;
90 arch_mmio_access(access);
91 access->is_write = true;
94 access->val &= ~(access_mask & ~access_val);
95 access->val |= access_val;
96 arch_mmio_access(access);
98 spin_unlock(&dist_lock);
102 access->val &= access_mask;
104 return TRAP_UNHANDLED;
109 * GICv3 uses a 64bit register IROUTER for each IRQ
111 static int handle_irq_route(struct per_cpu *cpu_data,
112 struct mmio_access *access, unsigned int irq)
114 struct cell *cell = cpu_data->cell;
117 /* Ignore aff3 on AArch32 (return 0) */
118 if (access->size == 4 && (access->addr % 8))
121 /* SGIs and PPIs are res0 */
126 * Ignore accesses to SPIs that do not belong to the cell. This isn't
127 * forbidden, because the guest driver may simply iterate over all
128 * registers at initialisation
130 if (!spi_in_cell(cell, irq - 32))
133 /* Translate the virtual cpu id into the physical one */
134 if (access->is_write) {
135 access->val = arm_cpu_virt2phys(cell, access->val);
136 if (access->val == -1) {
137 printk("Attempt to route IRQ%d outside of cell\n", irq);
138 return TRAP_FORBIDDEN;
140 /* And do the access */
141 return TRAP_UNHANDLED;
143 cpu = mmio_read32(gicd_base + GICD_IROUTER + 8 * irq);
144 access->val = arm_cpu_phys2virt(cpu);
150 * GICv2 uses 8bit values for each IRQ in the ITARGETRs registers
152 static int handle_irq_target(struct per_cpu *cpu_data,
153 struct mmio_access *access, unsigned int reg)
156 * ITARGETSR contain one byte per IRQ, so the first one affected by this
157 * access corresponds to the reg index
160 unsigned int spi = reg - 32;
166 * Let the guest freely access its SGIs and PPIs, which may be used to
167 * fill its CPU interface map.
170 return TRAP_UNHANDLED;
173 * The registers are byte-accessible, extend the access to a word if
177 access->val <<= 8 * offset;
181 for (i = 0; i < 4; i++, spi++) {
182 if (spi_in_cell(cpu_data->cell, spi))
183 access_mask |= 0xff << (8 * i);
187 if (!access->is_write)
190 targets = (access->val >> (8 * i)) & 0xff;
192 /* Check that the targeted interface belongs to the cell */
193 for (cpu = 0; cpu < 8; cpu++) {
194 if (!(targets & target_cpu_map[cpu]))
197 if (per_cpu(cpu)->cell == cpu_data->cell)
200 printk("Attempt to route SPI%d outside of cell\n", spi);
201 return TRAP_FORBIDDEN;
205 if (access->is_write) {
206 spin_lock(&dist_lock);
208 mmio_read32(gicd_base + GICD_ITARGETSR + reg + offset);
209 access->val &= access_mask;
210 /* Combine with external SPIs */
211 access->val |= (itargetsr & ~access_mask);
212 /* And do the access */
213 arch_mmio_access(access);
214 spin_unlock(&dist_lock);
216 arch_mmio_access(access);
217 access->val &= access_mask;
223 static int handle_sgir_access(struct per_cpu *cpu_data,
224 struct mmio_access *access)
227 unsigned long val = access->val;
229 if (!access->is_write)
232 sgi.targets = (val >> 16) & 0xff;
233 sgi.routing_mode = (val >> 24) & 0x3;
239 return gic_handle_sgir_write(cpu_data, &sgi, false);
243 * Get the CPU interface ID for this cpu. It can be discovered by reading
244 * the banked value of the PPI and IPI TARGET registers
245 * Patch 2bb3135 in Linux explains why the probe may need to scans the first 8
246 * registers: some early implementation returned 0 for the first TARGETS
248 * Since those didn't have virtualization extensions, we can safely ignore that
251 int gic_probe_cpu_id(unsigned int cpu)
256 target_cpu_map[cpu] = mmio_read32(gicd_base + GICD_ITARGETSR);
258 if (target_cpu_map[cpu] == 0)
264 int gic_handle_sgir_write(struct per_cpu *cpu_data, struct sgi *sgi,
268 unsigned long targets;
269 unsigned int this_cpu = cpu_data->cpu_id;
270 struct cell *cell = cpu_data->cell;
271 bool is_target = false;
273 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_VSGI]++;
275 targets = sgi->targets;
278 /* Filter the targets */
279 for_each_cpu_except(cpu, cell->cpu_set, this_cpu) {
281 * When using a cpu map to target the different CPUs (GICv2),
282 * they are independent from the physical CPU IDs, so there is
283 * no need to translate them to the hypervisor's virtual IDs.
286 is_target = !!test_bit(arm_cpu_phys2virt(cpu),
289 is_target = !!(targets & target_cpu_map[cpu]);
291 if (sgi->routing_mode == 0 && !is_target)
294 irqchip_set_pending(per_cpu(cpu), sgi->id, false);
295 sgi->targets |= (1 << cpu);
298 /* Let the other CPUS inject their SGIs */
299 sgi->id = SGI_INJECT;
300 irqchip_send_sgi(sgi);
305 int gic_handle_dist_access(struct per_cpu *cpu_data,
306 struct mmio_access *access)
309 unsigned long reg = access->addr - (unsigned long)gicd_base;
312 case REG_RANGE(GICD_IROUTER, 1024, 8):
313 ret = handle_irq_route(cpu_data, access,
314 (reg - GICD_IROUTER) / 8);
317 case REG_RANGE(GICD_ITARGETSR, 1024, 1):
318 ret = handle_irq_target(cpu_data, access, reg - GICD_ITARGETSR);
321 case REG_RANGE(GICD_ICENABLER, 32, 4):
322 case REG_RANGE(GICD_ISENABLER, 32, 4):
323 case REG_RANGE(GICD_ICPENDR, 32, 4):
324 case REG_RANGE(GICD_ISPENDR, 32, 4):
325 case REG_RANGE(GICD_ICACTIVER, 32, 4):
326 case REG_RANGE(GICD_ISACTIVER, 32, 4):
327 ret = restrict_bitmask_access(cpu_data, access,
328 (reg & 0x7f) / 4, 1, true);
331 case REG_RANGE(GICD_IGROUPR, 32, 4):
332 ret = restrict_bitmask_access(cpu_data, access,
333 (reg & 0x7f) / 4, 1, false);
336 case REG_RANGE(GICD_ICFGR, 64, 4):
337 ret = restrict_bitmask_access(cpu_data, access,
338 (reg & 0xff) / 4, 2, false);
341 case REG_RANGE(GICD_IPRIORITYR, 255, 4):
342 ret = restrict_bitmask_access(cpu_data, access,
343 (reg & 0x3ff) / 4, 8, false);
347 ret = handle_sgir_access(cpu_data, access);
353 case REG_RANGE(GICD_PIDR0, 4, 4):
354 case REG_RANGE(GICD_PIDR4, 4, 4):
355 case REG_RANGE(GICD_CIDR0, 4, 4):
356 /* Allow read access, ignore write */
357 ret = (access->is_write ? TRAP_HANDLED : TRAP_UNHANDLED);
365 /* The sub-handlers return TRAP_UNHANDLED to allow the access */
366 if (ret == TRAP_UNHANDLED) {
367 arch_mmio_access(access);
374 void gic_handle_irq(struct per_cpu *cpu_data)
376 bool handled = false;
380 /* Read IAR1: set 'active' state */
381 irq_id = gic_read_iar();
383 if (irq_id == 0x3ff) /* Spurious IRQ */
387 if (is_sgi(irq_id)) {
388 arch_handle_sgi(cpu_data, irq_id);
391 handled = arch_handle_phys_irq(cpu_data, irq_id);
395 * Write EOIR1: drop priority, but stay active if handled is
397 * This allows to not be re-interrupted by a level-triggered
398 * interrupt that needs handling in the guest (e.g. timer)
400 irqchip_eoi_irq(irq_id, handled);
404 void gic_target_spis(struct cell *config_cell, struct cell *dest_cell)
406 unsigned int i, first_cpu, cpu_itf;
407 unsigned int shift = 0;
408 void *itargetsr = gicd_base + GICD_ITARGETSR;
413 /* Always route to the first logical CPU on reset */
414 for_each_cpu(first_cpu, dest_cell->cpu_set)
417 cpu_itf = target_cpu_map[first_cpu];
419 /* ITARGETSR0-7 contain the PPIs and SGIs, and are read-only. */
422 for (i = 0; i < 64; i++, shift = (shift + 8) % 32) {
423 if (spi_in_cell(config_cell, i)) {
424 mask |= (0xff << shift);
425 bits |= (cpu_itf << shift);
428 /* ITARGETRs have 4 IRQ per register */
429 if ((i + 1) % 4 == 0) {
430 targets = mmio_read32(itargetsr);
433 mmio_write32(itargetsr, targets);