2 * Jailhouse, a Linux-based partitioning hypervisor
4 * Copyright (c) ARM Limited, 2014
7 * Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
13 #include <jailhouse/cell.h>
14 #include <jailhouse/control.h>
15 #include <jailhouse/mmio.h>
16 #include <jailhouse/printk.h>
17 #include <asm/control.h>
18 #include <asm/gic_common.h>
19 #include <asm/irqchip.h>
20 #include <asm/percpu.h>
21 #include <asm/platform.h>
22 #include <asm/spinlock.h>
23 #include <asm/traps.h>
/*
 * Expand to a GNU C case range covering an n-register bank that starts at
 * offset `base' with a stride of `size' bytes:
 *     case (base) ... ((base) + ((n) - 1) * (size)):
 * All three arguments are fully parenthesized so that expression arguments
 * (e.g. `a | b') expand correctly.
 */
#define REG_RANGE(base, n, size) \
	(base) ... ((base) + ((n) - 1) * (size))
/*
 * Distributor MMIO mapping.
 * NOTE(review): defined elsewhere (presumably the platform/GIC driver) —
 * confirm against the defining translation unit.
 */
28 extern void *gicd_base;
29 extern unsigned int gicd_size;
/* Serializes read-modify-write sequences on distributor registers. */
31 static DEFINE_SPINLOCK(dist_lock);
33 /* The GIC interface numbering does not necessarily match the logical map */
/*
 * Cached GICD_ITARGETSR byte for each logical CPU, filled by
 * gic_probe_cpu_id(); 0 means the interface has not (successfully) been
 * probed yet.
 */
34 static u8 target_cpu_map[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
36 /* Check that the targeted interface belongs to the cell */
/*
 * @cell:    cell the interrupt must stay within
 * @targets: CPU-interface bitmap as used in GICD_ITARGETSR / GICD_SGIR
 *
 * Scans every known CPU interface; an interface that is both targeted and
 * owned by a different cell makes the check fail.
 * NOTE(review): this extract is missing lines here (opening brace, the
 * declaration of `cpu', and the return statements) — compare with the
 * original file before editing.
 */
37 bool gic_targets_in_cell(struct cell *cell, u8 targets)
41 for (cpu = 0; cpu < ARRAY_SIZE(target_cpu_map); cpu++)
/* Targeted interface owned by another cell -> reject. */
42 if (targets & target_cpu_map[cpu] &&
43 per_cpu(cpu)->cell != cell)
/*
50 * Most of the GIC distributor writes only reconfigure the IRQs corresponding to
51 * the bits of the written value, by using separate `set' and `clear' registers.
52 * Such registers can be handled by setting the `is_poke' boolean, which allows
53 * to simply restrict the mmio->value with the cell configuration mask.
54 * Others, such as the priority registers, will need to be read and written back
55 * with a restricted value, by using the distributor lock.
 *
 * @mmio:         in/out access descriptor (address, value, is_write)
 * @reg_index:    index of the accessed register inside its bank
 * @bits_per_irq: configuration bits per interrupt (must be a power of 2)
 * @is_poke:      true for set/clear-style (write-1-to-act) registers
 *
 * NOTE(review): lines are missing from this extract (braces, the
 * declaration of `irq', the return statements and the if/else keywords
 * separating the three branches below).
 */
57 static enum mmio_result
58 restrict_bitmask_access(struct mmio_access *mmio, unsigned int reg_index,
59 unsigned int bits_per_irq, bool is_poke)
61 struct cell *cell = this_cell();
63 unsigned long access_mask = 0;
/*
65 * In order to avoid division, the number of bits per irq is limited
66 * to powers of 2 for the moment.
 */
68 unsigned long irqs_per_reg = 32 >> ffsl(bits_per_irq);
69 unsigned long irq_bits = (1 << bits_per_irq) - 1;
70 /* First, extract the first interrupt affected by this access */
71 unsigned int first_irq = reg_index * irqs_per_reg;
/* Mask of the per-IRQ bit fields that belong to the current cell. */
73 for (irq = 0; irq < irqs_per_reg; irq++)
74 if (irqchip_irq_in_cell(cell, first_irq + irq))
75 access_mask |= irq_bits << (irq * bits_per_irq);
77 if (!mmio->is_write) {
78 /* Restrict the read value */
79 mmio_perform_access(gicd_base, mmio);
80 mmio->value &= access_mask;
/*
86 * Modify the existing value of this register by first reading
88 * Relies on a spinlock since we need two mmio accesses.
 */
90 unsigned long access_val = mmio->value;
92 spin_lock(&dist_lock);
/* Read the current hardware value into mmio->value... */
94 mmio->is_write = false;
95 mmio_perform_access(gicd_base, mmio);
96 mmio->is_write = true;
/*
 * ...clear the cell-owned fields the guest left at zero, then merge
 * in the guest's value, preserving fields of foreign-cell IRQs.
 */
99 mmio->value &= ~(access_mask & ~access_val);
100 mmio->value |= access_val;
101 mmio_perform_access(gicd_base, mmio);
103 spin_unlock(&dist_lock);
/* Poke registers: just drop the bits of IRQs the cell does not own. */
105 mmio->value &= access_mask;
106 mmio_perform_access(gicd_base, mmio);
/*
112 * GICv2 uses 8bit values for each IRQ in the ITARGETRs registers
 *
 * Emulates a guest access to GICD_ITARGETSR: reads are restricted to the
 * cell's own IRQ bytes, writes are additionally checked so that no IRQ can
 * be routed to a CPU interface outside the cell.
 * NOTE(review): lines are missing from this extract (the second parameter
 * of the function, braces, declarations of `i'/`irq'/`offset'/`targets'/
 * `itargetsr'/`access_mask', return statements and branch keywords).
 */
114 static enum mmio_result handle_irq_target(struct mmio_access *mmio,
/*
118 * ITARGETSR contain one byte per IRQ, so the first one affected by this
119 * access corresponds to the reg index
 */
121 struct cell *cell = this_cell();
/*
128 * Let the guest freely access its SGIs and PPIs, which may be used to
129 * fill its CPU interface map.
 */
132 mmio_perform_access(gicd_base, mmio);
/*
137 * The registers are byte-accessible, but we always do word accesses.
 */
140 mmio->address &= ~0x3;
141 mmio->value <<= 8 * offset;
/* Build the byte mask of the 4 IRQs that belong to the cell. */
145 for (i = 0; i < 4; i++, irq++) {
146 if (irqchip_irq_in_cell(cell, irq))
147 access_mask |= 0xff << (8 * i);
/* Per-IRQ target byte written by the guest. */
154 targets = (mmio->value >> (8 * i)) & 0xff;
/* Refuse to route a cell-owned IRQ to a foreign CPU interface. */
156 if (!gic_targets_in_cell(cell, targets)) {
157 printk("Attempt to route IRQ%d outside of cell\n", irq);
/* Write path: locked read-modify-write of the whole word. */
162 if (mmio->is_write) {
163 spin_lock(&dist_lock);
165 mmio_read32(gicd_base + GICD_ITARGETSR + irq + offset);
166 mmio->value &= access_mask;
167 /* Combine with external SPIs */
168 mmio->value |= (itargetsr & ~access_mask);
169 /* And do the access */
170 mmio_perform_access(gicd_base, mmio);
171 spin_unlock(&dist_lock);
/* Read path: perform the access, then hide foreign-cell bytes. */
173 mmio_perform_access(gicd_base, mmio);
174 mmio->value &= access_mask;
/*
 * Decode a guest write to GICD_SGIR into a struct sgi and forward it to
 * gic_handle_sgir_write() for filtering and injection.
 * NOTE(review): lines are missing from this extract (braces, the sgi
 * declaration, the is_write check, further field decoding and the return).
 */
180 static enum mmio_result handle_sgir_access(struct mmio_access *mmio)
183 unsigned long val = mmio->value;
/* GICD_SGIR layout: targets in bits [23:16], routing mode in [25:24]. */
188 sgi.targets = (val >> 16) & 0xff;
189 sgi.routing_mode = (val >> 24) & 0x3;
/* false: targets come from the register, not a virtual CPU bitmap. */
195 gic_handle_sgir_write(&sgi, false);
/*
200 * Get the CPU interface ID for this cpu. It can be discovered by reading
201 * the banked value of the PPI and IPI TARGET registers
202 * Patch 2bb3135 in Linux explains why the probe may need to scan the first 8
203 * registers: some early implementation returned 0 for the first ITARGETSR
205 * Since those didn't have virtualization extensions, we can safely ignore that
 *
 * Caches this CPU's interface bit in target_cpu_map[cpu].
 * NOTE(review): lines are missing from this extract (braces and the
 * error/success return statements after both checks).
 */
208 int gic_probe_cpu_id(unsigned int cpu)
210 if (cpu >= ARRAY_SIZE(target_cpu_map))
/* GICD_ITARGETSR0 is banked: reads back the current CPU's interface. */
213 target_cpu_map[cpu] = mmio_read32(gicd_base + GICD_ITARGETSR)
215 if (target_cpu_map[cpu] == 0)
/*
 * Filter and inject a software-generated interrupt on behalf of the guest.
 * @sgi:        decoded SGI (id, targets, routing mode); sgi->targets is
 *              rewritten to the filtered physical target set
 * @virt_input: true when targets is a virtual-CPU bitmap, false when it
 *              uses the GICv2 CPU-interface numbering (target_cpu_map)
 * NOTE(review): lines are missing from this extract (braces, the `cpu'
 * declaration, targets reset, branch keywords and `continue's).
 */
221 void gic_handle_sgir_write(struct sgi *sgi, bool virt_input)
223 struct per_cpu *cpu_data = this_cpu_data();
225 unsigned long targets;
226 unsigned int this_cpu = cpu_data->cpu_id;
227 struct cell *cell = cpu_data->cell;
228 bool is_target = false;
/* Account the vSGI exit for this CPU's statistics. */
230 cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_VSGI]++;
232 targets = sgi->targets;
235 /* Filter the targets */
/* Walk the cell's CPUs, excluding the sender itself. */
236 for_each_cpu_except(cpu, cell->cpu_set, this_cpu) {
/*
238 * When using a cpu map to target the different CPUs (GICv2),
239 * they are independent from the physical CPU IDs, so there is
240 * no need to translate them to the hypervisor's virtual IDs.
 */
243 is_target = !!test_bit(arm_cpu_phys2virt(cpu),
/* GICv2 path: match against the probed CPU-interface bits. */
246 is_target = !!(targets & target_cpu_map[cpu]);
/* Routing mode 0 = targeted list; skip CPUs not in the list. */
248 if (sgi->routing_mode == 0 && !is_target)
/* Mark the SGI pending for the target and record it in the set. */
251 irqchip_set_pending(per_cpu(cpu), sgi->id);
252 sgi->targets |= (1 << cpu);
255 /* Let the other CPUS inject their SGIs */
256 sgi->id = SGI_INJECT;
257 irqchip_send_sgi(sgi);
/*
 * Central dispatcher for guest accesses to the GIC distributor.
 * Routes each register (range) to its dedicated handler or to
 * restrict_bitmask_access() with the register bank's bits-per-IRQ width.
 * NOTE(review): lines are missing from this extract (braces, the `switch'
 * statement itself, `break's, several case labels and the final return).
 */
260 enum mmio_result gic_handle_dist_access(void *arg, struct mmio_access *mmio)
262 unsigned long reg = mmio->address;
263 enum mmio_result ret;
/* GICv3 affinity routing registers: 64-bit, one per SPI. */
266 case REG_RANGE(GICD_IROUTER, 1024, 8):
267 ret = gic_handle_irq_route(mmio, (reg - GICD_IROUTER) / 8);
/* GICv2 target registers: one byte per IRQ. */
270 case REG_RANGE(GICD_ITARGETSR, 1024, 1):
271 ret = handle_irq_target(mmio, reg - GICD_ITARGETSR);
/* 1 bit per IRQ, write-1-to-act (`poke') registers. */
274 case REG_RANGE(GICD_ICENABLER, 32, 4):
275 case REG_RANGE(GICD_ISENABLER, 32, 4):
276 case REG_RANGE(GICD_ICPENDR, 32, 4):
277 case REG_RANGE(GICD_ISPENDR, 32, 4):
278 case REG_RANGE(GICD_ICACTIVER, 32, 4):
279 case REG_RANGE(GICD_ISACTIVER, 32, 4):
280 ret = restrict_bitmask_access(mmio, (reg & 0x7f) / 4, 1, true);
/* 1 bit per IRQ, plain read-modify-write register. */
283 case REG_RANGE(GICD_IGROUPR, 32, 4):
284 ret = restrict_bitmask_access(mmio, (reg & 0x7f) / 4, 1, false);
/* 2 configuration bits per IRQ. */
287 case REG_RANGE(GICD_ICFGR, 64, 4):
288 ret = restrict_bitmask_access(mmio, (reg & 0xff) / 4, 2, false);
/* 8 priority bits per IRQ. */
291 case REG_RANGE(GICD_IPRIORITYR, 255, 4):
292 ret = restrict_bitmask_access(mmio, (reg & 0x3ff) / 4, 8,
/* Software-generated interrupts need target filtering. */
297 ret = handle_sgir_access(mmio);
/* Identification registers. */
303 case REG_RANGE(GICD_PIDR0, 4, 4):
304 case REG_RANGE(GICD_PIDR4, 4, 4):
305 case REG_RANGE(GICD_CIDR0, 4, 4):
306 /* Allow read access, ignore write */
308 mmio_perform_access(gicd_base, mmio);
/*
 * Top-level interrupt entry: acknowledge pending IRQs, dispatch SGIs to
 * the hypervisor handler and physical IRQs to the generic handler, then
 * signal completion.
 * NOTE(review): lines are missing from this extract (braces, the `irq_id'
 * declaration, the acknowledge loop and branch keywords).
 */
318 void gic_handle_irq(struct per_cpu *cpu_data)
320 bool handled = false;
324 /* Read IAR1: set 'active' state */
325 irq_id = gic_read_iar();
/* 0x3ff is the architectural spurious-interrupt ID. */
327 if (irq_id == 0x3ff) /* Spurious IRQ */
/* SGIs are handled by the hypervisor itself. */
331 if (is_sgi(irq_id)) {
332 arch_handle_sgi(cpu_data, irq_id);
/* handled == true means the IRQ must not be forwarded to the guest. */
335 handled = arch_handle_phys_irq(cpu_data, irq_id);
/*
339 * Write EOIR1: drop priority, but stay active if handled is
341 * This allows to not be re-interrupted by a level-triggered
342 * interrupt that needs handling in the guest (e.g. timer)
 */
344 irqchip_eoi_irq(irq_id, handled);
/*
 * Route all SPIs owned by @config_cell to the first CPU of @dest_cell by
 * rewriting the relevant ITARGETSR bytes (used on cell creation/reset).
 * NOTE(review): this function is truncated at the end of the extract and
 * lines are missing throughout (braces, declarations of `mask'/`bits'/
 * `targets', loop-break, and the end of the register-commit path).
 */
348 void gic_target_spis(struct cell *config_cell, struct cell *dest_cell)
350 unsigned int i, first_cpu, cpu_itf;
351 unsigned int shift = 0;
352 void *itargetsr = gicd_base + GICD_ITARGETSR;
357 /* Always route to the first logical CPU on reset */
358 for_each_cpu(first_cpu, dest_cell->cpu_set)
/* Translate the logical CPU to its GIC interface bit. */
361 cpu_itf = target_cpu_map[first_cpu];
363 /* ITARGETSR0-7 contain the PPIs and SGIs, and are read-only. */
/* Walk SPIs 32..95; shift cycles 0,8,16,24 over each 32-bit register. */
366 for (i = 0; i < 64; i++, shift = (shift + 8) % 32) {
367 if (irqchip_irq_in_cell(config_cell, 32 + i)) {
368 mask |= (0xff << shift);
369 bits |= (cpu_itf << shift);
372 /* ITARGETRs have 4 IRQ per register */
/* Commit the accumulated word once 4 IRQ bytes are collected. */
373 if ((i + 1) % 4 == 0) {
374 targets = mmio_read32(itargetsr);
377 mmio_write32(itargetsr, targets);