/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/control.h>
#include <jailhouse/mmio.h>
#include <jailhouse/printk.h>
#include <asm/control.h>
#include <asm/gic_common.h>
#include <asm/irqchip.h>
#include <asm/percpu.h>
#include <asm/platform.h>
#include <asm/spinlock.h>
#include <asm/traps.h>
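
/*
 * Expand to a GCC case range covering `n' consecutive registers of `size'
 * bytes starting at offset `base'; used below to match whole banks of
 * distributor registers with a single switch case.
 */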
#define REG_RANGE(base, n, size)		\
		(base) ... ((base) + (n - 1) * (size))

extern void *gicd_base;
extern unsigned int gicd_size;
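
/* Serialises read-modify-write updates of shared distributor registers */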
static DEFINE_SPINLOCK(dist_lock);

/*
 * Most GIC distributor writes only reconfigure the IRQs corresponding to the
 * bits of the written value, by using separate `set' and `clear' registers.
 * Such registers can be handled by setting the `is_poke' boolean, in which
 * case it is enough to restrict access->val with the cell configuration mask.
 * Others, such as the priority registers, need to be read and written back
 * with a restricted value, under the distributor lock.
 */

static int restrict_bitmask_access(struct per_cpu *cpu_data,
                                   struct mmio_access *access,
                                   unsigned int reg_index,
                                   unsigned int bits_per_irq,
                                   bool is_poke)
{
        unsigned int spi;
        unsigned long access_mask = 0;
        /*
         * In order to avoid division, the number of bits per irq is limited
         * to powers of 2 for the moment.
         */
        unsigned long irqs_per_reg = 32 >> ffsl(bits_per_irq);
        unsigned long spi_bits = (1 << bits_per_irq) - 1;
        /* First, extract the first interrupt affected by this access */
        unsigned int first_irq = reg_index * irqs_per_reg;

        /* For SGIs or PPIs, let the caller do the mmio access */
        if (!is_spi(first_irq))
                return TRAP_UNHANDLED;

        /*
         * For SPIs, compare against the cell config mask. spi_in_cell()
         * takes SPI numbers relative to IRQ 32.
         */
        first_irq -= 32;
        for (spi = first_irq; spi < first_irq + irqs_per_reg; spi++) {
                unsigned int bit_nr = (spi - first_irq) * bits_per_irq;
                if (spi_in_cell(cpu_data->cell, spi))
                        access_mask |= spi_bits << bit_nr;
        }

        if (!access->is_write) {
                /* Restrict the read value */
                arch_mmio_access(access);
                access->val &= access_mask;
                return TRAP_HANDLED;
        }

        if (!is_poke) {
                /*
                 * Modify the existing value of this register by first reading
                 * it, then writing back a value restricted to the cell's
                 * SPIs.
                 * Relies on a spinlock since we need two mmio accesses.
                 */
                unsigned long access_val = access->val;

                spin_lock(&dist_lock);

                access->is_write = false;
                arch_mmio_access(access);
                access->is_write = true;

                /*
                 * Within the cell's SPI bits, clear what was written as 0 and
                 * set what was written as 1; all other bits keep the value
                 * just read.
                 */
                access->val &= ~(access_mask & ~access_val);
                access->val |= access_val & access_mask;
                arch_mmio_access(access);

                spin_unlock(&dist_lock);

                return TRAP_HANDLED;
        } else {
                access->val &= access_mask;

                /* Let the caller perform the restricted write */
                return TRAP_UNHANDLED;
        }
}

/*
 * GICv3 uses a 64bit IROUTER register per SPI to define its target CPU.
 */
static int handle_irq_route(struct per_cpu *cpu_data,
                            struct mmio_access *access, unsigned int irq)
{
        struct cell *cell = cpu_data->cell;
        unsigned int cpu;

        /* Ignore aff3 on AArch32 (return 0) */
        if (access->size == 4 && (access->addr % 8))
                return TRAP_HANDLED;

        /* SGIs and PPIs are res0 */
        if (!is_spi(irq))
                return TRAP_HANDLED;

        /*
         * Ignore accesses to SPIs that do not belong to the cell. This isn't
         * forbidden, because the guest driver may simply iterate over all
         * registers at initialisation.
         */
        if (!spi_in_cell(cell, irq - 32))
                return TRAP_HANDLED;

        /* Translate the virtual cpu id into the physical one */
        if (access->is_write) {
                access->val = arm_cpu_virt2phys(cell, access->val);
                if (access->val == -1) {
                        printk("Attempt to route IRQ%d outside of cell\n", irq);
                        return TRAP_FORBIDDEN;
                }
                /* And do the access */
                return TRAP_UNHANDLED;
        } else {
                cpu = mmio_read32(gicd_base + GICD_IROUTER + 8 * irq);
                access->val = arm_cpu_phys2virt(cpu);
                return TRAP_HANDLED;
        }
}
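
/*
 * Forward a software-generated interrupt to the other CPUs of the caller's
 * cell: mark it pending on each targeted CPU and notify them with the
 * hypervisor's SGI_INJECT IPI.
 */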
int gic_handle_sgir_write(struct per_cpu *cpu_data, struct sgi *sgi,
                          bool virt_input)
{
        unsigned int cpu;
        unsigned long targets;
        unsigned int this_cpu = cpu_data->cpu_id;
        struct cell *cell = cpu_data->cell;
        bool is_target = false;

        targets = sgi->targets;
        sgi->targets = 0;

        /* Filter the targets */
        for_each_cpu_except(cpu, cell->cpu_set, this_cpu) {
                /* A virtual target list uses the cell's virtual CPU ids */
                if (virt_input)
                        is_target = !!test_bit(arm_cpu_phys2virt(cpu),
                                               &targets);

                /* routing_mode 0 delivers to the listed targets only */
                if (sgi->routing_mode == 0 && !is_target)
                        continue;

                irqchip_set_pending(per_cpu(cpu), sgi->id, false);
                sgi->targets |= (1 << cpu);
        }

        /* Let the other CPUs inject their SGIs */
        sgi->id = SGI_INJECT;
        irqchip_send_sgi(sgi);

        return TRAP_HANDLED;
}
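
/*
 * Dispatch a trapped GIC distributor access to the matching sub-handler and,
 * when the sub-handler returns TRAP_UNHANDLED, perform the (possibly
 * restricted) MMIO access on its behalf.
 */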
int gic_handle_dist_access(struct per_cpu *cpu_data,
                           struct mmio_access *access)
{
        int ret;
        unsigned long reg = access->addr - (unsigned long)gicd_base;

        switch (reg) {
        case REG_RANGE(GICD_IROUTER, 1024, 8):
                ret = handle_irq_route(cpu_data, access,
                                       (reg - GICD_IROUTER) / 8);
                break;

        case REG_RANGE(GICD_ICENABLER, 32, 4):
        case REG_RANGE(GICD_ISENABLER, 32, 4):
        case REG_RANGE(GICD_ICPENDR, 32, 4):
        case REG_RANGE(GICD_ISPENDR, 32, 4):
        case REG_RANGE(GICD_ICACTIVER, 32, 4):
        case REG_RANGE(GICD_ISACTIVER, 32, 4):
                /* One bit per interrupt; set/clear registers are pokes */
                ret = restrict_bitmask_access(cpu_data, access,
                                              (reg & 0x7f) / 4, 1, true);
                break;

        case REG_RANGE(GICD_IGROUPR, 32, 4):
                ret = restrict_bitmask_access(cpu_data, access,
                                              (reg & 0x7f) / 4, 1, false);
                break;

        case REG_RANGE(GICD_ICFGR, 64, 4):
                ret = restrict_bitmask_access(cpu_data, access,
                                              (reg & 0xff) / 4, 2, false);
                break;

        case REG_RANGE(GICD_IPRIORITYR, 255, 4):
                ret = restrict_bitmask_access(cpu_data, access,
                                              (reg & 0x3ff) / 4, 8, false);
                break;

        case REG_RANGE(GICD_PIDR0, 4, 4):
        case REG_RANGE(GICD_PIDR4, 4, 4):
        case REG_RANGE(GICD_CIDR0, 4, 4):
                /* Allow read access, ignore write */
                ret = (access->is_write ? TRAP_HANDLED : TRAP_UNHANDLED);
                break;

        default:
                /* Ignore accesses to unhandled registers */
                ret = TRAP_HANDLED;
        }

        /* The sub-handlers return TRAP_UNHANDLED to allow the access */
        if (ret == TRAP_UNHANDLED) {
                arch_mmio_access(access);
                ret = TRAP_HANDLED;
        }

        return ret;
}
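
/*
 * Acknowledge and handle all interrupts pending at the hypervisor: SGIs used
 * as hypervisor IPIs as well as physical IRQs that may have to be injected
 * into the cell.
 */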
void gic_handle_irq(struct per_cpu *cpu_data)
{
        bool handled = false;
        u32 irq_id;

        while (1) {
                /* Read IAR1: set 'active' state */
                irq_id = gic_read_iar();

                if (irq_id == 0x3ff) /* Spurious IRQ */
                        break;

                /* Handle the IRQ */
                if (is_sgi(irq_id)) {
                        arch_handle_sgi(cpu_data, irq_id);
                        handled = true;
                } else {
                        handled = arch_handle_phys_irq(cpu_data, irq_id);
                }

                /*
                 * Write EOIR1: drop priority, but stay active if handled is
                 * false.
                 * This avoids being re-interrupted by a level-triggered
                 * interrupt that still needs handling in the guest (e.g. the
                 * timer).
                 */
                irqchip_eoi_irq(irq_id, handled);
        }
}