/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 * Copyright (c) Siemens AG, 2016
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
15 #include <jailhouse/entry.h>
16 #include <jailhouse/mmio.h>
17 #include <jailhouse/paging.h>
18 #include <jailhouse/printk.h>
19 #include <jailhouse/string.h>
20 #include <asm/control.h>
21 #include <asm/gic_common.h>
22 #include <asm/irqchip.h>
23 #include <asm/platform.h>
24 #include <asm/setup.h>
25 #include <asm/sysregs.h>
/* AMBA component ID (0xb105f00d) assembled from the four CIDR registers;
 * used to verify that a device at the probed address is really present. */
#define AMBA_DEVICE 0xb105f00d

/* Iterate over all irqchip descriptors of a cell configuration.
 * @chip:    cursor, const struct jailhouse_irqchip *
 * @config:  the cell's jailhouse config
 * @counter: unsigned counter variable, runs 0..num_irqchips-1 */
#define for_each_irqchip(chip, config, counter) \
	for ((chip) = jailhouse_cell_irqchips(config), (counter) = 0; \
	     (counter) < (config)->num_irqchips; \
	     (chip)++, (counter)++)
35 extern struct irqchip_ops irqchip;
38 unsigned long gicd_size;
41 * The init function must be called after the MMU setup, and whilst in the
42 * per-cpu setup, which means that a bool must be set by the master CPU
44 static bool irqchip_is_init;
46 bool irqchip_irq_in_cell(struct cell *cell, unsigned int irq_id)
48 if (irq_id >= sizeof(cell->arch.irq_bitmap) * 8)
51 return (cell->arch.irq_bitmap[irq_id / 32] & (1 << (irq_id % 32))) != 0;
54 void irqchip_set_pending(struct per_cpu *cpu_data, u16 irq_id)
56 bool local_injection = (this_cpu_data() == cpu_data);
57 unsigned int new_tail;
59 if (local_injection && irqchip.inject_irq(cpu_data, irq_id) != -EBUSY)
62 spin_lock(&cpu_data->pending_irqs_lock);
64 new_tail = (cpu_data->pending_irqs_tail + 1) % MAX_PENDING_IRQS;
66 /* Queue space available? */
67 if (new_tail != cpu_data->pending_irqs_head) {
68 cpu_data->pending_irqs[cpu_data->pending_irqs_tail] = irq_id;
69 cpu_data->pending_irqs_tail = new_tail;
71 * Make the change to pending_irqs_tail visible before the
72 * caller sends SGI_INJECT.
77 spin_unlock(&cpu_data->pending_irqs_lock);
80 * The list registers are full, trigger maintenance interrupt if we are
81 * on the target CPU. In the other case, the caller will send a
82 * SGI_INJECT, and irqchip_inject_pending will take care.
85 irqchip.enable_maint_irq(true);
88 void irqchip_inject_pending(struct per_cpu *cpu_data)
92 while (cpu_data->pending_irqs_head != cpu_data->pending_irqs_tail) {
93 irq_id = cpu_data->pending_irqs[cpu_data->pending_irqs_head];
95 if (irqchip.inject_irq(cpu_data, irq_id) == -EBUSY) {
97 * The list registers are full, trigger maintenance
98 * interrupt and leave.
100 irqchip.enable_maint_irq(true);
104 cpu_data->pending_irqs_head =
105 (cpu_data->pending_irqs_head + 1) % MAX_PENDING_IRQS;
109 * The software interrupt queue is empty - turn off the maintenance
112 irqchip.enable_maint_irq(false);
115 void irqchip_handle_irq(struct per_cpu *cpu_data)
117 irqchip.handle_irq(cpu_data);
120 void irqchip_eoi_irq(u32 irqn, bool deactivate)
122 irqchip.eoi_irq(irqn, deactivate);
125 int irqchip_send_sgi(struct sgi *sgi)
127 return irqchip.send_sgi(sgi);
130 int irqchip_cpu_init(struct per_cpu *cpu_data)
132 return irqchip.cpu_init(cpu_data);
135 int irqchip_cpu_reset(struct per_cpu *cpu_data)
137 cpu_data->pending_irqs_head = cpu_data->pending_irqs_tail = 0;
139 return irqchip.cpu_reset(cpu_data, false);
142 void irqchip_cpu_shutdown(struct per_cpu *cpu_data)
145 * The GIC backend must take care of only resetting the hyp interface if
146 * it has been initialised: this function may be executed during the
149 irqchip.cpu_reset(cpu_data, true);
152 int irqchip_cell_init(struct cell *cell)
154 const struct jailhouse_irqchip *chip;
157 for_each_irqchip(chip, cell->config, n) {
158 if (chip->address != (unsigned long)gicd_base)
160 if (chip->pin_base % 32 != 0 ||
161 chip->pin_base + sizeof(chip->pin_bitmap) * 8 >
162 sizeof(cell->arch.irq_bitmap) * 8)
163 return trace_error(-EINVAL);
164 memcpy(&cell->arch.irq_bitmap[chip->pin_base / 32],
165 chip->pin_bitmap, sizeof(chip->pin_bitmap));
168 * Permit direct access to all SGIs and PPIs except for those used by
171 cell->arch.irq_bitmap[0] = ~((1 << SGI_INJECT) | (1 << SGI_CPU_OFF) |
172 (1 << MAINTENANCE_IRQ));
174 return irqchip.cell_init(cell);
177 void irqchip_cell_exit(struct cell *cell)
179 const struct jailhouse_irqchip *chip;
182 /* might be called by arch_shutdown while rolling back
184 if (!irqchip_is_init)
187 /* set all pins of the old cell in the root cell */
188 for_each_irqchip(chip, cell->config, n) {
189 if (chip->address != (unsigned long)gicd_base)
191 for (pos = 0; pos < ARRAY_SIZE(chip->pin_bitmap); pos++)
192 root_cell.arch.irq_bitmap[chip->pin_base / 32] |=
193 chip->pin_bitmap[pos];
196 /* mask out pins again that actually didn't belong to the root cell */
197 for_each_irqchip(chip, root_cell.config, n) {
198 if (chip->address != (unsigned long)gicd_base)
200 for (pos = 0; pos < ARRAY_SIZE(chip->pin_bitmap); pos++)
201 root_cell.arch.irq_bitmap[chip->pin_base / 32] &=
202 chip->pin_bitmap[pos];
205 if (irqchip.cell_exit)
206 irqchip.cell_exit(cell);
209 void irqchip_root_cell_shrink(struct cell *cell)
211 const struct jailhouse_irqchip *irqchip;
214 for_each_irqchip(irqchip, cell->config, n) {
215 if (irqchip->address != (unsigned long)gicd_base)
217 for (pos = 0; pos < ARRAY_SIZE(irqchip->pin_bitmap); pos++)
218 root_cell.arch.irq_bitmap[irqchip->pin_base / 32] &=
219 ~irqchip->pin_bitmap[pos];
223 int irqchip_init(void)
229 /* Only executed on master CPU */
233 /* FIXME: parse device tree */
234 gicd_base = GICD_BASE;
235 gicd_size = GICD_SIZE;
237 if ((err = arch_map_device(gicd_base, gicd_base, gicd_size)) != 0)
240 for (i = 3; i >= 0; i--) {
241 cidr = mmio_read32(gicd_base + GICD_CIDR0 + i * 4);
242 dev_id |= cidr << i * 8;
244 if (dev_id != AMBA_DEVICE)
245 goto err_no_distributor;
247 /* Probe the GIC version */
248 pidr2 = mmio_read32(gicd_base + GICD_PIDR2);
249 switch (GICD_PIDR2_ARCH(pidr2)) {
255 goto err_no_distributor;
259 err = irqchip.init();
260 irqchip_is_init = true;
266 printk("GIC: no supported distributor found\n");
267 arch_unmap_device(gicd_base, gicd_size);