jailhouse.git: hypervisor/arch/arm/irqchip.c (commit 47673dd34f125845c7d458c5c3a8c2976f69edf6)
/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 * Copyright (c) Siemens AG, 2016
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/entry.h>
#include <jailhouse/mmio.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <jailhouse/string.h>
#include <asm/control.h>
#include <asm/gic_common.h>
#include <asm/irqchip.h>
#include <asm/platform.h>
#include <asm/setup.h>
#include <asm/sysregs.h>

/* AMBA's biosfood */
#define AMBA_DEVICE     0xb105f00d

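/* Iterate over all irqchip descriptors in a cell configuration. */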
#define for_each_irqchip(chip, config, counter)                         \
        for ((chip) = jailhouse_cell_irqchips(config), (counter) = 0;   \
             (counter) < (config)->num_irqchips;                        \
             (chip)++, (counter)++)

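/*
 * Backend operations, defined by the GIC version-specific driver
 * (GICv2 or GICv3).
 */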
extern struct irqchip_ops irqchip;

void *gicd_base;
unsigned long gicd_size;

/*
 * The init function must be called after the MMU setup, and whilst in the
 * per-cpu setup, which means that a bool must be set by the master CPU
 */
static bool irqchip_is_init;

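/*
 * Check whether the given interrupt ID is assigned to the cell according to
 * the cell's interrupt bitmap.
 */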
bool irqchip_irq_in_cell(struct cell *cell, unsigned int irq_id)
{
        if (irq_id >= sizeof(cell->arch.irq_bitmap) * 8)
                return false;

        return (cell->arch.irq_bitmap[irq_id / 32] & (1 << (irq_id % 32))) != 0;
}

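/*
 * Inject an interrupt into the cell running on the target CPU. If it cannot
 * be injected directly (remote CPU or no free list register), queue it in
 * the per-cpu software ring buffer for later delivery by
 * irqchip_inject_pending().
 */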
void irqchip_set_pending(struct per_cpu *cpu_data, u16 irq_id)
{
        bool local_injection = (this_cpu_data() == cpu_data);
        unsigned int new_tail;

        if (local_injection && irqchip.inject_irq(cpu_data, irq_id) != -EBUSY)
                return;

        spin_lock(&cpu_data->pending_irqs_lock);

        new_tail = (cpu_data->pending_irqs_tail + 1) % MAX_PENDING_IRQS;

        /* Queue space available? */
        if (new_tail != cpu_data->pending_irqs_head) {
                cpu_data->pending_irqs[cpu_data->pending_irqs_tail] = irq_id;
                cpu_data->pending_irqs_tail = new_tail;
                /*
                 * Make the change to pending_irqs_tail visible before the
                 * caller sends SGI_INJECT.
                 */
                memory_barrier();
        }

        spin_unlock(&cpu_data->pending_irqs_lock);

        /*
         * The list registers are full, so trigger the maintenance interrupt
         * if we are on the target CPU. Otherwise, the caller will send
         * SGI_INJECT, and irqchip_inject_pending() will take care of the
         * injection.
         */
        if (local_injection)
                irqchip.enable_maint_irq(true);
}

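/*
 * Drain the per-cpu software queue into the list registers. Runs on the
 * target CPU, e.g. from the maintenance interrupt or SGI_INJECT path. If the
 * list registers fill up again, the maintenance interrupt stays enabled so
 * the remaining entries are injected later.
 */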
void irqchip_inject_pending(struct per_cpu *cpu_data)
{
        u16 irq_id;

        while (cpu_data->pending_irqs_head != cpu_data->pending_irqs_tail) {
                irq_id = cpu_data->pending_irqs[cpu_data->pending_irqs_head];

                if (irqchip.inject_irq(cpu_data, irq_id) == -EBUSY) {
                        /*
                         * The list registers are full, trigger maintenance
                         * interrupt and leave.
                         */
                        irqchip.enable_maint_irq(true);
                        return;
                }

                cpu_data->pending_irqs_head =
                        (cpu_data->pending_irqs_head + 1) % MAX_PENDING_IRQS;
        }

        /*
         * The software interrupt queue is empty - turn off the maintenance
         * interrupt.
         */
        irqchip.enable_maint_irq(false);
}

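/* The following wrappers simply dispatch to the active GIC backend. */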
void irqchip_handle_irq(struct per_cpu *cpu_data)
{
        irqchip.handle_irq(cpu_data);
}

void irqchip_eoi_irq(u32 irqn, bool deactivate)
{
        irqchip.eoi_irq(irqn, deactivate);
}

int irqchip_send_sgi(struct sgi *sgi)
{
        return irqchip.send_sgi(sgi);
}

int irqchip_cpu_init(struct per_cpu *cpu_data)
{
        return irqchip.cpu_init(cpu_data);
}

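/*
 * Reset the per-cpu interrupt state, dropping any interrupts still queued in
 * the software ring buffer.
 */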
int irqchip_cpu_reset(struct per_cpu *cpu_data)
{
        cpu_data->pending_irqs_head = cpu_data->pending_irqs_tail = 0;

        return irqchip.cpu_reset(cpu_data, false);
}

void irqchip_cpu_shutdown(struct per_cpu *cpu_data)
{
        /*
         * The GIC backend must take care to reset the hyp interface only if
         * it has been initialised: this function may be executed during the
         * setup phase.
         */
        irqchip.cpu_reset(cpu_data, true);
}

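/*
 * Populate the cell's interrupt bitmap from its irqchip descriptors, grant
 * access to all SGIs and PPIs except those reserved for the hypervisor, and
 * let the GIC backend perform its per-cell setup.
 */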
int irqchip_cell_init(struct cell *cell)
{
        const struct jailhouse_irqchip *chip;
        unsigned int n;

        for_each_irqchip(chip, cell->config, n) {
                if (chip->address != (unsigned long)gicd_base)
                        continue;
                if (chip->pin_base % 32 != 0 ||
                    chip->pin_base + sizeof(chip->pin_bitmap) * 8 >
                    sizeof(cell->arch.irq_bitmap) * 8)
                        return trace_error(-EINVAL);
                memcpy(&cell->arch.irq_bitmap[chip->pin_base / 32],
                       chip->pin_bitmap, sizeof(chip->pin_bitmap));
        }
        /*
         * Permit direct access to all SGIs and PPIs except for those used by
         * the hypervisor.
         */
        cell->arch.irq_bitmap[0] = ~((1 << SGI_INJECT) | (1 << SGI_CPU_OFF) |
                                     (1 << MAINTENANCE_IRQ));

        return irqchip.cell_init(cell);
}

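/*
 * Return the interrupts of a destroyed cell to the root cell, restricted to
 * the pins that the root cell's own configuration assigns to it.
 */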
void irqchip_cell_exit(struct cell *cell)
{
        const struct jailhouse_irqchip *chip;
        unsigned int n, pos;

        /* might be called by arch_shutdown while rolling back
         * a failed setup */
        if (!irqchip_is_init)
                return;

        /* set all pins of the old cell in the root cell */
        for_each_irqchip(chip, cell->config, n) {
                if (chip->address != (unsigned long)gicd_base)
                        continue;
                for (pos = 0; pos < ARRAY_SIZE(chip->pin_bitmap); pos++)
                        root_cell.arch.irq_bitmap[chip->pin_base / 32 + pos] |=
                                chip->pin_bitmap[pos];
        }

        /* mask out pins again that actually didn't belong to the root cell */
        for_each_irqchip(chip, root_cell.config, n) {
                if (chip->address != (unsigned long)gicd_base)
                        continue;
                for (pos = 0; pos < ARRAY_SIZE(chip->pin_bitmap); pos++)
                        root_cell.arch.irq_bitmap[chip->pin_base / 32 + pos] &=
                                chip->pin_bitmap[pos];
        }

        if (irqchip.cell_exit)
                irqchip.cell_exit(cell);
}

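/*
 * Remove the interrupts assigned to a newly created cell from the root
 * cell's bitmap.
 */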
void irqchip_root_cell_shrink(struct cell *cell)
{
        const struct jailhouse_irqchip *irqchip;
        unsigned int n, pos;

        for_each_irqchip(irqchip, cell->config, n) {
                if (irqchip->address != (unsigned long)gicd_base)
                        continue;
                for (pos = 0; pos < ARRAY_SIZE(irqchip->pin_bitmap); pos++)
                        root_cell.arch.irq_bitmap[irqchip->pin_base / 32 + pos] &=
                                ~irqchip->pin_bitmap[pos];
        }
}

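/*
 * Probe and map the GIC distributor, check that it is an AMBA device with a
 * supported architecture revision, and run the backend initialisation. Only
 * executed on the master CPU during setup.
 */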
int irqchip_init(void)
{
        int i, err;
        u32 pidr2, cidr;
        u32 dev_id = 0;

        /* Only executed on master CPU */
        if (irqchip_is_init)
                return 0;

        /* FIXME: parse device tree */
        gicd_base = GICD_BASE;
        gicd_size = GICD_SIZE;

        if ((err = arch_map_device(gicd_base, gicd_base, gicd_size)) != 0)
                return err;

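        /*
         * Assemble the 32-bit component ID from the four CIDR registers
         * (8 valid bits each); an AMBA-compliant distributor reads back
         * 0xb105f00d.
         */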
        for (i = 3; i >= 0; i--) {
                cidr = mmio_read32(gicd_base + GICD_CIDR0 + i * 4);
                dev_id |= cidr << i * 8;
        }
        if (dev_id != AMBA_DEVICE)
                goto err_no_distributor;

        /* Probe the GIC version */
        pidr2 = mmio_read32(gicd_base + GICD_PIDR2);
        switch (GICD_PIDR2_ARCH(pidr2)) {
        case 0x2:
        case 0x3:
        case 0x4:
                break;
        default:
                goto err_no_distributor;
        }

        if (irqchip.init) {
                err = irqchip.init();
                irqchip_is_init = true;

                return err;
        }

err_no_distributor:
        printk("GIC: no supported distributor found\n");
        arch_unmap_device(gicd_base, gicd_size);

        return -ENODEV;
}