hypervisor/arch/arm/irqchip.c
/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/entry.h>
#include <jailhouse/mmio.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <jailhouse/string.h>
#include <asm/gic_common.h>
#include <asm/irqchip.h>
#include <asm/platform.h>
#include <asm/setup.h>
#include <asm/sysregs.h>

/* Value of the four concatenated AMBA component ID registers ("biosfood") */
#define AMBA_DEVICE     0xb105f00d

void *gicd_base;
unsigned long gicd_size;

/*
 * irqchip_init() must be called after the MMU setup, but still within the
 * per-CPU setup, so the master CPU records its completion in a flag that
 * later callers check.
 */
static bool irqchip_is_init;
static struct irqchip_ops irqchip;

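/*
 * Check whether the given SPI is assigned to the cell. The spi argument is
 * the SPI number relative to the first SPI (interrupt ID 32).
 */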
bool spi_in_cell(struct cell *cell, unsigned int spi)
{
        /* FIXME: Change the configuration to a bitmask range */
        u32 spi_mask;

        if (spi >= 64)
                return false;
        else if (spi >= 32)
                spi_mask = cell->arch.spis >> 32;
        else
                spi_mask = cell->arch.spis;

        return spi_mask & (1 << (spi & 31));
}

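/*
 * Mark an interrupt as pending for the given CPU. Injection into a list
 * register is attempted directly when targeting the local CPU; otherwise, or
 * when the list registers are busy, the IRQ is queued in the per-CPU software
 * queue for later injection by irqchip_inject_pending().
 */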
void irqchip_set_pending(struct per_cpu *cpu_data, u16 irq_id)
{
        bool local_injection = (this_cpu_data() == cpu_data);
        unsigned int new_tail;

        if (local_injection && irqchip.inject_irq(cpu_data, irq_id) != -EBUSY)
                return;

        spin_lock(&cpu_data->pending_irqs_lock);

        new_tail = (cpu_data->pending_irqs_tail + 1) % MAX_PENDING_IRQS;

        /* Queue space available? */
        if (new_tail != cpu_data->pending_irqs_head) {
                cpu_data->pending_irqs[cpu_data->pending_irqs_tail] = irq_id;
                cpu_data->pending_irqs_tail = new_tail;
                /*
                 * Make the change to pending_irqs_tail visible before the
                 * caller sends SGI_INJECT.
                 */
                memory_barrier();
        }

        spin_unlock(&cpu_data->pending_irqs_lock);

        /*
         * The list registers are full, so trigger the maintenance interrupt
         * if we are on the target CPU. Otherwise, the caller will send
         * SGI_INJECT, and irqchip_inject_pending() will take care of it.
         */
        if (local_injection)
                irqchip.enable_maint_irq(true);
}

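/*
 * Drain the per-CPU software queue into the list registers. If they fill up
 * again, keep the maintenance interrupt enabled so injection can resume once
 * a list register is freed; once the queue is empty, disable it.
 */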
void irqchip_inject_pending(struct per_cpu *cpu_data)
{
        u16 irq_id;

        while (cpu_data->pending_irqs_head != cpu_data->pending_irqs_tail) {
                irq_id = cpu_data->pending_irqs[cpu_data->pending_irqs_head];

                if (irqchip.inject_irq(cpu_data, irq_id) == -EBUSY) {
                        /*
                         * The list registers are full, trigger the
                         * maintenance interrupt and leave.
                         */
                        irqchip.enable_maint_irq(true);
                        return;
                }

                cpu_data->pending_irqs_head =
                        (cpu_data->pending_irqs_head + 1) % MAX_PENDING_IRQS;
        }

        /*
         * The software interrupt queue is empty - turn off the maintenance
         * interrupt.
         */
        irqchip.enable_maint_irq(false);
}

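/* Thin wrappers dispatching to the active GIC backend (struct irqchip_ops) */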
void irqchip_handle_irq(struct per_cpu *cpu_data)
{
        irqchip.handle_irq(cpu_data);
}

void irqchip_eoi_irq(u32 irqn, bool deactivate)
{
        irqchip.eoi_irq(irqn, deactivate);
}

int irqchip_send_sgi(struct sgi *sgi)
{
        return irqchip.send_sgi(sgi);
}

int irqchip_cpu_init(struct per_cpu *cpu_data)
{
        return irqchip.cpu_init(cpu_data);
}

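/*
 * Reset the per-CPU interrupt state: drop all queued pending IRQs and let
 * the backend reset this CPU's GIC interface.
 */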
int irqchip_cpu_reset(struct per_cpu *cpu_data)
{
        cpu_data->pending_irqs_head = cpu_data->pending_irqs_tail = 0;

        return irqchip.cpu_reset(cpu_data, false);
}

void irqchip_cpu_shutdown(struct per_cpu *cpu_data)
{
        /*
         * The GIC backend must only reset the hyp interface if it has been
         * initialised, since this function may be executed during the setup
         * phase.
         */
        irqchip.cpu_reset(cpu_data, true);
}

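/* Return the first irqchip descriptor of a cell configuration, or NULL */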
static const struct jailhouse_irqchip *
irqchip_find_config(struct jailhouse_cell_desc *config)
{
        const struct jailhouse_irqchip *irq_config =
                jailhouse_cell_irqchips(config);

        if (config->num_irqchips)
                return irq_config;
        else
                return NULL;
}

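/*
 * Record the SPIs assigned to the cell by its configuration, then let the
 * backend initialise its per-cell state.
 */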
int irqchip_cell_init(struct cell *cell)
{
        const struct jailhouse_irqchip *pins = irqchip_find_config(cell->config);

        cell->arch.spis = (pins ? pins->pin_bitmap : 0);

        return irqchip.cell_init(cell);
}

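/*
 * Hand the SPIs of a destroyed cell back to the root cell, limited to those
 * listed in the root cell's own configuration, then let the backend clean up.
 */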
void irqchip_cell_exit(struct cell *cell)
{
        const struct jailhouse_irqchip *root_pins =
                irqchip_find_config(root_cell.config);

        /*
         * Might be called by arch_shutdown while rolling back a failed
         * setup.
         */
        if (!irqchip_is_init)
                return;

        if (root_pins)
                root_cell.arch.spis |= cell->arch.spis & root_pins->pin_bitmap;

        irqchip.cell_exit(cell);
}

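/* Remove the SPIs assigned to a new cell from the root cell's bitmap */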
void irqchip_root_cell_shrink(struct cell *cell)
{
        root_cell.arch.spis &= ~(cell->arch.spis);
}

/* Only the GIC is implemented */
extern struct irqchip_ops gic_irqchip;

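/*
 * Probe the interrupt controller: map the distributor, verify its AMBA
 * component ID, read the GIC architecture revision from PIDR2 and, for
 * GICv2 to GICv4, hook up the GIC backend.
 */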
int irqchip_init(void)
{
        int i, err;
        u32 pidr2, cidr;
        u32 dev_id = 0;

        /* Only executed on master CPU */
        if (irqchip_is_init)
                return 0;

        /* FIXME: parse device tree */
        gicd_base = GICD_BASE;
        gicd_size = GICD_SIZE;

        if ((err = arch_map_device(gicd_base, gicd_base, gicd_size)) != 0)
                return err;

        for (i = 3; i >= 0; i--) {
                cidr = mmio_read32(gicd_base + GICD_CIDR0 + i * 4);
                dev_id |= cidr << i * 8;
        }
        if (dev_id != AMBA_DEVICE)
                goto err_no_distributor;

        /* Probe the GIC version */
        pidr2 = mmio_read32(gicd_base + GICD_PIDR2);
        switch (GICD_PIDR2_ARCH(pidr2)) {
        case 0x2:
        case 0x3:
        case 0x4:
                memcpy(&irqchip, &gic_irqchip, sizeof(struct irqchip_ops));
                break;
        default:
                goto err_no_distributor;
        }

        if (irqchip.init) {
                err = irqchip.init();
                irqchip_is_init = true;

                return err;
        }
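        /*
         * Fall through: a backend without an init hook is treated like an
         * unsupported distributor.
         */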
err_no_distributor:
        printk("GIC: no supported distributor found\n");
        arch_unmap_device(gicd_base, gicd_size);

        return -ENODEV;
}