/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/entry.h>
#include <jailhouse/mmio.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <jailhouse/string.h>
#include <asm/gic_common.h>
#include <asm/irqchip.h>
#include <asm/platform.h>
#include <asm/setup.h>
#include <asm/sysregs.h>

/* AMBA component ID magic value, reads as "biosfood" */
#define AMBA_DEVICE	0xb105f00d

void *gicd_base;
unsigned long gicd_size;

/*
 * The init function must be called after the MMU setup, and while in the
 * per-CPU setup, which means that a flag must be set by the master CPU to
 * prevent the irqchip from being initialised more than once.
 */
static bool irqchip_is_init;
static struct irqchip_ops irqchip;

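/*
 * Allocate one zeroed page to back the per-CPU array of pending-interrupt
 * slots and reset the list head.
 */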
static int irqchip_init_pending(struct per_cpu *cpu_data)
{
	struct pending_irq *pend_array = page_alloc(&mem_pool, 1);

	if (pend_array == NULL)
		return -ENOMEM;
	memset(pend_array, 0, PAGE_SIZE);

	cpu_data->pending_irqs = pend_array;
	cpu_data->first_pending = NULL;

	return 0;
}

/*
 * Find the first available pending struct for insertion. The `prev' pointer
 * is set to the previous pending interrupt, if any, to help inserting the
 * new one into the list.
 * Returns NULL when no slot is available.
 */
static struct pending_irq* get_pending_slot(struct per_cpu *cpu_data,
					    struct pending_irq **prev)
{
	u32 i, pending_idx;
	struct pending_irq *pending = cpu_data->first_pending;

	*prev = NULL;

	for (i = 0; i < MAX_PENDING_IRQS; i++) {
		/* The list ended before slot i: slot i is free. */
		if (pending == NULL)
			return cpu_data->pending_irqs + i;

		/* The list skips slot i: slot i is free. */
		pending_idx = pending - cpu_data->pending_irqs;
		if (i < pending_idx)
			return cpu_data->pending_irqs + i;

		*prev = pending;
		pending = pending->next;
	}

	return NULL;
}

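/*
 * Copy `irq' into a free slot of the per-CPU pending array and link it into
 * the list, which is kept ordered by slot index.
 */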
int irqchip_insert_pending(struct per_cpu *cpu_data, struct pending_irq *irq)
{
	struct pending_irq *prev = NULL;
	struct pending_irq *slot;

	spin_lock(&cpu_data->gic_lock);

	slot = get_pending_slot(cpu_data, &prev);
	if (slot == NULL) {
		spin_unlock(&cpu_data->gic_lock);
		return -ENOMEM;
	}

	/*
	 * Don't override the pointers yet, they may still be read by the
	 * injection loop; the odds are tiny, but real. This relies on `prev'
	 * and `next' being the last two fields of struct pending_irq.
	 */
	memcpy(slot, irq, sizeof(struct pending_irq) - 2 * sizeof(void *));
	slot->prev = prev;
	if (prev) {
		slot->next = prev->next;
		prev->next = slot;
	} else {
		slot->next = cpu_data->first_pending;
		cpu_data->first_pending = slot;
	}
	if (slot->next)
		slot->next->prev = slot;

	spin_unlock(&cpu_data->gic_lock);

	return 0;
}

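/*
 * Build a pending_irq descriptor for `irq_id' and either inject it right
 * away (when `try_inject' is set and the injection succeeds) or queue it on
 * the per-CPU pending list.
 */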
int irqchip_set_pending(struct per_cpu *cpu_data, u32 irq_id, bool try_inject)
{
	struct pending_irq pending;

	pending.virt_id = irq_id;
	/* Priority must be less than ICC_PMR */
	pending.priority = 0;

	if (is_sgi(irq_id)) {
		pending.hw = 0;
		pending.type.sgi.maintenance = 0;
		pending.type.sgi.cpuid = 0;
	} else {
		pending.hw = 1;
		pending.type.irq = irq_id;
	}

	if (try_inject && irqchip.inject_irq(cpu_data, &pending) == 0)
		return 0;

	return irqchip_insert_pending(cpu_data, &pending);
}

/*
 * Only called by `irqchip_inject_pending' on a CPU unlinking entries from
 * its own pending list.
 */
int irqchip_remove_pending(struct per_cpu *cpu_data, struct pending_irq *irq)
{
	spin_lock(&cpu_data->gic_lock);

	if (cpu_data->first_pending == irq)
		cpu_data->first_pending = irq->next;
	if (irq->prev)
		irq->prev->next = irq->next;
	if (irq->next)
		irq->next->prev = irq->prev;

	spin_unlock(&cpu_data->gic_lock);

	return 0;
}

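/*
 * Try to flush the per-CPU pending list into the GIC list registers. Stops
 * as soon as the list registers are full; the remaining entries stay queued
 * for a later call.
 */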
int irqchip_inject_pending(struct per_cpu *cpu_data)
{
	int err;
	struct pending_irq *pending = cpu_data->first_pending;

	while (pending != NULL) {
		err = irqchip.inject_irq(cpu_data, pending);
		if (err == -EBUSY)
			/* The list registers are full. */
			break;
		else
			/*
			 * Removal only changes the pointers, but does not
			 * deallocate anything.
			 * Concurrent accesses are avoided with the spinlock,
			 * but the `next' pointer of the current pending object
			 * may be rewritten by an external insert before or
			 * after this removal, which isn't an issue.
			 */
			irqchip_remove_pending(cpu_data, pending);

		pending = pending->next;
	}

	return 0;
}

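/* Thin wrappers dispatching to the probed irqchip backend. */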
void irqchip_handle_irq(struct per_cpu *cpu_data)
{
	irqchip.handle_irq(cpu_data);
}

int irqchip_send_sgi(struct sgi *sgi)
{
	return irqchip.send_sgi(sgi);
}

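/*
 * Per-CPU initialisation: allocate the pending-interrupt list, then let the
 * backend set up its CPU interface if it provides a cpu_init hook.
 */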
int irqchip_cpu_init(struct per_cpu *cpu_data)
{
	int err;

	err = irqchip_init_pending(cpu_data);
	if (err)
		return err;

	if (irqchip.cpu_init)
		return irqchip.cpu_init(cpu_data);

	return 0;
}

/* Only the GIC is implemented */
extern struct irqchip_ops gic_irqchip;

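/*
 * Master-CPU initialisation: map the distributor, check its AMBA component
 * ID, probe the GIC architecture version and install the matching ops.
 */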
int irqchip_init(void)
{
	int i, err;
	u32 pidr2, cidr;
	u32 dev_id = 0;

	/* Only executed on master CPU */
	if (irqchip_is_init)
		return 0;

	/* FIXME: parse device tree */
	gicd_base = GICD_BASE;
	gicd_size = GICD_SIZE;

	err = arch_map_device(gicd_base, gicd_base, gicd_size);
	if (err)
		return err;

	/* Each component ID register holds one byte of the AMBA magic value */
	for (i = 3; i >= 0; i--) {
		cidr = mmio_read32(gicd_base + GICD_CIDR0 + i * 4);
		dev_id |= cidr << i * 8;
	}
	if (dev_id != AMBA_DEVICE)
		goto err_no_distributor;

	/* Probe the GIC version */
	pidr2 = mmio_read32(gicd_base + GICD_PIDR2);
	switch (GICD_PIDR2_ARCH(pidr2)) {
	case 0x2:
		/* No ops are installed for GICv2 here */
		break;
	case 0x3:
	case 0x4:
		memcpy(&irqchip, &gic_irqchip, sizeof(struct irqchip_ops));
		break;
	}

	if (irqchip.init) {
		err = irqchip.init();
		irqchip_is_init = true;

		return err;
	}

err_no_distributor:
	printk("GIC: no distributor found\n");
	arch_unmap_device(gicd_base, gicd_size);

	return -ENODEV;
}