jailhouse.git: hypervisor/arch/arm/gic-v3.c
arm: GICv3: handle IRQs
/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/control.h>
#include <jailhouse/mmio.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/types.h>
#include <asm/control.h>
#include <asm/gic_common.h>
#include <asm/irqchip.h>
#include <asm/platform.h>
#include <asm/setup.h>

/*
 * This implementation assumes that the kernel driver already initialised most
 * of the GIC.
 * There are almost no instruction barriers, since IRQs are always disabled in
 * the hyp and ERET serves as the context synchronization event.
 */

/* Number of implemented List Registers, read from ICH_VTR_EL2 in gic_cpu_init() */
static unsigned int gic_num_lr;

/* Redistributor region; taken from the platform header until a DT is parsed */
static void *gicr_base;
static unsigned int gicr_size;

static int gic_init(void)
{
        int err;

        /* FIXME: parse a dt */
        gicr_base = GICR_BASE;
        gicr_size = GICR_SIZE;

        /* Let the per-cpu code access the redistributors */
        err = arch_map_device(gicr_base, gicr_base, gicr_size);

        return err;
}

static int gic_cpu_init(struct per_cpu *cpu_data)
{
        u64 typer;
        u32 pidr;
        u32 gic_version;
        u32 cell_icc_ctlr, cell_icc_pmr, cell_icc_igrpen1;
        u32 ich_vtr;
        u32 ich_vmcr;
        void *redist_base = gicr_base;

        /* Find redistributor */
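        /*
         * The redistributor frames are laid out contiguously: GICR_TYPER
         * bits [63:32] hold the affinity value of the PE owning a frame
         * (compared here against cpu_id), and GICR_TYPER.Last marks the
         * final frame in the region. Each redistributor spans two 64KiB
         * pages (RD_base and SGI_base); GICv4 adds two more for the VLPI
         * frames, hence the doubled stride.
         */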
        do {
                pidr = mmio_read32(redist_base + GICR_PIDR2);
                gic_version = GICR_PIDR2_ARCH(pidr);
                if (gic_version != 3 && gic_version != 4)
                        break;

                typer = mmio_read64(redist_base + GICR_TYPER);
                if ((typer >> 32) == cpu_data->cpu_id) {
                        cpu_data->gicr_base = redist_base;
                        break;
                }

                redist_base += 0x20000;
                if (gic_version == 4)
                        redist_base += 0x20000;
        } while (!(typer & GICR_TYPER_Last));

        if (cpu_data->gicr_base == 0) {
                printk("GIC: No redist found for CPU%d\n", cpu_data->cpu_id);
                return -ENODEV;
        }

        /* Ensure all IPIs are enabled */
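        /*
         * Only bits 0-15 of GICR_ISENABLER0 (the SGIs) are set; since the
         * register is write-one-to-set, the PPI enable bits are left as the
         * kernel configured them.
         */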
        mmio_write32(redist_base + GICR_SGI_BASE + GICR_ISENABLER, 0x0000ffff);

        /*
         * Set EOIMode to 1.
         * This allows the priority of level-triggered interrupts (e.g. the
         * timer) to be dropped without deactivating them, ensuring that they
         * won't immediately be re-triggered.
         * They can then be injected into the guest using the LR.HW bit, and
         * are deactivated once the guest does an EOI after handling the
         * interrupt source.
         */
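        /*
         * The cell's current ICC_* state is read back before being replaced,
         * so that it can be mirrored into ICH_VMCR_EL2 below: the guest keeps
         * seeing the PMR, group 1 enable and EOImode it had configured before
         * the hypervisor took over.
         */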
        arm_read_sysreg(ICC_CTLR_EL1, cell_icc_ctlr);
        arm_write_sysreg(ICC_CTLR_EL1, ICC_CTLR_EOImode);

        arm_read_sysreg(ICC_PMR_EL1, cell_icc_pmr);
        arm_write_sysreg(ICC_PMR_EL1, ICC_PMR_DEFAULT);

        arm_read_sysreg(ICC_IGRPEN1_EL1, cell_icc_igrpen1);
        arm_write_sysreg(ICC_IGRPEN1_EL1, ICC_IGRPEN1_EN);

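        /*
         * ICH_VTR_EL2.ListRegs (bits [4:0]) holds the number of implemented
         * List Registers minus one.
         */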
        arm_read_sysreg(ICH_VTR_EL2, ich_vtr);
        gic_num_lr = (ich_vtr & 0xf) + 1;

        ich_vmcr = (cell_icc_pmr & ICC_PMR_MASK) << ICH_VMCR_VPMR_SHIFT;
        if (cell_icc_igrpen1 & ICC_IGRPEN1_EN)
                ich_vmcr |= ICH_VMCR_VENG1;
        if (cell_icc_ctlr & ICC_CTLR_EOImode)
                ich_vmcr |= ICH_VMCR_VEOIM;
        arm_write_sysreg(ICH_VMCR_EL2, ich_vmcr);

        /* After this, the cells access the virtual interface of the GIC. */
        arm_write_sysreg(ICH_HCR_EL2, ICH_HCR_EN);

        return 0;
}

static int gic_send_sgi(struct sgi *sgi)
{
        u64 val;
        u16 targets = sgi->targets;

        if (!is_sgi(sgi->id))
                return -EINVAL;

        if (sgi->routing_mode == 2)
                targets = 1 << phys_processor_id();

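        /*
         * ICC_SGI1R_EL1 layout, as composed below: TargetList in bits [15:0],
         * Aff1/Aff2/Aff3 in bits [23:16], [39:32] and [55:48], the SGI number
         * in bits [27:24], and IRM in bit [40]. With IRM set, the SGI is sent
         * to all PEs except the sender and the target list is ignored.
         */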
        val = (u64)sgi->aff3 << ICC_SGIR_AFF3_SHIFT
            | (u64)sgi->aff2 << ICC_SGIR_AFF2_SHIFT
            | sgi->aff1 << ICC_SGIR_AFF1_SHIFT
            | (targets & ICC_SGIR_TARGET_MASK)
            | (sgi->id & 0xf) << ICC_SGIR_IRQN_SHIFT;

        if (sgi->routing_mode == 1)
                val |= ICC_SGIR_ROUTING_BIT;

        /*
         * Ensure the targets see our modifications to their per-cpu
         * structures.
         */
        dsb(ish);

        arm_write_sysreg(ICC_SGI1R_EL1, val);
        isb();

        return 0;
}

/*
 * Handle the maintenance interrupt; everything else is injected into the cell.
 * Return true when the IRQ has been handled by the hyp.
 */
static bool arch_handle_phys_irq(struct per_cpu *cpu_data, u32 irqn)
{
        if (irqn == MAINTENANCE_IRQ) {
                irqchip_inject_pending(cpu_data);
                return true;
        }

        irqchip_set_pending(cpu_data, irqn, true);

        return false;
}

static void gic_handle_irq(struct per_cpu *cpu_data)
{
        bool handled = false;
        u32 irq_id;

        while (1) {
                /* Read ICC_IAR1: acknowledge the IRQ and set its 'active' state */
                arm_read_sysreg(ICC_IAR1_EL1, irq_id);

                if (irq_id == 0x3ff) /* Spurious IRQ */
                        break;

                /* Handle IRQ */
                if (is_sgi(irq_id)) {
                        arch_handle_sgi(cpu_data, irq_id);
                        handled = true;
                } else {
                        handled = arch_handle_phys_irq(cpu_data, irq_id);
                }

                /*
                 * Write ICC_EOIR1: drop the priority, but leave the interrupt
                 * active if handled is false.
                 * This avoids being re-interrupted by a level-triggered
                 * interrupt that still needs handling in the guest (e.g. the
                 * timer).
                 */
                arm_write_sysreg(ICC_EOIR1_EL1, irq_id);
                /* Deactivate if necessary */
                if (handled)
                        arm_write_sysreg(ICC_DIR_EL1, irq_id);
        }
}

static int gic_inject_irq(struct per_cpu *cpu_data, struct pending_irq *irq)
{
        int i;
        int free_lr = -1;
        u32 elsr;
        u64 lr;

        arm_read_sysreg(ICH_ELSR_EL2, elsr);
        for (i = 0; i < gic_num_lr; i++) {
                if ((elsr >> i) & 1) {
                        /* Entry is invalid, candidate for injection */
                        if (free_lr == -1)
                                free_lr = i;
                        continue;
                }

                /*
                 * Entry is in use; check that it doesn't match the one we want
                 * to inject.
                 */
                lr = gic_read_lr(i);

                /*
                 * A strict phys->virt id mapping is used for SPIs, so this test
                 * should be sufficient.
                 */
                if ((u32)lr == irq->virt_id)
                        return -EINVAL;
        }

        if (free_lr == -1) {
                u32 hcr;
                /*
                 * All list registers are in use; trigger a maintenance
                 * interrupt once they become available again.
                 */
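                /*
                 * ICH_HCR_EL2.UIE asserts the maintenance interrupt while no
                 * more than one List Register still holds a valid entry.
                 */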
                arm_read_sysreg(ICH_HCR_EL2, hcr);
                hcr |= ICH_HCR_UIE;
                arm_write_sysreg(ICH_HCR_EL2, hcr);

                return -EBUSY;
        }

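        /*
         * ICH_LR<n>_EL2 layout, as composed below: the virtual INTID in bits
         * [31:0], the physical INTID of a hardware interrupt in bits [41:32],
         * the group bit at [60], the HW bit at [61] and the state field at
         * [63:62] (01 = pending). For a purely virtual interrupt (HW clear),
         * bit [41] instead requests an EOI maintenance interrupt, which is
         * what ICH_LR_SGI_EOI sets here.
         */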
        lr = irq->virt_id;
        /* Only group 1 interrupts */
        lr |= ICH_LR_GROUP_BIT;
        lr |= ICH_LR_PENDING;
        if (irq->hw) {
                lr |= ICH_LR_HW_BIT;
                lr |= (u64)irq->type.irq << ICH_LR_PHYS_ID_SHIFT;
        } else if (irq->type.sgi.maintenance) {
                lr |= ICH_LR_SGI_EOI;
        }

        gic_write_lr(free_lr, lr);

        return 0;
}

struct irqchip_ops gic_irqchip = {
        .init = gic_init,
        .cpu_init = gic_cpu_init,
        .send_sgi = gic_send_sgi,
        .handle_irq = gic_handle_irq,
        .inject_irq = gic_inject_irq,
};