hypervisor/arch/arm/control.c
/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/control.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>
#include <asm/control.h>
#include <asm/irqchip.h>
#include <asm/platform.h>
#include <asm/processor.h>
#include <asm/sysregs.h>
#include <asm/traps.h>

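/*
 * Reset the EL1 (guest) context of the calling CPU: wipe the saved
 * general-purpose registers, the banked registers of the AArch32 modes and
 * the trapped EL1/EL0 system registers, so the vCPU restarts from a clean
 * state.
 */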
static void arch_reset_el1(struct registers *regs)
{
	u32 sctlr;

	/* Wipe all banked and usr regs */
	memset(regs, 0, sizeof(struct registers));

	arm_write_banked_reg(SP_usr, 0);
	arm_write_banked_reg(SP_svc, 0);
	arm_write_banked_reg(SP_abt, 0);
	arm_write_banked_reg(SP_und, 0);
	arm_write_banked_reg(SP_svc, 0);
	arm_write_banked_reg(SP_irq, 0);
	arm_write_banked_reg(SP_fiq, 0);
	arm_write_banked_reg(LR_svc, 0);
	arm_write_banked_reg(LR_abt, 0);
	arm_write_banked_reg(LR_und, 0);
	arm_write_banked_reg(LR_svc, 0);
	arm_write_banked_reg(LR_irq, 0);
	arm_write_banked_reg(LR_fiq, 0);
	arm_write_banked_reg(R8_fiq, 0);
	arm_write_banked_reg(R9_fiq, 0);
	arm_write_banked_reg(R10_fiq, 0);
	arm_write_banked_reg(R11_fiq, 0);
	arm_write_banked_reg(R12_fiq, 0);
	arm_write_banked_reg(SPSR_svc, 0);
	arm_write_banked_reg(SPSR_abt, 0);
	arm_write_banked_reg(SPSR_und, 0);
	arm_write_banked_reg(SPSR_svc, 0);
	arm_write_banked_reg(SPSR_irq, 0);
	arm_write_banked_reg(SPSR_fiq, 0);

	/* Wipe the system registers */
	arm_read_sysreg(SCTLR_EL1, sctlr);
	sctlr = sctlr & ~SCTLR_MASK;
	arm_write_sysreg(SCTLR_EL1, sctlr);
	arm_write_sysreg(CPACR_EL1, 0);
	arm_write_sysreg(CONTEXTIDR_EL1, 0);
	arm_write_sysreg(PAR_EL1, 0);
	arm_write_sysreg(TTBR0_EL1, 0);
	arm_write_sysreg(TTBR1_EL1, 0);
	arm_write_sysreg(CSSELR_EL1, 0);

	arm_write_sysreg(CNTKCTL_EL1, 0);
	arm_write_sysreg(CNTP_CTL_EL0, 0);
	arm_write_sysreg(CNTP_CVAL_EL0, 0);
	arm_write_sysreg(CNTV_CTL_EL0, 0);
	arm_write_sysreg(CNTV_CVAL_EL0, 0);

	/* AArch32 specific */
	arm_write_sysreg(TTBCR, 0);
	arm_write_sysreg(DACR, 0);
	arm_write_sysreg(VBAR, 0);
	arm_write_sysreg(DFSR, 0);
	arm_write_sysreg(DFAR, 0);
	arm_write_sysreg(IFSR, 0);
	arm_write_sysreg(IFAR, 0);
	arm_write_sysreg(ADFSR, 0);
	arm_write_sysreg(AIFSR, 0);
	arm_write_sysreg(MAIR0, 0);
	arm_write_sysreg(MAIR1, 0);
	arm_write_sysreg(AMAIR0, 0);
	arm_write_sysreg(AMAIR1, 0);
	arm_write_sysreg(TPIDRURW, 0);
	arm_write_sysreg(TPIDRURO, 0);
	arm_write_sysreg(TPIDRPRW, 0);
}

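/*
 * Reset the calling CPU on behalf of its cell: rebuild the stage-2 MMU
 * context, flush the cell's caches, wait for the new reset address and then
 * return to EL1 with a clean register state. Also used on hypervisor
 * shutdown, in which case it hands over to arch_shutdown_self().
 */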
void arch_reset_self(struct per_cpu *cpu_data)
{
	int err = 0;
	unsigned long reset_address;
	struct cell *cell = cpu_data->cell;
	struct registers *regs = guest_regs(cpu_data);
	bool is_shutdown = cpu_data->shutdown;

	if (!is_shutdown)
		err = arch_mmu_cpu_cell_init(cpu_data);
	if (err)
		printk("MMU setup failed\n");
	/*
	 * On the first CPU to reach this, write all of the cell's data back to
	 * memory so that the cell can be started with caches disabled.
	 * On all CPUs, invalidate the instruction caches to pick up the
	 * potentially new instructions.
	 */
	arch_cell_caches_flush(cell);

	/*
	 * We come from the IRQ handler, but we won't return there, so the IPI
	 * is deactivated here.
	 */
	irqchip_eoi_irq(SGI_CPU_OFF, true);

	if (is_shutdown) {
#ifndef CONFIG_MACH_VEXPRESS
		if (cell != &root_cell) {
			irqchip_cpu_shutdown(cpu_data);

			smc(PSCI_CPU_OFF, 0, 0, 0);
			smc(PSCI_CPU_OFF_V0_1_UBOOT, 0, 0, 0);
			printk("FATAL: PSCI_CPU_OFF failed\n");
			panic_stop();
		}
#endif
		/* arch_shutdown_self resets the GIC on all remaining CPUs. */
	} else {
		err = irqchip_cpu_reset(cpu_data);
		if (err)
			printk("IRQ setup failed\n");
	}

	/* Wait for the driver to call cpu_up */
	if (cell == &root_cell || is_shutdown)
		reset_address = arch_smp_spin(cpu_data, root_cell.arch.smp);
	else
		reset_address = arch_smp_spin(cpu_data, cell->arch.smp);

	/* Set the new MPIDR */
	arm_write_sysreg(VMPIDR_EL2, cpu_data->virt_id | MPIDR_MP_BIT);

	/* Restore an empty context */
	arch_reset_el1(regs);

	arm_write_banked_reg(ELR_hyp, reset_address);
	arm_write_banked_reg(SPSR_hyp, RESET_PSR);

	if (is_shutdown)
		/* Won't return here. */
		arch_shutdown_self(cpu_data);

	vmreturn(regs);
}

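/*
 * Park the calling CPU in the PSCI idle loop and, once it is resumed,
 * flush its TLBs if another CPU requested it in the meantime.
 */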
static void arch_suspend_self(struct per_cpu *cpu_data)
{
	psci_suspend(cpu_data);

	if (cpu_data->flush_vcpu_caches)
		arch_cpu_tlb_flush(cpu_data);
}

static void arch_dump_exit(const char *reason)
{
	unsigned long pc;

	arm_read_banked_reg(ELR_hyp, pc);
	printk("Unhandled HYP %s exit at 0x%lx\n", reason, pc);
}

static void arch_dump_abt(bool is_data)
{
	u32 hxfar;
	u32 esr;

	arm_read_sysreg(ESR_EL2, esr);
	if (is_data)
		arm_read_sysreg(HDFAR, hxfar);
	else
		arm_read_sysreg(HIFAR, hxfar);

	printk("  paddr=0x%x esr=0x%x\n", hxfar, esr);
}

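/*
 * Top-level VM-exit dispatcher: handles IRQs and traps, and stops the CPU
 * on any exit reason the hypervisor cannot recover from. Returns the
 * register context to be restored on VM entry.
 */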
struct registers* arch_handle_exit(struct per_cpu *cpu_data,
				   struct registers *regs)
{
	cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;

	switch (regs->exit_reason) {
	case EXIT_REASON_IRQ:
		irqchip_handle_irq(cpu_data);
		break;
	case EXIT_REASON_TRAP:
		arch_handle_trap(cpu_data, regs);
		break;

	case EXIT_REASON_UNDEF:
		arch_dump_exit("undef");
		panic_stop();
	case EXIT_REASON_DABT:
		arch_dump_exit("data abort");
		arch_dump_abt(true);
		panic_stop();
	case EXIT_REASON_PABT:
		arch_dump_exit("prefetch abort");
		arch_dump_abt(false);
		panic_stop();
	case EXIT_REASON_HVC:
		arch_dump_exit("hvc");
		panic_stop();
	case EXIT_REASON_FIQ:
		arch_dump_exit("fiq");
		panic_stop();
	default:
		arch_dump_exit("unknown");
		panic_stop();
	}

	if (cpu_data->shutdown)
		/* Won't return here. */
		arch_shutdown_self(cpu_data);

	return regs;
}

/* CPU must be stopped */
void arch_resume_cpu(unsigned int cpu_id)
{
	/*
	 * Simply get out of the spin loop by returning to handle_sgi.
	 * If the CPU is being reset, it has already left the PSCI idle loop.
	 */
	if (psci_cpu_stopped(cpu_id))
		psci_resume(cpu_id);
}

/* CPU must be stopped */
void arch_park_cpu(unsigned int cpu_id)
{
	struct per_cpu *cpu_data = per_cpu(cpu_id);

	/*
	 * Reset always follows park_cpu, so we just need to make sure that the
	 * CPU is suspended.
	 */
	if (psci_wait_cpu_stopped(cpu_id) != 0)
		printk("ERROR: CPU%d is supposed to be stopped\n", cpu_id);
	else
		cpu_data->cell->arch.needs_flush = true;
}

/* CPU must be stopped */
void arch_reset_cpu(unsigned int cpu_id)
{
	unsigned long cpu_data = (unsigned long)per_cpu(cpu_id);

	if (psci_cpu_on(cpu_id, (unsigned long)arch_reset_self, cpu_data))
		printk("ERROR: unable to reset CPU%d (was running)\n", cpu_id);
}

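/*
 * Suspend a remote CPU: send it the SGI_CPU_OFF IPI unless it is already
 * stopped, then wait until it has entered the PSCI idle loop.
 */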
void arch_suspend_cpu(unsigned int cpu_id)
{
	struct sgi sgi;

	if (psci_cpu_stopped(cpu_id))
		return;

	sgi.routing_mode = 0;
	sgi.aff1 = 0;
	sgi.aff2 = 0;
	sgi.aff3 = 0;
	sgi.targets = 1 << cpu_id;
	sgi.id = SGI_CPU_OFF;

	irqchip_send_sgi(&sgi);

	psci_wait_cpu_stopped(cpu_id);
}

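/*
 * Handle the management SGIs sent by other CPUs: SGI_INJECT asks us to
 * inject pending IRQs into the cell, SGI_CPU_OFF asks us to suspend.
 */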
void arch_handle_sgi(struct per_cpu *cpu_data, u32 irqn)
{
	cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;

	switch (irqn) {
	case SGI_INJECT:
		irqchip_inject_pending(cpu_data);
		break;
	case SGI_CPU_OFF:
		arch_suspend_self(cpu_data);
		break;
	default:
		printk("WARN: unknown SGI received %d\n", irqn);
	}
}

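/*
 * Translate a cell-visible (virtual) CPU id back to the physical CPU id.
 * Returns -1 (as unsigned) when no CPU of the cell carries that virtual id.
 */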
unsigned int arm_cpu_virt2phys(struct cell *cell, unsigned int virt_id)
{
	unsigned int cpu;

	for_each_cpu(cpu, cell->cpu_set) {
		if (per_cpu(cpu)->virt_id == virt_id)
			return cpu;
	}

	return -1;
}

/*
 * Handle the maintenance interrupt here; every other IRQ is injected into
 * the cell. Returns true when the IRQ has been handled by the hypervisor.
 */
bool arch_handle_phys_irq(struct per_cpu *cpu_data, u32 irqn)
{
	if (irqn == MAINTENANCE_IRQ) {
		cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MAINTENANCE]++;

		irqchip_inject_pending(cpu_data);
		return true;
	}

	cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_VIRQ]++;

	irqchip_set_pending(cpu_data, irqn, true);

	return false;
}

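/*
 * Architecture-specific part of cell creation: set up the cell's stage-2
 * page tables, assign sequential virtual CPU ids to its CPUs, initialise
 * its irqchip (shrinking the root cell's accordingly) and register the
 * cell's SMP operations.
 */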
int arch_cell_create(struct cell *cell)
{
	int err;
	unsigned int cpu;
	unsigned int virt_id = 0;

	err = arch_mmu_cell_init(cell);
	if (err)
		return err;

	/*
	 * Generate a virtual CPU id according to the position of each CPU in
	 * the cell set.
	 */
	for_each_cpu(cpu, cell->cpu_set) {
		per_cpu(cpu)->virt_id = virt_id;
		virt_id++;
	}
	cell->arch.last_virt_id = virt_id - 1;

	err = irqchip_cell_init(cell);
	if (err) {
		arch_mmu_cell_destroy(cell);
		return err;
	}
	irqchip_root_cell_shrink(cell);

	register_smp_ops(cell);

	return 0;
}

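/*
 * Tear down the architecture-specific state of a cell: hand its CPUs back
 * to the root cell (restoring their physical ids and resetting them),
 * release its irqchip state and free its stage-2 page tables.
 */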
void arch_cell_destroy(struct cell *cell)
{
	unsigned int cpu;
	struct per_cpu *percpu;

	for_each_cpu(cpu, cell->cpu_set) {
		percpu = per_cpu(cpu);
		/* Re-assign the physical IDs for the root cell */
		percpu->virt_id = percpu->cpu_id;
		arch_reset_cpu(cpu);
	}

	irqchip_cell_exit(cell);

	arch_mmu_cell_destroy(cell);
}

/* Note: only supports synchronous flushing as triggered by config_commit! */
void arch_flush_cell_vcpu_caches(struct cell *cell)
{
	unsigned int cpu;

	for_each_cpu(cpu, cell->cpu_set)
		if (cpu == this_cpu_id())
			arch_cpu_tlb_flush(per_cpu(cpu));
		else
			per_cpu(cpu)->flush_vcpu_caches = true;
}

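/* Committing a cell configuration currently requires no extra work on ARM. */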
void arch_config_commit(struct cell *cell_added_removed)
{
}

void __attribute__((noreturn)) arch_panic_stop(void)
{
	psci_cpu_off(this_cpu_data());
	__builtin_unreachable();
}

void arch_panic_park(void)
{
	/* Won't return to panic_park */
	if (phys_processor_id() == panic_cpu)
		panic_in_progress = 0;

	psci_cpu_off(this_cpu_data());
	__builtin_unreachable();
}

/*
 * This handler is only used for cells, not for the root. The core already
 * issued a cpu_suspend. arch_reset_cpu will cause arch_reset_self to be
 * called on that CPU, which will in turn call arch_shutdown_self.
 */
void arch_shutdown_cpu(unsigned int cpu_id)
{
	struct per_cpu *cpu_data = per_cpu(cpu_id);

	cpu_data->virt_id = cpu_id;
	cpu_data->shutdown = true;

	if (psci_wait_cpu_stopped(cpu_id))
		printk("FATAL: unable to stop CPU%d\n", cpu_id);

	arch_reset_cpu(cpu_id);
}

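/*
 * Architecture-specific part of hypervisor shutdown: release the irqchip
 * state of every non-root cell and mark all root-cell CPUs for shutdown,
 * so that their exit handlers complete the per-CPU shutdown.
 */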
void arch_shutdown(void)
{
	unsigned int cpu;
	struct cell *cell = root_cell.next;

	/* Re-route each SPI to CPU0 */
	for (; cell != NULL; cell = cell->next)
		irqchip_cell_exit(cell);

	/*
	 * Let the exit handler call reset_self to let the core finish its
	 * shutdown function and release its lock.
	 */
	for_each_cpu(cpu, root_cell.cpu_set)
		per_cpu(cpu)->shutdown = true;
}