/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/control.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <jailhouse/string.h>
#include <asm/control.h>
#include <asm/irqchip.h>
#include <asm/platform.h>
#include <asm/processor.h>
#include <asm/sysregs.h>
#include <asm/traps.h>

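/*
 * Bring the guest's EL1 context back to a clean, reset-like state: clear the
 * saved usr registers, the banked mode registers and the relevant EL1 and
 * AArch32 system registers before the CPU re-enters its cell.
 */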
static void arch_reset_el1(struct registers *regs)
{
        u32 sctlr;

        /* Wipe all banked and usr regs */
        memset(regs, 0, sizeof(struct registers));

        arm_write_banked_reg(SP_usr, 0);
        arm_write_banked_reg(SP_svc, 0);
        arm_write_banked_reg(SP_abt, 0);
        arm_write_banked_reg(SP_und, 0);
        arm_write_banked_reg(SP_irq, 0);
        arm_write_banked_reg(SP_fiq, 0);
        arm_write_banked_reg(LR_svc, 0);
        arm_write_banked_reg(LR_abt, 0);
        arm_write_banked_reg(LR_und, 0);
        arm_write_banked_reg(LR_irq, 0);
        arm_write_banked_reg(LR_fiq, 0);
        arm_write_banked_reg(R8_fiq, 0);
        arm_write_banked_reg(R9_fiq, 0);
        arm_write_banked_reg(R10_fiq, 0);
        arm_write_banked_reg(R11_fiq, 0);
        arm_write_banked_reg(R12_fiq, 0);
        arm_write_banked_reg(SPSR_svc, 0);
        arm_write_banked_reg(SPSR_abt, 0);
        arm_write_banked_reg(SPSR_und, 0);
        arm_write_banked_reg(SPSR_irq, 0);
        arm_write_banked_reg(SPSR_fiq, 0);

        /* Wipe the system registers */
        arm_read_sysreg(SCTLR_EL1, sctlr);
        sctlr = sctlr & ~SCTLR_MASK;
        arm_write_sysreg(SCTLR_EL1, sctlr);
        arm_write_sysreg(CPACR_EL1, 0);
        arm_write_sysreg(CONTEXTIDR_EL1, 0);
        arm_write_sysreg(PAR_EL1, 0);
        arm_write_sysreg(TTBR0_EL1, 0);
        arm_write_sysreg(TTBR1_EL1, 0);
        arm_write_sysreg(CSSELR_EL1, 0);

        arm_write_sysreg(CNTKCTL_EL1, 0);
        arm_write_sysreg(CNTP_CTL_EL0, 0);
        arm_write_sysreg(CNTP_CVAL_EL0, 0);
        arm_write_sysreg(CNTV_CTL_EL0, 0);
        arm_write_sysreg(CNTV_CVAL_EL0, 0);

        /* AArch32 specific */
        arm_write_sysreg(TTBCR, 0);
        arm_write_sysreg(DACR, 0);
        arm_write_sysreg(VBAR, 0);
        arm_write_sysreg(DFSR, 0);
        arm_write_sysreg(DFAR, 0);
        arm_write_sysreg(IFSR, 0);
        arm_write_sysreg(IFAR, 0);
        arm_write_sysreg(ADFSR, 0);
        arm_write_sysreg(AIFSR, 0);
        arm_write_sysreg(MAIR0, 0);
        arm_write_sysreg(MAIR1, 0);
        arm_write_sysreg(AMAIR0, 0);
        arm_write_sysreg(AMAIR1, 0);
        arm_write_sysreg(TPIDRURW, 0);
        arm_write_sysreg(TPIDRURO, 0);
        arm_write_sysreg(TPIDRPRW, 0);
}

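/*
 * Reset the calling CPU and send it back into its cell: initialise the
 * per-CPU MMU state (unless shutting down), flush the cell's caches, wait
 * for a reset address, wipe the EL1 context and return to the guest there.
 */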
void arch_reset_self(struct per_cpu *cpu_data)
{
        int err = 0;
        unsigned long reset_address;
        struct cell *cell = cpu_data->cell;
        struct registers *regs = guest_regs(cpu_data);
        bool is_shutdown = cpu_data->shutdown;

        if (!is_shutdown)
                err = arch_mmu_cpu_cell_init(cpu_data);
        if (err)
                printk("MMU setup failed\n");
        /*
         * On the first CPU to reach this point, write all cell data back to
         * memory so that the cell can be started with caches disabled.
         * On all CPUs, invalidate the instruction caches to pick up any new
         * instructions.
         */
        arch_cell_caches_flush(cell);

        /*
         * We come from the IRQ handler, but we won't return there, so the IPI
         * is deactivated here.
         */
        irqchip_eoi_irq(SGI_CPU_OFF, true);

        if (is_shutdown) {
#ifndef CONFIG_MACH_VEXPRESS
                if (cell != &root_cell) {
                        irqchip_cpu_shutdown(cpu_data);

                        smc(PSCI_CPU_OFF, 0, 0, 0);
                        smc(PSCI_CPU_OFF_V0_1_UBOOT, 0, 0, 0);
                        panic_printk("FATAL: PSCI_CPU_OFF failed\n");
                        panic_stop();
                }
#endif
                /* arch_shutdown_self resets the GIC on all remaining CPUs. */
        } else {
                err = irqchip_cpu_reset(cpu_data);
                if (err)
                        printk("IRQ setup failed\n");
        }

        /* Wait for the driver to call cpu_up */
        if (cell == &root_cell || is_shutdown)
                reset_address = arch_smp_spin(cpu_data, root_cell.arch.smp);
        else
                reset_address = arch_smp_spin(cpu_data, cell->arch.smp);

        /* Set the new MPIDR */
        arm_write_sysreg(VMPIDR_EL2, cpu_data->virt_id | MPIDR_MP_BIT);

        /* Restore an empty context */
        arch_reset_el1(regs);

        arm_write_banked_reg(ELR_hyp, reset_address);
        arm_write_banked_reg(SPSR_hyp, RESET_PSR);

        if (is_shutdown)
                /* Won't return here. */
                arch_shutdown_self(cpu_data);

        vmreturn(regs);
}

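/*
 * Suspend the calling CPU until it is resumed; flush its TLB afterwards if a
 * flush was requested in the meantime.
 */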
static void arch_suspend_self(struct per_cpu *cpu_data)
{
        psci_suspend(cpu_data);

        if (cpu_data->flush_vcpu_caches)
                arch_cpu_tlb_flush(cpu_data);
}

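/* Dump the exit reason, the guest PC and the usr register file. */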
static void arch_dump_exit(struct registers *regs, const char *reason)
{
        unsigned long pc;
        unsigned int n;

        arm_read_banked_reg(ELR_hyp, pc);
        panic_printk("Unhandled HYP %s exit at 0x%lx\n", reason, pc);
        for (n = 0; n < NUM_USR_REGS; n++)
                panic_printk("r%d:%s 0x%08lx%s", n, n < 10 ? " " : "",
                             regs->usr[n], n % 4 == 3 ? "\n" : "  ");
        panic_printk("\n");
}

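/* Dump the fault address and syndrome (ESR) of a data or prefetch abort. */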
static void arch_dump_abt(bool is_data)
{
        u32 hxfar;
        u32 esr;

        arm_read_sysreg(ESR_EL2, esr);
        if (is_data)
                arm_read_sysreg(HDFAR, hxfar);
        else
                arm_read_sysreg(HIFAR, hxfar);

        panic_printk("Physical address: 0x%08x ESR: 0x%08x\n", hxfar, esr);
}

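/*
 * Central dispatcher for exits into HYP mode: IRQs and traps are handled and
 * execution returns to the cell; any other exit reason is fatal. A pending
 * shutdown request is carried out before returning.
 */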
struct registers *arch_handle_exit(struct per_cpu *cpu_data,
                                   struct registers *regs)
{
        cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_TOTAL]++;

        switch (regs->exit_reason) {
        case EXIT_REASON_IRQ:
                irqchip_handle_irq(cpu_data);
                break;
        case EXIT_REASON_TRAP:
                arch_handle_trap(cpu_data, regs);
                break;

        case EXIT_REASON_UNDEF:
                arch_dump_exit(regs, "undef");
                panic_stop();
        case EXIT_REASON_DABT:
                arch_dump_exit(regs, "data abort");
                arch_dump_abt(true);
                panic_stop();
        case EXIT_REASON_PABT:
                arch_dump_exit(regs, "prefetch abort");
                arch_dump_abt(false);
                panic_stop();
        case EXIT_REASON_HVC:
                arch_dump_exit(regs, "hvc");
                panic_stop();
        case EXIT_REASON_FIQ:
                arch_dump_exit(regs, "fiq");
                panic_stop();
        default:
                arch_dump_exit(regs, "unknown");
                panic_stop();
        }

        if (cpu_data->shutdown)
                /* Won't return here. */
                arch_shutdown_self(cpu_data);

        return regs;
}

/* CPU must be stopped */
void arch_resume_cpu(unsigned int cpu_id)
{
        /*
         * Simply get out of the spin loop by returning to handle_sgi.
         * If the CPU is being reset, it has already left the PSCI idle loop.
         */
        if (psci_cpu_stopped(cpu_id))
                psci_resume(cpu_id);
}

/* CPU must be stopped */
void arch_park_cpu(unsigned int cpu_id)
{
        struct per_cpu *cpu_data = per_cpu(cpu_id);

        /*
         * Reset always follows park_cpu, so we just need to make sure that
         * the CPU is suspended.
         */
        if (psci_wait_cpu_stopped(cpu_id) != 0)
                printk("ERROR: CPU%d is supposed to be stopped\n", cpu_id);
        else
                cpu_data->cell->arch.needs_flush = true;
}

/* CPU must be stopped */
void arch_reset_cpu(unsigned int cpu_id)
{
        unsigned long cpu_data = (unsigned long)per_cpu(cpu_id);

        if (psci_cpu_on(cpu_id, (unsigned long)arch_reset_self, cpu_data))
                printk("ERROR: unable to reset CPU%d (was running)\n", cpu_id);
}

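/*
 * Stop a CPU by sending it the SGI_CPU_OFF IPI and wait until it has entered
 * the stopped state. Does nothing if the CPU is already stopped.
 */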
void arch_suspend_cpu(unsigned int cpu_id)
{
        struct sgi sgi;

        if (psci_cpu_stopped(cpu_id))
                return;

        sgi.routing_mode = 0;
        sgi.aff1 = 0;
        sgi.aff2 = 0;
        sgi.aff3 = 0;
        sgi.targets = 1 << cpu_id;
        sgi.id = SGI_CPU_OFF;

        irqchip_send_sgi(&sgi);

        psci_wait_cpu_stopped(cpu_id);
}

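/* Handle a management SGI: inject pending IRQs or suspend the calling CPU. */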
void arch_handle_sgi(struct per_cpu *cpu_data, u32 irqn)
{
        cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MANAGEMENT]++;

        switch (irqn) {
        case SGI_INJECT:
                irqchip_inject_pending(cpu_data);
                break;
        case SGI_CPU_OFF:
                arch_suspend_self(cpu_data);
                break;
        default:
                printk("WARN: unknown SGI received %d\n", irqn);
        }
}

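/*
 * Translate a cell's virtual CPU id into the physical CPU id. Returns -1 if
 * no CPU of the cell carries this virtual id.
 */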
unsigned int arm_cpu_virt2phys(struct cell *cell, unsigned int virt_id)
{
        unsigned int cpu;

        for_each_cpu(cpu, cell->cpu_set) {
                if (per_cpu(cpu)->virt_id == virt_id)
                        return cpu;
        }

        return -1;
}

/*
 * Handle the maintenance interrupt; everything else is injected into the
 * cell. Returns true when the IRQ has been handled by the hypervisor.
 */
bool arch_handle_phys_irq(struct per_cpu *cpu_data, u32 irqn)
{
        if (irqn == MAINTENANCE_IRQ) {
                cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_MAINTENANCE]++;

                irqchip_inject_pending(cpu_data);
                return true;
        }

        cpu_data->stats[JAILHOUSE_CPU_STAT_VMEXITS_VIRQ]++;

        irqchip_set_pending(cpu_data, irqn);

        return false;
}

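/*
 * Architecture-specific part of cell creation: initialise the cell's MMU
 * structures, assign dense virtual CPU ids, set up the irqchip and register
 * the SMP operations.
 */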
int arch_cell_create(struct cell *cell)
{
        int err;
        unsigned int cpu;
        unsigned int virt_id = 0;

        err = arch_mmu_cell_init(cell);
        if (err)
                return err;

        /*
         * Generate a virtual CPU id according to the position of each CPU
         * in the cell set.
         */
        for_each_cpu(cpu, cell->cpu_set) {
                per_cpu(cpu)->virt_id = virt_id;
                virt_id++;
        }
        cell->arch.last_virt_id = virt_id - 1;

        err = irqchip_cell_init(cell);
        if (err) {
                arch_mmu_cell_destroy(cell);
                return err;
        }

        register_smp_ops(cell);

        return 0;
}

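/*
 * Architecture-specific part of cell destruction: hand the CPUs back to the
 * root cell (restoring their physical ids and resetting them), then release
 * the irqchip and MMU resources.
 */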
void arch_cell_destroy(struct cell *cell)
{
        unsigned int cpu;
        struct per_cpu *percpu;

        for_each_cpu(cpu, cell->cpu_set) {
                percpu = per_cpu(cpu);
                /* Re-assign the physical IDs for the root cell */
                percpu->virt_id = percpu->cpu_id;
                arch_reset_cpu(cpu);
        }

        irqchip_cell_exit(cell);

        arch_mmu_cell_destroy(cell);
}

/* Note: only supports synchronous flushing as triggered by config_commit! */
void arch_flush_cell_vcpu_caches(struct cell *cell)
{
        unsigned int cpu;

        for_each_cpu(cpu, cell->cpu_set)
                if (cpu == this_cpu_id())
                        arch_cpu_tlb_flush(per_cpu(cpu));
                else
                        per_cpu(cpu)->flush_vcpu_caches = true;
}

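/* Nothing to do on ARM; see arch_flush_cell_vcpu_caches above. */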
void arch_config_commit(struct cell *cell_added_removed)
{
}

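/* Take the panicking CPU offline via PSCI; this function does not return. */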
void __attribute__((noreturn)) arch_panic_stop(void)
{
        psci_cpu_off(this_cpu_data());
        __builtin_unreachable();
}

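/*
 * Park a CPU after a panic: clear the panic-in-progress marker if we are the
 * panicking CPU, then go offline via PSCI.
 */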
void arch_panic_park(void)
{
        /* Won't return to panic_park */
        if (phys_processor_id() == panic_cpu)
                panic_in_progress = 0;

        psci_cpu_off(this_cpu_data());
        __builtin_unreachable();
}

/*
 * This handler is only used for cells, not for the root. The core already
 * issued a cpu_suspend. arch_reset_cpu will cause arch_reset_self to be
 * called on that CPU, which will in turn call arch_shutdown_self.
 */
void arch_shutdown_cpu(unsigned int cpu_id)
{
        struct per_cpu *cpu_data = per_cpu(cpu_id);

        cpu_data->virt_id = cpu_id;
        cpu_data->shutdown = true;

        if (psci_wait_cpu_stopped(cpu_id))
                printk("FATAL: unable to stop CPU%d\n", cpu_id);

        arch_reset_cpu(cpu_id);
}

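/*
 * Architecture-specific part of the hypervisor shutdown: return the SPIs of
 * all cells to the root cell and mark every root-cell CPU for shutdown, so
 * that the exit handler tears it down on its next invocation.
 */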
void arch_shutdown(void)
{
        unsigned int cpu;
        struct cell *cell = root_cell.next;

        /* Re-route each SPI to CPU0 */
        for (; cell != NULL; cell = cell->next)
                irqchip_cell_exit(cell);

        /*
         * Let the exit handler call reset_self to let the core finish its
         * shutdown function and release its lock.
         */
        for_each_cpu(cpu, root_cell.cpu_set)
                per_cpu(cpu)->shutdown = true;
}