/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#ifndef _JAILHOUSE_ASM_PERCPU_H
#define _JAILHOUSE_ASM_PERCPU_H

#include <jailhouse/types.h>
#include <asm/paging.h>

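/* Number of Linux general-purpose registers (r0-r12) preserved on entry */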
#define NUM_ENTRY_REGS                  13

/* Keep in sync with struct per_cpu! */
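/* 1 << PERCPU_SIZE_SHIFT = 8 KiB per CPU; verified in __check_assumptions() below */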
#define PERCPU_SIZE_SHIFT               13
#define PERCPU_STACK_END                PAGE_SIZE
#define PERCPU_LINUX_SP                 PERCPU_STACK_END

#ifndef __ASSEMBLY__

#include <asm/cell.h>
#include <asm/psci.h>
#include <asm/spinlock.h>
#include <asm/sysregs.h>

struct pending_irq;

struct per_cpu {
        /* Keep these two in sync with defines above! */
        u8 stack[PAGE_SIZE];
        unsigned long linux_sp;
        unsigned long linux_ret;
        unsigned long linux_flags;
        unsigned long linux_reg[NUM_ENTRY_REGS];

        unsigned int cpu_id;
        unsigned int virt_id;

        /* Other CPUs can insert SGIs into the pending array */
        spinlock_t gic_lock;
        struct pending_irq *pending_irqs;
        struct pending_irq *first_pending;
        /* Only GICv3: redistributor base */
        void *gicr_base;

        struct cell *cell;

        u32 stats[JAILHOUSE_NUM_CPU_STATS];

        bool initialized;

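        /* PSCI mailboxes (struct psci_mbox, see asm/psci.h) used by the PSCI emulation */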
        /* The mbox will be accessed with an ldrd, which requires alignment */
        __attribute__((aligned(8))) struct psci_mbox psci_mbox;
        struct psci_mbox guest_mbox;

        bool flush_vcpu_caches;
        int shutdown_state;
        bool failed;
} __attribute__((aligned(PAGE_SIZE)));

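/* The pointer to the current CPU's per_cpu structure is held in TPIDR_EL2 */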
static inline struct per_cpu *this_cpu_data(void)
{
        struct per_cpu *cpu_data;

        arm_read_sysreg(TPIDR_EL2, cpu_data);
        return cpu_data;
}

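/* Generates a this_<field>() helper returning the given per_cpu field of the current CPU */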
#define DEFINE_PER_CPU_ACCESSOR(field)                                  \
static inline typeof(((struct per_cpu *)0)->field) this_##field(void)  \
{                                                                       \
        return this_cpu_data()->field;                                  \
}

DEFINE_PER_CPU_ACCESSOR(cpu_id)
DEFINE_PER_CPU_ACCESSOR(cell)

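/* Per-CPU data areas sit at the start of __page_pool, 1 << PERCPU_SIZE_SHIFT bytes apart */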
static inline struct per_cpu *per_cpu(unsigned int cpu)
{
        extern u8 __page_pool[];

        return (struct per_cpu *)(__page_pool + (cpu << PERCPU_SIZE_SHIFT));
}

static inline struct registers *guest_regs(struct per_cpu *cpu_data)
{
        /* Assumes that the trap handler is entered with an empty stack */
        return (struct registers *)(cpu_data->stack + PERCPU_STACK_END
                        - sizeof(struct registers));
}

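/* Translate between physical CPU IDs and cell-local virtual CPU IDs */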
static inline unsigned int arm_cpu_phys2virt(unsigned int cpu_id)
{
        return per_cpu(cpu_id)->virt_id;
}

unsigned int arm_cpu_virt2phys(struct cell *cell, unsigned int virt_id);

/* Validate defines */
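/* A false assumption yields a negative array size and therefore a compile-time error */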
#define CHECK_ASSUMPTION(assume)        ((void)sizeof(char[1 - 2*!(assume)]))

static inline void __check_assumptions(void)
{
        struct per_cpu cpu_data;

        CHECK_ASSUMPTION(sizeof(struct per_cpu) == (1 << PERCPU_SIZE_SHIFT));
        CHECK_ASSUMPTION(sizeof(cpu_data.stack) == PERCPU_STACK_END);
        CHECK_ASSUMPTION(__builtin_offsetof(struct per_cpu, linux_sp) ==
                         PERCPU_LINUX_SP);
}
#endif /* !__ASSEMBLY__ */

#endif /* !_JAILHOUSE_ASM_PERCPU_H */