/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#ifndef _JAILHOUSE_ASM_PERCPU_H
#define _JAILHOUSE_ASM_PERCPU_H

#include <jailhouse/types.h>
#include <asm/paging.h>
#include <asm/processor.h>

#include <jailhouse/hypercall.h>

#define NUM_ENTRY_REGS                  6

#define STACK_SIZE                      PAGE_SIZE

#ifndef __ASSEMBLY__

#include <asm/cell.h>
#include <asm/spinlock.h>
#include <asm/svm.h>
#include <asm/vmx.h>

/* Round up sizeof(struct per_cpu) to the next power of two. */
#define PERCPU_SIZE_SHIFT \
        (BITS_PER_LONG - __builtin_clzl(sizeof(struct per_cpu) - 1))
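
/*
 * Worked example of the shift computation above (hypothetical size,
 * assuming BITS_PER_LONG == 64 and the usual semantics of
 * __builtin_clzl(), i.e. counting leading zero bits):
 *
 *   sizeof(struct per_cpu)  == 0x5000 (20 KiB, made-up value)
 *   0x5000 - 1              == 0x4fff  -> 15 significant bits
 *   __builtin_clzl(0x4fff)  == 64 - 15 == 49
 *   PERCPU_SIZE_SHIFT       == 64 - 49 == 15 -> 32 KiB per-CPU slot
 *
 * Each CPU's per_cpu region thus occupies (1 << PERCPU_SIZE_SHIFT) bytes,
 * the structure size rounded up to the next power of two, which lets
 * per_cpu() below locate a slot with a single shift and add.
 */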

/**
 * @defgroup Per-CPU Per-CPU Subsystem
 *
 * The per-CPU subsystem provides a CPU-local state structure and accessors.
 *
 * @{
 */

/** Per-CPU states. */
struct per_cpu {
        union {
                /** Stack used while in hypervisor mode. */
                u8 stack[STACK_SIZE];
                struct {
                        u8 __fill[STACK_SIZE - sizeof(union registers)];
                        /** Guest registers saved on stack during VM exit. */
                        union registers guest_regs;
                };
        };
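
        /*
         * Layout of the union above (informal sketch): guest_regs is padded
         * so that it ends exactly at the top of the hypervisor stack.  The
         * VM-exit entry code pushes the guest's general-purpose registers
         * onto that stack, so the pushed values land in guest_regs and can
         * afterwards be accessed as a structure.
         *
         *   low address   +---------------------+  stack[0]
         *                 | __fill              |
         *                 +---------------------+
         *                 | guest_regs          |
         *   high address  +---------------------+  stack[STACK_SIZE]
         */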

        /** Linux stack pointer, used for handover to hypervisor. */
        unsigned long linux_sp;

        /** Self reference, required for this_cpu_data(). */
        struct per_cpu *cpu_data;
        /** Logical CPU ID (same as Linux). */
        unsigned int cpu_id;
        /** Physical APIC ID. */
        u32 apic_id;
        /** Owning cell. */
        struct cell *cell;

        /** Statistic counters. */
        u32 stats[JAILHOUSE_NUM_CPU_STATS];

        /** Linux states, used for handover to/from hypervisor. @{ */
        struct desc_table_reg linux_gdtr;
        struct desc_table_reg linux_idtr;
        unsigned long linux_reg[NUM_ENTRY_REGS];
        unsigned long linux_ip;
        unsigned long linux_cr0;
        unsigned long linux_cr3;
        unsigned long linux_cr4;
        struct segment linux_cs;
        struct segment linux_ds;
        struct segment linux_es;
        struct segment linux_fs;
        struct segment linux_gs;
        struct segment linux_tss;
        unsigned long linux_efer;
        /** @} */

        /** Shadow states. @{ */
        unsigned long pat;
        unsigned long mtrr_def_type;
        /** @} */

        /** True when CPU is initialized by hypervisor. */
        bool initialized;
        union {
                /** VMX initialization state */
                enum vmx_state vmx_state;
                /** SVM initialization state */
                enum {SVMOFF = 0, SVMON} svm_state;
        };

        /**
         * Lock protecting CPU state changes done for control tasks.
         *
         * The lock protects the following fields (unless CPU is suspended):
         * @li per_cpu::suspend_cpu
         * @li per_cpu::cpu_suspended (except for spinning on it to become
         *                             true)
         * @li per_cpu::wait_for_sipi
         * @li per_cpu::init_signaled
         * @li per_cpu::sipi_vector
         * @li per_cpu::flush_vcpu_caches
         */
        spinlock_t control_lock;
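
        /*
         * Control-lock usage sketch (illustrative, assuming the
         * spin_lock()/spin_unlock() helpers declared in asm/spinlock.h):
         * code signaling an event to another CPU takes that CPU's lock
         * before touching the protected fields, e.g.
         *
         *   struct per_cpu *target = per_cpu(cpu_id);
         *
         *   spin_lock(&target->control_lock);
         *   target->init_signaled = true;
         *   spin_unlock(&target->control_lock);
         */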

        /** Set to true to instruct the CPU to suspend. */
        volatile bool suspend_cpu;
        /** True if CPU is waiting for SIPI. */
        volatile bool wait_for_sipi;
        /** True if CPU is suspended. */
        volatile bool cpu_suspended;
        /** Set to true when an INIT signal is pending. */
        bool init_signaled;
        /** Pending SIPI vector; -1 if none is pending. */
        int sipi_vector;
        /** Set to true when a TLB flush is pending for the paging layer
         *  that maps host physical <-> guest physical memory. */
        bool flush_vcpu_caches;
        /** Set to true to instruct the CPU to disable hypervisor mode. */
        bool shutdown_cpu;
        /** State of the shutdown process. Possible values:
         * @li SHUTDOWN_NONE: no shutdown in progress
         * @li SHUTDOWN_STARTED: shutdown in progress
         * @li negative error code: shutdown failed
         */
        int shutdown_state;
        /** True if the CPU violated a cell boundary or caused some other
         *  failure in guest mode. */
        bool failed;

        /** Number of iterations to clear pending APIC IRQs. */
        unsigned int num_clear_apic_irqs;

        union {
                struct {
                        /** VMXON region, required by VMX. */
                        struct vmcs vmxon_region
                                __attribute__((aligned(PAGE_SIZE)));
                        /** VMCS of this CPU, required by VMX. */
                        struct vmcs vmcs
                                __attribute__((aligned(PAGE_SIZE)));
                };
                struct {
                        /** VMCB block, required by SVM. */
                        struct vmcb vmcb
                                __attribute__((aligned(PAGE_SIZE)));
                        /** SVM host save area; opaque to us. */
                        u8 host_state[PAGE_SIZE]
                                __attribute__((aligned(PAGE_SIZE)));
                };
        };
} __attribute__((aligned(PAGE_SIZE)));

/**
 * Define CPU-local accessor for a per-CPU field.
 * @param field         Field name.
 *
 * The accessor will have the form of a function, returning the
 * correspondingly typed field value: @c this_field().
 */
#define DEFINE_PER_CPU_ACCESSOR(field)                                      \
static inline typeof(((struct per_cpu *)0)->field) this_##field(void)      \
{                                                                           \
        typeof(((struct per_cpu *)0)->field) tmp;                           \
                                                                            \
        asm volatile(                                                       \
                "mov %%gs:%1,%0\n\t"                                        \
                : "=&q" (tmp)                                               \
                : "m" (*(u8 *)__builtin_offsetof(struct per_cpu, field)));  \
        return tmp;                                                         \
}
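
/*
 * How the accessors work (informal sketch): during per-CPU setup the
 * hypervisor is expected to load the GS base with the address of the local
 * CPU's struct per_cpu, so a "mov %gs:offset" fetches the field of exactly
 * this CPU's instance without first computing its address.  For example,
 * DEFINE_PER_CPU_ACCESSOR(cpu_id) expands to roughly:
 *
 *   static inline unsigned int this_cpu_id(void)
 *   {
 *           unsigned int tmp;
 *
 *           asm volatile("mov %%gs:%1,%0\n\t"
 *                        : "=&q" (tmp)
 *                        : "m" (*(u8 *)__builtin_offsetof(struct per_cpu,
 *                                                         cpu_id)));
 *           return tmp;
 *   }
 */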

/**
 * Retrieve the data structure of the current CPU.
 *
 * @return Pointer to per-CPU data structure.
 */
static inline struct per_cpu *this_cpu_data(void);
DEFINE_PER_CPU_ACCESSOR(cpu_data)

/**
 * Retrieve the ID of the current CPU.
 *
 * @return CPU ID.
 */
static inline unsigned int this_cpu_id(void);
DEFINE_PER_CPU_ACCESSOR(cpu_id)

/**
 * Retrieve the cell owning the current CPU.
 *
 * @return Pointer to cell.
 */
static inline struct cell *this_cell(void);
DEFINE_PER_CPU_ACCESSOR(cell)

/**
 * Retrieve the data structure of the specified CPU.
 * @param cpu           ID of the target CPU.
 *
 * @return Pointer to per-CPU data structure.
 */
static inline struct per_cpu *per_cpu(unsigned int cpu)
{
        struct per_cpu *cpu_data;

        asm volatile(
                "lea __page_pool(%%rip),%0\n\t"
                "add %1,%0\n\t"
                : "=&q" (cpu_data)
                : "qm" ((unsigned long)cpu << PERCPU_SIZE_SHIFT));
        return cpu_data;
}
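
/*
 * Usage sketch (illustrative): the per-CPU structures are laid out back to
 * back at the start of __page_pool, each in a slot of
 * (1 << PERCPU_SIZE_SHIFT) bytes, so per_cpu(cpu) simply resolves to
 * __page_pool + (cpu << PERCPU_SIZE_SHIFT).  With the hypothetical shift
 * of 15 from the example above, per_cpu(3) would point 3 * 32 KiB into
 * __page_pool.  Typical callers combine the accessors, e.g.:
 *
 *   struct per_cpu *target = per_cpu(target_cpu_id);
 *
 *   if (target->cell == this_cell())
 *           ... the target CPU belongs to our own cell ...
 */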

/** @} **/

#endif /* !__ASSEMBLY__ */

#endif /* !_JAILHOUSE_ASM_PERCPU_H */