/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * This file is based on linux/arch/x86/include/asm/special_insns.h and other
 * kernel headers:
 *
 * Copyright (c) Linux kernel developers, 2013
 */

#ifndef _JAILHOUSE_ASM_PROCESSOR_H
#define _JAILHOUSE_ASM_PROCESSOR_H

#include <jailhouse/types.h>

/* leaf 0x01, ECX */
#define X86_FEATURE_VMX                                 (1 << 5)
#define X86_FEATURE_XSAVE                               (1 << 26)

/* leaf 0x80000001, ECX */
#define X86_FEATURE_SVM                                 (1 << 2)

/* leaf 0x80000001, EDX */
#define X86_FEATURE_GBPAGES                             (1 << 26)
#define X86_FEATURE_RDTSCP                              (1 << 27)

/* leaf 0x8000000a, EDX */
#define X86_FEATURE_NP                                  (1 << 0)
#define X86_FEATURE_FLUSH_BY_ASID                       (1 << 6)
#define X86_FEATURE_DECODE_ASSISTS                      (1 << 7)
#define X86_FEATURE_AVIC                                (1 << 13)

#define X86_RFLAGS_VM                                   (1 << 17)

#define X86_CR0_PE                                      (1UL << 0)
#define X86_CR0_MP                                      (1UL << 1)
#define X86_CR0_TS                                      (1UL << 3)
#define X86_CR0_ET                                      (1UL << 4)
#define X86_CR0_NE                                      (1UL << 5)
#define X86_CR0_WP                                      (1UL << 16)
#define X86_CR0_NW                                      (1UL << 29)
#define X86_CR0_CD                                      (1UL << 30)
#define X86_CR0_PG                                      (1UL << 31)
#define X86_CR0_RESERVED                                \
        (BIT_MASK(28, 19) | (1UL << 17) | BIT_MASK(15, 6))

#define X86_CR4_PAE                                     (1UL << 5)
#define X86_CR4_VMXE                                    (1UL << 13)
#define X86_CR4_OSXSAVE                                 (1UL << 18)
#define X86_CR4_RESERVED                                \
        (BIT_MASK(31, 22) | (1UL << 19) | (1UL << 15) | BIT_MASK(12, 11))

#define X86_XCR0_FP                                     0x00000001

#define MSR_IA32_APICBASE                               0x0000001b
#define MSR_IA32_FEATURE_CONTROL                        0x0000003a
#define MSR_IA32_PAT                                    0x00000277
#define MSR_IA32_MTRR_DEF_TYPE                          0x000002ff
#define MSR_IA32_SYSENTER_CS                            0x00000174
#define MSR_IA32_SYSENTER_ESP                           0x00000175
#define MSR_IA32_SYSENTER_EIP                           0x00000176
#define MSR_IA32_PERF_GLOBAL_CTRL                       0x0000038f
#define MSR_IA32_VMX_BASIC                              0x00000480
#define MSR_IA32_VMX_PINBASED_CTLS                      0x00000481
#define MSR_IA32_VMX_PROCBASED_CTLS                     0x00000482
#define MSR_IA32_VMX_EXIT_CTLS                          0x00000483
#define MSR_IA32_VMX_ENTRY_CTLS                         0x00000484
#define MSR_IA32_VMX_MISC                               0x00000485
#define MSR_IA32_VMX_CR0_FIXED0                         0x00000486
#define MSR_IA32_VMX_CR0_FIXED1                         0x00000487
#define MSR_IA32_VMX_CR4_FIXED0                         0x00000488
#define MSR_IA32_VMX_CR4_FIXED1                         0x00000489
#define MSR_IA32_VMX_PROCBASED_CTLS2                    0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP                       0x0000048c
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS                0x0000048e
#define MSR_X2APIC_BASE                                 0x00000800
#define MSR_X2APIC_ICR                                  0x00000830
#define MSR_X2APIC_END                                  0x0000083f
#define MSR_EFER                                        0xc0000080
#define MSR_STAR                                        0xc0000081
#define MSR_LSTAR                                       0xc0000082
#define MSR_CSTAR                                       0xc0000083
#define MSR_SFMASK                                      0xc0000084
#define MSR_FS_BASE                                     0xc0000100
#define MSR_GS_BASE                                     0xc0000101
#define MSR_KERNGS_BASE                                 0xc0000102

#define FEATURE_CONTROL_LOCKED                          (1 << 0)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX       (1 << 2)

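/*
 * Usage sketch: VMXON is only permitted once MSR_IA32_FEATURE_CONTROL is
 * locked with the outside-SMX enable bit set; if firmware left it unlocked,
 * software may set and lock it (uses read_msr()/write_msr() defined below):
 *
 * @code
 * unsigned long ctl = read_msr(MSR_IA32_FEATURE_CONTROL);
 *
 * if (!(ctl & FEATURE_CONTROL_LOCKED))
 *         write_msr(MSR_IA32_FEATURE_CONTROL,
 *                   ctl | FEATURE_CONTROL_LOCKED |
 *                   FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX);
 * @endcode
 */
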
#define PAT_RESET_VALUE                                 0x0007040600070406UL

#define MTRR_ENABLE                                     (1UL << 11)

#define EFER_LME                                        0x00000100
#define EFER_LMA                                        0x00000400
#define EFER_NXE                                        0x00000800

#define GDT_DESC_NULL                                   0
#define GDT_DESC_CODE                                   1
#define GDT_DESC_TSS                                    2
#define GDT_DESC_TSS_HI                                 3
#define NUM_GDT_DESC                                    4

#define X86_INST_LEN_CPUID                              2
#define X86_INST_LEN_RDMSR                              2
#define X86_INST_LEN_WRMSR                              2
/* This covers both VMCALL and VMMCALL */
#define X86_INST_LEN_HYPERCALL                          3
#define X86_INST_LEN_MOV_TO_CR                          3
#define X86_INST_LEN_XSETBV                             3

#define X86_REX_CODE                                    4

#define X86_OP_MOV_TO_MEM                               0x89
#define X86_OP_MOV_FROM_MEM                             0x8b

#define NMI_VECTOR                                      2
#define PF_VECTOR                                       14

#define DESC_TSS_BUSY                                   (1UL << (9 + 32))
#define DESC_PRESENT                                    (1UL << (15 + 32))
#define DESC_CODE_DATA                                  (1UL << (12 + 32))
#define DESC_PAGE_GRAN                                  (1UL << (23 + 32))

#ifndef __ASSEMBLY__

/**
 * @ingroup X86
 * @defgroup Processor Processor
 *
 * Low-level support for x86 processor configuration and status retrieval.
 *
 * @{
 */

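/*
 * Note: by_index[15 - n] addresses the register with hardware encoding n
 * (rax = 0 ... r15 = 15); "unused" occupies the slot of rsp, which is not
 * saved here.
 */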
union registers {
        struct {
                unsigned long r15;
                unsigned long r14;
                unsigned long r13;
                unsigned long r12;
                unsigned long r11;
                unsigned long r10;
                unsigned long r9;
                unsigned long r8;
                unsigned long rdi;
                unsigned long rsi;
                unsigned long rbp;
                unsigned long unused;
                unsigned long rbx;
                unsigned long rdx;
                unsigned long rcx;
                unsigned long rax;
        };
        unsigned long by_index[16];
};

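/*
 * Matches the in-memory operand of SGDT/SIDT and LGDT/LIDT in 64-bit mode:
 * a 16-bit limit followed by a 64-bit base. "packed" suppresses the padding
 * that would otherwise be inserted between the two fields.
 */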
struct desc_table_reg {
        u16 limit;
        u64 base;
} __attribute__((packed));

struct segment {
        u64 base;
        u32 limit;
        u32 access_rights;
        u16 selector;
};

/* dummy variable, used only to enforce ordering of the CR accessors below */
static unsigned long __force_order;

static inline void cpu_relax(void)
{
        asm volatile("rep; nop" : : : "memory");
}
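
/*
 * Usage sketch: cpu_relax() marks busy-wait loops, e.g.
 *
 * @code
 * while (!shared_flag)      // shared_flag: hypothetical volatile flag
 *         cpu_relax();
 * @endcode
 */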

static inline void memory_barrier(void)
{
        asm volatile("mfence" : : : "memory");
}

static inline void memory_load_barrier(void)
{
        asm volatile("lfence" : : : "memory");
}

static inline void cpuid(unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
        /* ecx is often an input as well as an output. */
        asm volatile("cpuid"
            : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
            : "0" (*eax), "2" (*ecx)
            : "memory");
}

#define CPUID_REG(reg)                                          \
static inline unsigned int cpuid_##reg(unsigned int op)         \
{                                                               \
        unsigned int eax, ebx, ecx, edx;                        \
                                                                \
        eax = op;                                               \
        ecx = 0;                                                \
        cpuid(&eax, &ebx, &ecx, &edx);                          \
        return reg;                                             \
}

CPUID_REG(eax)
CPUID_REG(ebx)
CPUID_REG(ecx)
CPUID_REG(edx)
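
/*
 * Usage sketch (hypothetical caller): the cpuid_*() helpers combine with
 * the feature bits defined above, e.g. to select the virtualization
 * back-end:
 *
 * @code
 * if (cpuid_ecx(0x01) & X86_FEATURE_VMX)
 *         setup_vmx();                    // Intel path (name assumed)
 * else if (cpuid_ecx(0x80000001) & X86_FEATURE_SVM)
 *         setup_svm();                    // AMD path (name assumed)
 * @endcode
 */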

static inline unsigned long read_cr0(void)
{
        unsigned long cr0;

        asm volatile("mov %%cr0,%0" : "=r" (cr0), "=m" (__force_order));
        return cr0;
}

static inline void write_cr0(unsigned long val)
{
        asm volatile("mov %0,%%cr0" : : "r" (val), "m" (__force_order));
}

static inline unsigned long read_cr2(void)
{
        unsigned long cr2;

        asm volatile("mov %%cr2,%0" : "=r" (cr2), "=m" (__force_order));
        return cr2;
}

static inline unsigned long read_cr3(void)
{
        unsigned long cr3;

        asm volatile("mov %%cr3,%0" : "=r" (cr3), "=m" (__force_order));
        return cr3;
}

static inline void write_cr3(unsigned long val)
{
        asm volatile("mov %0,%%cr3" : : "r" (val), "m" (__force_order));
}

static inline unsigned long read_cr4(void)
{
        unsigned long cr4;

        asm volatile("mov %%cr4,%0" : "=r" (cr4), "=m" (__force_order));
        return cr4;
}

static inline void write_cr4(unsigned long val)
{
        asm volatile("mov %0,%%cr4" : : "r" (val), "m" (__force_order));
}
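
/*
 * Usage sketch: control register updates are read-modify-write, e.g.
 * setting CR4.VMXE, which must be enabled before VMXON:
 *
 * @code
 * write_cr4(read_cr4() | X86_CR4_VMXE);
 * @endcode
 */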

static inline unsigned long read_msr(unsigned int msr)
{
        u32 low, high;

        asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
        return low | ((unsigned long)high << 32);
}

static inline void write_msr(unsigned int msr, unsigned long val)
{
        asm volatile("wrmsr"
                : /* no output */
                : "c" (msr), "a" (val), "d" (val >> 32)
                : "memory");
}
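
/*
 * Usage sketch: the MSR accessors pair with the MSR_* and EFER_* constants
 * above, e.g. to test whether long mode is active:
 *
 * @code
 * if (read_msr(MSR_EFER) & EFER_LMA)
 *         ...                             // running in 64-bit mode
 * @endcode
 */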

static inline void set_rdmsr_value(union registers *regs, unsigned long val)
{
        regs->rax = (u32)val;
        regs->rdx = val >> 32;
}

static inline unsigned long get_wrmsr_value(union registers *regs)
{
        return (u32)regs->rax | (regs->rdx << 32);
}
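
/*
 * Usage sketch (hypothetical exit handler; guest_regs and msr are assumed
 * names): an emulated RDMSR returns its result to the guest via edx:eax,
 * and an emulated WRMSR assembles the value from the same register pair:
 *
 * @code
 * set_rdmsr_value(guest_regs, read_msr(msr));   // on a RDMSR exit
 * val = get_wrmsr_value(guest_regs);            // on a WRMSR exit
 * @endcode
 */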

static inline void read_gdtr(struct desc_table_reg *val)
{
        asm volatile("sgdtq %0" : "=m" (*val));
}

static inline void write_gdtr(struct desc_table_reg *val)
{
        asm volatile("lgdtq %0" : : "m" (*val));
}

static inline void read_idtr(struct desc_table_reg *val)
{
        asm volatile("sidtq %0" : "=m" (*val));
}

static inline void write_idtr(struct desc_table_reg *val)
{
        asm volatile("lidtq %0" : : "m" (*val));
}
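
/*
 * Usage sketch: read_gdtr()/write_gdtr() allow saving and restoring the
 * descriptor table registers around a table switch:
 *
 * @code
 * struct desc_table_reg saved;
 *
 * read_gdtr(&saved);
 * ...                                     // run with a private GDT
 * write_gdtr(&saved);
 * @endcode
 */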

/**
 * Enable or disable interrupt delivery to the local CPU when in host mode.
 *
 * On some CPUs (AMD), changing RFLAGS.IF alone is not sufficient, so these
 * functions are implemented on a per-vendor basis.
 * @{
 */
void enable_irq(void);

void disable_irq(void);
/** @} */

/** @} */
#endif /* !__ASSEMBLY__ */

#endif /* !_JAILHOUSE_ASM_PROCESSOR_H */