/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * This file is based on linux/arch/x86/include/asm/special_insns.h and other
 * kernel headers:
 *
 * Copyright (c) Linux kernel developers, 2013
 */

#ifndef _JAILHOUSE_ASM_PROCESSOR_H
#define _JAILHOUSE_ASM_PROCESSOR_H

#include <jailhouse/types.h>

/* leaf 0x01, ECX */
#define X86_FEATURE_VMX                                 (1 << 5)
#define X86_FEATURE_XSAVE                               (1 << 26)
#define X86_FEATURE_HYPERVISOR                          (1 << 31)

/* leaf 0x07, subleaf 0, EBX */
#define X86_FEATURE_CAT                                 (1 << 15)

/* leaf 0x80000001, ECX */
#define X86_FEATURE_SVM                                 (1 << 2)

/* leaf 0x80000001, EDX */
#define X86_FEATURE_GBPAGES                             (1 << 26)
#define X86_FEATURE_RDTSCP                              (1 << 27)

/* leaf 0x8000000a, EDX */
#define X86_FEATURE_NP                                  (1 << 0)
#define X86_FEATURE_FLUSH_BY_ASID                       (1 << 6)
#define X86_FEATURE_DECODE_ASSISTS                      (1 << 7)
#define X86_FEATURE_AVIC                                (1 << 13)

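/*
 * Usage sketch (illustrative, not part of the original header): each feature
 * flag is tested against the register of the CPUID leaf named in the comment
 * above it, using the cpuid_*() helpers defined further below, e.g.:
 *
 *      if (cpuid_ecx(0x01, 0) & X86_FEATURE_VMX)
 *              ...VT-x is available...
 *      if (cpuid_ebx(0x07, 0) & X86_FEATURE_CAT)
 *              ...Cache Allocation Technology is available...
 */
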
#define X86_RFLAGS_VM                                   (1 << 17)

#define X86_CR0_PE                                      (1UL << 0)
#define X86_CR0_MP                                      (1UL << 1)
#define X86_CR0_TS                                      (1UL << 3)
#define X86_CR0_ET                                      (1UL << 4)
#define X86_CR0_NE                                      (1UL << 5)
#define X86_CR0_WP                                      (1UL << 16)
#define X86_CR0_NW                                      (1UL << 29)
#define X86_CR0_CD                                      (1UL << 30)
#define X86_CR0_PG                                      (1UL << 31)
#define X86_CR0_RESERVED                                \
        (BIT_MASK(28, 19) | (1UL << 17) | BIT_MASK(15, 6))

#define X86_CR4_PAE                                     (1UL << 5)
#define X86_CR4_VMXE                                    (1UL << 13)
#define X86_CR4_OSXSAVE                                 (1UL << 18)
#define X86_CR4_RESERVED                                \
        (BIT_MASK(31, 22) | (1UL << 19) | (1UL << 15) | BIT_MASK(12, 11))

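/*
 * Illustrative sketch (an assumption, not code from this header): the
 * *_RESERVED masks lend themselves to validating guest-initiated control
 * register writes before they are applied, e.g.:
 *
 *      if (new_cr4 & X86_CR4_RESERVED)
 *              ...inject #GP or refuse the write...
 */
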
#define X86_XCR0_FP                                     0x00000001

#define MSR_IA32_APICBASE                               0x0000001b
#define MSR_IA32_FEATURE_CONTROL                        0x0000003a
#define MSR_IA32_SYSENTER_CS                            0x00000174
#define MSR_IA32_SYSENTER_ESP                           0x00000175
#define MSR_IA32_SYSENTER_EIP                           0x00000176
#define MSR_IA32_PAT                                    0x00000277
#define MSR_IA32_MTRR_DEF_TYPE                          0x000002ff
#define MSR_IA32_PERF_GLOBAL_CTRL                       0x0000038f
#define MSR_IA32_VMX_BASIC                              0x00000480
#define MSR_IA32_VMX_PINBASED_CTLS                      0x00000481
#define MSR_IA32_VMX_PROCBASED_CTLS                     0x00000482
#define MSR_IA32_VMX_EXIT_CTLS                          0x00000483
#define MSR_IA32_VMX_ENTRY_CTLS                         0x00000484
#define MSR_IA32_VMX_MISC                               0x00000485
#define MSR_IA32_VMX_CR0_FIXED0                         0x00000486
#define MSR_IA32_VMX_CR0_FIXED1                         0x00000487
#define MSR_IA32_VMX_CR4_FIXED0                         0x00000488
#define MSR_IA32_VMX_CR4_FIXED1                         0x00000489
#define MSR_IA32_VMX_PROCBASED_CTLS2                    0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP                       0x0000048c
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS                0x0000048e
#define MSR_X2APIC_BASE                                 0x00000800
#define MSR_X2APIC_ICR                                  0x00000830
#define MSR_X2APIC_END                                  0x0000083f
#define MSR_IA32_PQR_ASSOC                              0x00000c8f
#define MSR_IA32_L3_MASK_0                              0x00000c90
#define MSR_EFER                                        0xc0000080
#define MSR_STAR                                        0xc0000081
#define MSR_LSTAR                                       0xc0000082
#define MSR_CSTAR                                       0xc0000083
#define MSR_SFMASK                                      0xc0000084
#define MSR_FS_BASE                                     0xc0000100
#define MSR_GS_BASE                                     0xc0000101
#define MSR_KERNGS_BASE                                 0xc0000102

#define FEATURE_CONTROL_LOCKED                          (1 << 0)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX       (1 << 2)

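/*
 * Power-on default PAT: per byte, low entry to high, the memory types are
 * WB (0x06), WT (0x04), UC- (0x07), UC (0x00), repeated for entries 4-7.
 */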
#define PAT_RESET_VALUE                                 0x0007040600070406UL

#define MTRR_ENABLE                                     (1UL << 11)

#define EFER_LME                                        0x00000100
#define EFER_LMA                                        0x00000400
#define EFER_NXE                                        0x00000800

#define PQR_ASSOC_COS_SHIFT                             32

#define CAT_RESID_L3                                    1

#define CAT_CBM_LEN_MASK                                BIT_MASK(4, 0)
#define CAT_COS_MAX_MASK                                BIT_MASK(15, 0)

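/*
 * Illustrative sketch of how the CAT definitions fit together (an
 * assumption, not code from this header): CPUID leaf 0x10, subleaf
 * CAT_RESID_L3 enumerates the L3 capacity bitmask length (EAX) and the
 * highest class of service (EDX); a class is then programmed and selected
 * via the MSRs above, e.g.:
 *
 *      cbm_len = (cpuid_eax(0x10, CAT_RESID_L3) & CAT_CBM_LEN_MASK) + 1;
 *      cos_max = cpuid_edx(0x10, CAT_RESID_L3) & CAT_COS_MAX_MASK;
 *      write_msr(MSR_IA32_L3_MASK_0 + cos, cache_ways_bitmask);
 *      write_msr(MSR_IA32_PQR_ASSOC, (u64)cos << PQR_ASSOC_COS_SHIFT);
 */
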
#define GDT_DESC_NULL                                   0
#define GDT_DESC_CODE                                   1
#define GDT_DESC_TSS                                    2
#define GDT_DESC_TSS_HI                                 3
#define NUM_GDT_DESC                                    4

#define X86_INST_LEN_CPUID                              2
#define X86_INST_LEN_RDMSR                              2
#define X86_INST_LEN_WRMSR                              2
/* This covers both VMCALL and VMMCALL */
#define X86_INST_LEN_HYPERCALL                          3
#define X86_INST_LEN_MOV_TO_CR                          3
#define X86_INST_LEN_XSETBV                             3

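/*
 * Usage note (an assumption, not stated in this header): after emulating an
 * intercepted instruction, the guest's instruction pointer is advanced by
 * the matching length, e.g. rip += X86_INST_LEN_CPUID for a CPUID exit.
 */
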
#define X86_REX_CODE                                    4

#define X86_OP_MOV_TO_MEM                               0x89
#define X86_OP_MOV_FROM_MEM                             0x8b

#define DB_VECTOR                                       1
#define NMI_VECTOR                                      2
#define PF_VECTOR                                       14
#define AC_VECTOR                                       17

#define DESC_TSS_BUSY                                   (1UL << (9 + 32))
#define DESC_PRESENT                                    (1UL << (15 + 32))
#define DESC_CODE_DATA                                  (1UL << (12 + 32))
#define DESC_PAGE_GRAN                                  (1UL << (23 + 32))

#ifndef __ASSEMBLY__

/**
 * @ingroup X86
 * @defgroup Processor Processor
 *
 * Low-level support for x86 processor configuration and status retrieval.
 *
 * @{
 */

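/*
 * Added note (the entry-code claim is an assumption; the index math follows
 * from the layout): the fields are ordered so that by_index[15 - r]
 * addresses the register with hardware encoding r (rax = 0 ... r15 = 15);
 * "unused" occupies the rsp slot. This lets the structure map directly onto
 * the register save area pushed on exception or VM-exit entry.
 */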
union registers {
        struct {
                unsigned long r15;
                unsigned long r14;
                unsigned long r13;
                unsigned long r12;
                unsigned long r11;
                unsigned long r10;
                unsigned long r9;
                unsigned long r8;
                unsigned long rdi;
                unsigned long rsi;
                unsigned long rbp;
                unsigned long unused;
                unsigned long rbx;
                unsigned long rdx;
                unsigned long rcx;
                unsigned long rax;
        };
        unsigned long by_index[16];
};

struct desc_table_reg {
        u16 limit;
        u64 base;
} __attribute__((packed));

struct segment {
        u64 base;
        u32 limit;
        u32 access_rights;
        u16 selector;
};

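/*
 * Dummy variable used as an artificial dependency in the control register
 * accessors below, ordering them against each other (a technique inherited
 * from the Linux kernel's special_insns.h).
 */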
static unsigned long __force_order;

static inline void cpu_relax(void)
{
        asm volatile("rep; nop" : : : "memory");
}

static inline void memory_barrier(void)
{
        asm volatile("mfence" : : : "memory");
}

static inline void memory_load_barrier(void)
{
        asm volatile("lfence" : : : "memory");
}

static inline void cpuid(unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
        /* ecx is often an input as well as an output. */
        asm volatile("cpuid"
            : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
            : "0" (*eax), "2" (*ecx)
            : "memory");
}

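/*
 * Usage sketch (illustrative, not part of the original header):
 *
 *      unsigned int eax = 0, ebx, ecx, edx;
 *
 *      cpuid(&eax, &ebx, &ecx, &edx);
 *      ...ebx/edx/ecx now hold the vendor string, e.g. "GenuineIntel"...
 */
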
#define CPUID_REG(reg)                                                  \
static inline unsigned int cpuid_##reg(unsigned int op, unsigned int sub) \
{                                                                       \
        unsigned int eax, ebx, ecx, edx;                                \
                                                                        \
        eax = op;                                                       \
        ecx = sub;                                                      \
        cpuid(&eax, &ebx, &ecx, &edx);                                  \
        return reg;                                                     \
}

CPUID_REG(eax)
CPUID_REG(ebx)
CPUID_REG(ecx)
CPUID_REG(edx)

static inline unsigned long read_cr0(void)
{
        unsigned long cr0;

        asm volatile("mov %%cr0,%0" : "=r" (cr0), "=m" (__force_order));
        return cr0;
}

static inline void write_cr0(unsigned long val)
{
        asm volatile("mov %0,%%cr0" : : "r" (val), "m" (__force_order));
}

static inline unsigned long read_cr2(void)
{
        unsigned long cr2;

        asm volatile("mov %%cr2,%0" : "=r" (cr2), "=m" (__force_order));
        return cr2;
}

static inline unsigned long read_cr3(void)
{
        unsigned long cr3;

        asm volatile("mov %%cr3,%0" : "=r" (cr3), "=m" (__force_order));
        return cr3;
}

static inline void write_cr3(unsigned long val)
{
        asm volatile("mov %0,%%cr3" : : "r" (val), "m" (__force_order));
}

static inline unsigned long read_cr4(void)
{
        unsigned long cr4;

        asm volatile("mov %%cr4,%0" : "=r" (cr4), "=m" (__force_order));
        return cr4;
}

static inline void write_cr4(unsigned long val)
{
        asm volatile("mov %0,%%cr4" : : "r" (val), "m" (__force_order));
}

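/*
 * Usage sketch (illustrative, not from this header): read-modify-write of a
 * control register, e.g. setting CR4.VMXE before executing VMXON:
 *
 *      write_cr4(read_cr4() | X86_CR4_VMXE);
 */
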
static inline unsigned long read_msr(unsigned int msr)
{
        u32 low, high;

        asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
        return low | ((unsigned long)high << 32);
}

static inline void write_msr(unsigned int msr, unsigned long val)
{
        asm volatile("wrmsr"
                : /* no output */
                : "c" (msr), "a" (val), "d" (val >> 32)
                : "memory");
}

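/*
 * Usage sketch (illustrative, not from this header): enabling a feature bit
 * in an MSR, e.g. EFER.NXE:
 *
 *      write_msr(MSR_EFER, read_msr(MSR_EFER) | EFER_NXE);
 */
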
static inline void set_rdmsr_value(union registers *regs, unsigned long val)
{
        regs->rax = (u32)val;
        regs->rdx = val >> 32;
}

static inline unsigned long get_wrmsr_value(union registers *regs)
{
        return (u32)regs->rax | (regs->rdx << 32);
}

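/*
 * Added note (describing the helpers above): RDMSR returns and WRMSR takes
 * its 64-bit value split across EDX:EAX, so these helpers marshal guest
 * register state when an intercepted MSR access is emulated.
 */
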
static inline void read_gdtr(struct desc_table_reg *val)
{
        asm volatile("sgdtq %0" : "=m" (*val));
}

static inline void write_gdtr(struct desc_table_reg *val)
{
        asm volatile("lgdtq %0" : : "m" (*val));
}

static inline void read_idtr(struct desc_table_reg *val)
{
        asm volatile("sidtq %0" : "=m" (*val));
}

static inline void write_idtr(struct desc_table_reg *val)
{
        asm volatile("lidtq %0" : : "m" (*val));
}

/**
 * Enable or disable interrupt delivery to the local CPU when in host mode.
 *
 * In some cases (AMD), changing IF alone is not sufficient, so these are
 * implemented on a per-vendor basis.
 * @{
 */
void enable_irq(void);

void disable_irq(void);
/** @} */

/** @} */
#endif /* !__ASSEMBLY__ */

#endif /* !_JAILHOUSE_ASM_PROCESSOR_H */