/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#ifndef _JAILHOUSE_ASM_PROCESSOR_H
#define _JAILHOUSE_ASM_PROCESSOR_H

#include <jailhouse/types.h>

/* CPUID leaf 0x01, ECX (Intel) */
#define X86_FEATURE_VMX                                 (1 << 5)

/* CPUID leaf 0x80000001, EDX */
#define X86_FEATURE_GBPAGES                             (1 << 26)
#define X86_FEATURE_RDTSCP                              (1 << 27)

/* CPUID leaf 0x80000001, ECX (AMD) */
#define X86_FEATURE_SVM                                 (1 << 2)

/* CPUID leaf 0x8000000a, EDX (AMD SVM features) */
#define X86_FEATURE_NP                                  (1 << 0)
#define X86_FEATURE_FLUSH_BY_ASID                       (1 << 6)
#define X86_FEATURE_DECODE_ASSISTS                      (1 << 7)
#define X86_FEATURE_AVIC                                (1 << 13)

#define X86_RFLAGS_VM                                   (1 << 17)

#define X86_CR0_PE                                      0x00000001
#define X86_CR0_ET                                      0x00000010
#define X86_CR0_WP                                      0x00010000
#define X86_CR0_NW                                      0x20000000
#define X86_CR0_CD                                      0x40000000
#define X86_CR0_PG                                      0x80000000

#define X86_CR4_PAE                                     0x00000020
#define X86_CR4_PGE                                     0x00000080
#define X86_CR4_VMXE                                    0x00002000

#define X86_XCR0_FP                                     0x00000001

#define MSR_IA32_APICBASE                               0x0000001b
#define MSR_IA32_FEATURE_CONTROL                        0x0000003a
#define MSR_IA32_SYSENTER_CS                            0x00000174
#define MSR_IA32_SYSENTER_ESP                           0x00000175
#define MSR_IA32_SYSENTER_EIP                           0x00000176
#define MSR_IA32_PAT                                    0x00000277
#define MSR_IA32_VMX_BASIC                              0x00000480
#define MSR_IA32_VMX_PINBASED_CTLS                      0x00000481
#define MSR_IA32_VMX_PROCBASED_CTLS                     0x00000482
#define MSR_IA32_VMX_EXIT_CTLS                          0x00000483
#define MSR_IA32_VMX_ENTRY_CTLS                         0x00000484
#define MSR_IA32_VMX_MISC                               0x00000485
#define MSR_IA32_VMX_CR0_FIXED0                         0x00000486
#define MSR_IA32_VMX_CR0_FIXED1                         0x00000487
#define MSR_IA32_VMX_CR4_FIXED0                         0x00000488
#define MSR_IA32_VMX_CR4_FIXED1                         0x00000489
#define MSR_IA32_VMX_PROCBASED_CTLS2                    0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP                       0x0000048c
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS                0x0000048e
#define MSR_X2APIC_BASE                                 0x00000800
#define MSR_X2APIC_ICR                                  0x00000830
#define MSR_X2APIC_END                                  0x0000083f
#define MSR_EFER                                        0xc0000080
#define MSR_STAR                                        0xc0000081
#define MSR_LSTAR                                       0xc0000082
#define MSR_CSTAR                                       0xc0000083
#define MSR_SFMASK                                      0xc0000084
#define MSR_FS_BASE                                     0xc0000100
#define MSR_GS_BASE                                     0xc0000101
#define MSR_KERNGS_BASE                                 0xc0000102

#define FEATURE_CONTROL_LOCKED                          (1 << 0)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX       (1 << 2)

#define EFER_LME                                        0x00000100
#define EFER_LMA                                        0x00000400
#define EFER_NXE                                        0x00000800

#define GDT_DESC_NULL                                   0
#define GDT_DESC_CODE                                   1
#define GDT_DESC_TSS                                    2
#define GDT_DESC_TSS_HI                                 3
#define NUM_GDT_DESC                                    4

/* Lengths of emulated instructions, used to advance the guest's instruction
 * pointer past them after a VM exit */
#define X86_INST_LEN_CPUID                              2
#define X86_INST_LEN_RDMSR                              2
#define X86_INST_LEN_WRMSR                              2
/* This covers both VMCALL and VMMCALL */
#define X86_INST_LEN_HYPERCALL                          3
#define X86_INST_LEN_MOV_TO_CR                          3
#define X86_INST_LEN_XSETBV                             3

/* Upper nibble of the REX prefix bytes (0x40-0x4f) */
#define X86_REX_CODE                                    4

#define X86_OP_MOV_TO_MEM                               0x89
#define X86_OP_MOV_FROM_MEM                             0x8b

#define NMI_VECTOR                                      2
#define PF_VECTOR                                       14

/* Bits in the upper 32 bits of a 64-bit segment descriptor */
#define DESC_TSS_BUSY                                   (1UL << (9 + 32))
#define DESC_PRESENT                                    (1UL << (15 + 32))
#define DESC_CODE_DATA                                  (1UL << (12 + 32))
#define DESC_PAGE_GRAN                                  (1UL << (23 + 32))

#ifndef __ASSEMBLY__

/**
 * @ingroup X86
 * @defgroup Processor Processor
 *
 * Low-level support for x86 processor configuration and status retrieval.
 *
 * @{
 */

/*
 * General-purpose register set, in the order produced by the assembly entry
 * code's push sequence; "unused" stands in for the skipped rsp slot.
 */
struct registers {
        unsigned long r15;
        unsigned long r14;
        unsigned long r13;
        unsigned long r12;
        unsigned long r11;
        unsigned long r10;
        unsigned long r9;
        unsigned long r8;
        unsigned long rdi;
        unsigned long rsi;
        unsigned long rbp;
        unsigned long unused;
        unsigned long rbx;
        unsigned long rdx;
        unsigned long rcx;
        unsigned long rax;
};

/* Register format expected by lgdt/lidt and stored by sgdt/sidt: a 16-bit
 * limit immediately followed by the base address, hence the packing */
struct desc_table_reg {
        u16 limit;
        u64 base;
} __attribute__((packed));

struct segment {
        u64 base;
        u32 limit;
        u32 access_rights;
        u16 selector;
};

/* Dummy variable used to order control register accesses against each other
 * without a full memory clobber (same technique as in the Linux kernel) */
static unsigned long __force_order;

static inline void cpu_relax(void)
{
        /* "rep; nop" encodes the "pause" spin-loop hint */
        asm volatile("rep; nop" : : : "memory");
}

static inline void memory_barrier(void)
{
        asm volatile("mfence" : : : "memory");
}

static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        /* ecx is often an input as well as an output. */
        asm volatile("cpuid"
            : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
            : "0" (*eax), "2" (*ecx)
            : "memory");
}

static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = 0;
        __cpuid(eax, ebx, ecx, edx);
}

#define CPUID_REG(reg)                                          \
static inline unsigned int cpuid_##reg(unsigned int op)         \
{                                                               \
        unsigned int eax, ebx, ecx, edx;                        \
                                                                \
        cpuid(op, &eax, &ebx, &ecx, &edx);                      \
        return reg;                                             \
}

CPUID_REG(eax)
CPUID_REG(ebx)
CPUID_REG(ecx)
CPUID_REG(edx)

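/*
 * Usage sketch (hypothetical caller, not part of this header): probing
 * hardware virtualization support with the helpers above. X86_FEATURE_VMX
 * lives in CPUID leaf 1, ECX; X86_FEATURE_SVM in leaf 0x80000001, ECX.
 *
 *        bool has_vmx = !!(cpuid_ecx(1) & X86_FEATURE_VMX);
 *        bool has_svm = !!(cpuid_ecx(0x80000001) & X86_FEATURE_SVM);
 */
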
static inline unsigned long read_cr0(void)
{
        unsigned long cr0;

        asm volatile("mov %%cr0,%0" : "=r" (cr0), "=m" (__force_order));
        return cr0;
}

static inline void write_cr0(unsigned long val)
{
        asm volatile("mov %0,%%cr0" : : "r" (val), "m" (__force_order));
}

static inline unsigned long read_cr2(void)
{
        unsigned long cr2;

        asm volatile("mov %%cr2,%0" : "=r" (cr2), "=m" (__force_order));
        return cr2;
}

static inline unsigned long read_cr3(void)
{
        unsigned long cr3;

        asm volatile("mov %%cr3,%0" : "=r" (cr3), "=m" (__force_order));
        return cr3;
}

static inline void write_cr3(unsigned long val)
{
        asm volatile("mov %0,%%cr3" : : "r" (val), "m" (__force_order));
}

static inline unsigned long read_cr4(void)
{
        unsigned long cr4;

        asm volatile("mov %%cr4,%0" : "=r" (cr4), "=m" (__force_order));
        return cr4;
}

static inline void write_cr4(unsigned long val)
{
        asm volatile("mov %0,%%cr4" : : "r" (val), "m" (__force_order));
}

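/*
 * Usage sketch (hypothetical caller): setting CR4.VMXE, which is required
 * before executing VMXON on Intel CPUs.
 *
 *        write_cr4(read_cr4() | X86_CR4_VMXE);
 */
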
static inline unsigned long read_msr(unsigned int msr)
{
        u32 low, high;

        asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
        return low | ((unsigned long)high << 32);
}

static inline void write_msr(unsigned int msr, unsigned long val)
{
        asm volatile("wrmsr"
                : /* no output */
                : "c" (msr), "a" (val), "d" (val >> 32)
                : "memory");
}

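/*
 * Usage sketch (hypothetical caller): enabling VMXON via
 * IA32_FEATURE_CONTROL in case the firmware left the MSR unlocked.
 *
 *        unsigned long ctl = read_msr(MSR_IA32_FEATURE_CONTROL);
 *
 *        if (!(ctl & FEATURE_CONTROL_LOCKED))
 *                write_msr(MSR_IA32_FEATURE_CONTROL,
 *                          ctl | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX |
 *                          FEATURE_CONTROL_LOCKED);
 */
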
static inline void read_gdtr(struct desc_table_reg *val)
{
        asm volatile("sgdtq %0" : "=m" (*val));
}

static inline void write_gdtr(struct desc_table_reg *val)
{
        asm volatile("lgdtq %0" : : "m" (*val));
}

static inline void read_idtr(struct desc_table_reg *val)
{
        asm volatile("sidtq %0" : "=m" (*val));
}

static inline void write_idtr(struct desc_table_reg *val)
{
        asm volatile("lidtq %0" : : "m" (*val));
}

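/*
 * Usage sketch (hypothetical caller): saving the current GDT location, e.g.
 * into per-CPU state, before loading the hypervisor's own table.
 *
 *        struct desc_table_reg gdtr;
 *
 *        read_gdtr(&gdtr);
 */
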
/**
 * Enable or disable interrupt delivery to the local CPU when in host mode.
 *
 * In some cases (AMD), toggling RFLAGS.IF is not enough, so these are
 * implemented on a per-vendor basis.
 * @{
 */
void enable_irq(void);

void disable_irq(void);
/** @} */
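
/*
 * A sketch of what the AMD variants might look like (an assumption, not
 * necessarily the actual implementation): on SVM, the global interrupt flag
 * (GIF) also gates interrupt delivery to the host, so RFLAGS.IF has to be
 * combined with stgi/clgi.
 *
 *        void enable_irq(void)
 *        {
 *                asm volatile("stgi; sti" : : : "memory");
 *        }
 *
 *        void disable_irq(void)
 *        {
 *                asm volatile("cli; clgi" : : : "memory");
 *        }
 */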

/** @} */
#endif /* !__ASSEMBLY__ */

#endif /* !_JAILHOUSE_ASM_PROCESSOR_H */