/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#ifndef _JAILHOUSE_ASM_PROCESSOR_H
#define _JAILHOUSE_ASM_PROCESSOR_H

#include <jailhouse/types.h>

/* CPUID leaf 0x01, ECX */
#define X86_FEATURE_VMX                                 (1 << 5)
/* CPUID leaf 0x80000001, EDX */
#define X86_FEATURE_GBPAGES                             (1 << 26)
#define X86_FEATURE_RDTSCP                              (1 << 27)

/* CPUID leaf 0x80000001, ECX */
#define X86_FEATURE_SVM                                 (1 << 2)
/* CPUID leaf 0x8000000a, EDX */
#define X86_FEATURE_NP                                  (1 << 0)
#define X86_FEATURE_FLUSH_BY_ASID                       (1 << 6)
#define X86_FEATURE_DECODE_ASSISTS                      (1 << 7)
#define X86_FEATURE_AVIC                                (1 << 13)

#define X86_RFLAGS_VM                                   (1 << 17)

#define X86_CR0_PE                                      0x00000001
#define X86_CR0_ET                                      0x00000010
#define X86_CR0_NW                                      0x20000000
#define X86_CR0_CD                                      0x40000000
#define X86_CR0_PG                                      0x80000000

#define X86_CR4_PAE                                     0x00000020
#define X86_CR4_PGE                                     0x00000080
#define X86_CR4_VMXE                                    0x00002000

#define X86_XCR0_FP                                     0x00000001

#define MSR_IA32_APICBASE                               0x0000001b
#define MSR_IA32_FEATURE_CONTROL                        0x0000003a
#define MSR_IA32_PAT                                    0x00000277
#define MSR_IA32_SYSENTER_CS                            0x00000174
#define MSR_IA32_SYSENTER_ESP                           0x00000175
#define MSR_IA32_SYSENTER_EIP                           0x00000176
#define MSR_IA32_VMX_BASIC                              0x00000480
#define MSR_IA32_VMX_PINBASED_CTLS                      0x00000481
#define MSR_IA32_VMX_PROCBASED_CTLS                     0x00000482
#define MSR_IA32_VMX_EXIT_CTLS                          0x00000483
#define MSR_IA32_VMX_ENTRY_CTLS                         0x00000484
#define MSR_IA32_VMX_MISC                               0x00000485
#define MSR_IA32_VMX_CR0_FIXED0                         0x00000486
#define MSR_IA32_VMX_CR0_FIXED1                         0x00000487
#define MSR_IA32_VMX_CR4_FIXED0                         0x00000488
#define MSR_IA32_VMX_CR4_FIXED1                         0x00000489
#define MSR_IA32_VMX_PROCBASED_CTLS2                    0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP                       0x0000048c
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS                0x0000048e
#define MSR_X2APIC_BASE                                 0x00000800
#define MSR_X2APIC_ICR                                  0x00000830
#define MSR_X2APIC_END                                  0x0000083f
#define MSR_EFER                                        0xc0000080
#define MSR_STAR                                        0xc0000081
#define MSR_LSTAR                                       0xc0000082
#define MSR_CSTAR                                       0xc0000083
#define MSR_SFMASK                                      0xc0000084
#define MSR_FS_BASE                                     0xc0000100
#define MSR_GS_BASE                                     0xc0000101
#define MSR_KERNGS_BASE                                 0xc0000102

#define FEATURE_CONTROL_LOCKED                          (1 << 0)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX       (1 << 2)

#define EFER_LME                                        0x00000100
#define EFER_LMA                                        0x00000400
#define EFER_NXE                                        0x00000800

#define GDT_DESC_NULL                                   0
#define GDT_DESC_CODE                                   1
#define GDT_DESC_TSS                                    2
#define GDT_DESC_TSS_HI                                 3
#define NUM_GDT_DESC                                    4

/* Byte lengths of emulated instructions, used to advance the guest RIP */
#define X86_INST_LEN_CPUID                              2
#define X86_INST_LEN_RDMSR                              2
#define X86_INST_LEN_WRMSR                              2
/* This covers both VMCALL and VMMCALL */
#define X86_INST_LEN_HYPERCALL                          3
#define X86_INST_LEN_MOV_TO_CR                          3
#define X86_INST_LEN_XSETBV                             3

/* High nibble shared by all REX prefixes (0x40..0x4f) */
#define X86_REX_CODE                                    4

#define X86_OP_MOV_TO_MEM                               0x89    /* MOV r/m, r */
#define X86_OP_MOV_FROM_MEM                             0x8b    /* MOV r, r/m */

#define NMI_VECTOR                                      2
#define PF_VECTOR                                       14

/* Attribute bits in the upper 32 bits of a segment/TSS descriptor */
#define DESC_TSS_BUSY                                   (1UL << (9 + 32))
#define DESC_PRESENT                                    (1UL << (15 + 32))
#define DESC_CODE_DATA                                  (1UL << (12 + 32))
#define DESC_PAGE_GRAN                                  (1UL << (23 + 32))

#ifndef __ASSEMBLY__

/*
 * General-purpose registers as spilled on the stack by the low-level
 * entry code.
 */
struct registers {
        unsigned long r15;
        unsigned long r14;
        unsigned long r13;
        unsigned long r12;
        unsigned long r11;
        unsigned long r10;
        unsigned long r9;
        unsigned long r8;
        unsigned long rdi;
        unsigned long rsi;
        unsigned long rbp;
        unsigned long unused; /* placeholder keeping the frame layout */
        unsigned long rbx;
        unsigned long rdx;
        unsigned long rcx;
        unsigned long rax;
};

struct desc_table_reg {
        u16 limit;
        u64 base;
} __attribute__((packed));

struct segment {
        u64 base;
        u32 limit;
        u32 access_rights;
        u16 selector;
};

/*
 * Dummy variable used only as an artificial asm operand: having the control
 * register accessors below read or write it keeps the compiler from
 * reordering them without resorting to a full "memory" clobber.
 */
static unsigned long __force_order;

static inline void cpu_relax(void)
{
        asm volatile("rep; nop" : : : "memory");
}

static inline void memory_barrier(void)
{
        asm volatile("mfence" : : : "memory");
}

static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        /* ecx is often an input as well as an output. */
        asm volatile("cpuid"
            : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
            : "0" (*eax), "2" (*ecx)
            : "memory");
}

static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
        *eax = op;
        *ecx = 0;
        __cpuid(eax, ebx, ecx, edx);
}

#define CPUID_REG(reg)                                          \
static inline unsigned int cpuid_##reg(unsigned int op)         \
{                                                               \
        unsigned int eax, ebx, ecx, edx;                        \
                                                                \
        cpuid(op, &eax, &ebx, &ecx, &edx);                      \
        return reg;                                             \
}

CPUID_REG(eax)
CPUID_REG(ebx)
CPUID_REG(ecx)
CPUID_REG(edx)
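
/*
 * Illustrative sketch, not part of the original header: how the cpuid_*()
 * accessors above are typically combined with the X86_FEATURE_* bits to
 * probe for hardware virtualization support. The helper name is
 * hypothetical.
 */
static inline int cpu_has_hw_virt_sketch(void)
{
        /* VT-x: CPUID leaf 0x01, ECX; SVM: CPUID leaf 0x80000001, ECX */
        return (cpuid_ecx(1) & X86_FEATURE_VMX) ||
               (cpuid_ecx(0x80000001) & X86_FEATURE_SVM);
}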

static inline unsigned long read_cr0(void)
{
        unsigned long cr0;

        asm volatile("mov %%cr0,%0" : "=r" (cr0), "=m" (__force_order));
        return cr0;
}

static inline void write_cr0(unsigned long val)
{
        asm volatile("mov %0,%%cr0" : : "r" (val), "m" (__force_order));
}

static inline unsigned long read_cr2(void)
{
        unsigned long cr2;

        asm volatile("mov %%cr2,%0" : "=r" (cr2), "=m" (__force_order));
        return cr2;
}

static inline unsigned long read_cr3(void)
{
        unsigned long cr3;

        asm volatile("mov %%cr3,%0" : "=r" (cr3), "=m" (__force_order));
        return cr3;
}

static inline void write_cr3(unsigned long val)
{
        asm volatile("mov %0,%%cr3" : : "r" (val), "m" (__force_order));
}

static inline unsigned long read_cr4(void)
{
        unsigned long cr4;

        asm volatile("mov %%cr4,%0" : "=r" (cr4), "=m" (__force_order));
        return cr4;
}

static inline void write_cr4(unsigned long val)
{
        asm volatile("mov %0,%%cr4" : : "r" (val), "m" (__force_order));
}
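
/*
 * Illustrative sketch, not part of the original header: the accessor pairs
 * above are intended for read-modify-write sequences on control registers,
 * e.g. setting CR4.VMXE before issuing VMXON. The helper name is
 * hypothetical.
 */
static inline void cr4_set_vmxe_sketch(void)
{
        write_cr4(read_cr4() | X86_CR4_VMXE);
}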

static inline unsigned long read_msr(unsigned int msr)
{
        u32 low, high;

        asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
        return low | ((unsigned long)high << 32);
}

static inline void write_msr(unsigned int msr, unsigned long val)
{
        asm volatile("wrmsr"
                : /* no output */
                : "c" (msr), "a" (val), "d" (val >> 32)
                : "memory");
}
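
/*
 * Illustrative sketch, not part of the original header: MSRs follow the
 * same read-modify-write pattern, e.g. enabling no-execute support in
 * EFER. The helper name is hypothetical.
 */
static inline void efer_enable_nxe_sketch(void)
{
        write_msr(MSR_EFER, read_msr(MSR_EFER) | EFER_NXE);
}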

static inline void read_gdtr(struct desc_table_reg *val)
{
        asm volatile("sgdtq %0" : "=m" (*val));
}

static inline void write_gdtr(struct desc_table_reg *val)
{
        asm volatile("lgdtq %0" : : "m" (*val));
}

static inline void read_idtr(struct desc_table_reg *val)
{
        asm volatile("sidtq %0" : "=m" (*val));
}

static inline void write_idtr(struct desc_table_reg *val)
{
        asm volatile("lidtq %0" : : "m" (*val));
}
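
/*
 * Illustrative sketch, not part of the original header: capturing the
 * current GDTR into the packed descriptor-table format defined above.
 * The helper name is hypothetical.
 */
static inline u64 current_gdt_base_sketch(void)
{
        struct desc_table_reg gdtr;

        read_gdtr(&gdtr);
        return gdtr.base;
}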

static inline void enable_irq(void)
{
        asm volatile("sti");
}

static inline void disable_irq(void)
{
        asm volatile("cli");
}

#endif /* !__ASSEMBLY__ */

#endif /* !_JAILHOUSE_ASM_PROCESSOR_H */