/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#ifndef _JAILHOUSE_ASM_PROCESSOR_H
#define _JAILHOUSE_ASM_PROCESSOR_H

#include <jailhouse/types.h>
20 #define X86_FEATURE_VMX (1 << 5)
21 #define X86_FEATURE_GBPAGES (1 << 26)
22 #define X86_FEATURE_RDTSCP (1 << 27)
24 #define X86_FEATURE_SVM (1 << 2)
25 #define X86_FEATURE_NP (1 << 0)
26 #define X86_FEATURE_FLUSH_BY_ASID (1 << 6)
27 #define X86_FEATURE_DECODE_ASSISTS (1 << 7)
28 #define X86_FEATURE_AVIC (1 << 13)
30 #define X86_RFLAGS_VM (1 << 17)
32 #define X86_CR0_PE 0x00000001
33 #define X86_CR0_ET 0x00000010
34 #define X86_CR0_WP 0x00010000
35 #define X86_CR0_NW 0x20000000
36 #define X86_CR0_CD 0x40000000
37 #define X86_CR0_PG 0x80000000
39 #define X86_CR4_PAE 0x00000020
40 #define X86_CR4_PGE 0x00000080
41 #define X86_CR4_VMXE 0x00002000
43 #define X86_XCR0_FP 0x00000001
45 #define MSR_IA32_APICBASE 0x0000001b
46 #define MSR_IA32_FEATURE_CONTROL 0x0000003a
47 #define MSR_IA32_PAT 0x00000277
48 #define MSR_IA32_SYSENTER_CS 0x00000174
49 #define MSR_IA32_SYSENTER_ESP 0x00000175
50 #define MSR_IA32_SYSENTER_EIP 0x00000176
51 #define MSR_IA32_VMX_BASIC 0x00000480
52 #define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
53 #define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
54 #define MSR_IA32_VMX_EXIT_CTLS 0x00000483
55 #define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
56 #define MSR_IA32_VMX_MISC 0x00000485
57 #define MSR_IA32_VMX_CR0_FIXED0 0x00000486
58 #define MSR_IA32_VMX_CR0_FIXED1 0x00000487
59 #define MSR_IA32_VMX_CR4_FIXED0 0x00000488
60 #define MSR_IA32_VMX_CR4_FIXED1 0x00000489
61 #define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
62 #define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
63 #define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
64 #define MSR_X2APIC_BASE 0x00000800
65 #define MSR_X2APIC_ICR 0x00000830
66 #define MSR_X2APIC_END 0x0000083f
67 #define MSR_EFER 0xc0000080
68 #define MSR_STAR 0xc0000081
69 #define MSR_LSTAR 0xc0000082
70 #define MSR_CSTAR 0xc0000083
71 #define MSR_SFMASK 0xc0000084
72 #define MSR_FS_BASE 0xc0000100
73 #define MSR_GS_BASE 0xc0000101
74 #define MSR_KERNGS_BASE 0xc0000102
76 #define FEATURE_CONTROL_LOCKED (1 << 0)
77 #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1 << 2)
79 #define EFER_LME 0x00000100
80 #define EFER_LMA 0x00000400
81 #define EFER_NXE 0x00000800
83 #define GDT_DESC_NULL 0
84 #define GDT_DESC_CODE 1
85 #define GDT_DESC_TSS 2
86 #define GDT_DESC_TSS_HI 3
87 #define NUM_GDT_DESC 4
89 #define X86_INST_LEN_CPUID 2
90 #define X86_INST_LEN_RDMSR 2
91 #define X86_INST_LEN_WRMSR 2
92 /* This covers both VMCALL and VMMCALL */
93 #define X86_INST_LEN_HYPERCALL 3
94 #define X86_INST_LEN_MOV_TO_CR 3
95 #define X86_INST_LEN_XSETBV 3
97 #define X86_REX_CODE 4
99 #define X86_OP_MOV_TO_MEM 0x89
100 #define X86_OP_MOV_FROM_MEM 0x8b
105 #define DESC_TSS_BUSY (1UL << (9 + 32))
106 #define DESC_PRESENT (1UL << (15 + 32))
107 #define DESC_CODE_DATA (1UL << (12 + 32))
108 #define DESC_PAGE_GRAN (1UL << (23 + 32))
/**
 * @defgroup Processor Processor
 *
 * Low-level support for x86 processor configuration and status retrieval.
 */
133 unsigned long unused;
140 struct desc_table_reg {
143 } __attribute__((packed));
152 static unsigned long __force_order;
154 static inline void cpu_relax(void)
156 asm volatile("rep; nop" : : : "memory");
159 static inline void memory_barrier(void)
161 asm volatile("mfence" : : : "memory");
164 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
165 unsigned int *ecx, unsigned int *edx)
167 /* ecx is often an input as well as an output. */
169 : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
170 : "0" (*eax), "2" (*ecx)
174 static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
175 unsigned int *ecx, unsigned int *edx)
179 __cpuid(eax, ebx, ecx, edx);
182 #define CPUID_REG(reg) \
183 static inline unsigned int cpuid_##reg(unsigned int op) \
185 unsigned int eax, ebx, ecx, edx; \
187 cpuid(op, &eax, &ebx, &ecx, &edx); \
196 static inline unsigned long read_cr0(void)
200 asm volatile("mov %%cr0,%0" : "=r" (cr0), "=m" (__force_order));
204 static inline void write_cr0(unsigned long val)
206 asm volatile("mov %0,%%cr0" : : "r" (val), "m" (__force_order));
209 static inline unsigned long read_cr2(void)
213 asm volatile("mov %%cr2,%0" : "=r" (cr2), "=m" (__force_order));
217 static inline unsigned long read_cr3(void)
221 asm volatile("mov %%cr3,%0" : "=r" (cr3), "=m" (__force_order));
225 static inline void write_cr3(unsigned long val)
227 asm volatile("mov %0,%%cr3" : : "r" (val), "m" (__force_order));
230 static inline unsigned long read_cr4(void)
234 asm volatile("mov %%cr4,%0" : "=r" (cr4), "=m" (__force_order));
238 static inline void write_cr4(unsigned long val)
240 asm volatile("mov %0,%%cr4" : : "r" (val), "m" (__force_order));
243 static inline unsigned long read_msr(unsigned int msr)
247 asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
248 return low | ((unsigned long)high << 32);
251 static inline void write_msr(unsigned int msr, unsigned long val)
255 : "c" (msr), "a" (val), "d" (val >> 32)
259 static inline void read_gdtr(struct desc_table_reg *val)
261 asm volatile("sgdtq %0" : "=m" (*val));
264 static inline void write_gdtr(struct desc_table_reg *val)
266 asm volatile("lgdtq %0" : : "m" (*val));
269 static inline void read_idtr(struct desc_table_reg *val)
271 asm volatile("sidtq %0" : "=m" (*val));
274 static inline void write_idtr(struct desc_table_reg *val)
276 asm volatile("lidtq %0" : : "m" (*val));
280 * Enable or disable interrupts delivery to the local CPU when in host mode.
282 * In some cases (AMD) changing IF isn't enough, so these are implemented on
286 void enable_irq(void);
288 void disable_irq(void);
#endif /* !__ASSEMBLY__ */

#endif /* !_JAILHOUSE_ASM_PROCESSOR_H */