/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 * Copyright (c) Valentine Sinitsyn, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * This file is based on linux/arch/x86/include/asm/special_insn.h and other
 * kernel headers:
 *
 * Copyright (c) Linux kernel developers, 2013
 */
20 #ifndef _JAILHOUSE_ASM_PROCESSOR_H
21 #define _JAILHOUSE_ASM_PROCESSOR_H
#include <stdint.h>

#include <jailhouse/types.h>
/*
 * CPUID feature-flag bit positions, grouped by the leaf/subleaf and output
 * register in which they are reported.
 */

/* leaf 0x01, ECX */
#define X86_FEATURE_VMX (1 << 5)
#define X86_FEATURE_XSAVE (1 << 26)
/* unsigned literal: (1 << 31) would shift into the sign bit of int (UB) */
#define X86_FEATURE_HYPERVISOR (1U << 31)

/* leaf 0x07, subleaf 0, EBX */
#define X86_FEATURE_CAT (1 << 15)

/* leaf 0x80000001, ECX */
#define X86_FEATURE_SVM (1 << 2)

/* leaf 0x80000001, EDX */
#define X86_FEATURE_GBPAGES (1 << 26)
#define X86_FEATURE_RDTSCP (1 << 27)

/* leaf 0x8000000a, EDX */
#define X86_FEATURE_NP (1 << 0)
#define X86_FEATURE_FLUSH_BY_ASID (1 << 6)
#define X86_FEATURE_DECODE_ASSISTS (1 << 7)
#define X86_FEATURE_AVIC (1 << 13)
/* RFLAGS: virtual-8086 mode flag */
#define X86_RFLAGS_VM (1 << 17)

/* CR0 bits */
#define X86_CR0_PE (1UL << 0)	/* protected mode enable */
#define X86_CR0_MP (1UL << 1)	/* monitor coprocessor */
#define X86_CR0_TS (1UL << 3)	/* task switched */
#define X86_CR0_ET (1UL << 4)	/* extension type (fixed to 1) */
#define X86_CR0_NE (1UL << 5)	/* native FPU error reporting */
#define X86_CR0_WP (1UL << 16)	/* write protect in supervisor mode */
#define X86_CR0_NW (1UL << 29)	/* not write-through */
#define X86_CR0_CD (1UL << 30)	/* cache disable */
#define X86_CR0_PG (1UL << 31)	/* paging enable */
/* CR0 bits that must be written as zero */
#define X86_CR0_RESERVED \
	(BIT_MASK(28, 19) | (1UL << 17) | BIT_MASK(15, 6))

/* CR4 bits */
#define X86_CR4_PAE (1UL << 5)	/* physical address extension */
#define X86_CR4_VMXE (1UL << 13)	/* VMX enable */
#define X86_CR4_OSXSAVE (1UL << 18)	/* XSAVE/XRSTOR enable */
/* CR4 bits treated as reserved here (architecture revision dependent) */
#define X86_CR4_RESERVED \
	(BIT_MASK(31, 22) | (1UL << 19) | (1UL << 15) | BIT_MASK(12, 11))

/* XCR0: x87 FPU state, must always be set */
#define X86_XCR0_FP 0x00000001
/* Model-specific register (MSR) numbers */
#define MSR_IA32_APICBASE 0x0000001b
#define MSR_IA32_FEATURE_CONTROL 0x0000003a
#define MSR_IA32_PAT 0x00000277
#define MSR_IA32_MTRR_DEF_TYPE 0x000002ff
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
#define MSR_IA32_SYSENTER_EIP 0x00000176
#define MSR_IA32_PERF_GLOBAL_CTRL 0x0000038f
/* VMX capability and control MSRs */
#define MSR_IA32_VMX_BASIC 0x00000480
#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
#define MSR_IA32_VMX_EXIT_CTLS 0x00000483
#define MSR_IA32_VMX_ENTRY_CTLS 0x00000484
#define MSR_IA32_VMX_MISC 0x00000485
#define MSR_IA32_VMX_CR0_FIXED0 0x00000486
#define MSR_IA32_VMX_CR0_FIXED1 0x00000487
#define MSR_IA32_VMX_CR4_FIXED0 0x00000488
#define MSR_IA32_VMX_CR4_FIXED1 0x00000489
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
/* x2APIC MSR range (BASE..END) and the interrupt command register in it */
#define MSR_X2APIC_BASE 0x00000800
#define MSR_X2APIC_ICR 0x00000830
#define MSR_X2APIC_END 0x0000083f
/* Cache Allocation Technology (CAT) MSRs */
#define MSR_IA32_PQR_ASSOC 0x00000c8f
#define MSR_IA32_L3_MASK_0 0x00000c90
/* AMD64 / long-mode MSRs */
#define MSR_EFER 0xc0000080
#define MSR_STAR 0xc0000081
#define MSR_LSTAR 0xc0000082
#define MSR_CSTAR 0xc0000083
#define MSR_SFMASK 0xc0000084
#define MSR_FS_BASE 0xc0000100
#define MSR_GS_BASE 0xc0000101
#define MSR_KERNGS_BASE 0xc0000102
/* MSR_IA32_FEATURE_CONTROL bits */
#define FEATURE_CONTROL_LOCKED (1 << 0)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1 << 2)

/* power-on default value of MSR_IA32_PAT */
#define PAT_RESET_VALUE 0x0007040600070406UL

/* MSR_IA32_MTRR_DEF_TYPE: global MTRR enable bit */
#define MTRR_ENABLE (1UL << 11)

/* EFER bits */
#define EFER_LME 0x00000100	/* long mode enable */
#define EFER_LMA 0x00000400	/* long mode active */
#define EFER_NXE 0x00000800	/* no-execute enable */

/* bit position of the class-of-service field in MSR_IA32_PQR_ASSOC */
#define PQR_ASSOC_COS_SHIFT 32

/* CAT resource ID of the L3 cache */
#define CAT_RESID_L3 1

/* field masks in the CAT capability CPUID output */
#define CAT_CBM_LEN_MASK BIT_MASK(4, 0)
#define CAT_COS_MAX_MASK BIT_MASK(15, 0)

/* descriptor indexes in the hypervisor's GDT */
#define GDT_DESC_NULL 0
#define GDT_DESC_CODE 1
#define GDT_DESC_TSS 2
#define GDT_DESC_TSS_HI 3
#define NUM_GDT_DESC 4

/* instruction lengths, used to advance the guest RIP after emulation */
#define X86_INST_LEN_CPUID 2
#define X86_INST_LEN_RDMSR 2
#define X86_INST_LEN_WRMSR 2
/* This covers both VMCALL and VMMCALL */
#define X86_INST_LEN_HYPERCALL 3
#define X86_INST_LEN_MOV_TO_CR 3
#define X86_INST_LEN_XSETBV 3

/* high nibble of a REX instruction prefix (0x4X) */
#define X86_REX_CODE 4

/* opcodes recognized by the MMIO access parser */
#define X86_OP_MOV_TO_MEM 0x89
#define X86_OP_MOV_FROM_MEM 0x8b

/*
 * Segment-descriptor attribute bits; "+ 32" places them in the upper dword
 * of the 64-bit descriptor word.
 */
#define DESC_TSS_BUSY (1UL << (9 + 32))
#define DESC_PRESENT (1UL << (15 + 32))
#define DESC_CODE_DATA (1UL << (12 + 32))
#define DESC_PAGE_GRAN (1UL << (23 + 32))
154 * @defgroup Processor Processor
156 * Low-level support for x86 processor configuration and status retrieval.
174 unsigned long unused;
180 unsigned long by_index[16];
/**
 * Pseudo-descriptor as stored/loaded by SGDT/LGDT and SIDT/LIDT in 64-bit
 * mode: a 16-bit table limit followed by the 64-bit linear base address
 * (10 bytes total, hence the packed attribute).
 */
struct desc_table_reg {
	uint16_t limit;	/* size of the table in bytes, minus one */
	uint64_t base;	/* linear base address of the table */
} __attribute__((packed));
195 static unsigned long __force_order;
197 static inline void cpu_relax(void)
199 asm volatile("rep; nop" : : : "memory");
202 static inline void memory_barrier(void)
204 asm volatile("mfence" : : : "memory");
207 static inline void memory_load_barrier(void)
209 asm volatile("lfence" : : : "memory");
212 static inline void cpuid(unsigned int *eax, unsigned int *ebx,
213 unsigned int *ecx, unsigned int *edx)
215 /* ecx is often an input as well as an output. */
217 : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
218 : "0" (*eax), "2" (*ecx)
/*
 * Generate cpuid_eax()/cpuid_ebx()/cpuid_ecx()/cpuid_edx(): each returns
 * the named output register of CPUID leaf @op, subleaf @sub.
 */
#define CPUID_REG(reg)							\
static inline unsigned int cpuid_##reg(unsigned int op, unsigned int sub) \
{									\
	unsigned int eax, ebx, ecx, edx;				\
									\
	eax = op;							\
	ecx = sub;							\
	cpuid(&eax, &ebx, &ecx, &edx);					\
	return reg;							\
}

CPUID_REG(eax)
CPUID_REG(ebx)
CPUID_REG(ecx)
CPUID_REG(edx)
238 static inline unsigned long read_cr0(void)
242 asm volatile("mov %%cr0,%0" : "=r" (cr0), "=m" (__force_order));
246 static inline void write_cr0(unsigned long val)
248 asm volatile("mov %0,%%cr0" : : "r" (val), "m" (__force_order));
251 static inline unsigned long read_cr2(void)
255 asm volatile("mov %%cr2,%0" : "=r" (cr2), "=m" (__force_order));
259 static inline unsigned long read_cr3(void)
263 asm volatile("mov %%cr3,%0" : "=r" (cr3), "=m" (__force_order));
267 static inline void write_cr3(unsigned long val)
269 asm volatile("mov %0,%%cr3" : : "r" (val), "m" (__force_order));
272 static inline unsigned long read_cr4(void)
276 asm volatile("mov %%cr4,%0" : "=r" (cr4), "=m" (__force_order));
280 static inline void write_cr4(unsigned long val)
282 asm volatile("mov %0,%%cr4" : : "r" (val), "m" (__force_order));
285 static inline unsigned long read_msr(unsigned int msr)
289 asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
290 return low | ((unsigned long)high << 32);
293 static inline void write_msr(unsigned int msr, unsigned long val)
297 : "c" (msr), "a" (val), "d" (val >> 32)
301 static inline void set_rdmsr_value(union registers *regs, unsigned long val)
303 regs->rax = (u32)val;
304 regs->rdx = val >> 32;
307 static inline unsigned long get_wrmsr_value(union registers *regs)
309 return (u32)regs->rax | (regs->rdx << 32);
312 static inline void read_gdtr(struct desc_table_reg *val)
314 asm volatile("sgdtq %0" : "=m" (*val));
317 static inline void write_gdtr(struct desc_table_reg *val)
319 asm volatile("lgdtq %0" : : "m" (*val));
322 static inline void read_idtr(struct desc_table_reg *val)
324 asm volatile("sidtq %0" : "=m" (*val));
327 static inline void write_idtr(struct desc_table_reg *val)
329 asm volatile("lidtq %0" : : "m" (*val));
/**
 * Enable or disable interrupt delivery to the local CPU when in host mode.
 *
 * In some cases (AMD), changing IF alone isn't enough, so enable_irq() and
 * disable_irq() below are implemented on a per-vendor basis.
 */
339 void enable_irq(void);
341 void disable_irq(void);
345 #endif /* !__ASSEMBLY__ */
347 #endif /* !_JAILHOUSE_ASM_PROCESSOR_H */