/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2014-2016
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
13 #include <jailhouse/paging.h>
14 #include <jailhouse/string.h>
15 #include <jailhouse/utils.h>
17 #define X86_FLAG_HUGEPAGE 0x80
19 struct paging hv_paging[MAX_PAGE_TABLE_LEVELS];
21 static bool x86_64_entry_valid(pt_entry_t pte, unsigned long flags)
23 return (*pte & flags) == flags;
26 static unsigned long x86_64_get_flags(pt_entry_t pte)
28 return *pte & BIT_MASK(6, 0);
31 static void x86_64_set_next_pt(pt_entry_t pte, unsigned long next_pt)
33 *pte = (next_pt & BIT_MASK(51, 12)) | PAGE_DEFAULT_FLAGS;
/*
 * Invalidate a page table entry.
 * NOTE(review): the function body (original lines 37-39) is not visible in
 * this chunk; only the signature is shown here — confirm against upstream.
 */
36 static void x86_64_clear_entry(pt_entry_t pte)
41 static bool x86_64_page_table_empty(page_table_t page_table)
46 for (n = 0, pte = page_table; n < PAGE_SIZE / sizeof(u64); n++, pte++)
47 if (x86_64_entry_valid(pte, PAGE_FLAG_PRESENT))
52 static pt_entry_t x86_64_get_entry_l4(page_table_t page_table,
55 return &page_table[(virt >> 39) & 0x1ff];
58 static pt_entry_t x86_64_get_entry_l3(page_table_t page_table,
61 return &page_table[(virt >> 30) & 0x1ff];
64 static pt_entry_t x86_64_get_entry_l2(page_table_t page_table,
67 return &page_table[(virt >> 21) & 0x1ff];
70 static pt_entry_t x86_64_get_entry_l1(page_table_t page_table,
73 return &page_table[(virt >> 12) & 0x1ff];
76 static void x86_64_set_terminal_l3(pt_entry_t pte, unsigned long phys,
79 *pte = (phys & BIT_MASK(51, 30)) | X86_FLAG_HUGEPAGE | flags;
82 static void x86_64_set_terminal_l2(pt_entry_t pte, unsigned long phys,
85 *pte = (phys & BIT_MASK(51, 21)) | X86_FLAG_HUGEPAGE | flags;
88 static void x86_64_set_terminal_l1(pt_entry_t pte, unsigned long phys,
91 *pte = (phys & BIT_MASK(51, 12)) | flags;
94 static unsigned long x86_64_get_phys_l3(pt_entry_t pte, unsigned long virt)
96 if (!(*pte & X86_FLAG_HUGEPAGE))
97 return INVALID_PHYS_ADDR;
98 return (*pte & BIT_MASK(51, 30)) | (virt & BIT_MASK(29, 0));
101 static unsigned long x86_64_get_phys_l2(pt_entry_t pte, unsigned long virt)
103 if (!(*pte & X86_FLAG_HUGEPAGE))
104 return INVALID_PHYS_ADDR;
105 return (*pte & BIT_MASK(51, 21)) | (virt & BIT_MASK(20, 0));
108 static unsigned long x86_64_get_phys_l1(pt_entry_t pte, unsigned long virt)
110 return (*pte & BIT_MASK(51, 12)) | (virt & BIT_MASK(11, 0));
113 static unsigned long x86_64_get_next_pt(pt_entry_t pte)
115 return *pte & BIT_MASK(51, 12);
/* Callbacks shared by all four x86-64 page table levels. */
#define X86_64_PAGING_COMMON					\
	.entry_valid		= x86_64_entry_valid,		\
	.get_flags		= x86_64_get_flags,		\
	.set_next_pt		= x86_64_set_next_pt,		\
	.clear_entry		= x86_64_clear_entry,		\
	.page_table_empty	= x86_64_page_table_empty
125 const struct paging x86_64_paging[] = {
127 X86_64_PAGING_COMMON,
128 .get_entry = x86_64_get_entry_l4,
129 /* set_terminal not valid */
130 .get_phys = paging_get_phys_invalid,
131 .get_next_pt = x86_64_get_next_pt,
134 .page_size = 1024 * 1024 * 1024,
135 X86_64_PAGING_COMMON,
136 .get_entry = x86_64_get_entry_l3,
137 .set_terminal = x86_64_set_terminal_l3,
138 .get_phys = x86_64_get_phys_l3,
139 .get_next_pt = x86_64_get_next_pt,
142 .page_size = 2 * 1024 * 1024,
143 X86_64_PAGING_COMMON,
144 .get_entry = x86_64_get_entry_l2,
145 .set_terminal = x86_64_set_terminal_l2,
146 .get_phys = x86_64_get_phys_l2,
147 .get_next_pt = x86_64_get_next_pt,
150 .page_size = PAGE_SIZE,
151 X86_64_PAGING_COMMON,
152 .get_entry = x86_64_get_entry_l1,
153 .set_terminal = x86_64_set_terminal_l1,
154 .get_phys = x86_64_get_phys_l1,
155 /* get_next_pt not valid */
159 void arch_paging_init(void)
161 memcpy(hv_paging, x86_64_paging, sizeof(x86_64_paging));
162 if (!(cpuid_edx(0x80000001, 0) & X86_FEATURE_GBPAGES))
163 hv_paging[1].page_size = 0;
166 static bool i386_entry_valid(pt_entry_t pte, unsigned long flags)
168 return (*(u32 *)pte & flags) == flags;
171 static pt_entry_t i386_get_entry_l2(page_table_t page_table,
174 u32 *page_table32 = (u32 *)page_table;
176 return (pt_entry_t)&page_table32[(virt >> 22) & 0x3ff];
179 static pt_entry_t i386_get_entry_l1(page_table_t page_table,
182 u32 *page_table32 = (u32 *)page_table;
184 return (pt_entry_t)&page_table32[(virt >> 12) & 0x3ff];
187 static unsigned long i386_get_phys_l2(pt_entry_t pte, unsigned long virt)
189 u32 pte32 = *(u32 *)pte;
191 if (!(pte32 & X86_FLAG_HUGEPAGE))
192 return INVALID_PHYS_ADDR;
193 return ((unsigned long)(pte32 & BIT_MASK(16, 13)) << (32 - 13)) |
194 (pte32 & BIT_MASK(31, 22)) | (virt & BIT_MASK(21, 0));
197 static unsigned long i386_get_phys_l1(pt_entry_t pte, unsigned long virt)
199 return (*(u32 *)pte & BIT_MASK(31, 12)) | (virt & BIT_MASK(11, 0));
202 static unsigned long i386_get_next_pt(pt_entry_t pte)
204 return *(u32 *)pte & BIT_MASK(31, 12);
207 /* read-only, no page table construction supported */
208 const struct paging i386_paging[] = {
210 .page_size = 4 * 1024 * 1024,
211 .entry_valid = i386_entry_valid,
212 .get_entry = i386_get_entry_l2,
213 .get_phys = i386_get_phys_l2,
214 .get_next_pt = i386_get_next_pt,
217 .page_size = PAGE_SIZE,
218 .entry_valid = i386_entry_valid,
219 .get_entry = i386_get_entry_l1,
220 .get_phys = i386_get_phys_l1,
221 /* get_next_pt not valid */
/*
 * Helpers for translations while the guest runs with paging disabled
 * (real mode). NOTE(review): the bodies of all three functions (original
 * lines 226-229, 232-235, 237-239) and the second parameter line of
 * realmode_get_entry are not visible in this chunk — presumably an
 * identity translation, but confirm against the upstream file before
 * relying on that.
 */
225 static bool realmode_entry_valid(pt_entry_t pte, unsigned long flags)
230 static pt_entry_t realmode_get_entry(page_table_t page_table,
236 static unsigned long realmode_get_phys(pt_entry_t pte, unsigned long virt)
241 /* naturally read-only */
242 const struct paging realmode_paging[] = {
244 .page_size = PAGE_SIZE,
245 .entry_valid = realmode_entry_valid,
246 .get_entry = realmode_get_entry,
247 .get_phys = realmode_get_phys,
248 /* get_next_pt not valid */