xapic_page = page_alloc(&remap_pool, 1);
if (!xapic_page)
return -ENOMEM;
- err = page_map_create(hv_page_table, XAPIC_BASE, PAGE_SIZE,
- (unsigned long)xapic_page,
+ err = page_map_create(&hv_paging_structs, XAPIC_BASE,
+ PAGE_SIZE, (unsigned long)xapic_page,
PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
PAGE_DEFAULT_FLAGS, PAGE_DIR_LEVELS,
PAGE_MAP_NON_COHERENT);
#ifndef _JAILHOUSE_ASM_CELL_H
#define _JAILHOUSE_ASM_CELL_H
-#include <asm/types.h>
-#include <asm/paging.h>
+#include <jailhouse/paging.h>
#include <jailhouse/cell-config.h>
#include <jailhouse/hypercall.h>
struct {
/* should be first as it requires page alignment */
u8 __attribute__((aligned(PAGE_SIZE))) io_bitmap[2*PAGE_SIZE];
- pgd_t *ept;
+ struct paging_structures ept_structs;
} vmx;
struct {
- pgd_t *page_table;
+ struct paging_structures pg_structs;
} vtd;
unsigned int id;
/* swap CR3 */
cpu_data->linux_cr3 = read_cr3();
- write_cr3(page_map_hvirt2phys(hv_page_table));
+ write_cr3(page_map_hvirt2phys(hv_paging_structs.root_table));
/* set GDTR */
dtr.limit = NUM_GDT_DESC * 8 - 1;
phys_start = page_map_hvirt2phys(&cell->comm_page);
table_flags = page_flags & ~EPT_FLAG_WB_TYPE;
- return page_map_create(cell->vmx.ept, phys_start, mem->size,
+ return page_map_create(&cell->vmx.ept_structs, phys_start, mem->size,
mem->virt_start, page_flags, table_flags,
PAGE_DIR_LEVELS, PAGE_MAP_NON_COHERENT);
}
/*
 * Remove the EPT mapping of a cell memory region.
 *
 * Destroys the guest-physical mapping of @mem->size bytes starting at
 * @mem->virt_start in the cell's EPT paging structures. Uses
 * PAGE_MAP_NON_COHERENT, matching the mode the mapping was created
 * with in vmx_map_memory_region().
 */
void vmx_unmap_memory_region(struct cell *cell,
			     const struct jailhouse_memory *mem)
{
	page_map_destroy(&cell->vmx.ept_structs, mem->virt_start, mem->size,
			 PAGE_DIR_LEVELS, PAGE_MAP_NON_COHERENT);
}
unsigned long arch_page_map_gphys2phys(struct per_cpu *cpu_data,
unsigned long gphys)
{
- return page_map_virt2phys(cpu_data->cell->vmx.ept, gphys,
+ return page_map_virt2phys(&cpu_data->cell->vmx.ept_structs, gphys,
PAGE_DIR_LEVELS);
}
u32 size;
/* build root cell EPT */
- cell->vmx.ept = page_alloc(&mem_pool, 1);
- if (!cell->vmx.ept)
+ cell->vmx.ept_structs.root_table = page_alloc(&mem_pool, 1);
+ if (!cell->vmx.ept_structs.root_table)
return -ENOMEM;
for (n = 0; n < config->num_memory_regions; n++, mem++) {
err = vmx_map_memory_region(cell, mem);
if (err)
- /* FIXME: release vmx.ept */
+ /* FIXME: release vmx.ept_structs.root_table */
return err;
}
- err = page_map_create(cell->vmx.ept,
+ err = page_map_create(&cell->vmx.ept_structs,
page_map_hvirt2phys(apic_access_page),
PAGE_SIZE, XAPIC_BASE,
EPT_FLAG_READ|EPT_FLAG_WRITE|EPT_FLAG_WB_TYPE,
EPT_FLAG_READ|EPT_FLAG_WRITE,
PAGE_DIR_LEVELS, PAGE_MAP_NON_COHERENT);
if (err)
- /* FIXME: release vmx.ept */
+ /* FIXME: release vmx.ept_structs.root_table */
return err;
memset(cell->vmx.io_bitmap, -1, sizeof(cell->vmx.io_bitmap));
for (n = 0; n < config->num_memory_regions; n++, mem++)
if (!(mem->flags & JAILHOUSE_MEM_COMM_REGION))
- page_map_destroy(linux_cell.vmx.ept, mem->phys_start,
- mem->size, PAGE_DIR_LEVELS,
+ page_map_destroy(&linux_cell.vmx.ept_structs,
+ mem->phys_start, mem->size,
+ PAGE_DIR_LEVELS,
PAGE_MAP_NON_COHERENT);
for (b = linux_cell.vmx.io_bitmap; pio_bitmap_size > 0;
u32 pio_bitmap_size = config->pio_bitmap_size;
u8 *b;
- page_map_destroy(cell->vmx.ept, XAPIC_BASE, PAGE_SIZE,
+ page_map_destroy(&cell->vmx.ept_structs, XAPIC_BASE, PAGE_SIZE,
PAGE_DIR_LEVELS, PAGE_MAP_NON_COHERENT);
if (linux_cell.config->pio_bitmap_size < pio_bitmap_size)
b++, pio_bitmap++, linux_pio_bitmap++, pio_bitmap_size--)
*b &= *pio_bitmap | *linux_pio_bitmap;
- page_free(&mem_pool, cell->vmx.ept, 1);
+ page_free(&mem_pool, cell->vmx.ept_structs.root_table, 1);
}
void vmx_invept(void)
page_map_hvirt2phys(io_bitmap + PAGE_SIZE));
ok &= vmcs_write64(EPT_POINTER,
- page_map_hvirt2phys(cell->vmx.ept) |
- EPT_TYPE_WRITEBACK | EPT_PAGE_WALK_LEN);
+ page_map_hvirt2phys(cell->vmx.ept_structs.root_table) |
+ EPT_TYPE_WRITEBACK | EPT_PAGE_WALK_LEN);
return ok;
}
else if (reg_base != dmar_reg_base + dmar_units * PAGE_SIZE)
return -ENOMEM;
- err = page_map_create(hv_page_table, drhd->register_base_addr,
- PAGE_SIZE, (unsigned long)reg_base,
+ err = page_map_create(&hv_paging_structs,
+ drhd->register_base_addr, PAGE_SIZE,
+ (unsigned long)reg_base,
PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
PAGE_DEFAULT_FLAGS, PAGE_DIR_LEVELS,
PAGE_MAP_NON_COHERENT);
context_entry = &context_entry_table[device->devfn];
context_entry->lo_word = VTD_CTX_PRESENT |
VTD_CTX_FPD | VTD_CTX_TTYPE_MLP_UNTRANS |
- page_map_hvirt2phys(cell->vtd.page_table);
+ page_map_hvirt2phys(cell->vtd.pg_structs.root_table);
context_entry->hi_word =
(dmar_pt_levels == 3 ? VTD_CTX_AGAW_39 : VTD_CTX_AGAW_48) |
(cell->id << VTD_CTX_DID_SHIFT);
if (cell->id >= dmar_num_did)
return -ERANGE;
- cell->vtd.page_table = page_alloc(&mem_pool, 1);
- if (!cell->vtd.page_table)
+ cell->vtd.pg_structs.root_table = page_alloc(&mem_pool, 1);
+ if (!cell->vtd.pg_structs.root_table)
return -ENOMEM;
for (n = 0; n < config->num_memory_regions; n++, mem++) {
err = vtd_map_memory_region(cell, mem);
if (err)
- /* FIXME: release vtd.page_table */
+ /* FIXME: release vtd.pg_structs.root_table */
return err;
}
for (n = 0; n < config->num_pci_devices; n++)
if (!vtd_add_device_to_cell(cell, &dev[n]))
- /* FIXME: release vtd.page_table,
+ /* FIXME: release vtd.pg_structs.root_table,
* revert device additions*/
return -ENOMEM;
for (n = 0; n < config->num_memory_regions; n++, mem++)
if (mem->flags & JAILHOUSE_MEM_DMA)
- page_map_destroy(linux_cell.vtd.page_table,
+ page_map_destroy(&linux_cell.vtd.pg_structs,
mem->phys_start, mem->size,
dmar_pt_levels, PAGE_MAP_COHERENT);
if (mem->flags & JAILHOUSE_MEM_WRITE)
page_flags |= VTD_PAGE_WRITE;
- return page_map_create(cell->vtd.page_table, mem->phys_start,
+ return page_map_create(&cell->vtd.pg_structs, mem->phys_start,
mem->size, mem->virt_start, page_flags,
VTD_PAGE_READ | VTD_PAGE_WRITE,
dmar_pt_levels, PAGE_MAP_COHERENT);
return;
if (mem->flags & JAILHOUSE_MEM_DMA)
- page_map_destroy(cell->vtd.page_table, mem->virt_start,
+ page_map_destroy(&cell->vtd.pg_structs, mem->virt_start,
mem->size, dmar_pt_levels, PAGE_MAP_COHERENT);
}
vtd_flush_domain_caches(cell->id);
vtd_flush_domain_caches(linux_cell.id);
- page_free(&mem_pool, cell->vtd.page_table, 1);
+ page_free(&mem_pool, cell->vtd.pg_structs.root_table, 1);
}
void vtd_shutdown(void)
cfg_header_size = (config_address & ~PAGE_MASK) +
sizeof(struct jailhouse_cell_desc);
- err = page_map_create(hv_page_table, config_address & PAGE_MASK,
+ err = page_map_create(&hv_paging_structs, config_address & PAGE_MASK,
cfg_header_size, mapping_addr,
PAGE_READONLY_FLAGS, PAGE_DEFAULT_FLAGS,
PAGE_DIR_LEVELS, PAGE_MAP_NON_COHERENT);
goto resume_out;
}
- err = page_map_create(hv_page_table, config_address & PAGE_MASK,
+ err = page_map_create(&hv_paging_structs, config_address & PAGE_MASK,
cfg_total_size, mapping_addr,
PAGE_READONLY_FLAGS, PAGE_DEFAULT_FLAGS,
PAGE_DIR_LEVELS, PAGE_MAP_NON_COHERENT);
name_size = (name_address & ~PAGE_MASK) + JAILHOUSE_CELL_NAME_MAXLEN;
- err = page_map_create(hv_page_table, name_address & PAGE_MASK,
+ err = page_map_create(&hv_paging_structs, name_address & PAGE_MASK,
name_size, mapping_addr, PAGE_READONLY_FLAGS,
PAGE_DEFAULT_FLAGS, PAGE_DIR_LEVELS,
PAGE_MAP_NON_COHERENT);
* the COPYING file in the top-level directory.
*/
+#ifndef _JAILHOUSE_PAGING_H
+#define _JAILHOUSE_PAGING_H
+
#include <jailhouse/entry.h>
#include <asm/types.h>
#include <asm/paging.h>
PAGE_MAP_NON_COHERENT,
};
+typedef pgd_t *page_table_t;
+
+struct paging_structures {
+ page_table_t root_table;
+};
+
extern struct page_pool mem_pool;
extern struct page_pool remap_pool;
-extern pgd_t *hv_page_table;
+extern struct paging_structures hv_paging_structs;
void *page_alloc(struct page_pool *pool, unsigned int num);
void page_free(struct page_pool *pool, void *first_page, unsigned int num);
return (void *)phys + hypervisor_header.page_offset;
}
-unsigned long page_map_virt2phys(pgd_t *page_table, unsigned long virt,
- unsigned int levels);
+unsigned long page_map_virt2phys(const struct paging_structures *pg_structs,
+ unsigned long virt, unsigned int levels);
unsigned long arch_page_map_gphys2phys(struct per_cpu *cpu_data,
unsigned long gphys);
-int page_map_create(pgd_t *page_table, unsigned long phys, unsigned long size,
- unsigned long virt, unsigned long page_flags,
- unsigned long table_flags, unsigned int levels,
- enum page_map_coherent coherent);
-void page_map_destroy(pgd_t *page_table, unsigned long virt,
- unsigned long size, unsigned int levels,
- enum page_map_coherent coherent);
+int page_map_create(const struct paging_structures *pg_structs,
+ unsigned long phys, unsigned long size, unsigned long virt,
+ unsigned long flags, unsigned long table_flags,
+ unsigned int levels, enum page_map_coherent coherent);
+void page_map_destroy(const struct paging_structures *pg_structs,
+ unsigned long virt, unsigned long size,
+ unsigned int levels, enum page_map_coherent coherent);
void *page_map_get_guest_page(struct per_cpu *cpu_data,
unsigned long page_table_paddr,
int paging_init(void);
void page_map_dump_stats(const char *when);
+
+#endif /* !_JAILHOUSE_PAGING_H */
.pages = BITS_PER_PAGE * NUM_REMAP_BITMAP_PAGES,
};
-pgd_t *hv_page_table;
+struct paging_structures hv_paging_structs;
static unsigned long find_next_free_page(struct page_pool *pool,
unsigned long start)
}
}
-unsigned long page_map_virt2phys(pgd_t *page_table, unsigned long virt,
- unsigned int levels)
+unsigned long page_map_virt2phys(const struct paging_structures *pg_structs,
+ unsigned long virt, unsigned int levels)
{
unsigned long offs = hypervisor_header.page_offset;
pgd_t *pgd;
switch (levels) {
case 4:
- pgd = pgd_offset(page_table, virt);
+ pgd = pgd_offset(pg_structs->root_table, virt);
if (!pgd_valid(pgd))
return INVALID_PHYS_ADDR;
pud = pud4l_offset(pgd, offs, virt);
break;
case 3:
- pud = pud3l_offset(page_table, virt);
+ pud = pud3l_offset(pg_structs->root_table, virt);
break;
default:
return INVALID_PHYS_ADDR;
flush_cache(addr, size);
}
-int page_map_create(pgd_t *page_table, unsigned long phys, unsigned long size,
- unsigned long virt, unsigned long flags,
- unsigned long table_flags, unsigned int levels,
- enum page_map_coherent coherent)
+int page_map_create(const struct paging_structures *pg_structs,
+ unsigned long phys, unsigned long size, unsigned long virt,
+ unsigned long flags, unsigned long table_flags,
+ unsigned int levels, enum page_map_coherent coherent)
{
unsigned long offs = hypervisor_header.page_offset;
pgd_t *pgd;
phys += PAGE_SIZE, virt += PAGE_SIZE, size -= PAGE_SIZE) {
switch (levels) {
case 4:
- pgd = pgd_offset(page_table, virt);
+ pgd = pgd_offset(pg_structs->root_table, virt);
if (!pgd_valid(pgd)) {
pud = page_alloc(&mem_pool, 1);
if (!pud)
pud = pud4l_offset(pgd, offs, virt);
break;
case 3:
- pud = pud3l_offset(page_table, virt);
+ pud = pud3l_offset(pg_structs->root_table, virt);
break;
default:
return -EINVAL;
return 0;
}
-void page_map_destroy(pgd_t *page_table, unsigned long virt,
- unsigned long size, unsigned int levels,
- enum page_map_coherent coherent)
+void page_map_destroy(const struct paging_structures *pg_structs,
+ unsigned long virt, unsigned long size,
+ unsigned int levels, enum page_map_coherent coherent)
{
unsigned long offs = hypervisor_header.page_offset;
pgd_t *pgd;
virt += PAGE_SIZE, size -= PAGE_SIZE) {
switch (levels) {
case 4:
- pgd = pgd_offset(page_table, virt);
+ pgd = pgd_offset(pg_structs->root_table, virt);
if (!pgd_valid(pgd))
continue;
break;
case 3:
pgd = 0; /* silence compiler warning */
- pud = pud3l_offset(page_table, virt);
+ pud = pud3l_offset(pg_structs->root_table, virt);
break;
default:
return;
phys = arch_page_map_gphys2phys(cpu_data, page_table_paddr);
if (phys == INVALID_PHYS_ADDR)
return NULL;
- err = page_map_create(hv_page_table, phys, PAGE_SIZE, page_virt,
+ err = page_map_create(&hv_paging_structs, phys, PAGE_SIZE, page_virt,
PAGE_READONLY_FLAGS, PAGE_DEFAULT_FLAGS,
PAGE_DIR_LEVELS, PAGE_MAP_NON_COHERENT);
if (err)
(unsigned long)pud4l_offset(pgd, 0, 0));
if (phys == INVALID_PHYS_ADDR)
return NULL;
- err = page_map_create(hv_page_table, phys, PAGE_SIZE, page_virt,
+ err = page_map_create(&hv_paging_structs, phys, PAGE_SIZE, page_virt,
PAGE_READONLY_FLAGS, PAGE_DEFAULT_FLAGS,
PAGE_DIR_LEVELS, PAGE_MAP_NON_COHERENT);
if (err)
(unsigned long)pmd_offset(pud, 0, 0));
if (phys == INVALID_PHYS_ADDR)
return NULL;
- err = page_map_create(hv_page_table, phys, PAGE_SIZE, page_virt,
+ err = page_map_create(&hv_paging_structs, phys, PAGE_SIZE, page_virt,
PAGE_READONLY_FLAGS, PAGE_DEFAULT_FLAGS,
PAGE_DIR_LEVELS, PAGE_MAP_NON_COHERENT);
if (err)
(unsigned long)pte_offset(pmd, 0, 0));
if (phys == INVALID_PHYS_ADDR)
return NULL;
- err = page_map_create(hv_page_table, phys, PAGE_SIZE,
+ err = page_map_create(&hv_paging_structs, phys, PAGE_SIZE,
page_virt, PAGE_READONLY_FLAGS,
PAGE_DEFAULT_FLAGS, PAGE_DIR_LEVELS,
PAGE_MAP_NON_COHERENT);
if (phys == INVALID_PHYS_ADDR)
return NULL;
- err = page_map_create(hv_page_table, phys, PAGE_SIZE, page_virt,
+ err = page_map_create(&hv_paging_structs, phys, PAGE_SIZE, page_virt,
flags, PAGE_DEFAULT_FLAGS, PAGE_DIR_LEVELS,
PAGE_MAP_NON_COHERENT);
if (err)
for (n = 0; n < remap_pool.used_pages; n++)
set_bit(n, remap_pool.used_bitmap);
- hv_page_table = page_alloc(&mem_pool, 1);
- if (!hv_page_table)
+ hv_paging_structs.root_table = page_alloc(&mem_pool, 1);
+ if (!hv_paging_structs.root_table)
goto error_nomem;
/* Replicate hypervisor mapping of Linux */
- err = page_map_create(hv_page_table,
+ err = page_map_create(&hv_paging_structs,
page_map_hvirt2phys(&hypervisor_header),
hypervisor_header.size,
(unsigned long)&hypervisor_header,
/* Make sure any remappings to the temporary regions can be performed
* without allocations of page table pages. */
- err = page_map_create(hv_page_table, 0,
+ err = page_map_create(&hv_paging_structs, 0,
remap_pool.used_pages * PAGE_SIZE,
TEMPORARY_MAPPING_BASE, PAGE_NONPRESENT_FLAGS,
PAGE_DEFAULT_FLAGS, PAGE_DIR_LEVELS,
return;
}
- error = page_map_create(hv_page_table,
+ error = page_map_create(&hv_paging_structs,
system_config->config_memory.phys_start,
size, (unsigned long)config_memory,
PAGE_READONLY_FLAGS, PAGE_DEFAULT_FLAGS,