void *memcpy(void *dest, const void *src, unsigned long n) { return NULL; }
void arch_dbg_write(const char *msg) {}
void arch_shutdown(void) {}
-unsigned long arch_page_map_gphys2phys(struct per_cpu *cpu_data,
- unsigned long gphys,
- unsigned long flags)
+unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
+ unsigned long gphys, unsigned long flags)
{ return INVALID_PHYS_ADDR; }
void arch_paging_init(void) { }
xapic_page = page_alloc(&remap_pool, 1);
if (!xapic_page)
return -ENOMEM;
- err = page_map_create(&hv_paging_structs, XAPIC_BASE,
- PAGE_SIZE, (unsigned long)xapic_page,
- PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
- PAGE_MAP_NON_COHERENT);
+ err = paging_create(&hv_paging_structs, XAPIC_BASE, PAGE_SIZE,
+ (unsigned long)xapic_page,
+ PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
+ PAGING_NON_COHERENT);
if (err)
return err;
apic_ops.read = read_xapic;
ioapic_page = page_alloc(&remap_pool, 1);
if (!ioapic_page)
return -ENOMEM;
- err = page_map_create(&hv_paging_structs, IOAPIC_BASE_ADDR, PAGE_SIZE,
- (unsigned long)ioapic_page,
- PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
- PAGE_MAP_NON_COHERENT);
+ err = paging_create(&hv_paging_structs, IOAPIC_BASE_ADDR, PAGE_SIZE,
+ (unsigned long)ioapic_page,
+ PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
+ PAGING_NON_COHERENT);
if (err)
return err;
ioapic = ioapic_page;
* and we have to map a new one now. */
if (current_page && ((pc & ~PAGE_MASK) != 0))
return current_page;
- return page_map_get_guest_pages(pg_structs, pc, 1,
- PAGE_READONLY_FLAGS);
+ return paging_get_guest_pages(pg_structs, pc, 1, PAGE_READONLY_FLAGS);
}
struct mmio_access mmio_parse(unsigned long pc,
X86_64_PAGING_COMMON,
.get_entry = x86_64_get_entry_l4,
/* set_terminal not valid */
- .get_phys = page_map_get_phys_invalid,
+ .get_phys = paging_get_phys_invalid,
.get_next_pt = x86_64_get_next_pt_l4,
},
{
/* swap CR3 */
cpu_data->linux_cr3 = read_cr3();
- write_cr3(page_map_hvirt2phys(hv_paging_structs.root_table));
+ write_cr3(paging_hvirt2phys(hv_paging_structs.root_table));
cpu_data->linux_efer = read_msr(MSR_EFER);
unsigned long vmxon_addr;
u8 ok;
- vmxon_addr = page_map_hvirt2phys(&cpu_data->vmxon_region);
+ vmxon_addr = paging_hvirt2phys(&cpu_data->vmxon_region);
asm volatile(
"vmxon (%1)\n\t"
"seta %0"
static bool vmcs_clear(struct per_cpu *cpu_data)
{
- unsigned long vmcs_addr = page_map_hvirt2phys(&cpu_data->vmcs);
+ unsigned long vmcs_addr = paging_hvirt2phys(&cpu_data->vmcs);
u8 ok;
asm volatile(
static bool vmcs_load(struct per_cpu *cpu_data)
{
- unsigned long vmcs_addr = page_map_hvirt2phys(&cpu_data->vmcs);
+ unsigned long vmcs_addr = paging_hvirt2phys(&cpu_data->vmcs);
u8 ok;
asm volatile(
return vmx_cell_init(&root_cell);
}
-unsigned long arch_page_map_gphys2phys(struct per_cpu *cpu_data,
- unsigned long gphys,
- unsigned long flags)
+unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
+ unsigned long gphys, unsigned long flags)
{
- return page_map_virt2phys(&cpu_data->cell->vmx.ept_structs, gphys,
- flags);
+ return paging_virt2phys(&cpu_data->cell->vmx.ept_structs, gphys,
+ flags);
}
int vmx_cell_init(struct cell *cell)
if (!cell->vmx.ept_structs.root_table)
return -ENOMEM;
- err = page_map_create(&cell->vmx.ept_structs,
- page_map_hvirt2phys(apic_access_page),
- PAGE_SIZE, XAPIC_BASE,
- EPT_FLAG_READ|EPT_FLAG_WRITE|EPT_FLAG_WB_TYPE,
- PAGE_MAP_NON_COHERENT);
+ err = paging_create(&cell->vmx.ept_structs,
+ paging_hvirt2phys(apic_access_page),
+ PAGE_SIZE, XAPIC_BASE,
+ EPT_FLAG_READ | EPT_FLAG_WRITE | EPT_FLAG_WB_TYPE,
+ PAGING_NON_COHERENT);
if (err) {
vmx_cell_exit(cell);
return err;
if (mem->flags & JAILHOUSE_MEM_EXECUTE)
flags |= EPT_FLAG_EXECUTE;
if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
- phys_start = page_map_hvirt2phys(&cell->comm_page);
+ phys_start = paging_hvirt2phys(&cell->comm_page);
- return page_map_create(&cell->vmx.ept_structs, phys_start, mem->size,
- mem->virt_start, flags, PAGE_MAP_NON_COHERENT);
+ return paging_create(&cell->vmx.ept_structs, phys_start, mem->size,
+ mem->virt_start, flags, PAGING_NON_COHERENT);
}
int vmx_unmap_memory_region(struct cell *cell,
const struct jailhouse_memory *mem)
{
- return page_map_destroy(&cell->vmx.ept_structs, mem->virt_start,
- mem->size, PAGE_MAP_NON_COHERENT);
+ return paging_destroy(&cell->vmx.ept_structs, mem->virt_start,
+ mem->size, PAGING_NON_COHERENT);
}
void vmx_cell_exit(struct cell *cell)
u32 pio_bitmap_size = cell->config->pio_bitmap_size;
u8 *b;
- page_map_destroy(&cell->vmx.ept_structs, XAPIC_BASE, PAGE_SIZE,
- PAGE_MAP_NON_COHERENT);
+ paging_destroy(&cell->vmx.ept_structs, XAPIC_BASE, PAGE_SIZE,
+ PAGING_NON_COHERENT);
if (root_cell.config->pio_bitmap_size < pio_bitmap_size)
pio_bitmap_size = root_cell.config->pio_bitmap_size;
bool ok = true;
io_bitmap = cell->vmx.io_bitmap;
- ok &= vmcs_write64(IO_BITMAP_A, page_map_hvirt2phys(io_bitmap));
+ ok &= vmcs_write64(IO_BITMAP_A, paging_hvirt2phys(io_bitmap));
ok &= vmcs_write64(IO_BITMAP_B,
- page_map_hvirt2phys(io_bitmap + PAGE_SIZE));
+ paging_hvirt2phys(io_bitmap + PAGE_SIZE));
ok &= vmcs_write64(EPT_POINTER,
- page_map_hvirt2phys(cell->vmx.ept_structs.root_table) |
+ paging_hvirt2phys(cell->vmx.ept_structs.root_table) |
EPT_TYPE_WRITEBACK | EPT_PAGE_WALK_LEN);
return ok;
val &= ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
ok &= vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, val);
- ok &= vmcs_write64(MSR_BITMAP, page_map_hvirt2phys(msr_bitmap));
+ ok &= vmcs_write64(MSR_BITMAP, paging_hvirt2phys(msr_bitmap));
val = read_msr(MSR_IA32_VMX_PROCBASED_CTLS2);
val |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
ok &= vmcs_write32(SECONDARY_VM_EXEC_CONTROL, val);
ok &= vmcs_write64(APIC_ACCESS_ADDR,
- page_map_hvirt2phys(apic_access_page));
+ paging_hvirt2phys(apic_access_page));
ok &= vmx_set_cell_config(cpu_data->cell);
struct vtd_entry inv_wait = {
.lo_word = VTD_REQ_INV_WAIT | VTD_INV_WAIT_SW |
VTD_INV_WAIT_FN | (1UL << VTD_INV_WAIT_SDATA_SHIFT),
- .hi_word = page_map_hvirt2phys(&completed),
+ .hi_word = paging_hvirt2phys(&completed),
};
unsigned int index;
!(inv_desc.lo_word & VTD_INV_WAIT_SW))
return -EINVAL;
- status_page = page_map_get_guest_pages(NULL, inv_desc.hi_word,
- 1, PAGE_DEFAULT_FLAGS);
+ status_page = paging_get_guest_pages(NULL, inv_desc.hi_word, 1,
+ PAGE_DEFAULT_FLAGS);
if (!status_page)
return -EINVAL;
if (reg == VTD_IQT_REG && is_write) {
while (unit->iqh != (*value & ~PAGE_MASK)) {
inv_desc_page =
- page_map_get_guest_pages(NULL, unit->iqa, 1,
- PAGE_READONLY_FLAGS);
+ paging_get_guest_pages(NULL, unit->iqa, 1,
+ PAGE_READONLY_FLAGS);
if (!inv_desc_page)
goto invalid_iq_entry;
/* Set root entry table pointer */
mmio_write64(reg_base + VTD_RTADDR_REG,
- page_map_hvirt2phys(root_entry_table));
+ paging_hvirt2phys(root_entry_table));
vtd_update_gcmd_reg(reg_base, VTD_GCMD_SRTP, 1);
/* Set interrupt remapping table pointer */
mmio_write64(reg_base + VTD_IRTA_REG,
- page_map_hvirt2phys(int_remap_table) |
+ paging_hvirt2phys(int_remap_table) |
(using_x2apic ? VTD_IRTA_EIME : 0) |
(int_remap_table_size_log2 - 1));
vtd_update_gcmd_reg(reg_base, VTD_GCMD_SIRTP, 1);
/* Setup and activate invalidation queue */
mmio_write64(reg_base + VTD_IQT_REG, 0);
- mmio_write64(reg_base + VTD_IQA_REG, page_map_hvirt2phys(inv_queue));
+ mmio_write64(reg_base + VTD_IQA_REG, paging_hvirt2phys(inv_queue));
vtd_update_gcmd_reg(reg_base, VTD_GCMD_QIE, 1);
vtd_submit_iq_request(reg_base, inv_queue, &inv_global_context);
reg_base = dmar_reg_base + n * PAGE_SIZE;
- err = page_map_create(&hv_paging_structs, base_addr, PAGE_SIZE,
- (unsigned long)reg_base,
- PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
- PAGE_MAP_NON_COHERENT);
+ err = paging_create(&hv_paging_structs, base_addr, PAGE_SIZE,
+ (unsigned long)reg_base,
+ PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
+ PAGING_NON_COHERENT);
if (err)
return err;
if (*root_entry_lo & VTD_ROOT_PRESENT) {
context_entry_table =
- page_map_phys2hvirt(*root_entry_lo & PAGE_MASK);
+ paging_phys2hvirt(*root_entry_lo & PAGE_MASK);
} else {
context_entry_table = page_alloc(&mem_pool, 1);
if (!context_entry_table)
goto error_nomem;
*root_entry_lo = VTD_ROOT_PRESENT |
- page_map_hvirt2phys(context_entry_table);
+ paging_hvirt2phys(context_entry_table);
flush_cache(root_entry_lo, sizeof(u64));
}
context_entry = &context_entry_table[PCI_DEVFN(bdf)];
context_entry->lo_word = VTD_CTX_PRESENT | VTD_CTX_TTYPE_MLP_UNTRANS |
- page_map_hvirt2phys(cell->vtd.pg_structs.root_table);
+ paging_hvirt2phys(cell->vtd.pg_structs.root_table);
context_entry->hi_word =
(dmar_pt_levels == 3 ? VTD_CTX_AGAW_39 : VTD_CTX_AGAW_48) |
(cell->id << VTD_CTX_DID_SHIFT);
if (dmar_units == 0)
return;
- context_entry_table = page_map_phys2hvirt(*root_entry_lo & PAGE_MASK);
+ context_entry_table = paging_phys2hvirt(*root_entry_lo & PAGE_MASK);
context_entry = &context_entry_table[PCI_DEVFN(bdf)];
context_entry->lo_word &= ~VTD_CTX_PRESENT;
if (mem->flags & JAILHOUSE_MEM_WRITE)
flags |= VTD_PAGE_WRITE;
- return page_map_create(&cell->vtd.pg_structs, mem->phys_start,
- mem->size, mem->virt_start, flags,
- PAGE_MAP_COHERENT);
+ return paging_create(&cell->vtd.pg_structs, mem->phys_start,
+ mem->size, mem->virt_start, flags,
+ PAGING_COHERENT);
}
int vtd_unmap_memory_region(struct cell *cell,
if (!(mem->flags & JAILHOUSE_MEM_DMA))
return 0;
- return page_map_destroy(&cell->vtd.pg_structs, mem->virt_start,
- mem->size, PAGE_MAP_COHERENT);
+ return paging_destroy(&cell->vtd.pg_structs, mem->virt_start,
+ mem->size, PAGING_COHERENT);
}
struct apic_irq_message
irte_addr = (unit->irta & VTD_IRTA_ADDR_MASK) +
remap_index * sizeof(union vtd_irte);
- irte_page = page_map_get_guest_pages(NULL, irte_addr, 1,
- PAGE_READONLY_FLAGS);
+ irte_page = paging_get_guest_pages(NULL, irte_addr, 1,
+ PAGE_READONLY_FLAGS);
if (!irte_page)
return irq_msg;
* until the hardware is in sync with the Linux state again.
*/
iqh = unit->iqh;
- root_inv_queue = page_map_get_guest_pages(NULL, unit->iqa, 1,
- PAGE_DEFAULT_FLAGS);
+ root_inv_queue = paging_get_guest_pages(NULL, unit->iqa, 1,
+ PAGE_DEFAULT_FLAGS);
if (root_inv_queue)
while (mmio_read64(reg_base + VTD_IQH_REG) != iqh)
vtd_submit_iq_request(reg_base, root_inv_queue, NULL);
}
cfg_pages = PAGES(cfg_page_offs + sizeof(struct jailhouse_cell_desc));
- cfg_mapping = page_map_get_guest_pages(NULL, config_address, cfg_pages,
- PAGE_READONLY_FLAGS);
+ cfg_mapping = paging_get_guest_pages(NULL, config_address, cfg_pages,
+ PAGE_READONLY_FLAGS);
if (!cfg_mapping) {
err = -ENOMEM;
goto err_resume;
goto err_resume;
}
- if (!page_map_get_guest_pages(NULL, config_address, cfg_pages,
- PAGE_READONLY_FLAGS)) {
+ if (!paging_get_guest_pages(NULL, config_address, cfg_pages,
+ PAGE_READONLY_FLAGS)) {
err = -ENOMEM;
goto err_resume;
}
printk("Created cell \"%s\"\n", cell->config->name);
- page_map_dump_stats("after cell creation");
+ paging_dump_stats("after cell creation");
cell_resume(cpu_data);
num_cells--;
page_free(&mem_pool, cell, cell->data_pages);
- page_map_dump_stats("after cell destruction");
+ paging_dump_stats("after cell destruction");
cell_reconfig_completed();
/*
* Jailhouse, a Linux-based partitioning hypervisor
*
- * Copyright (c) Siemens AG, 2013
+ * Copyright (c) Siemens AG, 2013, 2014
*
* Authors:
* Jan Kiszka <jan.kiszka@siemens.com>
unsigned long flags;
};
-enum page_map_coherent {
- PAGE_MAP_COHERENT,
- PAGE_MAP_NON_COHERENT,
+enum paging_coherent {
+ PAGING_COHERENT,
+ PAGING_NON_COHERENT,
};
typedef pt_entry_t page_table_t;
extern struct paging_structures hv_paging_structs;
-unsigned long page_map_get_phys_invalid(pt_entry_t pte, unsigned long virt);
+unsigned long paging_get_phys_invalid(pt_entry_t pte, unsigned long virt);
void *page_alloc(struct page_pool *pool, unsigned int num);
void page_free(struct page_pool *pool, void *first_page, unsigned int num);
-static inline unsigned long page_map_hvirt2phys(const volatile void *hvirt)
+static inline unsigned long paging_hvirt2phys(const volatile void *hvirt)
{
return (unsigned long)hvirt - page_offset;
}
-static inline void *page_map_phys2hvirt(unsigned long phys)
+static inline void *paging_phys2hvirt(unsigned long phys)
{
return (void *)phys + page_offset;
}
-unsigned long page_map_virt2phys(const struct paging_structures *pg_structs,
- unsigned long virt, unsigned long flags);
+unsigned long paging_virt2phys(const struct paging_structures *pg_structs,
+ unsigned long virt, unsigned long flags);
-unsigned long arch_page_map_gphys2phys(struct per_cpu *cpu_data,
- unsigned long gphys,
- unsigned long flags);
+unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
+ unsigned long gphys, unsigned long flags);
-int page_map_create(const struct paging_structures *pg_structs,
+int paging_create(const struct paging_structures *pg_structs,
unsigned long phys, unsigned long size, unsigned long virt,
- unsigned long flags, enum page_map_coherent coherent);
-int page_map_destroy(const struct paging_structures *pg_structs,
- unsigned long virt, unsigned long size,
- enum page_map_coherent coherent);
+ unsigned long flags, enum paging_coherent coherent);
+int paging_destroy(const struct paging_structures *pg_structs,
+ unsigned long virt, unsigned long size,
+ enum paging_coherent coherent);
-void *
-page_map_get_guest_pages(const struct guest_paging_structures *pg_structs,
- unsigned long gaddr, unsigned int num,
- unsigned long flags);
+void *paging_get_guest_pages(const struct guest_paging_structures *pg_structs,
+ unsigned long gaddr, unsigned int num,
+ unsigned long flags);
int paging_init(void);
void arch_paging_init(void);
-void page_map_dump_stats(const char *when);
+void paging_dump_stats(const char *when);
#endif /* !_JAILHOUSE_PAGING_H */
/*
* Jailhouse, a Linux-based partitioning hypervisor
*
- * Copyright (c) Siemens AG, 2013
+ * Copyright (c) Siemens AG, 2013, 2014
*
* Authors:
* Jan Kiszka <jan.kiszka@siemens.com>
struct paging_structures hv_paging_structs;
-unsigned long page_map_get_phys_invalid(pt_entry_t pte, unsigned long virt)
+unsigned long paging_get_phys_invalid(pt_entry_t pte, unsigned long virt)
{
return INVALID_PHYS_ADDR;
}
}
}
-unsigned long page_map_virt2phys(const struct paging_structures *pg_structs,
- unsigned long virt, unsigned long flags)
+unsigned long paging_virt2phys(const struct paging_structures *pg_structs,
+ unsigned long virt, unsigned long flags)
{
const struct paging *paging = pg_structs->root_paging;
page_table_t pt = pg_structs->root_table;
phys = paging->get_phys(pte, virt);
if (phys != INVALID_PHYS_ADDR)
return phys;
- pt = page_map_phys2hvirt(paging->get_next_pt(pte));
+ pt = paging_phys2hvirt(paging->get_next_pt(pte));
paging++;
}
}
-static void flush_pt_entry(pt_entry_t pte, enum page_map_coherent coherent)
+static void flush_pt_entry(pt_entry_t pte, enum paging_coherent coherent)
{
- if (coherent == PAGE_MAP_COHERENT)
+ if (coherent == PAGING_COHERENT)
flush_cache(pte, sizeof(*pte));
}
static int split_hugepage(const struct paging *paging, pt_entry_t pte,
- unsigned long virt, enum page_map_coherent coherent)
+ unsigned long virt, enum paging_coherent coherent)
{
unsigned long phys = paging->get_phys(pte, virt);
struct paging_structures sub_structs;
sub_structs.root_table = page_alloc(&mem_pool, 1);
if (!sub_structs.root_table)
return -ENOMEM;
- paging->set_next_pt(pte, page_map_hvirt2phys(sub_structs.root_table));
+ paging->set_next_pt(pte, paging_hvirt2phys(sub_structs.root_table));
flush_pt_entry(pte, coherent);
- return page_map_create(&sub_structs, phys, paging->page_size, virt,
- flags, coherent);
+ return paging_create(&sub_structs, phys, paging->page_size, virt,
+ flags, coherent);
}
-int page_map_create(const struct paging_structures *pg_structs,
- unsigned long phys, unsigned long size, unsigned long virt,
- unsigned long flags, enum page_map_coherent coherent)
+int paging_create(const struct paging_structures *pg_structs,
+ unsigned long phys, unsigned long size, unsigned long virt,
+ unsigned long flags, enum paging_coherent coherent)
{
phys &= PAGE_MASK;
virt &= PAGE_MASK;
* boundaries.
*/
if (paging->page_size > PAGE_SIZE)
- page_map_destroy(pg_structs, virt,
- paging->page_size,
- coherent);
+ paging_destroy(pg_structs, virt,
+ paging->page_size,
+ coherent);
paging->set_terminal(pte, phys, flags);
flush_pt_entry(pte, coherent);
break;
coherent);
if (err)
return err;
- pt = page_map_phys2hvirt(
+ pt = paging_phys2hvirt(
paging->get_next_pt(pte));
} else {
pt = page_alloc(&mem_pool, 1);
if (!pt)
return -ENOMEM;
paging->set_next_pt(pte,
- page_map_hvirt2phys(pt));
+ paging_hvirt2phys(pt));
flush_pt_entry(pte, coherent);
}
paging++;
return 0;
}
-int page_map_destroy(const struct paging_structures *pg_structs,
- unsigned long virt, unsigned long size,
- enum page_map_coherent coherent)
+int paging_destroy(const struct paging_structures *pg_structs,
+ unsigned long virt, unsigned long size,
+ enum paging_coherent coherent)
{
size = PAGE_ALIGN(size);
} else
break;
}
- pt[++n] = page_map_phys2hvirt(
- paging->get_next_pt(pte));
+ pt[++n] = paging_phys2hvirt(paging->get_next_pt(pte));
paging++;
}
/* advance by page size of current level paging */
}
static unsigned long
-page_map_gvirt2gphys(const struct guest_paging_structures *pg_structs,
- unsigned long gvirt, unsigned long tmp_page,
- unsigned long flags)
+paging_gvirt2gphys(const struct guest_paging_structures *pg_structs,
+ unsigned long gvirt, unsigned long tmp_page,
+ unsigned long flags)
{
unsigned long page_table_gphys = pg_structs->root_table_gphys;
const struct paging *paging = pg_structs->root_paging;
while (1) {
/* map guest page table */
- phys = arch_page_map_gphys2phys(this_cpu_data(),
+ phys = arch_paging_gphys2phys(this_cpu_data(),
page_table_gphys,
PAGE_READONLY_FLAGS);
if (phys == INVALID_PHYS_ADDR)
return INVALID_PHYS_ADDR;
- err = page_map_create(&hv_paging_structs, phys,
- PAGE_SIZE, tmp_page,
- PAGE_READONLY_FLAGS,
- PAGE_MAP_NON_COHERENT);
+ err = paging_create(&hv_paging_structs, phys, PAGE_SIZE,
+ tmp_page, PAGE_READONLY_FLAGS,
+ PAGING_NON_COHERENT);
if (err)
return INVALID_PHYS_ADDR;
}
}
-void *
-page_map_get_guest_pages(const struct guest_paging_structures *pg_structs,
- unsigned long gaddr, unsigned int num,
- unsigned long flags)
+void *paging_get_guest_pages(const struct guest_paging_structures *pg_structs,
+ unsigned long gaddr, unsigned int num,
+ unsigned long flags)
{
unsigned long page_base = TEMPORARY_MAPPING_BASE +
this_cpu_id() * PAGE_SIZE * NUM_TEMPORARY_PAGES;
return NULL;
while (num-- > 0) {
if (pg_structs)
- gphys = page_map_gvirt2gphys(pg_structs, gaddr,
- page_virt, flags);
+ gphys = paging_gvirt2gphys(pg_structs, gaddr,
+ page_virt, flags);
else
gphys = gaddr;
- phys = arch_page_map_gphys2phys(this_cpu_data(), gphys, flags);
+ phys = arch_paging_gphys2phys(this_cpu_data(), gphys, flags);
if (phys == INVALID_PHYS_ADDR)
return NULL;
/* map guest page */
- err = page_map_create(&hv_paging_structs, phys, PAGE_SIZE,
- page_virt, flags, PAGE_MAP_NON_COHERENT);
+ err = paging_create(&hv_paging_structs, phys, PAGE_SIZE,
+ page_virt, flags, PAGING_NON_COHERENT);
if (err)
return NULL;
gaddr += PAGE_SIZE;
goto error_nomem;
/* Replicate hypervisor mapping of Linux */
- err = page_map_create(&hv_paging_structs,
- page_map_hvirt2phys(&hypervisor_header),
- system_config->hypervisor_memory.size,
- (unsigned long)&hypervisor_header,
- PAGE_DEFAULT_FLAGS, PAGE_MAP_NON_COHERENT);
+ err = paging_create(&hv_paging_structs,
+ paging_hvirt2phys(&hypervisor_header),
+ system_config->hypervisor_memory.size,
+ (unsigned long)&hypervisor_header,
+ PAGE_DEFAULT_FLAGS, PAGING_NON_COHERENT);
if (err)
goto error_nomem;
/* Make sure any remappings to the temporary regions can be performed
* without allocations of page table pages. */
- err = page_map_create(&hv_paging_structs, 0,
- remap_pool.used_pages * PAGE_SIZE,
- TEMPORARY_MAPPING_BASE, PAGE_NONPRESENT_FLAGS,
- PAGE_MAP_NON_COHERENT);
+ err = paging_create(&hv_paging_structs, 0,
+ remap_pool.used_pages * PAGE_SIZE,
+ TEMPORARY_MAPPING_BASE, PAGE_NONPRESENT_FLAGS,
+ PAGING_NON_COHERENT);
if (err)
goto error_nomem;
return -ENOMEM;
}
-void page_map_dump_stats(const char *when)
+void paging_dump_stats(const char *when)
{
printk("Page pool usage %s: mem %d/%d, remap %d/%d\n", when,
mem_pool.used_pages, mem_pool.pages,
if (!pci_space)
return -ENOMEM;
- return page_map_create(&hv_paging_structs, mmcfg_start, mmcfg_size,
- (unsigned long)pci_space,
- PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
- PAGE_MAP_NON_COHERENT);
+ return paging_create(&hv_paging_structs, mmcfg_start, mmcfg_size,
+ (unsigned long)pci_space,
+ PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
+ PAGING_NON_COHERENT);
}
static int pci_msix_access_handler(const struct cell *cell, bool is_write,
goto error_remove_dev;
}
- err = page_map_create(&hv_paging_structs,
- device->info->msix_address, size,
- (unsigned long)device->msix_table,
- PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
- PAGE_MAP_NON_COHERENT);
+ err = paging_create(&hv_paging_structs,
+ device->info->msix_address, size,
+ (unsigned long)device->msix_table,
+ PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
+ PAGING_NON_COHERENT);
if (err)
goto error_page_free;
return;
/* cannot fail, destruction of same size as construction */
- page_map_destroy(&hv_paging_structs, (unsigned long)device->msix_table,
- size, PAGE_MAP_NON_COHERENT);
+ paging_destroy(&hv_paging_structs, (unsigned long)device->msix_table,
+ size, PAGING_NON_COHERENT);
page_free(&remap_pool, device->msix_table, size / PAGE_SIZE);
prev_msix_device = device->cell->msix_device_list;
* pages for Linux. This allows to fault-in the hypervisor region into
* Linux' page table before shutdown without triggering violations.
*/
- hv_page.phys_start = page_map_hvirt2phys(empty_page);
- hv_page.virt_start = page_map_hvirt2phys(&hypervisor_header);
+ hv_page.phys_start = paging_hvirt2phys(empty_page);
+ hv_page.virt_start = paging_hvirt2phys(&hypervisor_header);
hv_page.size = PAGE_SIZE;
hv_page.flags = JAILHOUSE_MEM_READ;
core_and_percpu_size = (unsigned long)system_config - JAILHOUSE_BASE;
hv_page.virt_start += PAGE_SIZE;
}
- page_map_dump_stats("after early setup");
+ paging_dump_stats("after early setup");
printk("Initializing processors:\n");
}
arch_config_commit(cpu_data, &root_cell);
- page_map_dump_stats("after late setup");
+ paging_dump_stats("after late setup");
}
int entry(unsigned int cpu_id, struct per_cpu *cpu_data)