rtime.felk.cvut.cz Git - jailhouse.git/commitdiff
core: Rename page_map_* -> paging_*
author Jan Kiszka <jan.kiszka@siemens.com>
Sat, 23 Aug 2014 12:50:16 +0000 (14:50 +0200)
committer Jan Kiszka <jan.kiszka@siemens.com>
Thu, 25 Sep 2014 13:47:23 +0000 (15:47 +0200)
The module is already called "paging", so let's name its functions
accordingly. No functional changes.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
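
For quick orientation, the block below condenses the rename into hypothetical compatibility defines. No such shims are added by this commit or exist in the tree; this is only a compact way to collect the old and new names from the hunks that follow.

/*
 * Illustrative only -- not part of this commit: the renames it performs,
 * written as the compatibility defines one could derive from the hunks below.
 */
#define page_map_create			paging_create
#define page_map_destroy		paging_destroy
#define page_map_virt2phys		paging_virt2phys
#define page_map_hvirt2phys		paging_hvirt2phys
#define page_map_phys2hvirt		paging_phys2hvirt
#define page_map_get_guest_pages	paging_get_guest_pages
#define page_map_get_phys_invalid	paging_get_phys_invalid
#define page_map_gvirt2gphys		paging_gvirt2gphys	/* static helper in paging.c */
#define page_map_dump_stats		paging_dump_stats
#define arch_page_map_gphys2phys	arch_paging_gphys2phys
#define page_map_coherent		paging_coherent		/* enum tag */
#define PAGE_MAP_COHERENT		PAGING_COHERENT
#define PAGE_MAP_NON_COHERENT		PAGING_NON_COHERENT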
13 files changed:
hypervisor/arch/arm/setup.c
hypervisor/arch/x86/apic.c
hypervisor/arch/x86/ioapic.c
hypervisor/arch/x86/mmio.c
hypervisor/arch/x86/paging.c
hypervisor/arch/x86/setup.c
hypervisor/arch/x86/vmx.c
hypervisor/arch/x86/vtd.c
hypervisor/control.c
hypervisor/include/jailhouse/paging.h
hypervisor/paging.c
hypervisor/pci.c
hypervisor/setup.c

diff --git a/hypervisor/arch/arm/setup.c b/hypervisor/arch/arm/setup.c
index e0fc7322710ef7fb54f89911ae74fcccb618bd57..9eb68ba08fbadc9f003320ff7511b892e7416d96 100644
@@ -63,9 +63,8 @@ void arch_config_commit(struct per_cpu *cpu_data,
 void *memcpy(void *dest, const void *src, unsigned long n) { return NULL; }
 void arch_dbg_write(const char *msg) {}
 void arch_shutdown(void) {}
-unsigned long arch_page_map_gphys2phys(struct per_cpu *cpu_data,
-                                      unsigned long gphys,
-                                      unsigned long flags)
+unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
+                                    unsigned long gphys, unsigned long flags)
 { return INVALID_PHYS_ADDR; }
 void arch_paging_init(void) { }
 
diff --git a/hypervisor/arch/x86/apic.c b/hypervisor/arch/x86/apic.c
index d64ec3b316ebf5f520a455b3509fed0d364276b1..7a4714d6a781fb2837f1c6b077f9fec108b7a013 100644
@@ -149,10 +149,10 @@ int apic_init(void)
                xapic_page = page_alloc(&remap_pool, 1);
                if (!xapic_page)
                        return -ENOMEM;
-               err = page_map_create(&hv_paging_structs, XAPIC_BASE,
-                                     PAGE_SIZE, (unsigned long)xapic_page,
-                                     PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
-                                     PAGE_MAP_NON_COHERENT);
+               err = paging_create(&hv_paging_structs, XAPIC_BASE, PAGE_SIZE,
+                                   (unsigned long)xapic_page,
+                                   PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
+                                   PAGING_NON_COHERENT);
                if (err)
                        return err;
                apic_ops.read = read_xapic;
diff --git a/hypervisor/arch/x86/ioapic.c b/hypervisor/arch/x86/ioapic.c
index 10a44c665694f746b1e801053654be99d4af6657..1ece8d8efc2b89ba6c7e0face36daa503f531178 100644
@@ -175,10 +175,10 @@ int ioapic_init(void)
        ioapic_page = page_alloc(&remap_pool, 1);
        if (!ioapic_page)
                return -ENOMEM;
-       err = page_map_create(&hv_paging_structs, IOAPIC_BASE_ADDR, PAGE_SIZE,
-                             (unsigned long)ioapic_page,
-                             PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
-                             PAGE_MAP_NON_COHERENT);
+       err = paging_create(&hv_paging_structs, IOAPIC_BASE_ADDR, PAGE_SIZE,
+                           (unsigned long)ioapic_page,
+                           PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
+                           PAGING_NON_COHERENT);
        if (err)
                return err;
        ioapic = ioapic_page;
diff --git a/hypervisor/arch/x86/mmio.c b/hypervisor/arch/x86/mmio.c
index 20748288cee8631986c045514f9d970b8159be46..41d55d2830926669180b7d1a4f25b692d88ef3fa 100644
@@ -40,8 +40,7 @@ static u8 *map_code_page(const struct guest_paging_structures *pg_structs,
         * and we have to map a new one now. */
        if (current_page && ((pc & ~PAGE_MASK) != 0))
                return current_page;
-       return page_map_get_guest_pages(pg_structs, pc, 1,
-                                       PAGE_READONLY_FLAGS);
+       return paging_get_guest_pages(pg_structs, pc, 1, PAGE_READONLY_FLAGS);
 }
 
 struct mmio_access mmio_parse(unsigned long pc,
diff --git a/hypervisor/arch/x86/paging.c b/hypervisor/arch/x86/paging.c
index db0f916108dc979a320b9ad134489b224c539911..ee102a60ca6e98c57ed108b7793ff7521282e8c6 100644
@@ -134,7 +134,7 @@ const struct paging x86_64_paging[] = {
                X86_64_PAGING_COMMON,
                .get_entry      = x86_64_get_entry_l4,
                /* set_terminal not valid */
-               .get_phys       = page_map_get_phys_invalid,
+               .get_phys       = paging_get_phys_invalid,
                .get_next_pt    = x86_64_get_next_pt_l4,
        },
        {
diff --git a/hypervisor/arch/x86/setup.c b/hypervisor/arch/x86/setup.c
index 7a6af01b486224fb3acee1d4d78d39c42b54950a..bacf65360ad91a8809b6055e73b382be6b8e404b 100644
@@ -188,7 +188,7 @@ int arch_cpu_init(struct per_cpu *cpu_data)
 
        /* swap CR3 */
        cpu_data->linux_cr3 = read_cr3();
-       write_cr3(page_map_hvirt2phys(hv_paging_structs.root_table));
+       write_cr3(paging_hvirt2phys(hv_paging_structs.root_table));
 
        cpu_data->linux_efer = read_msr(MSR_EFER);
 
diff --git a/hypervisor/arch/x86/vmx.c b/hypervisor/arch/x86/vmx.c
index b4d6eb0a5e87106efbff62c3691e8a5649ca3268..99ee9dde390782c06c439dd869a4dafb3ca2f7d5 100644
@@ -73,7 +73,7 @@ static bool vmxon(struct per_cpu *cpu_data)
        unsigned long vmxon_addr;
        u8 ok;
 
-       vmxon_addr = page_map_hvirt2phys(&cpu_data->vmxon_region);
+       vmxon_addr = paging_hvirt2phys(&cpu_data->vmxon_region);
        asm volatile(
                "vmxon (%1)\n\t"
                "seta %0"
@@ -85,7 +85,7 @@ static bool vmxon(struct per_cpu *cpu_data)
 
 static bool vmcs_clear(struct per_cpu *cpu_data)
 {
-       unsigned long vmcs_addr = page_map_hvirt2phys(&cpu_data->vmcs);
+       unsigned long vmcs_addr = paging_hvirt2phys(&cpu_data->vmcs);
        u8 ok;
 
        asm volatile(
@@ -99,7 +99,7 @@ static bool vmcs_clear(struct per_cpu *cpu_data)
 
 static bool vmcs_load(struct per_cpu *cpu_data)
 {
-       unsigned long vmcs_addr = page_map_hvirt2phys(&cpu_data->vmcs);
+       unsigned long vmcs_addr = paging_hvirt2phys(&cpu_data->vmcs);
        u8 ok;
 
        asm volatile(
@@ -253,12 +253,11 @@ int vmx_init(void)
        return vmx_cell_init(&root_cell);
 }
 
-unsigned long arch_page_map_gphys2phys(struct per_cpu *cpu_data,
-                                      unsigned long gphys,
-                                      unsigned long flags)
+unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
+                                    unsigned long gphys, unsigned long flags)
 {
-       return page_map_virt2phys(&cpu_data->cell->vmx.ept_structs, gphys,
-                                 flags);
+       return paging_virt2phys(&cpu_data->cell->vmx.ept_structs, gphys,
+                               flags);
 }
 
 int vmx_cell_init(struct cell *cell)
@@ -280,11 +279,11 @@ int vmx_cell_init(struct cell *cell)
        if (!cell->vmx.ept_structs.root_table)
                return -ENOMEM;
 
-       err = page_map_create(&cell->vmx.ept_structs,
-                             page_map_hvirt2phys(apic_access_page),
-                             PAGE_SIZE, XAPIC_BASE,
-                             EPT_FLAG_READ|EPT_FLAG_WRITE|EPT_FLAG_WB_TYPE,
-                             PAGE_MAP_NON_COHERENT);
+       err = paging_create(&cell->vmx.ept_structs,
+                           paging_hvirt2phys(apic_access_page),
+                           PAGE_SIZE, XAPIC_BASE,
+                           EPT_FLAG_READ | EPT_FLAG_WRITE|EPT_FLAG_WB_TYPE,
+                           PAGING_NON_COHERENT);
        if (err) {
                vmx_cell_exit(cell);
                return err;
@@ -338,17 +337,17 @@ int vmx_map_memory_region(struct cell *cell,
        if (mem->flags & JAILHOUSE_MEM_EXECUTE)
                flags |= EPT_FLAG_EXECUTE;
        if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
-               phys_start = page_map_hvirt2phys(&cell->comm_page);
+               phys_start = paging_hvirt2phys(&cell->comm_page);
 
-       return page_map_create(&cell->vmx.ept_structs, phys_start, mem->size,
-                              mem->virt_start, flags, PAGE_MAP_NON_COHERENT);
+       return paging_create(&cell->vmx.ept_structs, phys_start, mem->size,
+                            mem->virt_start, flags, PAGING_NON_COHERENT);
 }
 
 int vmx_unmap_memory_region(struct cell *cell,
                            const struct jailhouse_memory *mem)
 {
-       return page_map_destroy(&cell->vmx.ept_structs, mem->virt_start,
-                               mem->size, PAGE_MAP_NON_COHERENT);
+       return paging_destroy(&cell->vmx.ept_structs, mem->virt_start,
+                             mem->size, PAGING_NON_COHERENT);
 }
 
 void vmx_cell_exit(struct cell *cell)
@@ -359,8 +358,8 @@ void vmx_cell_exit(struct cell *cell)
        u32 pio_bitmap_size = cell->config->pio_bitmap_size;
        u8 *b;
 
-       page_map_destroy(&cell->vmx.ept_structs, XAPIC_BASE, PAGE_SIZE,
-                        PAGE_MAP_NON_COHERENT);
+       paging_destroy(&cell->vmx.ept_structs, XAPIC_BASE, PAGE_SIZE,
+                      PAGING_NON_COHERENT);
 
        if (root_cell.config->pio_bitmap_size < pio_bitmap_size)
                pio_bitmap_size = root_cell.config->pio_bitmap_size;
@@ -437,12 +436,12 @@ static bool vmx_set_cell_config(struct cell *cell)
        bool ok = true;
 
        io_bitmap = cell->vmx.io_bitmap;
-       ok &= vmcs_write64(IO_BITMAP_A, page_map_hvirt2phys(io_bitmap));
+       ok &= vmcs_write64(IO_BITMAP_A, paging_hvirt2phys(io_bitmap));
        ok &= vmcs_write64(IO_BITMAP_B,
-                          page_map_hvirt2phys(io_bitmap + PAGE_SIZE));
+                          paging_hvirt2phys(io_bitmap + PAGE_SIZE));
 
        ok &= vmcs_write64(EPT_POINTER,
-                       page_map_hvirt2phys(cell->vmx.ept_structs.root_table) |
+                       paging_hvirt2phys(cell->vmx.ept_structs.root_table) |
                        EPT_TYPE_WRITEBACK | EPT_PAGE_WALK_LEN);
 
        return ok;
@@ -555,7 +554,7 @@ static bool vmcs_setup(struct per_cpu *cpu_data)
        val &= ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
        ok &= vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, val);
 
-       ok &= vmcs_write64(MSR_BITMAP, page_map_hvirt2phys(msr_bitmap));
+       ok &= vmcs_write64(MSR_BITMAP, paging_hvirt2phys(msr_bitmap));
 
        val = read_msr(MSR_IA32_VMX_PROCBASED_CTLS2);
        val |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
@@ -564,7 +563,7 @@ static bool vmcs_setup(struct per_cpu *cpu_data)
        ok &= vmcs_write32(SECONDARY_VM_EXEC_CONTROL, val);
 
        ok &= vmcs_write64(APIC_ACCESS_ADDR,
-                          page_map_hvirt2phys(apic_access_page));
+                          paging_hvirt2phys(apic_access_page));
 
        ok &= vmx_set_cell_config(cpu_data->cell);
 
diff --git a/hypervisor/arch/x86/vtd.c b/hypervisor/arch/x86/vtd.c
index f741b23b217959adbc60e6b5452f201f7b417070..002b7c52f8b08e1a4d5ef88864989ed90959dd14 100644
@@ -84,7 +84,7 @@ static void vtd_submit_iq_request(void *reg_base, void *inv_queue,
        struct vtd_entry inv_wait = {
                .lo_word = VTD_REQ_INV_WAIT | VTD_INV_WAIT_SW |
                        VTD_INV_WAIT_FN | (1UL << VTD_INV_WAIT_SDATA_SHIFT),
-               .hi_word = page_map_hvirt2phys(&completed),
+               .hi_word = paging_hvirt2phys(&completed),
        };
        unsigned int index;
 
@@ -277,8 +277,8 @@ static int vtd_emulate_qi_request(unsigned int unit_no,
                    !(inv_desc.lo_word & VTD_INV_WAIT_SW))
                        return -EINVAL;
 
-               status_page = page_map_get_guest_pages(NULL, inv_desc.hi_word,
-                                                      1, PAGE_DEFAULT_FLAGS);
+               status_page = paging_get_guest_pages(NULL, inv_desc.hi_word, 1,
+                                                    PAGE_DEFAULT_FLAGS);
                if (!status_page)
                        return -EINVAL;
 
@@ -308,8 +308,8 @@ static int vtd_unit_access_handler(unsigned int unit_no, bool is_write,
        if (reg == VTD_IQT_REG && is_write) {
                while (unit->iqh != (*value & ~PAGE_MASK)) {
                        inv_desc_page =
-                               page_map_get_guest_pages(NULL, unit->iqa, 1,
-                                                        PAGE_READONLY_FLAGS);
+                               paging_get_guest_pages(NULL, unit->iqa, 1,
+                                                      PAGE_READONLY_FLAGS);
                        if (!inv_desc_page)
                                goto invalid_iq_entry;
 
@@ -374,19 +374,19 @@ static void vtd_init_unit(void *reg_base, void *inv_queue)
 
        /* Set root entry table pointer */
        mmio_write64(reg_base + VTD_RTADDR_REG,
-                    page_map_hvirt2phys(root_entry_table));
+                    paging_hvirt2phys(root_entry_table));
        vtd_update_gcmd_reg(reg_base, VTD_GCMD_SRTP, 1);
 
        /* Set interrupt remapping table pointer */
        mmio_write64(reg_base + VTD_IRTA_REG,
-                    page_map_hvirt2phys(int_remap_table) |
+                    paging_hvirt2phys(int_remap_table) |
                     (using_x2apic ? VTD_IRTA_EIME : 0) |
                     (int_remap_table_size_log2 - 1));
        vtd_update_gcmd_reg(reg_base, VTD_GCMD_SIRTP, 1);
 
        /* Setup and activate invalidation queue */
        mmio_write64(reg_base + VTD_IQT_REG, 0);
-       mmio_write64(reg_base + VTD_IQA_REG, page_map_hvirt2phys(inv_queue));
+       mmio_write64(reg_base + VTD_IQA_REG, paging_hvirt2phys(inv_queue));
        vtd_update_gcmd_reg(reg_base, VTD_GCMD_QIE, 1);
 
        vtd_submit_iq_request(reg_base, inv_queue, &inv_global_context);
@@ -467,10 +467,10 @@ int vtd_init(void)
 
                reg_base = dmar_reg_base + n * PAGE_SIZE;
 
-               err = page_map_create(&hv_paging_structs, base_addr, PAGE_SIZE,
-                                     (unsigned long)reg_base,
-                                     PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
-                                     PAGE_MAP_NON_COHERENT);
+               err = paging_create(&hv_paging_structs, base_addr, PAGE_SIZE,
+                                   (unsigned long)reg_base,
+                                   PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
+                                   PAGING_NON_COHERENT);
                if (err)
                        return err;
 
@@ -650,19 +650,19 @@ int vtd_add_pci_device(struct cell *cell, struct pci_device *device)
 
        if (*root_entry_lo & VTD_ROOT_PRESENT) {
                context_entry_table =
-                       page_map_phys2hvirt(*root_entry_lo & PAGE_MASK);
+                       paging_phys2hvirt(*root_entry_lo & PAGE_MASK);
        } else {
                context_entry_table = page_alloc(&mem_pool, 1);
                if (!context_entry_table)
                        goto error_nomem;
                *root_entry_lo = VTD_ROOT_PRESENT |
-                       page_map_hvirt2phys(context_entry_table);
+                       paging_hvirt2phys(context_entry_table);
                flush_cache(root_entry_lo, sizeof(u64));
        }
 
        context_entry = &context_entry_table[PCI_DEVFN(bdf)];
        context_entry->lo_word = VTD_CTX_PRESENT | VTD_CTX_TTYPE_MLP_UNTRANS |
-               page_map_hvirt2phys(cell->vtd.pg_structs.root_table);
+               paging_hvirt2phys(cell->vtd.pg_structs.root_table);
        context_entry->hi_word =
                (dmar_pt_levels == 3 ? VTD_CTX_AGAW_39 : VTD_CTX_AGAW_48) |
                (cell->id << VTD_CTX_DID_SHIFT);
@@ -687,7 +687,7 @@ void vtd_remove_pci_device(struct pci_device *device)
        if (dmar_units == 0)
                return;
 
-       context_entry_table = page_map_phys2hvirt(*root_entry_lo & PAGE_MASK);
+       context_entry_table = paging_phys2hvirt(*root_entry_lo & PAGE_MASK);
        context_entry = &context_entry_table[PCI_DEVFN(bdf)];
 
        context_entry->lo_word &= ~VTD_CTX_PRESENT;
@@ -756,9 +756,9 @@ int vtd_map_memory_region(struct cell *cell,
        if (mem->flags & JAILHOUSE_MEM_WRITE)
                flags |= VTD_PAGE_WRITE;
 
-       return page_map_create(&cell->vtd.pg_structs, mem->phys_start,
-                              mem->size, mem->virt_start, flags,
-                              PAGE_MAP_COHERENT);
+       return paging_create(&cell->vtd.pg_structs, mem->phys_start,
+                            mem->size, mem->virt_start, flags,
+                            PAGING_COHERENT);
 }
 
 int vtd_unmap_memory_region(struct cell *cell,
@@ -771,8 +771,8 @@ int vtd_unmap_memory_region(struct cell *cell,
        if (!(mem->flags & JAILHOUSE_MEM_DMA))
                return 0;
 
-       return page_map_destroy(&cell->vtd.pg_structs, mem->virt_start,
-                               mem->size, PAGE_MAP_COHERENT);
+       return paging_destroy(&cell->vtd.pg_structs, mem->virt_start,
+                             mem->size, PAGING_COHERENT);
 }
 
 struct apic_irq_message
@@ -800,8 +800,8 @@ vtd_get_remapped_root_int(unsigned int iommu, u16 device_id,
 
        irte_addr = (unit->irta & VTD_IRTA_ADDR_MASK) +
                remap_index * sizeof(union vtd_irte);
-       irte_page = page_map_get_guest_pages(NULL, irte_addr, 1,
-                                            PAGE_READONLY_FLAGS);
+       irte_page = paging_get_guest_pages(NULL, irte_addr, 1,
+                                          PAGE_READONLY_FLAGS);
        if (!irte_page)
                return irq_msg;
 
@@ -959,8 +959,8 @@ static void vtd_restore_ir(unsigned int unit_no, void *reg_base)
         * until the hardware is in sync with the Linux state again.
         */
        iqh = unit->iqh;
-       root_inv_queue = page_map_get_guest_pages(NULL, unit->iqa, 1,
-                                                 PAGE_DEFAULT_FLAGS);
+       root_inv_queue = paging_get_guest_pages(NULL, unit->iqa, 1,
+                                               PAGE_DEFAULT_FLAGS);
        if (root_inv_queue)
                while (mmio_read64(reg_base + VTD_IQH_REG) != iqh)
                        vtd_submit_iq_request(reg_base, root_inv_queue, NULL);
diff --git a/hypervisor/control.c b/hypervisor/control.c
index 7bc2f5846ef912f4cc592f8e3afcfa3a481376cc..916b7005c73275137def276dd67a9624fc349fe1 100644
@@ -310,8 +310,8 @@ static int cell_create(struct per_cpu *cpu_data, unsigned long config_address)
        }
 
        cfg_pages = PAGES(cfg_page_offs + sizeof(struct jailhouse_cell_desc));
-       cfg_mapping = page_map_get_guest_pages(NULL, config_address, cfg_pages,
-                                              PAGE_READONLY_FLAGS);
+       cfg_mapping = paging_get_guest_pages(NULL, config_address, cfg_pages,
+                                            PAGE_READONLY_FLAGS);
        if (!cfg_mapping) {
                err = -ENOMEM;
                goto err_resume;
@@ -332,8 +332,8 @@ static int cell_create(struct per_cpu *cpu_data, unsigned long config_address)
                goto err_resume;
        }
 
-       if (!page_map_get_guest_pages(NULL, config_address, cfg_pages,
-                                     PAGE_READONLY_FLAGS)) {
+       if (!paging_get_guest_pages(NULL, config_address, cfg_pages,
+                                   PAGE_READONLY_FLAGS)) {
                err = -ENOMEM;
                goto err_resume;
        }
@@ -417,7 +417,7 @@ static int cell_create(struct per_cpu *cpu_data, unsigned long config_address)
 
        printk("Created cell \"%s\"\n", cell->config->name);
 
-       page_map_dump_stats("after cell creation");
+       paging_dump_stats("after cell creation");
 
        cell_resume(cpu_data);
 
@@ -582,7 +582,7 @@ static int cell_destroy(struct per_cpu *cpu_data, unsigned long id)
        num_cells--;
 
        page_free(&mem_pool, cell, cell->data_pages);
-       page_map_dump_stats("after cell destruction");
+       paging_dump_stats("after cell destruction");
 
        cell_reconfig_completed();
 
diff --git a/hypervisor/include/jailhouse/paging.h b/hypervisor/include/jailhouse/paging.h
index b7341ad19123bd698575f0512e6448778fa14820..fe95405c5167c63b1dcf940176dd744ff9dd0082 100644
@@ -1,7 +1,7 @@
 /*
  * Jailhouse, a Linux-based partitioning hypervisor
  *
- * Copyright (c) Siemens AG, 2013
+ * Copyright (c) Siemens AG, 2013, 2014
  *
  * Authors:
  *  Jan Kiszka <jan.kiszka@siemens.com>
@@ -30,9 +30,9 @@ struct page_pool {
        unsigned long flags;
 };
 
-enum page_map_coherent {
-       PAGE_MAP_COHERENT,
-       PAGE_MAP_NON_COHERENT,
+enum paging_coherent {
+       PAGING_COHERENT,
+       PAGING_NON_COHERENT,
 };
 
 typedef pt_entry_t page_table_t;
@@ -91,42 +91,40 @@ extern struct page_pool remap_pool;
 
 extern struct paging_structures hv_paging_structs;
 
-unsigned long page_map_get_phys_invalid(pt_entry_t pte, unsigned long virt);
+unsigned long paging_get_phys_invalid(pt_entry_t pte, unsigned long virt);
 
 void *page_alloc(struct page_pool *pool, unsigned int num);
 void page_free(struct page_pool *pool, void *first_page, unsigned int num);
 
-static inline unsigned long page_map_hvirt2phys(const volatile void *hvirt)
+static inline unsigned long paging_hvirt2phys(const volatile void *hvirt)
 {
        return (unsigned long)hvirt - page_offset;
 }
 
-static inline void *page_map_phys2hvirt(unsigned long phys)
+static inline void *paging_phys2hvirt(unsigned long phys)
 {
        return (void *)phys + page_offset;
 }
 
-unsigned long page_map_virt2phys(const struct paging_structures *pg_structs,
-                                unsigned long virt, unsigned long flags);
+unsigned long paging_virt2phys(const struct paging_structures *pg_structs,
+                              unsigned long virt, unsigned long flags);
 
-unsigned long arch_page_map_gphys2phys(struct per_cpu *cpu_data,
-                                      unsigned long gphys,
-                                      unsigned long flags);
+unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
+                                    unsigned long gphys, unsigned long flags);
 
-int page_map_create(const struct paging_structures *pg_structs,
+int paging_create(const struct paging_structures *pg_structs,
                    unsigned long phys, unsigned long size, unsigned long virt,
-                   unsigned long flags, enum page_map_coherent coherent);
-int page_map_destroy(const struct paging_structures *pg_structs,
-                    unsigned long virt, unsigned long size,
-                    enum page_map_coherent coherent);
+                   unsigned long flags, enum paging_coherent coherent);
+int paging_destroy(const struct paging_structures *pg_structs,
+                  unsigned long virt, unsigned long size,
+                  enum paging_coherent coherent);
 
-void *
-page_map_get_guest_pages(const struct guest_paging_structures *pg_structs,
-                        unsigned long gaddr, unsigned int num,
-                        unsigned long flags);
+void *paging_get_guest_pages(const struct guest_paging_structures *pg_structs,
+                            unsigned long gaddr, unsigned int num,
+                            unsigned long flags);
 
 int paging_init(void);
 void arch_paging_init(void);
-void page_map_dump_stats(const char *when);
+void paging_dump_stats(const char *when);
 
 #endif /* !_JAILHOUSE_PAGING_H */
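
To illustrate the renamed interface declared in the header above, here is a minimal sketch of a caller mapping one uncached MMIO page into the hypervisor page tables and tearing it down again, mirroring the apic.c and pci.c hunks. The helper function and its mmio_phys/virt parameters are hypothetical; only the paging_create()/paging_destroy() signatures, hv_paging_structs, the PAGE_* flags, and PAGING_NON_COHERENT come from this commit.

/* Illustrative sketch -- not part of this commit. */
static int map_mmio_page_example(unsigned long mmio_phys, void *virt)
{
	int err;

	/* map one uncached page at virt, non-coherent (CPU page tables) */
	err = paging_create(&hv_paging_structs, mmio_phys, PAGE_SIZE,
			    (unsigned long)virt,
			    PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
			    PAGING_NON_COHERENT);
	if (err)
		return err;

	/* ... access the device through virt ... */

	/* remove the mapping again */
	return paging_destroy(&hv_paging_structs, (unsigned long)virt,
			      PAGE_SIZE, PAGING_NON_COHERENT);
}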
diff --git a/hypervisor/paging.c b/hypervisor/paging.c
index fc02cb194f865d0ea4133966ddcb50a52b3e5c49..6f1d63b74df811d686b3505fefa3b2e98f974d1f 100644
@@ -1,7 +1,7 @@
 /*
  * Jailhouse, a Linux-based partitioning hypervisor
  *
- * Copyright (c) Siemens AG, 2013
+ * Copyright (c) Siemens AG, 2013, 2014
  *
  * Authors:
  *  Jan Kiszka <jan.kiszka@siemens.com>
@@ -34,7 +34,7 @@ struct page_pool remap_pool = {
 
 struct paging_structures hv_paging_structs;
 
-unsigned long page_map_get_phys_invalid(pt_entry_t pte, unsigned long virt)
+unsigned long paging_get_phys_invalid(pt_entry_t pte, unsigned long virt)
 {
        return INVALID_PHYS_ADDR;
 }
@@ -116,8 +116,8 @@ void page_free(struct page_pool *pool, void *page, unsigned int num)
        }
 }
 
-unsigned long page_map_virt2phys(const struct paging_structures *pg_structs,
-                                unsigned long virt, unsigned long flags)
+unsigned long paging_virt2phys(const struct paging_structures *pg_structs,
+                              unsigned long virt, unsigned long flags)
 {
        const struct paging *paging = pg_structs->root_paging;
        page_table_t pt = pg_structs->root_table;
@@ -131,19 +131,19 @@ unsigned long page_map_virt2phys(const struct paging_structures *pg_structs,
                phys = paging->get_phys(pte, virt);
                if (phys != INVALID_PHYS_ADDR)
                        return phys;
-               pt = page_map_phys2hvirt(paging->get_next_pt(pte));
+               pt = paging_phys2hvirt(paging->get_next_pt(pte));
                paging++;
        }
 }
 
-static void flush_pt_entry(pt_entry_t pte, enum page_map_coherent coherent)
+static void flush_pt_entry(pt_entry_t pte, enum paging_coherent coherent)
 {
-       if (coherent == PAGE_MAP_COHERENT)
+       if (coherent == PAGING_COHERENT)
                flush_cache(pte, sizeof(*pte));
 }
 
 static int split_hugepage(const struct paging *paging, pt_entry_t pte,
-                         unsigned long virt, enum page_map_coherent coherent)
+                         unsigned long virt, enum paging_coherent coherent)
 {
        unsigned long phys = paging->get_phys(pte, virt);
        struct paging_structures sub_structs;
@@ -162,16 +162,16 @@ static int split_hugepage(const struct paging *paging, pt_entry_t pte,
        sub_structs.root_table = page_alloc(&mem_pool, 1);
        if (!sub_structs.root_table)
                return -ENOMEM;
-       paging->set_next_pt(pte, page_map_hvirt2phys(sub_structs.root_table));
+       paging->set_next_pt(pte, paging_hvirt2phys(sub_structs.root_table));
        flush_pt_entry(pte, coherent);
 
-       return page_map_create(&sub_structs, phys, paging->page_size, virt,
-                              flags, coherent);
+       return paging_create(&sub_structs, phys, paging->page_size, virt,
+                            flags, coherent);
 }
 
-int page_map_create(const struct paging_structures *pg_structs,
-                   unsigned long phys, unsigned long size, unsigned long virt,
-                   unsigned long flags, enum page_map_coherent coherent)
+int paging_create(const struct paging_structures *pg_structs,
+                 unsigned long phys, unsigned long size, unsigned long virt,
+                 unsigned long flags, enum paging_coherent coherent)
 {
        phys &= PAGE_MASK;
        virt &= PAGE_MASK;
@@ -195,9 +195,9 @@ int page_map_create(const struct paging_structures *pg_structs,
                                 * boundaries.
                                 */
                                if (paging->page_size > PAGE_SIZE)
-                                       page_map_destroy(pg_structs, virt,
-                                                        paging->page_size,
-                                                        coherent);
+                                       paging_destroy(pg_structs, virt,
+                                                      paging->page_size,
+                                                      coherent);
                                paging->set_terminal(pte, phys, flags);
                                flush_pt_entry(pte, coherent);
                                break;
@@ -207,14 +207,14 @@ int page_map_create(const struct paging_structures *pg_structs,
                                                     coherent);
                                if (err)
                                        return err;
-                               pt = page_map_phys2hvirt(
+                               pt = paging_phys2hvirt(
                                                paging->get_next_pt(pte));
                        } else {
                                pt = page_alloc(&mem_pool, 1);
                                if (!pt)
                                        return -ENOMEM;
                                paging->set_next_pt(pte,
-                                                   page_map_hvirt2phys(pt));
+                                                   paging_hvirt2phys(pt));
                                flush_pt_entry(pte, coherent);
                        }
                        paging++;
@@ -229,9 +229,9 @@ int page_map_create(const struct paging_structures *pg_structs,
        return 0;
 }
 
-int page_map_destroy(const struct paging_structures *pg_structs,
-                    unsigned long virt, unsigned long size,
-                    enum page_map_coherent coherent)
+int paging_destroy(const struct paging_structures *pg_structs,
+                  unsigned long virt, unsigned long size,
+                  enum paging_coherent coherent)
 {
        size = PAGE_ALIGN(size);
 
@@ -258,8 +258,7 @@ int page_map_destroy(const struct paging_structures *pg_structs,
                                } else
                                        break;
                        }
-                       pt[++n] = page_map_phys2hvirt(
-                                       paging->get_next_pt(pte));
+                       pt[++n] = paging_phys2hvirt(paging->get_next_pt(pte));
                        paging++;
                }
                /* advance by page size of current level paging */
@@ -287,9 +286,9 @@ int page_map_destroy(const struct paging_structures *pg_structs,
 }
 
 static unsigned long
-page_map_gvirt2gphys(const struct guest_paging_structures *pg_structs,
-                    unsigned long gvirt, unsigned long tmp_page,
-                    unsigned long flags)
+paging_gvirt2gphys(const struct guest_paging_structures *pg_structs,
+                  unsigned long gvirt, unsigned long tmp_page,
+                  unsigned long flags)
 {
        unsigned long page_table_gphys = pg_structs->root_table_gphys;
        const struct paging *paging = pg_structs->root_paging;
@@ -299,15 +298,14 @@ page_map_gvirt2gphys(const struct guest_paging_structures *pg_structs,
 
        while (1) {
                /* map guest page table */
-               phys = arch_page_map_gphys2phys(this_cpu_data(),
+               phys = arch_paging_gphys2phys(this_cpu_data(),
                                                page_table_gphys,
                                                PAGE_READONLY_FLAGS);
                if (phys == INVALID_PHYS_ADDR)
                        return INVALID_PHYS_ADDR;
-               err = page_map_create(&hv_paging_structs, phys,
-                                     PAGE_SIZE, tmp_page,
-                                     PAGE_READONLY_FLAGS,
-                                     PAGE_MAP_NON_COHERENT);
+               err = paging_create(&hv_paging_structs, phys, PAGE_SIZE,
+                                   tmp_page, PAGE_READONLY_FLAGS,
+                                   PAGING_NON_COHERENT);
                if (err)
                        return INVALID_PHYS_ADDR;
 
@@ -323,10 +321,9 @@ page_map_gvirt2gphys(const struct guest_paging_structures *pg_structs,
        }
 }
 
-void *
-page_map_get_guest_pages(const struct guest_paging_structures *pg_structs,
-                        unsigned long gaddr, unsigned int num,
-                        unsigned long flags)
+void *paging_get_guest_pages(const struct guest_paging_structures *pg_structs,
+                            unsigned long gaddr, unsigned int num,
+                            unsigned long flags)
 {
        unsigned long page_base = TEMPORARY_MAPPING_BASE +
                this_cpu_id() * PAGE_SIZE * NUM_TEMPORARY_PAGES;
@@ -337,17 +334,17 @@ page_map_get_guest_pages(const struct guest_paging_structures *pg_structs,
                return NULL;
        while (num-- > 0) {
                if (pg_structs)
-                       gphys = page_map_gvirt2gphys(pg_structs, gaddr,
-                                                    page_virt, flags);
+                       gphys = paging_gvirt2gphys(pg_structs, gaddr,
+                                                  page_virt, flags);
                else
                        gphys = gaddr;
 
-               phys = arch_page_map_gphys2phys(this_cpu_data(), gphys, flags);
+               phys = arch_paging_gphys2phys(this_cpu_data(), gphys, flags);
                if (phys == INVALID_PHYS_ADDR)
                        return NULL;
                /* map guest page */
-               err = page_map_create(&hv_paging_structs, phys, PAGE_SIZE,
-                                     page_virt, flags, PAGE_MAP_NON_COHERENT);
+               err = paging_create(&hv_paging_structs, phys, PAGE_SIZE,
+                                   page_virt, flags, PAGING_NON_COHERENT);
                if (err)
                        return NULL;
                gaddr += PAGE_SIZE;
@@ -401,20 +398,20 @@ int paging_init(void)
                goto error_nomem;
 
        /* Replicate hypervisor mapping of Linux */
-       err = page_map_create(&hv_paging_structs,
-                             page_map_hvirt2phys(&hypervisor_header),
-                             system_config->hypervisor_memory.size,
-                             (unsigned long)&hypervisor_header,
-                             PAGE_DEFAULT_FLAGS, PAGE_MAP_NON_COHERENT);
+       err = paging_create(&hv_paging_structs,
+                            paging_hvirt2phys(&hypervisor_header),
+                            system_config->hypervisor_memory.size,
+                            (unsigned long)&hypervisor_header,
+                            PAGE_DEFAULT_FLAGS, PAGING_NON_COHERENT);
        if (err)
                goto error_nomem;
 
        /* Make sure any remappings to the temporary regions can be performed
         * without allocations of page table pages. */
-       err = page_map_create(&hv_paging_structs, 0,
-                             remap_pool.used_pages * PAGE_SIZE,
-                             TEMPORARY_MAPPING_BASE, PAGE_NONPRESENT_FLAGS,
-                             PAGE_MAP_NON_COHERENT);
+       err = paging_create(&hv_paging_structs, 0,
+                           remap_pool.used_pages * PAGE_SIZE,
+                           TEMPORARY_MAPPING_BASE, PAGE_NONPRESENT_FLAGS,
+                           PAGING_NON_COHERENT);
        if (err)
                goto error_nomem;
 
@@ -425,7 +422,7 @@ error_nomem:
        return -ENOMEM;
 }
 
-void page_map_dump_stats(const char *when)
+void paging_dump_stats(const char *when)
 {
        printk("Page pool usage %s: mem %d/%d, remap %d/%d\n", when,
               mem_pool.used_pages, mem_pool.pages,
diff --git a/hypervisor/pci.c b/hypervisor/pci.c
index 6530ba638eb58f2cc6bb7395b84bb8af34389b0c..86b0ad9a1de677135e47f8b98fe48a66b3c67d47 100644
@@ -317,10 +317,10 @@ int pci_init(void)
        if (!pci_space)
                return -ENOMEM;
 
-       return page_map_create(&hv_paging_structs, mmcfg_start, mmcfg_size,
-                              (unsigned long)pci_space,
-                              PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
-                              PAGE_MAP_NON_COHERENT);
+       return paging_create(&hv_paging_structs, mmcfg_start, mmcfg_size,
+                            (unsigned long)pci_space,
+                            PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
+                            PAGING_NON_COHERENT);
 }
 
 static int pci_msix_access_handler(const struct cell *cell, bool is_write,
@@ -536,11 +536,11 @@ static int pci_add_device(struct cell *cell, struct pci_device *device)
                        goto error_remove_dev;
                }
 
-               err = page_map_create(&hv_paging_structs,
-                                     device->info->msix_address, size,
-                                     (unsigned long)device->msix_table,
-                                     PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
-                                     PAGE_MAP_NON_COHERENT);
+               err = paging_create(&hv_paging_structs,
+                                   device->info->msix_address, size,
+                                   (unsigned long)device->msix_table,
+                                   PAGE_DEFAULT_FLAGS | PAGE_FLAG_UNCACHED,
+                                   PAGING_NON_COHERENT);
                if (err)
                        goto error_page_free;
 
@@ -571,8 +571,8 @@ static void pci_remove_device(struct pci_device *device)
                return;
 
        /* cannot fail, destruction of same size as construction */
-       page_map_destroy(&hv_paging_structs, (unsigned long)device->msix_table,
-                        size, PAGE_MAP_NON_COHERENT);
+       paging_destroy(&hv_paging_structs, (unsigned long)device->msix_table,
+                      size, PAGING_NON_COHERENT);
        page_free(&remap_pool, device->msix_table, size / PAGE_SIZE);
 
        prev_msix_device = device->cell->msix_device_list;
diff --git a/hypervisor/setup.c b/hypervisor/setup.c
index c6a9932fdf768c9735876058fce49065d0b55e88..f4389cf9a1c69a673077163eb3a6d51c9694cc24 100644
@@ -67,8 +67,8 @@ static void init_early(unsigned int cpu_id)
         * pages for Linux. This allows to fault-in the hypervisor region into
         * Linux' page table before shutdown without triggering violations.
         */
-       hv_page.phys_start = page_map_hvirt2phys(empty_page);
-       hv_page.virt_start = page_map_hvirt2phys(&hypervisor_header);
+       hv_page.phys_start = paging_hvirt2phys(empty_page);
+       hv_page.virt_start = paging_hvirt2phys(&hypervisor_header);
        hv_page.size = PAGE_SIZE;
        hv_page.flags = JAILHOUSE_MEM_READ;
        core_and_percpu_size = (unsigned long)system_config - JAILHOUSE_BASE;
@@ -80,7 +80,7 @@ static void init_early(unsigned int cpu_id)
                hv_page.virt_start += PAGE_SIZE;
        }
 
-       page_map_dump_stats("after early setup");
+       paging_dump_stats("after early setup");
        printk("Initializing processors:\n");
 }
 
@@ -149,7 +149,7 @@ static void init_late(struct per_cpu *cpu_data)
 
        arch_config_commit(cpu_data, &root_cell);
 
-       page_map_dump_stats("after late setup");
+       paging_dump_stats("after late setup");
 }
 
 int entry(unsigned int cpu_id, struct per_cpu *cpu_data)