rtime.felk.cvut.cz Git - jailhouse.git/commitdiff
core: Enhance page_map_get_guest_page(s)
author: Jan Kiszka <jan.kiszka@siemens.com>
Thu, 21 Aug 2014 07:16:48 +0000 (09:16 +0200)
committer: Jan Kiszka <jan.kiszka@siemens.com>
Thu, 28 Aug 2014 06:36:09 +0000 (08:36 +0200)
Generalize page_map_get_guest_page to map multiple pages in a single
run. Moreover, accept both guest-physical and guest-virtual addresses as
input: if pg_structs is NULL, a physical address is provided.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
hypervisor/arch/x86/mmio.c
hypervisor/include/jailhouse/paging.h
hypervisor/paging.c

index 363bad7e4e01342c1822e5ad07cfa7572dc1f124..00c2de05f95ada1e3c32fa5dba38c286451c1977 100644 (file)
@@ -41,8 +41,8 @@ static u8 *map_code_page(struct per_cpu *cpu_data,
         * and we have to map a new one now. */
        if (current_page && ((pc & ~PAGE_MASK) != 0))
                return current_page;
-       return page_map_get_guest_page(cpu_data, pg_structs, pc,
-                                      PAGE_READONLY_FLAGS);
+       return page_map_get_guest_pages(cpu_data, pg_structs, pc, 1,
+                                       PAGE_READONLY_FLAGS);
 }
 
 struct mmio_access mmio_parse(struct per_cpu *cpu_data, unsigned long pc,
index 51472b2622152c16dc699bd8bee32c0de57c3bdc..11e0cfecface60b81e7dcfac281274981052550c 100644 (file)
@@ -120,9 +120,11 @@ int page_map_destroy(const struct paging_structures *pg_structs,
                     unsigned long virt, unsigned long size,
                     enum page_map_coherent coherent);
 
-void *page_map_get_guest_page(struct per_cpu *cpu_data,
-                             const struct guest_paging_structures *pg_structs,
-                             unsigned long virt, unsigned long flags);
+void *
+page_map_get_guest_pages(struct per_cpu *cpu_data,
+                        const struct guest_paging_structures *pg_structs,
+                        unsigned long gaddr, unsigned int num,
+                        unsigned long flags);
 
 int paging_init(void);
 void arch_paging_init(void);
index c5fdbd3fd603976ffea229f118d75be09c24c3d3..8c8ab5579d52fc4c54433e11da5850f41263f621 100644 (file)
@@ -286,52 +286,73 @@ int page_map_destroy(const struct paging_structures *pg_structs,
        return 0;
 }
 
-void *page_map_get_guest_page(struct per_cpu *cpu_data,
-                             const struct guest_paging_structures *pg_structs,
-                             unsigned long virt, unsigned long flags)
+static unsigned long
+page_map_gvirt2gphys(struct per_cpu *cpu_data,
+                    const struct guest_paging_structures *pg_structs,
+                    unsigned long gvirt, unsigned long tmp_page)
 {
        unsigned long page_table_gphys = pg_structs->root_table_gphys;
        const struct paging *paging = pg_structs->root_paging;
-       unsigned long page_virt, phys, gphys;
+       unsigned long gphys, phys;
        pt_entry_t pte;
        int err;
 
-       page_virt = TEMPORARY_MAPPING_BASE +
-               cpu_data->cpu_id * PAGE_SIZE * NUM_TEMPORARY_PAGES;
-
        while (1) {
                /* map guest page table */
                phys = arch_page_map_gphys2phys(cpu_data, page_table_gphys);
                if (phys == INVALID_PHYS_ADDR)
-                       return NULL;
+                       return INVALID_PHYS_ADDR;
                err = page_map_create(&hv_paging_structs, phys,
-                                     PAGE_SIZE, page_virt,
+                                     PAGE_SIZE, tmp_page,
                                      PAGE_READONLY_FLAGS,
                                      PAGE_MAP_NON_COHERENT);
                if (err)
-                       return NULL;
+                       return INVALID_PHYS_ADDR;
 
                /* evaluate page table entry */
-               pte = paging->get_entry((page_table_t)page_virt, virt);
+               pte = paging->get_entry((page_table_t)tmp_page, gvirt);
                if (!paging->entry_valid(pte))
-                       return NULL;
-               gphys = paging->get_phys(pte, virt);
+                       return INVALID_PHYS_ADDR;
+               gphys = paging->get_phys(pte, gvirt);
                if (gphys != INVALID_PHYS_ADDR)
-                       break;
+                       return gphys;
                page_table_gphys = paging->get_next_pt(pte);
                paging++;
        }
+}
 
-       phys = arch_page_map_gphys2phys(cpu_data, gphys);
-       if (phys == INVALID_PHYS_ADDR)
-               return NULL;
-       /* map guest page */
-       err = page_map_create(&hv_paging_structs, phys, PAGE_SIZE, page_virt,
-                             flags, PAGE_MAP_NON_COHERENT);
-       if (err)
+void *
+page_map_get_guest_pages(struct per_cpu *cpu_data,
+                        const struct guest_paging_structures *pg_structs,
+                        unsigned long gaddr, unsigned int num,
+                        unsigned long flags)
+{
+       unsigned long page_base = TEMPORARY_MAPPING_BASE +
+               cpu_data->cpu_id * PAGE_SIZE * NUM_TEMPORARY_PAGES;
+       unsigned long phys, gphys, page_virt = page_base;
+       int err;
+
+       if (num > NUM_TEMPORARY_PAGES)
                return NULL;
+       while (num-- > 0) {
+               if (pg_structs)
+                       gphys = page_map_gvirt2gphys(cpu_data, pg_structs,
+                                                    gaddr, page_virt);
+               else
+                       gphys = gaddr;
 
-       return (void *)page_virt;
+               phys = arch_page_map_gphys2phys(cpu_data, gphys);
+               if (phys == INVALID_PHYS_ADDR)
+                       return NULL;
+               /* map guest page */
+               err = page_map_create(&hv_paging_structs, phys, PAGE_SIZE,
+                                     page_virt, flags, PAGE_MAP_NON_COHERENT);
+               if (err)
+                       return NULL;
+               gaddr += PAGE_SIZE;
+               page_virt += PAGE_SIZE;
+       }
+       return (void *)page_base;
 }
 
 int paging_init(void)