extern u8 __page_pool[];
+/**
+ * Offset between virtual and physical hypervisor addresses.
+ *
+ * @note Private, use page_map_hvirt2phys() or page_map_phys2hvirt() instead.
+ */
unsigned long page_offset;
+/** Page pool containing physical pages for use by the hypervisor. */
struct page_pool mem_pool;
+/** Page pool containing virtual pages for remappings by the hypervisor. */
struct page_pool remap_pool = {
.base_address = (void *)REMAP_BASE,
.pages = BITS_PER_PAGE * NUM_REMAP_BITMAP_PAGES,
};
+/** Descriptor of the hypervisor paging structures. */
struct paging_structures hv_paging_structs;
+/**
+ * Trivial implementation of paging::get_phys (for non-terminal levels)
+ * @param pte See paging::get_phys.
+ * @param virt See paging::get_phys.
+ *
+ * @return @c INVALID_PHYS_ADDR.
+ *
+ * @see paging
+ */
unsigned long paging_get_phys_invalid(pt_entry_t pte, unsigned long virt)
{
return INVALID_PHYS_ADDR;
return INVALID_PAGE_NR;
}
-void *page_alloc(struct page_pool *pool, unsigned int num)
+/**
+ * Allocate consecutive pages from the specified pool.
+ * @param pool Page pool to allocate from.
+ * @param num Number of pages.
+ * @param align_mask Choose start so that start_page_no & align_mask == 0.
+ *
+ * @return Pointer to first page or NULL if allocation failed.
+ *
+ * @see page_free
+ */
+static void *page_alloc_internal(struct page_pool *pool, unsigned int num,
+ unsigned long align_mask)
{
- unsigned long start, last, next;
+ /* The pool itself might not be aligned as required. */
+ unsigned long aligned_start =
+ ((unsigned long)pool->base_address >> PAGE_SHIFT) & align_mask;
+ unsigned long next = aligned_start;
+ unsigned long start, last;
unsigned int allocated;
- start = find_next_free_page(pool, 0);
- if (start == INVALID_PAGE_NR)
+restart:
+ /* Forward the search start to the next aligned page. */
+ if ((next - aligned_start) & align_mask)
+ next += num - ((next - aligned_start) & align_mask);
+
+ start = next = find_next_free_page(pool, next);
+ if (start == INVALID_PAGE_NR || num == 0)
return NULL;
-restart:
+ /* Enforce alignment (none of align_mask is 0). */
+ if ((start - aligned_start) & align_mask)
+ goto restart;
+
for (allocated = 1, last = start; allocated < num;
allocated++, last = next) {
next = find_next_free_page(pool, last + 1);
if (next == INVALID_PAGE_NR)
return NULL;
- if (next != last + 1) {
- start = next;
- goto restart;
- }
+ if (next != last + 1)
+ goto restart; /* not consecutive */
}
for (allocated = 0; allocated < num; allocated++)
return pool->base_address + start * PAGE_SIZE;
}
/**
 * Allocate consecutive pages from the specified pool.
 * @param pool	Page pool to allocate from.
 * @param num	Number of pages.
 *
 * @return Pointer to first page or NULL if allocation failed.
 *
 * @see page_free
 */
void *page_alloc(struct page_pool *pool, unsigned int num)
{
	/* align_mask == 0: no alignment constraint. */
	return page_alloc_internal(pool, num, 0);
}

/**
 * Allocate aligned consecutive pages from the specified pool.
 * @param pool	Page pool to allocate from.
 * @param num	Number of pages. Num needs to be a power of 2.
 *
 * @return Pointer to first page or NULL if allocation failed.
 *
 * @see page_free
 */
void *page_alloc_aligned(struct page_pool *pool, unsigned int num)
{
	/* num is a power of 2, so num - 1 is the alignment mask. */
	return page_alloc_internal(pool, num, num - 1);
}
+
+/**
+ * Release pages to the specified pool.
+ * @param pool Page pool to release to.
+ * @param page Address of first page.
+ * @param num Number of pages.
+ *
+ * @see page_alloc
+ */
void page_free(struct page_pool *pool, void *page, unsigned int num)
{
unsigned long page_nr;
}
}
/**
 * Translate a virtual address to a physical address according to the given
 * paging structures.
 * @param pg_structs	Paging structures to use for translation.
 * @param virt		Virtual address.
 * @param flags		Access flags that have to be supported by the mapping,
 *			see @ref PAGE_FLAGS.
 *
 * @return Physical address on success or @c INVALID_PHYS_ADDR if the virtual
 *	   address could not be translated or the requested access is not
 *	   supported by the mapping.
 *
 * @see paging_phys2hvirt
 * @see paging_hvirt2phys
 * @see arch_paging_gphys2phys
 */
unsigned long paging_virt2phys(const struct paging_structures *pg_structs,
			       unsigned long virt, unsigned long flags)
{
	/*
	 * NOTE(review): the body of this function was lost to elided diff
	 * context — the surviving line below looks like the argument tail of
	 * an unrelated call. Restore the translation walk from upstream
	 * before building; this does not compile as-is.
	 */
	flags, coherent);
}
/**
 * Create or modify a page map.
 * @param pg_structs	Descriptor of paging structures to be used.
 * @param phys		Physical address of the region to be mapped.
 * @param size		Size of the region.
 * @param virt		Virtual address the region should be mapped to.
 * @param flags		Flags describing the permitted access, see
 *			@ref PAGE_FLAGS.
 * @param coherent	Coherency of mapping.
 *
 * @return 0 on success, negative error code otherwise.
 *
 * @note The function aims at using the largest possible page size for the
 * mapping but does not consolidate with neighboring mappings.
 *
 * @see paging_destroy
 * @see paging_get_guest_pages
 */
int paging_create(const struct paging_structures *pg_structs,
		  unsigned long phys, unsigned long size, unsigned long virt,
		  unsigned long flags, enum paging_coherent coherent)
	/*
	 * NOTE(review): the opening brace and the mapping loop were lost to
	 * elided diff context — only the trailing "return 0;" survived.
	 * Restore from upstream before building; this does not compile as-is.
	 */
	return 0;
}
/**
 * Destroy a page map.
 * @param pg_structs	Descriptor of paging structures to be used.
 * @param virt		Virtual address of the region to be unmapped.
 * @param size		Size of the region.
 * @param coherent	Coherency of mapping.
 *
 * @return 0 on success, negative error code otherwise.
 *
 * @note If required, this function tries to break up hugepages if they should
 * be unmapped only partially. This may require allocating additional pages for
 * the paging structures, thus can fail. Unmap requests that cover only full
 * pages never fail.
 *
 * @see paging_create
 */
int paging_destroy(const struct paging_structures *pg_structs,
		   unsigned long virt, unsigned long size,
		   enum paging_coherent coherent)
	/*
	 * NOTE(review): the opening brace and the unmap loop were lost to
	 * elided diff context — only two closing braces survived. Restore
	 * from upstream before building; this does not compile as-is.
	 */
}
}
/**
 * Map guest (cell) pages into the hypervisor address space.
 * @param pg_structs	Descriptor of the guest paging structures if @c gaddr
 *			is a guest-virtual address or @c NULL if it is a
 *			guest-physical address.
 * @param gaddr		Guest address of the first page to be mapped.
 * @param num		Number of pages to be mapped.
 * @param flags		Access flags for the hypervisor mapping, see
 *			@ref PAGE_FLAGS.
 *
 * @return Pointer to first mapped page or @c NULL on error.
 *
 * @note The mapping is done only for the calling CPU and must thus only be
 * used by the very same CPU.
 *
 * @note The mapping is only temporary, valid until the next invocation of
 * paging_get_guest_pages() on this CPU. It does not require explicit
 * unmapping when it is no longer needed.
 */
void *paging_get_guest_pages(const struct guest_paging_structures *pg_structs,
			     unsigned long gaddr, unsigned int num,
			     unsigned long flags)
	/*
	 * NOTE(review): the opening brace and the remapping logic were lost
	 * to elided diff context — only the final return survived. Restore
	 * from upstream before building; this does not compile as-is.
	 */
	return (void *)page_base;
}
+/**
+ * Initialize the page mapping subsystem.
+ *
+ * @return 0 on success, negative error code otherwise.
+ */
int paging_init(void)
{
- unsigned long n, per_cpu_pages, config_pages, bitmap_pages;
+ unsigned long n, per_cpu_pages, config_pages, bitmap_pages, vaddr;
int err;
per_cpu_pages = hypervisor_header.max_cpus *
sizeof(struct per_cpu) / PAGE_SIZE;
- system_config = (struct jailhouse_system *)
- (__page_pool + per_cpu_pages * PAGE_SIZE);
config_pages = PAGES(jailhouse_system_config_size(system_config));
page_offset = JAILHOUSE_BASE -
bitmap_pages = (mem_pool.pages + BITS_PER_PAGE - 1) / BITS_PER_PAGE;
if (mem_pool.pages <= per_cpu_pages + config_pages + bitmap_pages)
- goto error_nomem;
+ return -ENOMEM;
mem_pool.base_address = __page_pool;
mem_pool.used_bitmap =
hv_paging_structs.root_paging = hv_paging;
hv_paging_structs.root_table = page_alloc(&mem_pool, 1);
if (!hv_paging_structs.root_table)
- goto error_nomem;
+ return -ENOMEM;
/* Replicate hypervisor mapping of Linux */
err = paging_create(&hv_paging_structs,
(unsigned long)&hypervisor_header,
PAGE_DEFAULT_FLAGS, PAGING_NON_COHERENT);
if (err)
- goto error_nomem;
+ return err;
+
+ if (system_config->debug_console.flags & JAILHOUSE_MEM_IO) {
+ vaddr = (unsigned long)hypervisor_header.debug_console_base;
+ /* check if console overlaps remapping region */
+ if (vaddr + system_config->debug_console.size >= REMAP_BASE &&
+ vaddr < REMAP_BASE + remap_pool.pages * PAGE_SIZE)
+ return trace_error(-EINVAL);
+
+ err = paging_create(&hv_paging_structs,
+ system_config->debug_console.phys_start,
+ system_config->debug_console.size, vaddr,
+ PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
+ PAGING_NON_COHERENT);
+ if (err)
+ return err;
+ }
/* Make sure any remappings to the temporary regions can be performed
* without allocations of page table pages. */
- err = paging_create(&hv_paging_structs, 0,
- remap_pool.used_pages * PAGE_SIZE,
- TEMPORARY_MAPPING_BASE, PAGE_NONPRESENT_FLAGS,
- PAGING_NON_COHERENT);
- if (err)
- goto error_nomem;
-
- return 0;
-
-error_nomem:
- printk("FATAL: page pool much too small\n");
- return -ENOMEM;
+ return paging_create(&hv_paging_structs, 0,
+ remap_pool.used_pages * PAGE_SIZE,
+ TEMPORARY_MAPPING_BASE, PAGE_NONPRESENT_FLAGS,
+ PAGING_NON_COHERENT);
}
/**
 * Dump usage statistics of the page pools.
 * @param when	String that characterizes the associated event.
 */
void paging_dump_stats(const char *when)
{
printk("Page pool usage %s: mem %d/%d, remap %d/%d\n", when,