/*
* Jailhouse, a Linux-based partitioning hypervisor
*
- * Copyright (c) Siemens AG, 2013, 2014
+ * Copyright (c) Siemens AG, 2013-2016
*
* Authors:
* Jan Kiszka <jan.kiszka@siemens.com>
#include <jailhouse/processor.h>
#include <jailhouse/printk.h>
#include <jailhouse/entry.h>
+#include <jailhouse/mmio.h>
#include <jailhouse/paging.h>
#include <jailhouse/control.h>
#include <jailhouse/string.h>
#include <generated/version.h>
#include <asm/spinlock.h>
/*
 * Linker-script-provided symbols delimiting the hypervisor image.
 * NOTE(review): this file is a diff fragment — '-' lines are the old
 * code, '+' lines the new. The patch replaces __hv_core_end with
 * __page_pool as the end-of-core marker (used for core_size below);
 * both symbols presumably come from the hypervisor linker script —
 * confirm against hypervisor.lds.
 */
-extern u8 __text_start[], __hv_core_end[];
+extern u8 __text_start[], __page_pool[];
static const __attribute__((aligned(PAGE_SIZE))) u8 empty_page[PAGE_SIZE];
/*
 * Early one-time hypervisor initialization, run on the master CPU.
 * NOTE(review): diff fragment with elided hunk context — the printk()
 * argument list, several error checks, and other statements between the
 * lines below are not visible here; do not treat this as the complete
 * function body. '-' lines are removed code, '+' lines added code.
 */
static void init_early(unsigned int cpu_id)
{
/*
 * NEW: derive the config location from the header's core_size and
 * max_cpus instead of the old "__hv_core_end"-based layout; the system
 * configuration is placed right after core + per-CPU data.
 */
- unsigned long core_and_percpu_size;
+ unsigned long core_and_percpu_size = hypervisor_header.core_size +
+ sizeof(struct per_cpu) * hypervisor_header.max_cpus;
+ unsigned long hyp_phys_start, hyp_phys_end;
struct jailhouse_memory hv_page;
master_cpu_id = cpu_id;
+ system_config = (struct jailhouse_system *)
+ (JAILHOUSE_BASE + core_and_percpu_size);
+
arch_dbg_write_init();
/* printk() arguments truncated in this fragment */
printk("\nInitializing Jailhouse hypervisor %s on CPU %d\n",
root_cell.config = &system_config->root_cell;
/*
 * REMOVED: the explicit root-cell memory-region check — presumably
 * performed elsewhere after this patch; confirm in cell_init()/callers.
 */
- error = check_mem_regions(&system_config->root_cell);
- if (error)
- return;
-
root_cell.id = -1;
error = cell_init(&root_cell);
if (error)
* pages for Linux. This allows to fault-in the hypervisor region into
* Linux' page table before shutdown without triggering violations.
*/
/*
 * NEW: cover the hypervisor's entire physical range (taken from the
 * system config) with read-only mappings of empty_page, rather than
 * only core + per-CPU as the removed '-' lines did.
 */
+ hyp_phys_start = system_config->hypervisor_memory.phys_start;
+ hyp_phys_end = hyp_phys_start + system_config->hypervisor_memory.size;
+
hv_page.phys_start = paging_hvirt2phys(empty_page);
- hv_page.virt_start = paging_hvirt2phys(&hypervisor_header);
+ hv_page.virt_start = hyp_phys_start;
hv_page.size = PAGE_SIZE;
hv_page.flags = JAILHOUSE_MEM_READ;
- core_and_percpu_size = (unsigned long)system_config - JAILHOUSE_BASE;
- while (core_and_percpu_size > 0) {
/* Map page by page until the whole hypervisor range is covered. */
+ while (hv_page.virt_start < hyp_phys_end) {
error = arch_map_memory_region(&root_cell, &hv_page);
if (error)
return;
- core_and_percpu_size -= PAGE_SIZE;
hv_page.virt_start += PAGE_SIZE;
}
/*
 * Map all memory regions of the root cell's configuration.
 * NOTE(review): diff fragment — the function's trailing statements
 * (presumably "return 0;" and the closing brace) lie outside this view.
 * The '+' lines switch iteration to the for_each_mem_region() helper
 * and route sub-page regions through mmio_subpage_register() instead of
 * arch_map_memory_region().
 */
int map_root_memory_regions(void)
{
- const struct jailhouse_memory *mem =
- jailhouse_cell_mem_regions(root_cell.config);
+ const struct jailhouse_memory *mem;
unsigned int n;
int err;
- for (n = 0; n < root_cell.config->num_memory_regions; n++, mem++) {
- err = arch_map_memory_region(&root_cell, mem);
+ for_each_mem_region(mem, root_cell.config, n) {
/* Sub-page regions need MMIO emulation, not a direct mapping. */
+ if (JAILHOUSE_MEMORY_IS_SUBPAGE(mem))
+ err = mmio_subpage_register(&root_cell, mem);
+ else
+ err = arch_map_memory_region(&root_cell, mem);
if (err)
return err;
}
/*
 * Hypervisor image header, emitted into the dedicated ".header" section
 * so the loader can locate it at the start of the image.
 * The '+' lines (this file is a diff fragment) make core_size end at
 * __page_pool instead of __hv_core_end, and store .entry as an offset
 * from JAILHOUSE_BASE rather than an absolute address — presumably so
 * the driver can relocate the entry point; confirm against the loader.
 */
struct jailhouse_header __attribute__((section(".header")))
hypervisor_header = {
.signature = JAILHOUSE_SIGNATURE,
- .core_size = (unsigned long)__hv_core_end - JAILHOUSE_BASE,
+ .core_size = (unsigned long)__page_pool - JAILHOUSE_BASE,
.percpu_size = sizeof(struct per_cpu),
- .entry = arch_entry,
+ .entry = arch_entry - JAILHOUSE_BASE,
};