/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013-2016
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
13 #include <jailhouse/processor.h>
14 #include <jailhouse/printk.h>
15 #include <jailhouse/entry.h>
16 #include <jailhouse/mmio.h>
17 #include <jailhouse/paging.h>
18 #include <jailhouse/control.h>
19 #include <jailhouse/string.h>
20 #include <generated/version.h>
21 #include <asm/spinlock.h>
/* Linker-provided symbols: start of the hypervisor text section and start of
 * the page pool that follows the core image. */
extern u8 __text_start[], __page_pool[];

/* Zero-filled, page-aligned page used to back the hypervisor memory region
 * with harmless read-only mappings for Linux (see init_early). */
static const __attribute__((aligned(PAGE_SIZE))) u8 empty_page[PAGE_SIZE];

/* Serializes per-CPU initialization in entry(). */
static DEFINE_SPINLOCK(init_lock);
/* ID of the CPU performing the one-time setup; -1 until the first CPU
 * enters entry() and claims the master role. */
static unsigned int master_cpu_id = -1;
/* Count of CPUs that finished cpu_init(); spun on by entry(), hence
 * volatile. NOTE(review): volatile alone is not a memory barrier — ordering
 * relies on the explicit barriers in the init path. */
static volatile unsigned int initialized_cpus;
/* First setup error encountered by any CPU; non-zero aborts activation. */
static volatile int error;
32 static void init_early(unsigned int cpu_id)
34 unsigned long core_and_percpu_size = hypervisor_header.core_size +
35 sizeof(struct per_cpu) * hypervisor_header.max_cpus;
36 unsigned long hyp_phys_start, hyp_phys_end;
37 struct jailhouse_memory hv_page;
39 master_cpu_id = cpu_id;
41 system_config = (struct jailhouse_system *)
42 (JAILHOUSE_BASE + core_and_percpu_size);
44 arch_dbg_write_init();
46 printk("\nInitializing Jailhouse hypervisor %s on CPU %d\n",
47 JAILHOUSE_VERSION, cpu_id);
48 printk("Code location: %p\n", __text_start);
50 error = paging_init();
54 root_cell.config = &system_config->root_cell;
57 error = cell_init(&root_cell);
61 error = arch_init_early();
66 * Back the region of the hypervisor core and per-CPU page with empty
67 * pages for Linux. This allows to fault-in the hypervisor region into
68 * Linux' page table before shutdown without triggering violations.
70 hyp_phys_start = system_config->hypervisor_memory.phys_start;
71 hyp_phys_end = hyp_phys_start + system_config->hypervisor_memory.size;
73 hv_page.phys_start = paging_hvirt2phys(empty_page);
74 hv_page.virt_start = hyp_phys_start;
75 hv_page.size = PAGE_SIZE;
76 hv_page.flags = JAILHOUSE_MEM_READ;
77 while (hv_page.virt_start < hyp_phys_end) {
78 error = arch_map_memory_region(&root_cell, &hv_page);
81 hv_page.virt_start += PAGE_SIZE;
84 paging_dump_stats("after early setup");
85 printk("Initializing processors:\n");
88 static void cpu_init(struct per_cpu *cpu_data)
92 printk(" CPU %d... ", cpu_data->cpu_id);
94 if (!cpu_id_valid(cpu_data->cpu_id))
97 cpu_data->cell = &root_cell;
99 err = arch_cpu_init(cpu_data);
106 * If this CPU is last, make sure everything was committed before we
107 * signal the other CPUs spinning on initialized_cpus that they can
119 int map_root_memory_regions(void)
121 const struct jailhouse_memory *mem;
125 for_each_mem_region(mem, root_cell.config, n) {
126 if (JAILHOUSE_MEMORY_IS_SUBPAGE(mem))
127 err = mmio_subpage_register(&root_cell, mem);
129 err = arch_map_memory_region(&root_cell, mem);
136 static void init_late(void)
138 unsigned int cpu, expected_cpus = 0;
140 for_each_cpu(cpu, root_cell.cpu_set)
142 if (hypervisor_header.online_cpus != expected_cpus) {
147 error = arch_init_late();
151 config_commit(&root_cell);
153 paging_dump_stats("after late setup");
156 int entry(unsigned int cpu_id, struct per_cpu *cpu_data)
158 static volatile bool activate;
161 cpu_data->cpu_id = cpu_id;
163 spin_lock(&init_lock);
165 if (master_cpu_id == -1) {
173 spin_unlock(&init_lock);
175 while (!error && initialized_cpus < hypervisor_header.online_cpus)
178 if (!error && master) {
182 * Make sure everything was committed before we signal
183 * the other CPUs that they can continue.
189 while (!error && !activate)
196 arch_cpu_restore(cpu_data, error);
201 printk("Activating hypervisor\n");
203 /* point of no return */
204 arch_cpu_activate_vmm(cpu_data);
207 /** Hypervisor description header. */
208 struct jailhouse_header __attribute__((section(".header")))
209 hypervisor_header = {
210 .signature = JAILHOUSE_SIGNATURE,
211 .core_size = (unsigned long)__page_pool - JAILHOUSE_BASE,
212 .percpu_size = sizeof(struct per_cpu),