Source: hypervisor/setup.c from the Jailhouse git repository (rtime.felk.cvut.cz gitweb mirror).
1 /*
2  * Jailhouse, a Linux-based partitioning hypervisor
3  *
4  * Copyright (c) Siemens AG, 2013-2016
5  *
6  * Authors:
7  *  Jan Kiszka <jan.kiszka@siemens.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  */
12
13 #include <jailhouse/processor.h>
14 #include <jailhouse/printk.h>
15 #include <jailhouse/entry.h>
16 #include <jailhouse/mmio.h>
17 #include <jailhouse/paging.h>
18 #include <jailhouse/control.h>
19 #include <jailhouse/string.h>
20 #include <generated/version.h>
21 #include <asm/spinlock.h>
22
/* Linker-provided symbols: start of hypervisor text, start of the page pool. */
extern u8 __text_start[], __page_pool[];

/*
 * Read-only zero page used to back the hypervisor memory region in Linux'
 * mappings (see init_early()), so faults into that region stay harmless.
 */
static const __attribute__((aligned(PAGE_SIZE))) u8 empty_page[PAGE_SIZE];

/* Serializes early per-CPU initialization in entry(). */
static DEFINE_SPINLOCK(init_lock);
/* ID of the CPU that ran init_early(); -1 (wrapped) until then. */
static unsigned int master_cpu_id = -1;
/* Count of CPUs that completed cpu_init(); incremented under init_lock. */
static volatile unsigned int initialized_cpus;
/* First setup error code encountered on any CPU, 0 while all is well. */
static volatile int error;
31
32 static void init_early(unsigned int cpu_id)
33 {
34         unsigned long core_and_percpu_size = hypervisor_header.core_size +
35                 sizeof(struct per_cpu) * hypervisor_header.max_cpus;
36         unsigned long hyp_phys_start, hyp_phys_end;
37         struct jailhouse_memory hv_page;
38
39         master_cpu_id = cpu_id;
40
41         system_config = (struct jailhouse_system *)
42                 (JAILHOUSE_BASE + core_and_percpu_size);
43
44         arch_dbg_write_init();
45
46         printk("\nInitializing Jailhouse hypervisor %s on CPU %d\n",
47                JAILHOUSE_VERSION, cpu_id);
48         printk("Code location: %p\n", __text_start);
49
50         error = paging_init();
51         if (error)
52                 return;
53
54         root_cell.config = &system_config->root_cell;
55
56         root_cell.id = -1;
57         error = cell_init(&root_cell);
58         if (error)
59                 return;
60
61         error = arch_init_early();
62         if (error)
63                 return;
64
65         /*
66          * Back the region of the hypervisor core and per-CPU page with empty
67          * pages for Linux. This allows to fault-in the hypervisor region into
68          * Linux' page table before shutdown without triggering violations.
69          */
70         hyp_phys_start = system_config->hypervisor_memory.phys_start;
71         hyp_phys_end = hyp_phys_start + system_config->hypervisor_memory.size;
72
73         hv_page.phys_start = paging_hvirt2phys(empty_page);
74         hv_page.virt_start = hyp_phys_start;
75         hv_page.size = PAGE_SIZE;
76         hv_page.flags = JAILHOUSE_MEM_READ;
77         while (hv_page.virt_start < hyp_phys_end) {
78                 error = arch_map_memory_region(&root_cell, &hv_page);
79                 if (error)
80                         return;
81                 hv_page.virt_start += PAGE_SIZE;
82         }
83
84         paging_dump_stats("after early setup");
85         printk("Initializing processors:\n");
86 }
87
88 static void cpu_init(struct per_cpu *cpu_data)
89 {
90         int err = -EINVAL;
91
92         printk(" CPU %d... ", cpu_data->cpu_id);
93
94         if (!cpu_id_valid(cpu_data->cpu_id))
95                 goto failed;
96
97         cpu_data->cell = &root_cell;
98
99         err = arch_cpu_init(cpu_data);
100         if (err)
101                 goto failed;
102
103         printk("OK\n");
104
105         /*
106          * If this CPU is last, make sure everything was committed before we
107          * signal the other CPUs spinning on initialized_cpus that they can
108          * continue.
109          */
110         memory_barrier();
111         initialized_cpus++;
112         return;
113
114 failed:
115         printk("FAILED\n");
116         error = err;
117 }
118
119 int map_root_memory_regions(void)
120 {
121         const struct jailhouse_memory *mem;
122         unsigned int n;
123         int err;
124
125         for_each_mem_region(mem, root_cell.config, n) {
126                 if (JAILHOUSE_MEMORY_IS_SUBPAGE(mem))
127                         err = mmio_subpage_register(&root_cell, mem);
128                 else
129                         err = arch_map_memory_region(&root_cell, mem);
130                 if (err)
131                         return err;
132         }
133         return 0;
134 }
135
136 static void init_late(void)
137 {
138         unsigned int cpu, expected_cpus = 0;
139
140         for_each_cpu(cpu, root_cell.cpu_set)
141                 expected_cpus++;
142         if (hypervisor_header.online_cpus != expected_cpus) {
143                 error = -EINVAL;
144                 return;
145         }
146
147         error = arch_init_late();
148         if (error)
149                 return;
150
151         config_commit(&root_cell);
152
153         paging_dump_stats("after late setup");
154 }
155
/*
 * Common hypervisor entry point for all CPUs.
 *
 * The first CPU to take init_lock becomes the master and runs init_early();
 * every CPU then runs cpu_init() under the lock. Afterwards all CPUs
 * rendezvous on initialized_cpus, the master performs init_late() and
 * releases the others via the 'activate' flag. On any error, the master
 * shuts the hypervisor down and each CPU returns the error code to Linux;
 * on success, arch_cpu_activate_vmm() never returns.
 *
 * @param cpu_id    ID of the calling CPU.
 * @param cpu_data  Per-CPU data structure of the calling CPU.
 *
 * @return Error code on failure; does not return on success.
 */
int entry(unsigned int cpu_id, struct per_cpu *cpu_data)
{
	/* Set once by the master after init_late(); spun on by the others. */
	static volatile bool activate;
	bool master = false;

	cpu_data->cpu_id = cpu_id;

	spin_lock(&init_lock);

	/* master_cpu_id is still -1 only for the very first CPU to arrive. */
	if (master_cpu_id == -1) {
		master = true;
		init_early(cpu_id);
	}

	if (!error)
		cpu_init(cpu_data);

	spin_unlock(&init_lock);

	/* Wait until all online CPUs completed cpu_init() or one failed. */
	while (!error && initialized_cpus < hypervisor_header.online_cpus)
		cpu_relax();

	if (!error && master) {
		init_late();
		if (!error) {
			/*
			 * Make sure everything was committed before we signal
			 * the other CPUs that they can continue.
			 */
			memory_barrier();
			activate = true;
		}
	} else {
		while (!error && !activate)
			cpu_relax();
	}

	if (error) {
		/* Only the master tears down global hypervisor state. */
		if (master)
			arch_shutdown();
		arch_cpu_restore(cpu_data, error);
		return error;
	}

	if (master)
		printk("Activating hypervisor\n");

	/* point of no return */
	arch_cpu_activate_vmm(cpu_data);
}
206
/** Hypervisor description header. */
struct jailhouse_header __attribute__((section(".header")))
hypervisor_header = {
	/* Magic value identifying a valid hypervisor image. */
	.signature = JAILHOUSE_SIGNATURE,
	/* Size of the core code/data: from the load base up to the page pool. */
	.core_size = (unsigned long)__page_pool - JAILHOUSE_BASE,
	/* Size of one per-CPU data structure. */
	.percpu_size = sizeof(struct per_cpu),
	/* Entry point, stored as an offset relative to the load base. */
	.entry = arch_entry - JAILHOUSE_BASE,
};