jailhouse.git: hypervisor/setup.c
/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013, 2014
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/processor.h>
#include <jailhouse/printk.h>
#include <jailhouse/entry.h>
#include <jailhouse/paging.h>
#include <jailhouse/control.h>
#include <jailhouse/string.h>
#include <generated/version.h>
#include <asm/spinlock.h>

extern u8 __text_start[], __hv_core_end[];

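/* Zeroed page used to back the hypervisor region for Linux (see init_early) */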
static const __attribute__((aligned(PAGE_SIZE))) u8 empty_page[PAGE_SIZE];

static DEFINE_SPINLOCK(init_lock);
static unsigned int master_cpu_id = -1;
static volatile unsigned int initialized_cpus;
static volatile int error;

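/*
 * One-time early setup, performed by the first CPU to enter the hypervisor:
 * initialize the debug console, paging and the root cell, then back the
 * hypervisor core and per-CPU regions with empty pages for the root cell.
 */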
static void init_early(unsigned int cpu_id)
{
        unsigned long core_and_percpu_size;
        struct jailhouse_memory hv_page;

        master_cpu_id = cpu_id;

        arch_dbg_write_init();

        printk("\nInitializing Jailhouse hypervisor %s on CPU %d\n",
               JAILHOUSE_VERSION, cpu_id);
        printk("Code location: %p\n", __text_start);

        error = paging_init();
        if (error)
                return;

        root_cell.config = &system_config->root_cell;

        root_cell.id = -1;
        error = cell_init(&root_cell);
        if (error)
                return;

        error = arch_init_early();
        if (error)
                return;

        /*
         * Back the region of the hypervisor core and per-CPU pages with
         * empty pages for Linux. This allows Linux to fault the hypervisor
         * region into its page tables before shutdown without triggering
         * access violations.
         */
        hv_page.phys_start = paging_hvirt2phys(empty_page);
        hv_page.virt_start = paging_hvirt2phys(&hypervisor_header);
        hv_page.size = PAGE_SIZE;
        hv_page.flags = JAILHOUSE_MEM_READ;
        core_and_percpu_size = (unsigned long)system_config - JAILHOUSE_BASE;
        while (core_and_percpu_size > 0) {
                error = arch_map_memory_region(&root_cell, &hv_page);
                if (error)
                        return;
                core_and_percpu_size -= PAGE_SIZE;
                hv_page.virt_start += PAGE_SIZE;
        }

        paging_dump_stats("after early setup");
        printk("Initializing processors:\n");
}

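/*
 * Per-CPU initialization, serialized by init_lock: validate the CPU ID,
 * assign the CPU to the root cell and run the architecture-specific setup.
 */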
static void cpu_init(struct per_cpu *cpu_data)
{
        int err = -EINVAL;

        printk(" CPU %d... ", cpu_data->cpu_id);

        if (!cpu_id_valid(cpu_data->cpu_id))
                goto failed;

        cpu_data->cell = &root_cell;

        err = arch_cpu_init(cpu_data);
        if (err)
                goto failed;

        printk("OK\n");

        /*
         * If this CPU is last, make sure everything was committed before we
         * signal the other CPUs spinning on initialized_cpus that they can
         * continue.
         */
        memory_barrier();
        initialized_cpus++;
        return;

failed:
        printk("FAILED\n");
        error = err;
}

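/*
 * Map all memory regions listed in the root cell's configuration into the
 * root cell.
 */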
int map_root_memory_regions(void)
{
        const struct jailhouse_memory *mem;
        unsigned int n;
        int err;

        for_each_mem_region(mem, root_cell.config, n) {
                err = arch_map_memory_region(&root_cell, mem);
                if (err)
                        return err;
        }
        return 0;
}

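/*
 * Late setup, executed once by the master CPU after all CPUs completed
 * cpu_init(): check that the number of online CPUs matches the root cell's
 * CPU set, run late architecture setup and commit the root cell
 * configuration.
 */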
static void init_late(void)
{
        unsigned int cpu, expected_cpus = 0;

        for_each_cpu(cpu, root_cell.cpu_set)
                expected_cpus++;
        if (hypervisor_header.online_cpus != expected_cpus) {
                error = -EINVAL;
                return;
        }

        error = arch_init_late();
        if (error)
                return;

        config_commit(&root_cell);

        paging_dump_stats("after late setup");
}

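/*
 * Hypervisor entry point, executed by every CPU. The first CPU to arrive
 * becomes the master and performs the early and late initialization; all
 * CPUs then synchronize before activating VMM mode. On error, the
 * hypervisor is shut down again and the error code is returned.
 */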
int entry(unsigned int cpu_id, struct per_cpu *cpu_data)
{
        static volatile bool activate;
        bool master = false;

        cpu_data->cpu_id = cpu_id;

        spin_lock(&init_lock);

        if (master_cpu_id == -1) {
                master = true;
                init_early(cpu_id);
        }

        if (!error)
                cpu_init(cpu_data);

        spin_unlock(&init_lock);

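        /* Wait until all CPUs have finished cpu_init() or an error occurred. */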
        while (!error && initialized_cpus < hypervisor_header.online_cpus)
                cpu_relax();

        if (!error && master) {
                init_late();
                if (!error) {
                        /*
                         * Make sure everything was committed before we signal
                         * the other CPUs that they can continue.
                         */
                        memory_barrier();
                        activate = true;
                }
        } else {
                while (!error && !activate)
                        cpu_relax();
        }

        if (error) {
                if (master)
                        arch_shutdown();
                arch_cpu_restore(cpu_data, error);
                return error;
        }

        if (master)
                printk("Activating hypervisor\n");

        /* point of no return */
        arch_cpu_activate_vmm(cpu_data);
}

/** Hypervisor description header. */
struct jailhouse_header __attribute__((section(".header")))
hypervisor_header = {
        .signature = JAILHOUSE_SIGNATURE,
        .core_size = (unsigned long)__hv_core_end - JAILHOUSE_BASE,
        .percpu_size = sizeof(struct per_cpu),
        .entry = arch_entry,
};