rtime.felk.cvut.cz Git - jailhouse.git/blob - hypervisor/setup.c
x86: Implement amd_iommu cell management functions
[jailhouse.git] / hypervisor / setup.c
1 /*
2  * Jailhouse, a Linux-based partitioning hypervisor
3  *
4  * Copyright (c) Siemens AG, 2013-2016
5  *
6  * Authors:
7  *  Jan Kiszka <jan.kiszka@siemens.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  */
12
13 #include <jailhouse/processor.h>
14 #include <jailhouse/printk.h>
15 #include <jailhouse/entry.h>
16 #include <jailhouse/mmio.h>
17 #include <jailhouse/paging.h>
18 #include <jailhouse/control.h>
19 #include <jailhouse/string.h>
20 #include <generated/version.h>
21 #include <asm/spinlock.h>
22
extern u8 __text_start[], __page_pool[];

/*
 * Read-only zero-filled page used to back the hypervisor's own memory range
 * in the root cell's mapping (see init_early).
 */
static const __attribute__((aligned(PAGE_SIZE))) u8 empty_page[PAGE_SIZE];

/* Serializes the per-CPU initialization steps performed in entry(). */
static DEFINE_SPINLOCK(init_lock);
/* ID of the CPU that ran init_early(); -1 until the first CPU arrives. */
static unsigned int master_cpu_id = -1;
/* Number of CPUs that completed cpu_init(); polled by waiting CPUs. */
static volatile unsigned int initialized_cpus;
/* First error encountered during setup, 0 if none. */
static volatile int error;
31
/*
 * One-time early setup, executed exactly once by the first CPU entering the
 * hypervisor (the "master"). Initializes paging, the root cell and the
 * architecture backend. On failure, stores the error code in the global
 * 'error' for entry() to evaluate.
 */
static void init_early(unsigned int cpu_id)
{
        /*
         * Total size of the hypervisor core image plus one per-CPU data
         * region for every supportable CPU.
         */
        unsigned long core_and_percpu_size = hypervisor_header.core_size +
                sizeof(struct per_cpu) * hypervisor_header.max_cpus;
        struct jailhouse_memory hv_page;

        master_cpu_id = cpu_id;

        /* The system configuration is loaded directly behind the core and
         * per-CPU regions. */
        system_config = (struct jailhouse_system *)
                (JAILHOUSE_BASE + core_and_percpu_size);

        arch_dbg_write_init();

        printk("\nInitializing Jailhouse hypervisor %s on CPU %d\n",
               JAILHOUSE_VERSION, cpu_id);
        printk("Code location: %p\n", __text_start);

        error = paging_init();
        if (error)
                return;

        root_cell.config = &system_config->root_cell;

        root_cell.id = -1;
        error = cell_init(&root_cell);
        if (error)
                return;

        error = arch_init_early();
        if (error)
                return;

        /*
         * Back the region of the hypervisor core and per-CPU page with empty
         * pages for Linux. This allows to fault-in the hypervisor region into
         * Linux' page table before shutdown without triggering violations.
         */
        hv_page.phys_start = paging_hvirt2phys(empty_page);
        hv_page.virt_start = paging_hvirt2phys(&hypervisor_header);
        hv_page.size = PAGE_SIZE;
        hv_page.flags = JAILHOUSE_MEM_READ;
        /* Map the same zero page repeatedly, one page at a time. */
        while (core_and_percpu_size > 0) {
                error = arch_map_memory_region(&root_cell, &hv_page);
                if (error)
                        return;
                core_and_percpu_size -= PAGE_SIZE;
                hv_page.virt_start += PAGE_SIZE;
        }

        paging_dump_stats("after early setup");
        printk("Initializing processors:\n");
}
84
85 static void cpu_init(struct per_cpu *cpu_data)
86 {
87         int err = -EINVAL;
88
89         printk(" CPU %d... ", cpu_data->cpu_id);
90
91         if (!cpu_id_valid(cpu_data->cpu_id))
92                 goto failed;
93
94         cpu_data->cell = &root_cell;
95
96         err = arch_cpu_init(cpu_data);
97         if (err)
98                 goto failed;
99
100         printk("OK\n");
101
102         /*
103          * If this CPU is last, make sure everything was committed before we
104          * signal the other CPUs spinning on initialized_cpus that they can
105          * continue.
106          */
107         memory_barrier();
108         initialized_cpus++;
109         return;
110
111 failed:
112         printk("FAILED\n");
113         error = err;
114 }
115
116 int map_root_memory_regions(void)
117 {
118         const struct jailhouse_memory *mem;
119         unsigned int n;
120         int err;
121
122         for_each_mem_region(mem, root_cell.config, n) {
123                 if (JAILHOUSE_MEMORY_IS_SUBPAGE(mem))
124                         err = mmio_subpage_register(&root_cell, mem);
125                 else
126                         err = arch_map_memory_region(&root_cell, mem);
127                 if (err)
128                         return err;
129         }
130         return 0;
131 }
132
133 static void init_late(void)
134 {
135         unsigned int cpu, expected_cpus = 0;
136
137         for_each_cpu(cpu, root_cell.cpu_set)
138                 expected_cpus++;
139         if (hypervisor_header.online_cpus != expected_cpus) {
140                 error = -EINVAL;
141                 return;
142         }
143
144         error = arch_init_late();
145         if (error)
146                 return;
147
148         config_commit(&root_cell);
149
150         paging_dump_stats("after late setup");
151 }
152
153 int entry(unsigned int cpu_id, struct per_cpu *cpu_data)
154 {
155         static volatile bool activate;
156         bool master = false;
157
158         cpu_data->cpu_id = cpu_id;
159
160         spin_lock(&init_lock);
161
162         if (master_cpu_id == -1) {
163                 master = true;
164                 init_early(cpu_id);
165         }
166
167         if (!error)
168                 cpu_init(cpu_data);
169
170         spin_unlock(&init_lock);
171
172         while (!error && initialized_cpus < hypervisor_header.online_cpus)
173                 cpu_relax();
174
175         if (!error && master) {
176                 init_late();
177                 if (!error) {
178                         /*
179                          * Make sure everything was committed before we signal
180                          * the other CPUs that they can continue.
181                          */
182                         memory_barrier();
183                         activate = true;
184                 }
185         } else {
186                 while (!error && !activate)
187                         cpu_relax();
188         }
189
190         if (error) {
191                 if (master)
192                         arch_shutdown();
193                 arch_cpu_restore(cpu_data, error);
194                 return error;
195         }
196
197         if (master)
198                 printk("Activating hypervisor\n");
199
200         /* point of no return */
201         arch_cpu_activate_vmm(cpu_data);
202 }
203
/** Hypervisor description header, placed in the dedicated ".header" section
 * so the loader can locate it at a fixed position in the image. */
struct jailhouse_header __attribute__((section(".header")))
hypervisor_header = {
        .signature = JAILHOUSE_SIGNATURE,
        /* Core image size: everything from the load base up to the start of
         * the page pool. */
        .core_size = (unsigned long)__page_pool - JAILHOUSE_BASE,
        .percpu_size = sizeof(struct per_cpu),
        /* Architecture-specific entry trampoline invoked by the driver. */
        .entry = arch_entry,
};