jailhouse.git: hypervisor/arch/x86/control.c (blob d4efa1a58d1ef7e1d0a95d950b9dc547fa3a4a8a)
/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/control.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <asm/apic.h>
#include <asm/control.h>
#include <asm/ioapic.h>
#include <asm/vmx.h>
#include <asm/vtd.h>

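/*
 * Register frame consumed by x86_exception_handler(): vector and error code
 * first, followed by the exception state (RIP, CS, RFLAGS, RSP, SS).
 */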
struct exception_frame {
        u64 vector;
        u64 error;
        u64 rip;
        u64 cs;
        u64 flags;
        u64 rsp;
        u64 ss;
};

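/*
 * Create the architecture-specific state of a new cell: VT-x (EPT) paging
 * structures, the VT-d (DMA remapping) context, and the cell's I/O APIC
 * access, shrinking the root cell's I/O APIC window accordingly.
 */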
int arch_cell_create(struct per_cpu *cpu_data, struct cell *cell)
{
        int err;

        err = vmx_cell_init(cell);
        if (err)
                return err;

        err = vtd_cell_init(cell);
        if (err) {
                vmx_cell_exit(cell);
                return err;
        }

        ioapic_cell_init(cell);
        ioapic_root_cell_shrink(cell->config);

        return 0;
}

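/*
 * Map a memory region into the cell's EPT and VT-d page tables; the EPT
 * mapping is rolled back if the VT-d update fails.
 */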
int arch_map_memory_region(struct cell *cell,
                           const struct jailhouse_memory *mem)
{
        int err;

        err = vmx_map_memory_region(cell, mem);
        if (err)
                return err;

        err = vtd_map_memory_region(cell, mem);
        if (err)
                vmx_unmap_memory_region(cell, mem);
        return err;
}

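/*
 * Remove a memory region from the cell, dropping the VT-d mapping before
 * the EPT mapping.
 */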
int arch_unmap_memory_region(struct cell *cell,
                             const struct jailhouse_memory *mem)
{
        int err;

        err = vtd_unmap_memory_region(cell, mem);
        if (err)
                return err;

        return vmx_unmap_memory_region(cell, mem);
}

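/*
 * Tear down the architecture-specific cell state in the reverse order of
 * arch_cell_create().
 */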
void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *cell)
{
        ioapic_cell_exit(cell);
        vtd_cell_exit(cell);
        vmx_cell_exit(cell);
}

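/*
 * Commit a configuration change: mark the root cell's CPUs (and, if given,
 * those of the added or removed cell) for a virtual-cache flush, flush the
 * local EPT caches, and commit the new VT-d configuration.
 */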
/* all root cell CPUs (except cpu_data) have to be stopped */
void arch_config_commit(struct per_cpu *cpu_data,
                        struct cell *cell_added_removed)
{
        unsigned int cpu;

        for_each_cpu_except(cpu, root_cell.cpu_set, cpu_data->cpu_id)
                per_cpu(cpu)->flush_virt_caches = true;

        if (cell_added_removed)
                for_each_cpu_except(cpu, cell_added_removed->cpu_set,
                                    cpu_data->cpu_id)
                        per_cpu(cpu)->flush_virt_caches = true;

        vmx_invept();

        vtd_config_commit(cell_added_removed);
}

void arch_shutdown(void)
{
        vtd_shutdown();
}

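/*
 * Stop the target CPU inside the hypervisor: request the stop under its
 * control lock and, unless it is already stopped, kick it with an NMI and
 * wait until it reports cpu_stopped.
 */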
void arch_suspend_cpu(unsigned int cpu_id)
{
        struct per_cpu *target_data = per_cpu(cpu_id);
        bool target_stopped;

        spin_lock(&target_data->control_lock);

        target_data->stop_cpu = true;
        target_stopped = target_data->cpu_stopped;

        spin_unlock(&target_data->control_lock);

        if (!target_stopped) {
                apic_send_nmi_ipi(target_data);

                while (!target_data->cpu_stopped)
                        cpu_relax();
        }
}

void arch_resume_cpu(unsigned int cpu_id)
{
        /* make any state changes visible before releasing the CPU */
        memory_barrier();

        per_cpu(cpu_id)->stop_cpu = false;
}

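/* Reset a stopped CPU by queueing a pseudo-SIPI for it and releasing it. */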
/* target cpu has to be stopped */
void arch_reset_cpu(unsigned int cpu_id)
{
        per_cpu(cpu_id)->sipi_vector = APIC_BSP_PSEUDO_SIPI;

        arch_resume_cpu(cpu_id);
}

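/* Park a stopped CPU: signal an INIT so that it enters wait-for-SIPI once released. */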
/* target cpu has to be stopped */
void arch_park_cpu(unsigned int cpu_id)
{
        per_cpu(cpu_id)->init_signaled = true;

        arch_resume_cpu(cpu_id);
}

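/*
 * Shut down hypervisor mode on a CPU: suspend it, flag it for shutdown, and
 * release it so that it leaves VMX operation (see the note below).
 */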
void arch_shutdown_cpu(unsigned int cpu_id)
{
        arch_suspend_cpu(cpu_id);
        per_cpu(cpu_id)->shutdown_cpu = true;
        arch_resume_cpu(cpu_id);
        /*
         * Note: The caller has to ensure that the target CPU has enough time
         * to reach the shutdown position before destroying the code path it
         * has to take to get there. This can be ensured by bringing the CPU
         * online again under Linux before cleaning up the hypervisor.
         */
}

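/*
 * Deliver an emulated INIT or SIPI event to a cell CPU: record it under the
 * target's control lock and, if it requires handling, notify the target
 * with an NMI.
 */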
void x86_send_init_sipi(unsigned int cpu_id, enum x86_init_sipi type,
                        int sipi_vector)
{
        struct per_cpu *target_data = per_cpu(cpu_id);
        bool send_nmi = false;

        spin_lock(&target_data->control_lock);

        if (type == X86_INIT) {
                if (!target_data->wait_for_sipi) {
                        target_data->init_signaled = true;
                        send_nmi = true;
                }
        } else if (target_data->wait_for_sipi) {
                target_data->sipi_vector = sipi_vector;
                send_nmi = true;
        }

        spin_unlock(&target_data->control_lock);

        if (send_nmi)
                apic_send_nmi_ipi(target_data);
}

/* control_lock has to be held */
static void x86_enter_wait_for_sipi(struct per_cpu *cpu_data)
{
        cpu_data->init_signaled = false;
        cpu_data->wait_for_sipi = true;
        apic_clear();
        vmx_cpu_park(cpu_data);
}

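/*
 * Process pending management events on the calling CPU: enter wait-for-SIPI
 * on a signaled INIT, spin while a stop is requested, leave VMX operation on
 * shutdown, and perform a requested virtual-cache flush. Returns the
 * received SIPI vector, or -1 if none is pending.
 */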
int x86_handle_events(struct per_cpu *cpu_data)
{
        int sipi_vector = -1;

        spin_lock(&cpu_data->control_lock);

        do {
                if (cpu_data->init_signaled && !cpu_data->stop_cpu) {
                        x86_enter_wait_for_sipi(cpu_data);
                        sipi_vector = -1;
                        break;
                }

                cpu_data->cpu_stopped = true;

                spin_unlock(&cpu_data->control_lock);

                while (cpu_data->stop_cpu)
                        cpu_relax();

                if (cpu_data->shutdown_cpu) {
                        apic_clear();
                        vmx_cpu_exit(cpu_data);
                        asm volatile("1: hlt; jmp 1b");
                }

                spin_lock(&cpu_data->control_lock);

                cpu_data->cpu_stopped = false;

                if (cpu_data->sipi_vector >= 0) {
                        if (!cpu_data->failed) {
                                cpu_data->wait_for_sipi = false;
                                sipi_vector = cpu_data->sipi_vector;
                        }
                        cpu_data->sipi_vector = -1;
                }
        } while (cpu_data->init_signaled);

        if (cpu_data->flush_virt_caches) {
                cpu_data->flush_virt_caches = false;
                vmx_invept();
        }

        spin_unlock(&cpu_data->control_lock);

        return sipi_vector;
}

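/* Exceptions raised inside the hypervisor are fatal: dump the frame and stop. */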
void x86_exception_handler(struct exception_frame *frame)
{
        panic_printk("FATAL: Jailhouse triggered exception #%d\n",
                     frame->vector);
        if (frame->error != -1)
                panic_printk("Error code: %x\n", frame->error);
        panic_printk("Physical CPU ID: %d\n", phys_processor_id());
        panic_printk("RIP: %p RSP: %p FLAGS: %x\n", frame->rip, frame->rsp,
                     frame->flags);

        panic_stop(NULL);
}

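/* Stop the calling CPU for good after a fatal error. */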
void arch_panic_stop(struct per_cpu *cpu_data)
{
        asm volatile("1: hlt; jmp 1b");
        __builtin_unreachable();
}

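/* On panic, park the calling CPU in the emulated wait-for-SIPI state instead of halting it. */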
void arch_panic_halt(struct per_cpu *cpu_data)
{
        spin_lock(&cpu_data->control_lock);
        x86_enter_wait_for_sipi(cpu_data);
        spin_unlock(&cpu_data->control_lock);
}