/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/control.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>
#include <asm/apic.h>
#include <asm/control.h>
#include <asm/ioapic.h>
#include <asm/vmx.h>
#include <asm/vtd.h>

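/*
 * Layout of the stack frame the exception entry code hands to
 * x86_exception_handler() below. On 64-bit exception entry the CPU pushes
 * SS, RSP, RFLAGS, CS and RIP; the vector and the error code (or -1 for
 * vectors without one) are presumably pushed by the assembly entry stubs,
 * which live outside this file.
 */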
struct exception_frame {
	u64 vector;
	u64 error;
	u64 rip;
	u64 cs;
	u64 flags;
	u64 rsp;
	u64 ss;
};

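/*
 * Set up the architecture-specific state of a new cell: the VMX structures
 * (presumably the cell's EPT tables) first, then the VT-d (DMA remapping)
 * context, then the per-cell IOAPIC state, shrinking the root cell's
 * IOAPIC access according to the new cell's config. Finally the ACPI PM
 * timer port from the system configuration is published in the cell's
 * communication region so the cell can access the timer directly.
 */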
int arch_cell_create(struct per_cpu *cpu_data, struct cell *cell)
{
	int err;

	err = vmx_cell_init(cell);
	if (err)
		return err;

	err = vtd_cell_init(cell);
	if (err) {
		vmx_cell_exit(cell);
		return err;
	}
	ioapic_cell_init(cell);
	ioapic_root_cell_shrink(cell->config);

	cell->comm_page.comm_region.pm_timer_address =
		system_config->platform_info.x86.pm_timer_address;

	return 0;
}

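/*
 * Guest memory regions are mapped in two stages: into the VMX (EPT) tables
 * that govern CPU accesses and into the VT-d tables that govern device DMA.
 * arch_map_memory_region() rolls the EPT mapping back if the VT-d update
 * fails; arch_unmap_memory_region() reverses the order and drops the VT-d
 * entry before the EPT one.
 */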
int arch_map_memory_region(struct cell *cell,
			   const struct jailhouse_memory *mem)
{
	int err;

	err = vmx_map_memory_region(cell, mem);
	if (err)
		return err;

	err = vtd_map_memory_region(cell, mem);
	if (err)
		vmx_unmap_memory_region(cell, mem);
	return err;
}

int arch_unmap_memory_region(struct cell *cell,
			     const struct jailhouse_memory *mem)
{
	int err;

	err = vtd_unmap_memory_region(cell, mem);
	if (err)
		return err;

	return vmx_unmap_memory_region(cell, mem);
}

void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *cell)
{
	ioapic_cell_exit(cell);
	vtd_cell_exit(cell);
	vmx_cell_exit(cell);
}

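/*
 * After a configuration change, every other CPU of the root cell (and of a
 * newly added or removed cell) is marked so that it invalidates its EPT
 * caches the next time it passes through x86_handle_events(); the calling
 * CPU flushes immediately via vmx_invept(). The VT-d side is committed in
 * one step by vtd_config_commit().
 */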
/* all root cell CPUs (except cpu_data) have to be stopped */
void arch_config_commit(struct per_cpu *cpu_data,
			struct cell *cell_added_removed)
{
	unsigned int cpu;

	for_each_cpu_except(cpu, root_cell.cpu_set, cpu_data->cpu_id)
		per_cpu(cpu)->flush_virt_caches = true;

	if (cell_added_removed)
		for_each_cpu_except(cpu, cell_added_removed->cpu_set,
				    cpu_data->cpu_id)
			per_cpu(cpu)->flush_virt_caches = true;

	vmx_invept();

	vtd_config_commit(cell_added_removed);
}

void arch_shutdown(void)
{
	vtd_shutdown();
}

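/*
 * Suspend/resume handshake: arch_suspend_cpu() sets stop_cpu and, unless
 * the target is already parked in x86_handle_events(), kicks it with an
 * NMI and spins until the target acknowledges by setting cpu_stopped.
 * arch_resume_cpu() simply clears stop_cpu again after a memory barrier.
 */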
void arch_suspend_cpu(unsigned int cpu_id)
{
	struct per_cpu *target_data = per_cpu(cpu_id);
	bool target_stopped;

	spin_lock(&target_data->control_lock);

	target_data->stop_cpu = true;
	target_stopped = target_data->cpu_stopped;

	spin_unlock(&target_data->control_lock);

	if (!target_stopped) {
		apic_send_nmi_ipi(target_data);

		while (!target_data->cpu_stopped)
			cpu_relax();
	}
}

void arch_resume_cpu(unsigned int cpu_id)
{
	/* make any state changes visible before releasing the CPU */
	memory_barrier();

	per_cpu(cpu_id)->stop_cpu = false;
}

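/*
 * The two helpers below release a stopped CPU into different states:
 * arch_reset_cpu() hands it a pseudo SIPI vector (APIC_BSP_PSEUDO_SIPI,
 * presumably defined in asm/apic.h) so it restarts execution, while
 * arch_park_cpu() signals an INIT so that x86_handle_events() parks it in
 * the wait-for-SIPI state.
 */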
/* target cpu has to be stopped */
void arch_reset_cpu(unsigned int cpu_id)
{
	per_cpu(cpu_id)->sipi_vector = APIC_BSP_PSEUDO_SIPI;

	arch_resume_cpu(cpu_id);
}

/* target cpu has to be stopped */
void arch_park_cpu(unsigned int cpu_id)
{
	per_cpu(cpu_id)->init_signaled = true;

	arch_resume_cpu(cpu_id);
}

void arch_shutdown_cpu(unsigned int cpu_id)
{
	arch_suspend_cpu(cpu_id);
	per_cpu(cpu_id)->shutdown_cpu = true;
	arch_resume_cpu(cpu_id);
	/*
	 * Note: The caller has to ensure that the target CPU has enough time
	 * to reach the shutdown position before destroying the code path it
	 * has to take to get there. This can be ensured by bringing the CPU
	 * online again under Linux before cleaning up the hypervisor.
	 */
}

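/*
 * Deliver an emulated INIT or SIPI to another CPU. A sketch of the enum
 * used for @type, assuming it is declared next to this function's
 * prototype in asm/control.h:
 *
 *	enum x86_init_sipi { X86_INIT, X86_SIPI };
 *
 * An INIT is only recorded if the target is not already waiting for a
 * SIPI; a SIPI vector is only stored if it is. Whenever an event is
 * recorded, the target is kicked with an NMI so that it processes the
 * event in x86_handle_events().
 */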
void x86_send_init_sipi(unsigned int cpu_id, enum x86_init_sipi type,
			int sipi_vector)
{
	struct per_cpu *target_data = per_cpu(cpu_id);
	bool send_nmi = false;

	spin_lock(&target_data->control_lock);

	if (type == X86_INIT) {
		if (!target_data->wait_for_sipi) {
			target_data->init_signaled = true;
			send_nmi = true;
		}
	} else if (target_data->wait_for_sipi) {
		target_data->sipi_vector = sipi_vector;
		send_nmi = true;
	}

	spin_unlock(&target_data->control_lock);

	if (send_nmi)
		apic_send_nmi_ipi(target_data);
}

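/*
 * Park a CPU until the next SIPI: the virtual APIC state is cleared and
 * vmx_cpu_park() presumably switches the vCPU into VMX's wait-for-SIPI
 * activity state (or an equivalent parked guest state) so that guest
 * execution stops until a new start vector arrives.
 */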
/* control_lock has to be held */
static void x86_enter_wait_for_sipi(struct per_cpu *cpu_data)
{
	cpu_data->init_signaled = false;
	cpu_data->wait_for_sipi = true;
	apic_clear();
	vmx_cpu_park(cpu_data);
}

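/*
 * Central event loop, presumably invoked from the NMI/VM-exit handling
 * path. It processes INIT, stop and shutdown requests and returns the
 * SIPI vector the CPU shall start from, or -1 if execution simply
 * continues where it left off. A failed CPU (cpu_data->failed) discards a
 * pending SIPI without leaving the wait-for-SIPI state.
 */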
int x86_handle_events(struct per_cpu *cpu_data)
{
	int sipi_vector = -1;

	spin_lock(&cpu_data->control_lock);

	do {
		if (cpu_data->init_signaled && !cpu_data->stop_cpu) {
			x86_enter_wait_for_sipi(cpu_data);
			sipi_vector = -1;
			break;
		}

		cpu_data->cpu_stopped = true;

		spin_unlock(&cpu_data->control_lock);

		while (cpu_data->stop_cpu)
			cpu_relax();

		if (cpu_data->shutdown_cpu) {
			apic_clear();
			vmx_cpu_exit(cpu_data);
			asm volatile("1: hlt; jmp 1b");
		}

		spin_lock(&cpu_data->control_lock);

		cpu_data->cpu_stopped = false;

		if (cpu_data->sipi_vector >= 0) {
			if (!cpu_data->failed) {
				cpu_data->wait_for_sipi = false;
				sipi_vector = cpu_data->sipi_vector;
			}
			cpu_data->sipi_vector = -1;
		}
	} while (cpu_data->init_signaled);

	if (cpu_data->flush_virt_caches) {
		cpu_data->flush_virt_caches = false;
		vmx_invept();
	}

	spin_unlock(&cpu_data->control_lock);

	return sipi_vector;
}

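/*
 * Handler for exceptions raised inside the hypervisor itself; the entry
 * stubs that build struct exception_frame and call this function
 * presumably live in the assembly entry code, not in this file. Any such
 * exception is fatal and stops the system.
 */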
void x86_exception_handler(struct exception_frame *frame)
{
	panic_printk("FATAL: Jailhouse triggered exception #%d\n",
		     frame->vector);
	if (frame->error != -1)
		panic_printk("Error code: %x\n", frame->error);
	panic_printk("Physical CPU ID: %d\n", phys_processor_id());
	panic_printk("RIP: %p RSP: %p FLAGS: %x\n", frame->rip, frame->rsp,
		     frame->flags);

	panic_stop(NULL);
}

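/*
 * Panic handling: arch_panic_stop() halts the calling CPU for good (the
 * hlt/jmp loop never returns), while arch_panic_halt() only parks it in
 * the wait-for-SIPI state so it can later be restarted by another
 * INIT/SIPI sequence.
 */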
void arch_panic_stop(struct per_cpu *cpu_data)
{
	asm volatile("1: hlt; jmp 1b");
	__builtin_unreachable();
}

void arch_panic_halt(struct per_cpu *cpu_data)
{
	spin_lock(&cpu_data->control_lock);
	x86_enter_wait_for_sipi(cpu_data);
	spin_unlock(&cpu_data->control_lock);
}