/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 *
 * Authors:
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <jailhouse/control.h>
#include <jailhouse/printk.h>
#include <jailhouse/processor.h>

#include <asm/apic.h>
#include <asm/control.h>
#include <asm/ioapic.h>
#include <asm/vmx.h>
#include <asm/vtd.h>
22 struct exception_frame {
32 int arch_cell_create(struct per_cpu *cpu_data, struct cell *cell)
36 err = vmx_cell_init(cell);
40 err = vtd_cell_init(cell);
44 ioapic_cell_init(cell);
45 ioapic_root_cell_shrink(cell->config);
47 cell->comm_page.comm_region.pm_timer_address =
48 system_config->platform_info.x86.pm_timer_address;
/*
 * Map a guest memory region into both the CPU-side (VMX/EPT) and the
 * DMA-side (VT-d) translation of the cell. On VT-d failure the EPT
 * mapping is rolled back so both views stay consistent.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int arch_map_memory_region(struct cell *cell,
			   const struct jailhouse_memory *mem)
{
	int err;

	err = vmx_map_memory_region(cell, mem);
	if (err)
		return err;

	err = vtd_map_memory_region(cell, mem);
	if (err)
		/* keep CPU and DMA mappings in sync on failure */
		vmx_unmap_memory_region(cell, mem);

	return err;
}
/*
 * Remove a guest memory region from both translations of the cell,
 * DMA-side (VT-d) first, then CPU-side (VMX/EPT).
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int arch_unmap_memory_region(struct cell *cell,
			     const struct jailhouse_memory *mem)
{
	int err;

	err = vtd_unmap_memory_region(cell, mem);
	if (err)
		return err;

	return vmx_unmap_memory_region(cell, mem);
}
/*
 * Tear down the x86-specific state of a cell, mirroring arch_cell_create
 * in reverse order.
 * NOTE(review): only the IOAPIC teardown survived extraction — the VT-d
 * and VMX exit calls are reconstructed from the init order; confirm
 * against upstream.
 */
void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *cell)
{
	ioapic_cell_exit(cell);
	vtd_cell_exit(cell);
	vmx_cell_exit(cell);
}
87 /* all root cell CPUs (except cpu_data) have to be stopped */
88 void arch_config_commit(struct per_cpu *cpu_data,
89 struct cell *cell_added_removed)
93 for_each_cpu_except(cpu, root_cell.cpu_set, cpu_data->cpu_id)
94 per_cpu(cpu)->flush_virt_caches = true;
96 if (cell_added_removed)
97 for_each_cpu_except(cpu, cell_added_removed->cpu_set,
99 per_cpu(cpu)->flush_virt_caches = true;
103 vtd_config_commit(cell_added_removed);
/*
 * Arch-wide shutdown hook, invoked when the hypervisor is being disabled.
 * NOTE(review): the body was lost in extraction — upstream shuts down
 * VT-d DMA remapping here; confirm against upstream.
 */
void arch_shutdown(void)
{
	vtd_shutdown();
}
111 void arch_suspend_cpu(unsigned int cpu_id)
113 struct per_cpu *target_data = per_cpu(cpu_id);
116 spin_lock(&target_data->control_lock);
118 target_data->stop_cpu = true;
119 target_stopped = target_data->cpu_stopped;
121 spin_unlock(&target_data->control_lock);
123 if (!target_stopped) {
124 apic_send_nmi_ipi(target_data);
126 while (!target_data->cpu_stopped)
131 void arch_resume_cpu(unsigned int cpu_id)
133 /* make any state changes visible before releasing the CPU */
136 per_cpu(cpu_id)->stop_cpu = false;
139 /* target cpu has to be stopped */
140 void arch_reset_cpu(unsigned int cpu_id)
142 per_cpu(cpu_id)->sipi_vector = APIC_BSP_PSEUDO_SIPI;
144 arch_resume_cpu(cpu_id);
147 /* target cpu has to be stopped */
148 void arch_park_cpu(unsigned int cpu_id)
150 per_cpu(cpu_id)->init_signaled = true;
152 arch_resume_cpu(cpu_id);
155 void arch_shutdown_cpu(unsigned int cpu_id)
157 arch_suspend_cpu(cpu_id);
158 per_cpu(cpu_id)->shutdown_cpu = true;
159 arch_resume_cpu(cpu_id);
161 * Note: The caller has to ensure that the target CPU has enough time
162 * to reach the shutdown position before destroying the code path it
163 * has to take to get there. This can be ensured by bringing the CPU
164 * online again under Linux before cleaning up the hypervisor.
168 void x86_send_init_sipi(unsigned int cpu_id, enum x86_init_sipi type,
171 struct per_cpu *target_data = per_cpu(cpu_id);
172 bool send_nmi = false;
174 spin_lock(&target_data->control_lock);
176 if (type == X86_INIT) {
177 if (!target_data->wait_for_sipi) {
178 target_data->init_signaled = true;
181 } else if (target_data->wait_for_sipi) {
182 target_data->sipi_vector = sipi_vector;
186 spin_unlock(&target_data->control_lock);
189 apic_send_nmi_ipi(target_data);
192 /* control_lock has to be held */
193 static void x86_enter_wait_for_sipi(struct per_cpu *cpu_data)
195 cpu_data->init_signaled = false;
196 cpu_data->wait_for_sipi = true;
198 vmx_cpu_park(cpu_data);
201 int x86_handle_events(struct per_cpu *cpu_data)
203 int sipi_vector = -1;
205 spin_lock(&cpu_data->control_lock);
208 if (cpu_data->init_signaled && !cpu_data->stop_cpu) {
209 x86_enter_wait_for_sipi(cpu_data);
214 cpu_data->cpu_stopped = true;
216 spin_unlock(&cpu_data->control_lock);
218 while (cpu_data->stop_cpu)
221 if (cpu_data->shutdown_cpu) {
223 vmx_cpu_exit(cpu_data);
224 asm volatile("1: hlt; jmp 1b");
227 spin_lock(&cpu_data->control_lock);
229 cpu_data->cpu_stopped = false;
231 if (cpu_data->sipi_vector >= 0) {
232 if (!cpu_data->failed) {
233 cpu_data->wait_for_sipi = false;
234 sipi_vector = cpu_data->sipi_vector;
236 cpu_data->sipi_vector = -1;
238 } while (cpu_data->init_signaled);
240 if (cpu_data->flush_virt_caches) {
241 cpu_data->flush_virt_caches = false;
245 spin_unlock(&cpu_data->control_lock);
250 void x86_exception_handler(struct exception_frame *frame)
252 panic_printk("FATAL: Jailhouse triggered exception #%d\n",
254 if (frame->error != -1)
255 panic_printk("Error code: %x\n", frame->error);
256 panic_printk("Physical CPU ID: %d\n", phys_processor_id());
257 panic_printk("RIP: %p RSP: %p FLAGS: %x\n", frame->rip, frame->rsp,
263 void arch_panic_stop(struct per_cpu *cpu_data)
265 asm volatile("1: hlt; jmp 1b");
266 __builtin_unreachable();
269 void arch_panic_halt(struct per_cpu *cpu_data)
271 spin_lock(&cpu_data->control_lock);
272 x86_enter_wait_for_sipi(cpu_data);
273 spin_unlock(&cpu_data->control_lock);