/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Siemens AG, 2013
 *
 * Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
13 #include <jailhouse/control.h>
14 #include <jailhouse/printk.h>
15 #include <jailhouse/processor.h>
17 #include <asm/control.h>
18 #include <asm/ioapic.h>
/*
 * Register frame handed to x86_exception_handler() by the exception entry
 * stubs. Only the opening line is visible in this extraction; the member
 * list (at least error, rip, rsp and a flags field, per the accesses in
 * x86_exception_handler below) is on lines missing here — confirm layout
 * against the assembly entry code.
 */
22 struct exception_frame {
/*
 * Architecture-specific part of cell creation: set up the CPU-side
 * virtualization state (vmx_cell_init) and the DMA remapping state
 * (vtd_cell_init) for the new cell, then initialize its IOAPIC access
 * and shrink the root cell's IOAPIC resources to match the new cell's
 * configuration.
 *
 * NOTE(review): this extraction is missing interior lines (braces, the
 * declaration of err, and the error-return checks between the init
 * calls) — presumably each err is tested and a failure unwinds the
 * earlier init; verify against the full source.
 */
32 int arch_cell_create(struct per_cpu *cpu_data, struct cell *cell)
36 err = vmx_cell_init(cell);
40 err = vtd_cell_init(cell);
44 ioapic_cell_init(cell);
45 ioapic_root_cell_shrink(cell->config);
/*
 * Map a guest memory region into a cell: first into the CPU-side page
 * tables (vmx_map_memory_region), then into the DMA remapping tables
 * (vtd_map_memory_region). The vmx_unmap_memory_region call on the last
 * visible line suggests the VMX mapping is rolled back when the VT-d
 * mapping fails — the intervening error checks are on lines missing
 * from this extraction; confirm against the full source.
 */
50 int arch_map_memory_region(struct cell *cell,
51 const struct jailhouse_memory *mem)
55 err = vmx_map_memory_region(cell, mem);
59 err = vtd_map_memory_region(cell, mem);
61 vmx_unmap_memory_region(cell, mem);
/*
 * Remove a memory region from a cell: tear down the DMA remapping entry
 * first (vtd_unmap_memory_region), then the CPU-side mapping; the VMX
 * unmap result is returned to the caller. The check of the vtd err value
 * sits on a line missing from this extraction — presumably a vtd failure
 * is returned before the vmx unmap; verify against the full source.
 */
65 int arch_unmap_memory_region(struct cell *cell,
66 const struct jailhouse_memory *mem)
70 err = vtd_unmap_memory_region(cell, mem);
74 return vmx_unmap_memory_region(cell, mem);
/*
 * Architecture-specific part of cell destruction. Only the IOAPIC
 * cleanup is visible here; the VMX/VT-d teardown presumably follows on
 * lines missing from this extraction — confirm against the full source.
 */
77 void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *cell)
79 ioapic_cell_exit(cell);
/*
 * Commit a configuration change: request a virtualization-cache flush
 * (flush_virt_caches) on every root-cell CPU except the invoking one,
 * and — when a cell was added or removed — on that cell's CPUs as well,
 * then push the new configuration into the DMA remapping hardware via
 * vtd_config_commit. The flagged CPUs act on flush_virt_caches in
 * x86_handle_events() below.
 *
 * NOTE(review): the loop-variable declaration and the second argument of
 * the inner for_each_cpu_except (presumably cpu_data->cpu_id again) are
 * on lines missing from this extraction — verify.
 */
84 /* all root cell CPUs (except cpu_data) have to be stopped */
85 void arch_config_commit(struct per_cpu *cpu_data,
86 struct cell *cell_added_removed)
90 for_each_cpu_except(cpu, root_cell.cpu_set, cpu_data->cpu_id)
91 per_cpu(cpu)->flush_virt_caches = true;
93 if (cell_added_removed)
94 for_each_cpu_except(cpu, cell_added_removed->cpu_set,
96 per_cpu(cpu)->flush_virt_caches = true;
100 vtd_config_commit(cell_added_removed);
/*
 * Architecture-specific hypervisor shutdown hook. The body is entirely
 * on lines missing from this extraction — it may well be empty on x86,
 * but confirm against the full source.
 */
103 void arch_shutdown(void)
/*
 * Suspend a remote CPU: under the target's control_lock, set its
 * stop_cpu request flag and sample whether it already reported itself
 * stopped (cpu_stopped). If it has not, kick it with an NMI IPI and
 * spin until it acknowledges by setting cpu_stopped in
 * x86_handle_events(). The target stays suspended until
 * arch_resume_cpu() clears stop_cpu.
 *
 * NOTE(review): the declaration of target_stopped and the body of the
 * busy-wait loop (likely a cpu_relax-style pause) are on lines missing
 * from this extraction.
 */
108 void arch_suspend_cpu(unsigned int cpu_id)
110 struct per_cpu *target_data = per_cpu(cpu_id);
113 spin_lock(&target_data->control_lock);
115 target_data->stop_cpu = true;
116 target_stopped = target_data->cpu_stopped;
118 spin_unlock(&target_data->control_lock);
120 if (!target_stopped) {
121 apic_send_nmi_ipi(target_data);
123 while (!target_data->cpu_stopped)
/*
 * Release a previously suspended CPU by clearing its stop_cpu flag;
 * the target leaves its spin loop in x86_handle_events(). The missing
 * line between the comment and the store is presumably a memory
 * barrier enforcing the ordering the comment describes — verify.
 */
128 void arch_resume_cpu(unsigned int cpu_id)
130 /* make any state changes visible before releasing the CPU */
133 per_cpu(cpu_id)->stop_cpu = false;
/*
 * Reset a (stopped) CPU: queue the pseudo-SIPI vector used for a BSP
 * restart, then resume the CPU so x86_handle_events() picks the vector
 * up and restarts the virtual CPU there.
 */
136 /* target cpu has to be stopped */
137 void arch_reset_cpu(unsigned int cpu_id)
139 per_cpu(cpu_id)->sipi_vector = APIC_BSP_PSEUDO_SIPI;
141 arch_resume_cpu(cpu_id);
/*
 * Park a (stopped) CPU: flag a pending INIT (init_signaled), then
 * resume it; x86_handle_events() reacts by entering wait-for-SIPI
 * state via x86_enter_wait_for_sipi().
 */
144 /* target cpu has to be stopped */
145 void arch_park_cpu(unsigned int cpu_id)
147 per_cpu(cpu_id)->init_signaled = true;
149 arch_resume_cpu(cpu_id);
/*
 * Tear a CPU out of the hypervisor: suspend it, mark it for shutdown
 * (shutdown_cpu, honored in x86_handle_events() which then calls
 * vmx_cpu_exit and halts), and release it so it can run to that point.
 */
152 void arch_shutdown_cpu(unsigned int cpu_id)
154 arch_suspend_cpu(cpu_id);
155 per_cpu(cpu_id)->shutdown_cpu = true;
156 arch_resume_cpu(cpu_id);
158 * Note: The caller has to ensure that the target CPU has enough time
159 * to reach the shutdown position before destroying the code path it
160 * has to take to get there. This can be ensured by bringing the CPU
161 * online again under Linux before cleaning up the hypervisor.
/*
 * Deliver a virtual INIT or SIPI to a CPU. Under the target's
 * control_lock: for X86_INIT, flag init_signaled unless the target is
 * already waiting for a SIPI; for the SIPI case, store the vector only
 * if the target is in wait-for-sipi state. The final apic_send_nmi_ipi
 * kicks the target so x86_handle_events() processes the event.
 *
 * NOTE(review): the lines that set send_nmi = true and the
 * `if (send_nmi)` guard around the NMI send are missing from this
 * extraction, as is the third parameter (the sipi_vector used on the
 * SIPI path) — verify against the full source.
 */
165 void x86_send_init_sipi(unsigned int cpu_id, enum x86_init_sipi type,
168 struct per_cpu *target_data = per_cpu(cpu_id);
169 bool send_nmi = false;
171 spin_lock(&target_data->control_lock);
173 if (type == X86_INIT) {
174 if (!target_data->wait_for_sipi) {
175 target_data->init_signaled = true;
178 } else if (target_data->wait_for_sipi) {
179 target_data->sipi_vector = sipi_vector;
183 spin_unlock(&target_data->control_lock);
186 apic_send_nmi_ipi(target_data);
/*
 * Transition a CPU into wait-for-SIPI state: consume the pending INIT
 * (clear init_signaled), mark wait_for_sipi, and park the virtual CPU
 * via vmx_cpu_park(). The caller must hold cpu_data->control_lock, as
 * the original comment states.
 */
189 /* control_lock has to be held */
190 static void x86_enter_wait_for_sipi(struct per_cpu *cpu_data)
192 cpu_data->init_signaled = false;
193 cpu_data->wait_for_sipi = true;
195 vmx_cpu_park(cpu_data);
/*
 * Central per-CPU event loop, run when this CPU is kicked (e.g. by the
 * NMI IPIs sent above). Under control_lock it processes, apparently in
 * a loop (note the `} while (cpu_data->init_signaled)` tail):
 *
 *  - a pending INIT (init_signaled, unless a stop is requested):
 *    enter wait-for-SIPI state;
 *  - a stop request: acknowledge by setting cpu_stopped, drop the lock,
 *    spin while stop_cpu is set (this is the suspend handshake with
 *    arch_suspend_cpu/arch_resume_cpu), honor a shutdown_cpu request by
 *    exiting VMX and halting forever, then retake the lock and clear
 *    cpu_stopped;
 *  - a queued SIPI vector (sipi_vector >= 0): unless the CPU has
 *    failed, leave wait-for-sipi and hand the vector to the caller as
 *    the return value; the slot is reset to -1 either way;
 *  - a flush_virt_caches request (set by arch_config_commit): clear the
 *    flag; the actual flush presumably happens on the missing line(s)
 *    that follow — verify.
 *
 * Returns the consumed SIPI vector, or -1 if none was pending.
 *
 * NOTE(review): many structural lines (the do/while head, braces, the
 * spin-loop body, the return statement) are missing from this
 * extraction, so the exact nesting above is inferred — confirm against
 * the full source before relying on it.
 */
198 int x86_handle_events(struct per_cpu *cpu_data)
200 int sipi_vector = -1;
202 spin_lock(&cpu_data->control_lock);
205 if (cpu_data->init_signaled && !cpu_data->stop_cpu) {
206 x86_enter_wait_for_sipi(cpu_data);
211 cpu_data->cpu_stopped = true;
213 spin_unlock(&cpu_data->control_lock);
215 while (cpu_data->stop_cpu)
218 if (cpu_data->shutdown_cpu) {
220 vmx_cpu_exit(cpu_data);
221 asm volatile("1: hlt; jmp 1b");
224 spin_lock(&cpu_data->control_lock);
226 cpu_data->cpu_stopped = false;
228 if (cpu_data->sipi_vector >= 0) {
229 if (!cpu_data->failed) {
230 cpu_data->wait_for_sipi = false;
231 sipi_vector = cpu_data->sipi_vector;
233 cpu_data->sipi_vector = -1;
235 } while (cpu_data->init_signaled);
237 if (cpu_data->flush_virt_caches) {
238 cpu_data->flush_virt_caches = false;
242 spin_unlock(&cpu_data->control_lock);
/*
 * Fatal-exception reporter: dump the exception number, the error code
 * (when present, i.e. != -1), the physical CPU ID, and RIP/RSP/FLAGS
 * from the exception frame via panic_printk. The continuation
 * arguments of the first and last printk, and whatever follows (likely
 * a panic/stop), are on lines missing from this extraction.
 */
247 void x86_exception_handler(struct exception_frame *frame)
249 panic_printk("FATAL: Jailhouse triggered exception #%d\n",
251 if (frame->error != -1)
252 panic_printk("Error code: %x\n", frame->error);
253 panic_printk("Physical CPU ID: %d\n", phys_processor_id());
254 panic_printk("RIP: %p RSP: %p FLAGS: %x\n", frame->rip, frame->rsp,
/*
 * Dead-stop this CPU after a panic: spin in a hlt loop forever. The
 * __builtin_unreachable() tells the compiler control never returns.
 */
260 void arch_panic_stop(struct per_cpu *cpu_data)
262 asm volatile("1: hlt; jmp 1b");
263 __builtin_unreachable();
/*
 * Halt this CPU after a panic, but recoverably: take the control_lock
 * (required by x86_enter_wait_for_sipi) and park the CPU in
 * wait-for-SIPI state so it can later be restarted with an INIT/SIPI.
 */
266 void arch_panic_halt(struct per_cpu *cpu_data)
268 spin_lock(&cpu_data->control_lock);
269 x86_enter_wait_for_sipi(cpu_data);
270 spin_unlock(&cpu_data->control_lock);