/*
 * Unit-test stubs: empty/degenerate implementations of the arch hooks and
 * libc symbols pulled in by the code under test.
 * NOTE(review): the stray '+' prefixes on some lines look like leftover
 * unified-diff markers — this chunk appears to be patch residue, not
 * directly compilable C; confirm against the original patch.
 */
void arch_suspend_cpu(unsigned int cpu_id) {}
void arch_resume_cpu(unsigned int cpu_id) {}
void arch_reset_cpu(unsigned int cpu_id) {}
+void arch_park_cpu(unsigned int cpu_id) {}
void arch_shutdown_cpu(unsigned int cpu_id) {}
/* Cell creation is not exercised by these tests; report "not implemented". */
int arch_cell_create(struct per_cpu *cpu_data, struct cell *new_cell)
{ return -ENOSYS; }
+void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *new_cell) {}
/* Degenerate stub: copies nothing and returns NULL (not dest as real memcpy). */
void *memcpy(void *dest, const void *src, unsigned long n) { return NULL; }
void arch_dbg_write(const char *msg) {}
arch_resume_cpu(cpu_id);
}
+void arch_park_cpu(unsigned int cpu_id)
+{
+ per_cpu(cpu_id)->init_signaled = true;
+
+ /* make state change visible before signaling the CPU */
+ memory_barrier();
+
+ apic_send_nmi_ipi(per_cpu(cpu_id));
+}
+
/*
 * NOTE(review): this span looks like collapsed diff context merged from
 * several hunks — a void function cannot "return vmx_cell_init(cell)",
 * and neither cpu_data nor cell is declared in this scope. Left
 * byte-identical; needs reconstruction against the original patch
 * before it can compile.
 */
void arch_shutdown_cpu(unsigned int cpu_id)
{
	arch_suspend_cpu(cpu_id);
	flush_linux_cpu_caches(cpu_data);
	return vmx_cell_init(cell);
}
+
/*
 * Tear down the architecture-specific state of a cell: release its VMX
 * resources, then flush the Linux CPU caches so stale lines do not
 * survive the teardown.
 * (Leftover '+' diff markers removed from the original patch residue.)
 */
void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *cell)
{
	vmx_cell_exit(cell);
	flush_linux_cpu_caches(cpu_data);
}
/* VMX cell/CPU lifecycle entry points (vmx_cell_exit is added by this patch). */
int vmx_cell_init(struct cell *cell);
void vmx_cell_shrink(struct cell *cell, struct jailhouse_cell_desc *config);
+void vmx_cell_exit(struct cell *cell);
int vmx_cpu_init(struct per_cpu *cpu_data);
void vmx_cpu_exit(struct per_cpu *cpu_data);
vmx_invept();
}
+static bool address_in_region(unsigned long addr,
+ struct jailhouse_memory *region)
+{
+ return addr >= region->phys_start &&
+ addr < (region->phys_start + region->size);
+}
+
+static void vmx_remap_to_linux(struct jailhouse_memory *mem)
+{
+ struct jailhouse_memory *linux_mem, overlap;
+ int n, err;
+
+ linux_mem = (void *)linux_cell.config +
+ sizeof(struct jailhouse_cell_desc) +
+ linux_cell.config->cpu_set_size;
+
+ for (n = 0; n < linux_cell.config->num_memory_regions;
+ n++, linux_mem++) {
+ if (address_in_region(mem->phys_start, linux_mem)) {
+ overlap.phys_start = mem->phys_start;
+ overlap.size = linux_mem->size -
+ (overlap.phys_start - linux_mem->phys_start);
+ if (overlap.size > mem->size)
+ overlap.size = mem->size;
+ } else if (address_in_region(linux_mem->phys_start, mem)) {
+ overlap.phys_start = linux_mem->phys_start;
+ overlap.size = mem->size -
+ (overlap.phys_start - mem->phys_start);
+ if (overlap.size > linux_mem->size)
+ overlap.size = linux_mem->size;
+ } else
+ continue;
+
+ overlap.virt_start = linux_mem->virt_start +
+ overlap.phys_start - linux_mem->phys_start;
+ overlap.access_flags = linux_mem->access_flags;
+
+ err = vmx_map_memory_region(&linux_cell, &overlap);
+ if (err)
+ printk("WARNING: Failed to re-assign memory region "
+ "to Linux cell\n");
+ }
+}
+
+void vmx_cell_exit(struct cell *cell)
+{
+ struct jailhouse_cell_desc *config = cell->config;
+ u8 *pio_bitmap, *linux_pio_bitmap, *b;
+ struct jailhouse_memory *mem;
+ u32 pio_bitmap_size;
+ int n;
+
+ mem = (void *)config + sizeof(struct jailhouse_cell_desc) +
+ config->cpu_set_size;
+
+ for (n = 0; n < config->num_memory_regions; n++, mem++) {
+ page_map_destroy(cell->vmx.ept, mem->virt_start, mem->size,
+ PAGE_DIR_LEVELS);
+ vmx_remap_to_linux(mem);
+ }
+ page_map_destroy(cell->vmx.ept, XAPIC_BASE, PAGE_SIZE,
+ PAGE_DIR_LEVELS);
+
+ pio_bitmap = (void *)mem +
+ config->num_irq_lines * sizeof(struct jailhouse_irq_line);
+ pio_bitmap_size = config->pio_bitmap_size;
+
+ linux_pio_bitmap = (void *)linux_cell.config +
+ sizeof(struct jailhouse_cell_desc) +
+ linux_cell.config->cpu_set_size +
+ linux_cell.config->num_memory_regions *
+ sizeof(struct jailhouse_memory) +
+ linux_cell.config->num_irq_lines *
+ sizeof(struct jailhouse_irq_line);
+ if (linux_cell.config->pio_bitmap_size < pio_bitmap_size)
+ pio_bitmap_size = linux_cell.config->pio_bitmap_size;
+
+ for (b = linux_cell.vmx.io_bitmap; pio_bitmap_size > 0;
+ b++, pio_bitmap++, linux_pio_bitmap++, pio_bitmap_size--)
+ *b &= *pio_bitmap | *linux_pio_bitmap;
+
+ page_free(&mem_pool, cell->vmx.ept, 1);
+}
+
void vmx_invept(void)
{
unsigned long ept_cap = read_msr(MSR_IA32_VMX_EPT_VPID_CAP);
int cell_destroy(struct per_cpu *cpu_data, unsigned long name_address)
{
- return -ENOSYS;
+ unsigned long mapping_addr = FOREIGN_MAPPING_BASE +
+ cpu_data->cpu_id * PAGE_SIZE * NUM_FOREIGN_PAGES;
+ struct cell *cell, *previous;
+ unsigned long name_size;
+ const char *name;
+ unsigned int cpu;
+ int err = 0;
+
+ // TODO: access control
+
+ /* We do not support destruction over non-Linux cells so far */
+ if (cpu_data->cell != &linux_cell)
+ return -EINVAL;
+
+ cell_suspend(cpu_data);
+
+ name_size = (name_address & ~PAGE_MASK) + JAILHOUSE_CELL_NAME_MAXLEN;
+
+ err = page_map_create(hv_page_table, name_address & PAGE_MASK,
+ name_size, mapping_addr, PAGE_READONLY_FLAGS,
+ PAGE_DEFAULT_FLAGS, PAGE_DIR_LEVELS);
+ if (err)
+ goto resume_out;
+
+ name = (const char *)(mapping_addr + (name_address & ~PAGE_MASK));
+
+ cell = cell_find(name);
+ if (!cell) {
+ err = -ENOENT;
+ goto resume_out;
+ }
+
+ /* Linux cell cannot be destroyed */
+ if (cell == &linux_cell) {
+ err = -EINVAL;
+ goto resume_out;
+ }
+
+ printk("Closing cell \"%s\"\n", name);
+
+ for_each_cpu(cpu, cell->cpu_set) {
+ printk(" Parking CPU %d\n", cpu);
+ arch_park_cpu(cpu);
+
+ set_bit(cpu, linux_cell.cpu_set->bitmap);
+ per_cpu(cpu)->cell = &linux_cell;
+ }
+
+ arch_cell_destroy(cpu_data, cell);
+
+ previous = &linux_cell;
+ while (previous->next != cell)
+ previous = previous->next;
+ previous->next = cell->next;
+
+ page_free(&mem_pool, cell, cell->data_pages);
+ page_map_dump_stats("after cell destruction");
+
+resume_out:
+ cell_resume(cpu_data);
+
+ return err;
}
int shutdown(struct per_cpu *cpu_data)
/* Arch hooks for CPU and cell lifecycle; this patch adds arch_park_cpu and
 * arch_cell_destroy, and renames arch_cell_create's second parameter. */
void arch_suspend_cpu(unsigned int cpu_id);
void arch_resume_cpu(unsigned int cpu_id);
void arch_reset_cpu(unsigned int cpu_id);
+void arch_park_cpu(unsigned int cpu_id);
void arch_shutdown_cpu(unsigned int cpu_id);
-int arch_cell_create(struct per_cpu *cpu_data, struct cell *new_cell);
+int arch_cell_create(struct per_cpu *cpu_data, struct cell *cell);
+void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *cell);
#include <jailhouse/cell-config.h>
+#define ENOENT 2
#define EIO 5
#define E2BIG 7
#define ENOMEM 12