const struct jailhouse_memory *mem)
{ return -ENOSYS; }
void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *new_cell) {}
+void arch_config_commit(struct per_cpu *cpu_data,
+ struct cell *cell_added_removed) {}
void *memcpy(void *dest, const void *src, unsigned long n) { return NULL; }
void arch_dbg_write(const char *msg) {}
void arch_shutdown(void) {}
u64 ss;
};
-/* all cell CPUs (except cpu_data) have to be stopped */
-static void flush_root_cell_cpu_caches(struct per_cpu *cpu_data)
-{
- unsigned int cpu;
-
- for_each_cpu_except(cpu, root_cell.cpu_set, cpu_data->cpu_id)
- per_cpu(cpu)->flush_caches = true;
-}
-
int arch_cell_create(struct per_cpu *cpu_data, struct cell *cell)
{
int err;
err = vmx_cell_init(cell);
if (err)
return err;
- flush_root_cell_cpu_caches(cpu_data);
err = vtd_cell_init(cell);
if (err)
{
vtd_cell_exit(cell);
vmx_cell_exit(cell);
- flush_root_cell_cpu_caches(cpu_data);
+}
+
+/* all root cell CPUs (except cpu_data) and, if non-NULL, all CPUs of cell_added_removed have to be stopped */
+void arch_config_commit(struct per_cpu *cpu_data,
+ struct cell *cell_added_removed)
+{
+ unsigned int cpu;
+
+ for_each_cpu_except(cpu, root_cell.cpu_set, cpu_data->cpu_id)
+ per_cpu(cpu)->flush_caches = true;
+
+ if (cell_added_removed)
+ for_each_cpu_except(cpu, cell_added_removed->cpu_set,
+ cpu_data->cpu_id)
+ per_cpu(cpu)->flush_caches = true;
+
+ x86_tlb_flush_all();
+ vmx_invept();
+
+ vtd_config_commit(cell_added_removed);
}
void arch_shutdown(void)
const struct jailhouse_memory *mem);
void vtd_cell_exit(struct cell *cell);
+void vtd_config_commit(struct cell *cell_added_removed);
+
void vtd_shutdown(void);
void vtd_check_pending_faults(struct per_cpu *cpu_data);
jailhouse_cell_mem_regions(cell->config);
const struct jailhouse_pci_device *dev =
jailhouse_cell_pci_devices(cell->config);
- void *reg_base = dmar_reg_base;
int n, err;
// HACK for QEMU
return -ENOMEM;
}
- vtd_flush_domain_caches(root_cell.id);
-
vtd_init_fault_nmi();
- if (!(mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_TES))
- for (n = 0; n < dmar_units; n++, reg_base += PAGE_SIZE) {
- mmio_write64(reg_base + VTD_RTADDR_REG,
- page_map_hvirt2phys(root_entry_table));
- mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_SRTP);
- while (!(mmio_read32(reg_base + VTD_GSTS_REG) &
- VTD_GSTS_SRTP))
- cpu_relax();
-
- vtd_flush_dmar_caches(reg_base, VTD_CCMD_CIRG_GLOBAL,
- VTD_IOTLB_IIRG_GLOBAL);
-
- mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_TE);
- while (!(mmio_read32(reg_base + VTD_GSTS_REG) &
- VTD_GSTS_TES))
- cpu_relax();
- }
-
return 0;
}
"root cell\n");
}
- vtd_flush_domain_caches(cell->id);
+ page_free(&mem_pool, cell->vtd.pg_structs.root_table, 1);
+}
+
+void vtd_config_commit(struct cell *cell_added_removed)
+{
+ void *reg_base = dmar_reg_base;
+ int n;
+
+	// HACK for QEMU: presumably no DMAR units are reported there, so skip hardware programming
+ if (dmar_units == 0)
+ return;
+
+ if (cell_added_removed)
+ vtd_flush_domain_caches(cell_added_removed->id);
vtd_flush_domain_caches(root_cell.id);
- page_free(&mem_pool, cell->vtd.pg_structs.root_table, 1);
+ if (mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_TES)
+ return;
+
+ for (n = 0; n < dmar_units; n++, reg_base += PAGE_SIZE) {
+ mmio_write64(reg_base + VTD_RTADDR_REG,
+ page_map_hvirt2phys(root_entry_table));
+ mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_SRTP);
+ while (!(mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_SRTP))
+ cpu_relax();
+
+ vtd_flush_dmar_caches(reg_base, VTD_CCMD_CIRG_GLOBAL,
+ VTD_IOTLB_IIRG_GLOBAL);
+
+ mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_TE);
+ while (!(mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_TES))
+ cpu_relax();
+ }
}
void vtd_shutdown(void)
}
arch_cell_destroy(cpu_data, cell);
+
+ arch_config_commit(cpu_data, cell);
}
static int cell_create(struct per_cpu *cpu_data, unsigned long config_address)
if (err)
goto err_restore_root;
+ arch_config_commit(cpu_data, cell);
+
cell->comm_page.comm_region.cell_state = JAILHOUSE_CELL_SHUT_DOWN;
last = &root_cell;
remap_to_root_cell(mem, WARN_ON_ERROR);
for_each_cpu(cpu, cell->cpu_set)
set_bit(cpu, root_cell.cpu_set->bitmap);
+ arch_config_commit(cpu_data, NULL);
err_free_cpu_set:
destroy_cpu_set(cell);
err_free_cell:
if (err)
goto out_resume;
}
+
+ arch_config_commit(cpu_data, NULL);
+
cell->loadable = false;
}
goto out_resume;
}
+ arch_config_commit(cpu_data, NULL);
+
printk("Cell \"%s\" can be loaded\n", cell->config->name);
out_resume:
int arch_cell_create(struct per_cpu *cpu_data, struct cell *cell);
void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *cell);
+void arch_config_commit(struct per_cpu *cpu_data,
+ struct cell *cell_added_removed);
+
void arch_shutdown(void);
void __attribute__((noreturn)) arch_panic_stop(struct per_cpu *cpu_data);
error = err;
}
-static void init_late(void)
+static void init_late(struct per_cpu *cpu_data)
{
error = arch_init_late();
if (error)
return;
+ arch_config_commit(cpu_data, NULL);
+
page_map_dump_stats("after late setup");
printk("Initializing remaining processors:\n");
}
cpu_init(cpu_data);
if (master && !error)
- init_late();
+ init_late(cpu_data);
}
spin_unlock(&init_lock);