int arch_map_memory_region(struct cell *cell,
const struct jailhouse_memory *mem)
{ return -ENOSYS; }
-void arch_unmap_memory_region(struct cell *cell,
- const struct jailhouse_memory *mem) {}
+int arch_unmap_memory_region(struct cell *cell,
+ const struct jailhouse_memory *mem)
+{ return -ENOSYS; }
void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *new_cell) {}
void *memcpy(void *dest, const void *src, unsigned long n) { return NULL; }
void arch_dbg_write(const char *msg) {}
return err;
}
-void arch_unmap_memory_region(struct cell *cell,
- const struct jailhouse_memory *mem)
+int arch_unmap_memory_region(struct cell *cell,
+ const struct jailhouse_memory *mem)
{
- vtd_unmap_memory_region(cell, mem);
- vmx_unmap_memory_region(cell, mem);
+ int err;
+
+ err = vtd_unmap_memory_region(cell, mem);
+	if (err)
+ return err;
+
+ return vmx_unmap_memory_region(cell, mem);
}
void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *cell)
int vmx_root_cell_shrink(struct jailhouse_cell_desc *config);
int vmx_map_memory_region(struct cell *cell,
const struct jailhouse_memory *mem);
-void vmx_unmap_memory_region(struct cell *cell,
- const struct jailhouse_memory *mem);
+int vmx_unmap_memory_region(struct cell *cell,
+ const struct jailhouse_memory *mem);
void vmx_cell_exit(struct cell *cell);
int vmx_cpu_init(struct per_cpu *cpu_data);
int vtd_root_cell_shrink(struct jailhouse_cell_desc *config);
int vtd_map_memory_region(struct cell *cell,
const struct jailhouse_memory *mem);
-void vtd_unmap_memory_region(struct cell *cell,
- const struct jailhouse_memory *mem);
+int vtd_unmap_memory_region(struct cell *cell,
+ const struct jailhouse_memory *mem);
void vtd_cell_exit(struct cell *cell);
void vtd_shutdown(void);
mem->virt_start, flags, PAGE_MAP_NON_COHERENT);
}
-void vmx_unmap_memory_region(struct cell *cell,
- const struct jailhouse_memory *mem)
+int vmx_unmap_memory_region(struct cell *cell,
+ const struct jailhouse_memory *mem)
{
- /* This cannot fail. The region was mapped as a whole before, thus no
- * hugepages need to be broken up to unmap it. */
- page_map_destroy(&cell->vmx.ept_structs, mem->virt_start, mem->size,
- PAGE_MAP_NON_COHERENT);
+ return page_map_destroy(&cell->vmx.ept_structs, mem->virt_start,
+ mem->size, PAGE_MAP_NON_COHERENT);
}
unsigned long arch_page_map_gphys2phys(struct per_cpu *cpu_data,
PAGE_MAP_COHERENT);
}
-void vtd_unmap_memory_region(struct cell *cell,
- const struct jailhouse_memory *mem)
+int vtd_unmap_memory_region(struct cell *cell,
+ const struct jailhouse_memory *mem)
{
// HACK for QEMU
if (dmar_units == 0)
- return;
+ return 0;
+
+ if (!(mem->flags & JAILHOUSE_MEM_DMA))
+ return 0;
- if (mem->flags & JAILHOUSE_MEM_DMA)
- /* This cannot fail. The region was mapped as a whole before,
- * thus no hugepages need to be broken up to unmap it. */
- page_map_destroy(&cell->vtd.pg_structs, mem->virt_start,
- mem->size, PAGE_MAP_COHERENT);
+ return page_map_destroy(&cell->vtd.pg_structs, mem->virt_start,
+ mem->size, PAGE_MAP_COHERENT);
}
static bool
mem = jailhouse_cell_mem_regions(cell->config);
for (n = 0; n < cell->config->num_memory_regions; n++, mem++) {
+ /*
+ * This cannot fail. The region was mapped as a whole before,
+ * thus no hugepages need to be broken up to unmap it.
+ */
arch_unmap_memory_region(cell, mem);
if (!(mem->flags & JAILHOUSE_MEM_COMM_REGION))
remap_to_root_cell(mem);
int arch_map_memory_region(struct cell *cell,
const struct jailhouse_memory *mem);
-void arch_unmap_memory_region(struct cell *cell,
- const struct jailhouse_memory *mem);
+int arch_unmap_memory_region(struct cell *cell,
+ const struct jailhouse_memory *mem);
int arch_cell_create(struct per_cpu *cpu_data, struct cell *cell);
void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *cell);