/*
* Jailhouse, a Linux-based partitioning hypervisor
*
- * Copyright (c) Siemens AG, 2013-2015
+ * Copyright (c) Siemens AG, 2013-2016
*
* Authors:
* Jan Kiszka <jan.kiszka@siemens.com>
page_free(&mem_pool, cell->cpu_set, 1);
}
-/**
- * Perform basic validation of cell memory regions.
- * @param config Cell configuration description.
- *
- * @return 0 if the regions are valid, @c -EINVAL if the validation failed.
- *
- * Checks performed on the memory regions are:
- * \li Page alignment of physical and virtual address and the size.
- * \li Use of supported flags only.
- */
-int check_mem_regions(const struct jailhouse_cell_desc *config)
-{
- const struct jailhouse_memory *mem =
- jailhouse_cell_mem_regions(config);
- unsigned int n;
-
- for (n = 0; n < config->num_memory_regions; n++, mem++) {
- if (mem->phys_start & ~PAGE_MASK ||
- mem->virt_start & ~PAGE_MASK ||
- mem->size & ~PAGE_MASK ||
- mem->flags & ~JAILHOUSE_MEM_VALID_FLAGS)
- return trace_error(-EINVAL);
- }
- return 0;
-}
-
/**
* Apply system configuration changes.
* @param cell_added_removed Cell that was added to or removed from the
static int unmap_from_root_cell(const struct jailhouse_memory *mem)
{
/*
- * arch_unmap_memory_region uses the virtual address of the memory
- * region. As only the root cell has a guaranteed 1:1 mapping, make a
- * copy where we ensure this.
+ * arch_unmap_memory_region and mmio_subpage_unregister use the
+ * virtual address of the memory region. As only the root cell has a
+ * guaranteed 1:1 mapping, make a copy where we ensure this.
*/
struct jailhouse_memory tmp = *mem;
tmp.virt_start = tmp.phys_start;
+
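+ /*
+  * Sub-page regions have no page table mapping of their own; dropping
+  * the MMIO dispatch registration is all that is needed here.
+  */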
+ if (JAILHOUSE_MEMORY_IS_SUBPAGE(&tmp)) {
+ mmio_subpage_unregister(&root_cell, &tmp);
+ return 0;
+ }
+
return arch_unmap_memory_region(&root_cell, &tmp);
}
static int remap_to_root_cell(const struct jailhouse_memory *mem,
enum failure_mode mode)
{
- const struct jailhouse_memory *root_mem =
- jailhouse_cell_mem_regions(root_cell.config);
+ const struct jailhouse_memory *root_mem;
struct jailhouse_memory overlap;
unsigned int n;
int err = 0;
- for (n = 0; n < root_cell.config->num_memory_regions;
- n++, root_mem++) {
+ for_each_mem_region(root_mem, root_cell.config, n) {
if (address_in_region(mem->phys_start, root_mem)) {
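+ /* mem starts inside this root region; remap the part that overlaps it. */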
overlap.phys_start = mem->phys_start;
overlap.size = root_mem->size -
(overlap.phys_start - root_mem->phys_start);
overlap.flags = root_mem->flags;
- err = arch_map_memory_region(&root_cell, &overlap);
+ if (JAILHOUSE_MEMORY_IS_SUBPAGE(&overlap))
+ err = mmio_subpage_register(&root_cell, &overlap);
+ else
+ err = arch_map_memory_region(&root_cell, &overlap);
if (err) {
if (mode == ABORT_ON_ERROR)
break;
static void cell_destroy_internal(struct per_cpu *cpu_data, struct cell *cell)
{
- const struct jailhouse_memory *mem =
- jailhouse_cell_mem_regions(cell->config);
+ const struct jailhouse_memory *mem;
unsigned int cpu, n;
for_each_cpu(cpu, cell->cpu_set) {
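+ /* Reset the per-CPU statistics counters of the cell's CPUs. */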
memset(per_cpu(cpu)->stats, 0, sizeof(per_cpu(cpu)->stats));
}
- for (n = 0; n < cell->config->num_memory_regions; n++, mem++) {
- /*
- * This cannot fail. The region was mapped as a whole before,
- * thus no hugepages need to be broken up to unmap it.
- */
- arch_unmap_memory_region(cell, mem);
+ for_each_mem_region(mem, cell->config, n) {
+ if (!JAILHOUSE_MEMORY_IS_SUBPAGE(mem))
+ /*
+ * This cannot fail. The region was mapped as a whole
+ * before, thus no hugepages need to be broken up to
+ * unmap it.
+ */
+ arch_unmap_memory_region(cell, mem);
+
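+ /*
+  * Hand regions back to the root cell only if they were taken from it:
+  * the communication region is not backed by root memory, and
+  * root-shared regions remained mapped in the root cell all along.
+  */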
if (!(mem->flags & (JAILHOUSE_MEM_COMM_REGION |
JAILHOUSE_MEM_ROOTSHARED)))
remap_to_root_cell(mem, WARN_ON_ERROR);
goto err_resume;
}
- err = check_mem_regions(cfg);
- if (err)
- goto err_resume;
-
cell_pages = PAGES(sizeof(*cell) + cfg_total_size);
cell = page_alloc(&mem_pool, cell_pages);
if (!cell) {
* Unmap the cell's memory regions from the root cell and map them to
* the new cell instead.
*/
- mem = jailhouse_cell_mem_regions(cell->config);
- for (n = 0; n < cell->config->num_memory_regions; n++, mem++) {
+ for_each_mem_region(mem, cell->config, n) {
/*
* Unmap exceptions:
* - the communication region is not backed by root memory
goto err_destroy_cell;
}
- err = arch_map_memory_region(cell, mem);
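+ /*
+  * Regions that are not page-aligned cannot be mapped in the page
+  * tables; register them for sub-page MMIO dispatching instead.
+  */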
+ if (JAILHOUSE_MEMORY_IS_SUBPAGE(mem))
+ err = mmio_subpage_register(cell, mem);
+ else
+ err = arch_map_memory_region(cell, mem);
if (err)
goto err_destroy_cell;
}
if (cell->loadable) {
/* unmap all loadable memory regions from the root cell */
- mem = jailhouse_cell_mem_regions(cell->config);
- for (n = 0; n < cell->config->num_memory_regions; n++, mem++)
+ for_each_mem_region(mem, cell->config, n)
if (mem->flags & JAILHOUSE_MEM_LOADABLE) {
err = unmap_from_root_cell(mem);
if (err)
cell->loadable = true;
/* map all loadable memory regions into the root cell */
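+ /* This lets the root cell write the cell's images before it is started. */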
- mem = jailhouse_cell_mem_regions(cell->config);
- for (n = 0; n < cell->config->num_memory_regions; n++, mem++)
+ for_each_mem_region(mem, cell->config, n)
if (mem->flags & JAILHOUSE_MEM_LOADABLE) {
err = remap_to_root_cell(mem, ABORT_ON_ERROR);
if (err)
*/
void __attribute__((noreturn)) panic_stop(void)
{
+ struct cell *cell = this_cell();
+
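+ /* cell or its config may still be unset when stopping during early setup */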
panic_printk("Stopping CPU %d (Cell: \"%s\")\n", this_cpu_id(),
- this_cell()->config->name);
+ cell && cell->config ? cell->config->name : "<UNSET>");
if (phys_processor_id() == panic_cpu)
panic_in_progress = 0;