rtime.felk.cvut.cz Git - jailhouse.git/commitdiff
core: Introduce arch_config_commit
author: Jan Kiszka <jan.kiszka@siemens.com>
Fri, 6 Jun 2014 09:08:03 +0000 (11:08 +0200)
committer: Jan Kiszka <jan.kiszka@siemens.com>
Sun, 15 Jun 2014 05:08:50 +0000 (07:08 +0200)
This function allows us to consistently flush affected caches after
configuration changes. We did this after cell creation, partially did it
after destruction, but forgot about it on load/start. Flushing is now
extended to the CPU performing the changes as well as all CPUs of a
created or destroyed cell.

This change also enables the split-up of IOMMU activation and related
root cell and memory region mapping setup, a precondition for generic
memory region mapping.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
hypervisor/arch/arm/setup.c
hypervisor/arch/x86/control.c
hypervisor/arch/x86/include/asm/vtd.h
hypervisor/arch/x86/vtd.c
hypervisor/control.c
hypervisor/include/jailhouse/control.h
hypervisor/setup.c

index b474a7006491376a68921c76a4f416ea90ae6fa3..a282685173e1fcf114b57c2833f947d3288f24be 100644 (file)
@@ -58,6 +58,8 @@ int arch_unmap_memory_region(struct cell *cell,
                             const struct jailhouse_memory *mem)
 { return -ENOSYS; }
 void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *new_cell) {}
+void arch_config_commit(struct per_cpu *cpu_data,
+                       struct cell *cell_added_removed) {}
 void *memcpy(void *dest, const void *src, unsigned long n) { return NULL; }
 void arch_dbg_write(const char *msg) {}
 void arch_shutdown(void) {}
index 90b53f1e5161f6a8558e2b0f8ad9f7915281bd0d..57cc3e176bab6a98ff6721a24ca4fc0a5b2044b2 100644 (file)
@@ -28,15 +28,6 @@ struct exception_frame {
        u64 ss;
 };
 
-/* all cell CPUs (except cpu_data) have to be stopped */
-static void flush_root_cell_cpu_caches(struct per_cpu *cpu_data)
-{
-       unsigned int cpu;
-
-       for_each_cpu_except(cpu, root_cell.cpu_set, cpu_data->cpu_id)
-               per_cpu(cpu)->flush_caches = true;
-}
-
 int arch_cell_create(struct per_cpu *cpu_data, struct cell *cell)
 {
        int err;
@@ -44,7 +35,6 @@ int arch_cell_create(struct per_cpu *cpu_data, struct cell *cell)
        err = vmx_cell_init(cell);
        if (err)
                return err;
-       flush_root_cell_cpu_caches(cpu_data);
 
        err = vtd_cell_init(cell);
        if (err)
@@ -84,7 +74,26 @@ void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *cell)
 {
        vtd_cell_exit(cell);
        vmx_cell_exit(cell);
-       flush_root_cell_cpu_caches(cpu_data);
+}
+
+/* all root cell CPUs (except cpu_data) have to be stopped */
+void arch_config_commit(struct per_cpu *cpu_data,
+                       struct cell *cell_added_removed)
+{
+       unsigned int cpu;
+
+       for_each_cpu_except(cpu, root_cell.cpu_set, cpu_data->cpu_id)
+               per_cpu(cpu)->flush_caches = true;
+
+       if (cell_added_removed)
+               for_each_cpu_except(cpu, cell_added_removed->cpu_set,
+                                   cpu_data->cpu_id)
+                       per_cpu(cpu)->flush_caches = true;
+
+       x86_tlb_flush_all();
+       vmx_invept();
+
+       vtd_config_commit(cell_added_removed);
 }
 
 void arch_shutdown(void)
index 662fda1f84c0f9679db6dd852cb6fd2b11fbd80e..133c0fccaa2f4779d3848a6ac4f6b7f11cfe01da 100644 (file)
@@ -143,6 +143,8 @@ int vtd_unmap_memory_region(struct cell *cell,
                            const struct jailhouse_memory *mem);
 void vtd_cell_exit(struct cell *cell);
 
+void vtd_config_commit(struct cell *cell_added_removed);
+
 void vtd_shutdown(void);
 
 void vtd_check_pending_faults(struct per_cpu *cpu_data);
index 7f629582e4c2231c86d013e7bda99c70d1b2ed25..e2b7aff5dc65533678fc333244d4e4fefa316ac9 100644 (file)
@@ -379,7 +379,6 @@ int vtd_cell_init(struct cell *cell)
                jailhouse_cell_mem_regions(cell->config);
        const struct jailhouse_pci_device *dev =
                jailhouse_cell_pci_devices(cell->config);
-       void *reg_base = dmar_reg_base;
        int n, err;
 
        // HACK for QEMU
@@ -409,28 +408,8 @@ int vtd_cell_init(struct cell *cell)
                        return -ENOMEM;
        }
 
-       vtd_flush_domain_caches(root_cell.id);
-
        vtd_init_fault_nmi();
 
-       if (!(mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_TES))
-               for (n = 0; n < dmar_units; n++, reg_base += PAGE_SIZE) {
-                       mmio_write64(reg_base + VTD_RTADDR_REG,
-                                    page_map_hvirt2phys(root_entry_table));
-                       mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_SRTP);
-                       while (!(mmio_read32(reg_base + VTD_GSTS_REG) &
-                                VTD_GSTS_SRTP))
-                               cpu_relax();
-
-                       vtd_flush_dmar_caches(reg_base, VTD_CCMD_CIRG_GLOBAL,
-                                             VTD_IOTLB_IIRG_GLOBAL);
-
-                       mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_TE);
-                       while (!(mmio_read32(reg_base + VTD_GSTS_REG) &
-                                VTD_GSTS_TES))
-                               cpu_relax();
-               }
-
        return 0;
 }
 
@@ -503,10 +482,39 @@ void vtd_cell_exit(struct cell *cell)
                               "root cell\n");
        }
 
-       vtd_flush_domain_caches(cell->id);
+       page_free(&mem_pool, cell->vtd.pg_structs.root_table, 1);
+}
+
+void vtd_config_commit(struct cell *cell_added_removed)
+{
+       void *reg_base = dmar_reg_base;
+       int n;
+
+       // HACK for QEMU
+       if (dmar_units == 0)
+               return;
+
+       if (cell_added_removed)
+               vtd_flush_domain_caches(cell_added_removed->id);
        vtd_flush_domain_caches(root_cell.id);
 
-       page_free(&mem_pool, cell->vtd.pg_structs.root_table, 1);
+       if (mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_TES)
+               return;
+
+       for (n = 0; n < dmar_units; n++, reg_base += PAGE_SIZE) {
+               mmio_write64(reg_base + VTD_RTADDR_REG,
+                            page_map_hvirt2phys(root_entry_table));
+               mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_SRTP);
+               while (!(mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_SRTP))
+                       cpu_relax();
+
+               vtd_flush_dmar_caches(reg_base, VTD_CCMD_CIRG_GLOBAL,
+                                     VTD_IOTLB_IIRG_GLOBAL);
+
+               mmio_write32(reg_base + VTD_GCMD_REG, VTD_GCMD_TE);
+               while (!(mmio_read32(reg_base + VTD_GSTS_REG) & VTD_GSTS_TES))
+                       cpu_relax();
+       }
 }
 
 void vtd_shutdown(void)
index f4af3013d087ed1aac57b328ea4187ebcdbc146c..8dd8a41aa0f612bda5abd8096ace1ad13f078f65 100644 (file)
@@ -284,6 +284,8 @@ static void cell_destroy_internal(struct per_cpu *cpu_data, struct cell *cell)
        }
 
        arch_cell_destroy(cpu_data, cell);
+
+       arch_config_commit(cpu_data, cell);
 }
 
 static int cell_create(struct per_cpu *cpu_data, unsigned long config_address)
@@ -393,6 +395,8 @@ static int cell_create(struct per_cpu *cpu_data, unsigned long config_address)
        if (err)
                goto err_restore_root;
 
+       arch_config_commit(cpu_data, cell);
+
        cell->comm_page.comm_region.cell_state = JAILHOUSE_CELL_SHUT_DOWN;
 
        last = &root_cell;
@@ -422,6 +426,7 @@ err_restore_root:
                remap_to_root_cell(mem, WARN_ON_ERROR);
        for_each_cpu(cpu, cell->cpu_set)
                set_bit(cpu, root_cell.cpu_set->bitmap);
+       arch_config_commit(cpu_data, NULL);
 err_free_cpu_set:
        destroy_cpu_set(cell);
 err_free_cell:
@@ -495,6 +500,9 @@ static int cell_start(struct per_cpu *cpu_data, unsigned long id)
                                if (err)
                                        goto out_resume;
                        }
+
+               arch_config_commit(cpu_data, NULL);
+
                cell->loadable = false;
        }
 
@@ -546,6 +554,8 @@ static int cell_set_loadable(struct per_cpu *cpu_data, unsigned long id)
                                goto out_resume;
                }
 
+       arch_config_commit(cpu_data, NULL);
+
        printk("Cell \"%s\" can be loaded\n", cell->config->name);
 
 out_resume:
index b2c68a9ea4b22a9ee50a67a92c8c6abcc96f863b..a4cd4d65a7bc8dabd4acefbfc9621fb4e2931d2f 100644 (file)
@@ -59,6 +59,9 @@ int arch_unmap_memory_region(struct cell *cell,
 int arch_cell_create(struct per_cpu *cpu_data, struct cell *cell);
 void arch_cell_destroy(struct per_cpu *cpu_data, struct cell *cell);
 
+void arch_config_commit(struct per_cpu *cpu_data,
+                       struct cell *cell_added_removed);
+
 void arch_shutdown(void);
 
 void __attribute__((noreturn)) arch_panic_stop(struct per_cpu *cpu_data);
index 5647885d3f12bfb47b447ec1b390168e09e4fe35..bcc98f115dea850df145710b3622400159665a6f 100644 (file)
@@ -140,12 +140,14 @@ failed:
                error = err;
 }
 
-static void init_late(void)
+static void init_late(struct per_cpu *cpu_data)
 {
        error = arch_init_late();
        if (error)
                return;
 
+       arch_config_commit(cpu_data, NULL);
+
        page_map_dump_stats("after late setup");
        printk("Initializing remaining processors:\n");
 }
@@ -167,7 +169,7 @@ int entry(unsigned int cpu_id, struct per_cpu *cpu_data)
                cpu_init(cpu_data);
 
                if (master && !error)
-                       init_late();
+                       init_late(cpu_data);
        }
 
        spin_unlock(&init_lock);