]> rtime.felk.cvut.cz Git - jailhouse.git/blobdiff - hypervisor/arch/arm/mmu_cell.c
arm: put the value of VTCR for cells in a define
[jailhouse.git] / hypervisor / arch / arm / mmu_cell.c
index 90e06543e6971535520bf34f301ec28d4b6071de..d3031def7cfe98f487232d701040b7126a0ae6d9 100644 (file)
@@ -26,10 +26,7 @@ int arch_map_memory_region(struct cell *cell,
                flags |= S2_PTE_ACCESS_RO;
        if (mem->flags & JAILHOUSE_MEM_WRITE)
                flags |= S2_PTE_ACCESS_WO;
-       /*
-        * `DMA' may be a bit misleading here: it is used to define MMIO regions
-        */
-       if (mem->flags & JAILHOUSE_MEM_DMA)
+       if (mem->flags & JAILHOUSE_MEM_IO)
                flags |= S2_PTE_FLAG_DEVICE;
        else
                flags |= S2_PTE_FLAG_NORMAL;
@@ -60,8 +57,10 @@ unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
 
 int arch_mmu_cell_init(struct cell *cell)
 {
-       cell->arch.mm.root_paging = hv_paging;
-       cell->arch.mm.root_table = page_alloc(&mem_pool, 1);
+       cell->arch.mm.root_paging = cell_paging;
+       cell->arch.mm.root_table =
+               page_alloc_aligned(&mem_pool, ARM_CELL_ROOT_PT_SZ);
+
        if (!cell->arch.mm.root_table)
                return -ENOMEM;
 
@@ -70,7 +69,7 @@ int arch_mmu_cell_init(struct cell *cell)
 
 void arch_mmu_cell_destroy(struct cell *cell)
 {
-       page_free(&mem_pool, cell->arch.mm.root_table, 1);
+       page_free(&mem_pool, cell->arch.mm.root_table, ARM_CELL_ROOT_PT_SZ);
 }
 
 int arch_mmu_cpu_cell_init(struct per_cpu *cpu_data)
@@ -78,12 +77,7 @@ int arch_mmu_cpu_cell_init(struct per_cpu *cpu_data)
        struct cell *cell = cpu_data->cell;
        unsigned long cell_table = paging_hvirt2phys(cell->arch.mm.root_table);
        u64 vttbr = 0;
-       u32 vtcr = T0SZ
-               | SL0 << TCR_SL0_SHIFT
-               | (TCR_RGN_WB_WA << TCR_IRGN0_SHIFT)
-               | (TCR_RGN_WB_WA << TCR_ORGN0_SHIFT)
-               | (TCR_INNER_SHAREABLE << TCR_SH0_SHIFT)
-               | VTCR_RES1;
+       u32 vtcr = VTCR_CELL;
 
        if (cell->id > 0xff) {
                panic_printk("No cell ID available\n");
@@ -95,14 +89,27 @@ int arch_mmu_cpu_cell_init(struct per_cpu *cpu_data)
        arm_write_sysreg(VTTBR_EL2, vttbr);
        arm_write_sysreg(VTCR_EL2, vtcr);
 
+       /* Ensure that the new VMID is present before flushing the caches */
        isb();
+       /*
+        * At initialisation, arch_config_commit does not act on other CPUs,
+        * since they register themselves with the root cpu_set afterwards.
+        * This means the unconditional flush below is redundant on the
+        * master CPU.
+        */
+       arch_cpu_tlb_flush(cpu_data);
+
+       return 0;
+}
+
+void arch_cpu_tlb_flush(struct per_cpu *cpu_data)
+{
        /*
         * Invalidate all stage-1 and 2 TLB entries for the current VMID
         * ERET will ensure completion of these ops
         */
        arm_write_sysreg(TLBIALL, 1);
-
-       return 0;
+       dsb(nsh);
+       cpu_data->flush_vcpu_caches = false;
 }
 
 void arch_cell_caches_flush(struct cell *cell)