/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
13 #include <jailhouse/control.h>
14 #include <jailhouse/paging.h>
15 #include <jailhouse/printk.h>
16 #include <asm/sysregs.h>
17 #include <asm/control.h>
19 int arch_map_memory_region(struct cell *cell,
20 const struct jailhouse_memory *mem)
22 u64 phys_start = mem->phys_start;
23 u32 flags = PTE_FLAG_VALID | PTE_ACCESS_FLAG;
25 if (mem->flags & JAILHOUSE_MEM_READ)
26 flags |= S2_PTE_ACCESS_RO;
27 if (mem->flags & JAILHOUSE_MEM_WRITE)
28 flags |= S2_PTE_ACCESS_WO;
29 if (mem->flags & JAILHOUSE_MEM_IO)
30 flags |= S2_PTE_FLAG_DEVICE;
32 flags |= S2_PTE_FLAG_NORMAL;
33 if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
34 phys_start = paging_hvirt2phys(&cell->comm_page);
36 if (!(mem->flags & JAILHOUSE_MEM_EXECUTE))
37 flags |= S2_PAGE_ACCESS_XN;
40 return paging_create(&cell->arch.mm, phys_start, mem->size,
41 mem->virt_start, flags, PAGING_NON_COHERENT);
44 int arch_unmap_memory_region(struct cell *cell,
45 const struct jailhouse_memory *mem)
47 return paging_destroy(&cell->arch.mm, mem->virt_start, mem->size,
51 unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
52 unsigned long gphys, unsigned long flags)
54 /* Translate IPA->PA */
55 return paging_virt2phys(&cpu_data->cell->arch.mm, gphys, flags);
58 int arch_mmu_cell_init(struct cell *cell)
60 cell->arch.mm.root_paging = cell_paging;
61 cell->arch.mm.root_table =
62 page_alloc_aligned(&mem_pool, ARM_CELL_ROOT_PT_SZ);
64 if (!cell->arch.mm.root_table)
70 void arch_mmu_cell_destroy(struct cell *cell)
72 page_free(&mem_pool, cell->arch.mm.root_table, ARM_CELL_ROOT_PT_SZ);
75 int arch_mmu_cpu_cell_init(struct per_cpu *cpu_data)
77 struct cell *cell = cpu_data->cell;
78 unsigned long cell_table = paging_hvirt2phys(cell->arch.mm.root_table);
82 if (cell->id > 0xff) {
83 panic_printk("No cell ID available\n");
86 vttbr |= (u64)cell->id << VTTBR_VMID_SHIFT;
87 vttbr |= (u64)(cell_table & TTBR_MASK);
89 arm_write_sysreg(VTTBR_EL2, vttbr);
90 arm_write_sysreg(VTCR_EL2, vtcr);
92 /* Ensure that the new VMID is present before flushing the caches */
95 * At initialisation, arch_config_commit does not act on other CPUs,
96 * since they register themselves to the root cpu_set afterwards. It
97 * means that this unconditionnal flush is redundant on master CPU.
99 arch_cpu_tlb_flush(cpu_data);
104 void arch_cpu_tlb_flush(struct per_cpu *cpu_data)
107 * Invalidate all stage-1 and 2 TLB entries for the current VMID
108 * ERET will ensure completion of these ops
110 arm_write_sysreg(TLBIALL, 1);
112 cpu_data->flush_vcpu_caches = false;
115 void arch_cell_caches_flush(struct cell *cell)
117 /* Only the first CPU needs to clean the data caches */
118 spin_lock(&cell->arch.caches_lock);
119 if (cell->arch.needs_flush) {
121 * Since there is no way to know which virtual addresses have been used
122 * by the root cell to write the new cell's data, a complete clean has
125 arch_cpu_dcaches_flush(CACHES_CLEAN_INVALIDATE);
126 cell->arch.needs_flush = false;
128 spin_unlock(&cell->arch.caches_lock);
131 * New instructions may have been written, so the I-cache needs to be
132 * invalidated even though the VMID is different.
133 * A complete invalidation is the only way to ensure all virtual aliases
134 * of these memory locations are invalidated, whatever the cache type.
136 arch_cpu_icache_flush();
138 /* ERET will ensure context synchronization */