hypervisor/arch/arm/mmu_cell.c
/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/control.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <asm/sysregs.h>
#include <asm/control.h>

int arch_map_memory_region(struct cell *cell,
                           const struct jailhouse_memory *mem)
{
        u64 phys_start = mem->phys_start;
        u32 flags = PTE_FLAG_VALID | PTE_ACCESS_FLAG;

        if (mem->flags & JAILHOUSE_MEM_READ)
                flags |= S2_PTE_ACCESS_RO;
        if (mem->flags & JAILHOUSE_MEM_WRITE)
                flags |= S2_PTE_ACCESS_WO;
        if (mem->flags & JAILHOUSE_MEM_IO)
                flags |= S2_PTE_FLAG_DEVICE;
        else
                flags |= S2_PTE_FLAG_NORMAL;
        if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
                phys_start = paging_hvirt2phys(&cell->comm_page);
        /*
        if (!(mem->flags & JAILHOUSE_MEM_EXECUTE))
                flags |= S2_PAGE_ACCESS_XN;
        */

        return paging_create(&cell->arch.mm, phys_start, mem->size,
                mem->virt_start, flags, PAGING_NON_COHERENT);
}
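
/*
 * Minimal illustration of the flag derivation above. The descriptor values
 * are hypothetical and not part of any real cell configuration: a
 * read/write/execute RAM region maps as stage-2 normal memory with both
 * access bits set, while a JAILHOUSE_MEM_IO region would get
 * S2_PTE_FLAG_DEVICE instead of S2_PTE_FLAG_NORMAL.
 */
#if 0
static const struct jailhouse_memory example_ram = {
        .phys_start = 0x80000000,       /* hypothetical host-physical base */
        .virt_start = 0x80000000,       /* guest-physical (IPA) base */
        .size       = 0x100000,
        .flags      = JAILHOUSE_MEM_READ | JAILHOUSE_MEM_WRITE |
                      JAILHOUSE_MEM_EXECUTE,
};
/*
 * arch_map_memory_region(cell, &example_ram) would then call paging_create()
 * with flags == PTE_FLAG_VALID | PTE_ACCESS_FLAG | S2_PTE_ACCESS_RO |
 * S2_PTE_ACCESS_WO | S2_PTE_FLAG_NORMAL.
 */
#endif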

int arch_unmap_memory_region(struct cell *cell,
                             const struct jailhouse_memory *mem)
{
        return paging_destroy(&cell->arch.mm, mem->virt_start, mem->size,
                        PAGING_NON_COHERENT);
}

unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
                                     unsigned long gphys, unsigned long flags)
{
        /* Translate IPA->PA */
        return paging_virt2phys(&cpu_data->cell->arch.mm, gphys, flags);
}

int arch_mmu_cell_init(struct cell *cell)
{
        cell->arch.mm.root_paging = cell_paging;
        cell->arch.mm.root_table =
                page_alloc_aligned(&mem_pool, ARM_CELL_ROOT_PT_SZ);

        if (!cell->arch.mm.root_table)
                return -ENOMEM;

        return 0;
}

void arch_mmu_cell_destroy(struct cell *cell)
{
        page_free(&mem_pool, cell->arch.mm.root_table, ARM_CELL_ROOT_PT_SZ);
}
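
/*
 * Rough sketch (simplified, not the actual core code path) of how the hooks
 * in this file compose when a cell is created: allocate the stage-2 root,
 * install all memory regions from the cell configuration, then point the
 * CPU's VTTBR at the new structures. Error handling is simplified, and
 * "config_mem_regions"/"num_regions" are placeholders for the cell
 * configuration data.
 */
#if 0
static int example_cell_mmu_setup(struct cell *cell,
                                  const struct jailhouse_memory *config_mem_regions,
                                  unsigned int num_regions,
                                  struct per_cpu *cpu_data)
{
        unsigned int n;
        int err;

        err = arch_mmu_cell_init(cell);
        if (err)
                return err;

        for (n = 0; n < num_regions; n++) {
                err = arch_map_memory_region(cell, &config_mem_regions[n]);
                if (err) {
                        arch_mmu_cell_destroy(cell);
                        return err;
                }
        }

        /* each CPU assigned to the cell loads VTTBR/VTCR for itself */
        return arch_mmu_cpu_cell_init(cpu_data);
}
#endif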

int arch_mmu_cpu_cell_init(struct per_cpu *cpu_data)
{
        struct cell *cell = cpu_data->cell;
        unsigned long cell_table = paging_hvirt2phys(cell->arch.mm.root_table);
        u64 vttbr = 0;
        u32 vtcr = VTCR_CELL;

        if (cell->id > 0xff) {
                panic_printk("No cell ID available\n");
                return -E2BIG;
        }
        vttbr |= (u64)cell->id << VTTBR_VMID_SHIFT;
        vttbr |= (u64)(cell_table & TTBR_MASK);

        arm_write_sysreg(VTTBR_EL2, vttbr);
        arm_write_sysreg(VTCR_EL2, vtcr);

        /* Ensure that the new VMID is present before flushing the caches */
        isb();
        /*
         * At initialisation, arch_config_commit does not act on other CPUs,
         * since they register themselves to the root cpu_set afterwards.
         * This means that the unconditional flush below is redundant on the
         * master CPU.
         */
        arch_cpu_tlb_flush(cpu_data);

        return 0;
}
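
/*
 * Illustration of the VTTBR value composed above, assuming (as on ARMv7 with
 * LPAE) that VTTBR_VMID_SHIFT is 48 and TTBR_MASK clears the low attribute
 * bits; the concrete numbers are hypothetical. The cell ID doubles as the
 * VMID, which is why IDs above 0xff are rejected: the VMID field is 8 bits
 * wide.
 */
#if 0
/* cell->id == 1, root table at host-physical 0x7c001000: */
u64 example_vttbr = ((u64)1 << 48) | 0x7c001000ULL;
/* -> 0x000100007c001000: VMID 1 in bits [55:48], table base in the low bits */
#endif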

void arch_cpu_tlb_flush(struct per_cpu *cpu_data)
{
        /*
         * Invalidate all stage-1 and 2 TLB entries for the current VMID
         * ERET will ensure completion of these ops
         */
        arm_write_sysreg(TLBIALL, 1);
        dsb(nsh);
        cpu_data->flush_vcpu_caches = false;
}

void arch_cell_caches_flush(struct cell *cell)
{
        /* Only the first CPU needs to clean the data caches */
        spin_lock(&cell->arch.caches_lock);
        if (cell->arch.needs_flush) {
                /*
                 * Since there is no way to know which virtual addresses have
                 * been used by the root cell to write the new cell's data, a
                 * complete clean has to be performed.
                 */
                arch_cpu_dcaches_flush(CACHES_CLEAN_INVALIDATE);
                cell->arch.needs_flush = false;
        }
        spin_unlock(&cell->arch.caches_lock);

        /*
         * New instructions may have been written, so the I-cache needs to be
         * invalidated even though the VMID is different.
         * A complete invalidation is the only way to ensure all virtual
         * aliases of these memory locations are invalidated, whatever the
         * cache type.
         */
        arch_cpu_icache_flush();

        /* ERET will ensure context synchronization */
}