hypervisor/arch/arm/mmu_cell.c
/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/control.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <asm/sysregs.h>
#include <asm/control.h>

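/*
 * Map a memory region into the cell's stage-2 page tables. Access and
 * memory-type attributes are derived from the region flags; a region
 * marked JAILHOUSE_MEM_COMM_REGION is redirected to the cell's comm page.
 */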
int arch_map_memory_region(struct cell *cell,
			   const struct jailhouse_memory *mem)
{
	u64 phys_start = mem->phys_start;
	u32 flags = PTE_FLAG_VALID | PTE_ACCESS_FLAG;

	if (mem->flags & JAILHOUSE_MEM_READ)
		flags |= S2_PTE_ACCESS_RO;
	if (mem->flags & JAILHOUSE_MEM_WRITE)
		flags |= S2_PTE_ACCESS_WO;
	if (mem->flags & JAILHOUSE_MEM_IO)
		flags |= S2_PTE_FLAG_DEVICE;
	else
		flags |= S2_PTE_FLAG_NORMAL;
	if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
		phys_start = paging_hvirt2phys(&cell->comm_page);
	/*
	if (!(mem->flags & JAILHOUSE_MEM_EXECUTE))
		flags |= S2_PAGE_ACCESS_XN;
	*/

	return paging_create(&cell->arch.mm, phys_start, mem->size,
			     mem->virt_start, flags, PAGING_NON_COHERENT);
}

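/* Remove a previously mapped region from the cell's stage-2 page tables. */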
int arch_unmap_memory_region(struct cell *cell,
			     const struct jailhouse_memory *mem)
{
	return paging_destroy(&cell->arch.mm, mem->virt_start, mem->size,
			      PAGING_NON_COHERENT);
}

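/*
 * Walk the cell's stage-2 page tables to translate a guest-physical
 * (intermediate physical) address into a host-physical address.
 */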
unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
				     unsigned long gphys, unsigned long flags)
{
	/* Translate IPA->PA */
	return paging_virt2phys(&cpu_data->cell->arch.mm, gphys, flags);
}

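/*
 * Allocate the root of the cell's stage-2 page table from the hypervisor
 * memory pool.
 */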
int arch_mmu_cell_init(struct cell *cell)
{
	cell->arch.mm.root_paging = hv_paging;
	cell->arch.mm.root_table = page_alloc(&mem_pool, 1);
	if (!cell->arch.mm.root_table)
		return -ENOMEM;

	return 0;
}

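/* Release the cell's stage-2 root page table. */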
void arch_mmu_cell_destroy(struct cell *cell)
{
	page_free(&mem_pool, cell->arch.mm.root_table, 1);
}

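/*
 * Per-CPU stage-2 setup: program VTCR_EL2 with the translation parameters
 * and VTTBR_EL2 with the cell's root table and VMID (the cell ID, which
 * must fit into the 8-bit VMID field), then flush stale TLB entries for
 * that VMID.
 */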
int arch_mmu_cpu_cell_init(struct per_cpu *cpu_data)
{
	struct cell *cell = cpu_data->cell;
	unsigned long cell_table = paging_hvirt2phys(cell->arch.mm.root_table);
	u64 vttbr = 0;
	u32 vtcr = T0SZ
		| SL0 << TCR_SL0_SHIFT
		| (TCR_RGN_WB_WA << TCR_IRGN0_SHIFT)
		| (TCR_RGN_WB_WA << TCR_ORGN0_SHIFT)
		| (TCR_INNER_SHAREABLE << TCR_SH0_SHIFT)
		| VTCR_RES1;

	if (cell->id > 0xff) {
		panic_printk("No cell ID available\n");
		return -E2BIG;
	}
	vttbr |= (u64)cell->id << VTTBR_VMID_SHIFT;
	vttbr |= (u64)(cell_table & TTBR_MASK);

	arm_write_sysreg(VTTBR_EL2, vttbr);
	arm_write_sysreg(VTCR_EL2, vtcr);

	/* Ensure that the new VMID is present before flushing the caches */
	isb();
	/*
	 * At initialisation, arch_config_commit does not act on other CPUs,
	 * since they register themselves to the root cpu_set afterwards. This
	 * means that this unconditional flush is redundant on the master CPU.
	 */
	arch_cpu_tlb_flush(cpu_data);

	return 0;
}

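/* Drop all TLB entries belonging to the current VMID on this CPU. */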
void arch_cpu_tlb_flush(struct per_cpu *cpu_data)
{
	/*
	 * Invalidate all stage-1 and stage-2 TLB entries for the current
	 * VMID. The ERET back to the guest will ensure completion of these
	 * operations.
	 */
	arm_write_sysreg(TLBIALL, 1);
	dsb(nsh);
	cpu_data->flush_vcpu_caches = false;
}

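/*
 * Make a newly loaded cell image visible to the cell: clean and invalidate
 * the data caches once per cell (the image was written through the root
 * cell's mappings) and invalidate the instruction cache on every CPU.
 */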
void arch_cell_caches_flush(struct cell *cell)
{
	/* Only the first CPU needs to clean the data caches */
	spin_lock(&cell->arch.caches_lock);
	if (cell->arch.needs_flush) {
		/*
		 * Since there is no way to know which virtual addresses have
		 * been used by the root cell to write the new cell's data, a
		 * complete clean has to be performed.
		 */
		arch_cpu_dcaches_flush(CACHES_CLEAN_INVALIDATE);
		cell->arch.needs_flush = false;
	}
	spin_unlock(&cell->arch.caches_lock);

	/*
	 * New instructions may have been written, so the I-cache needs to be
	 * invalidated even though the VMID is different.
	 * A complete invalidation is the only way to ensure all virtual
	 * aliases of these memory locations are invalidated, whatever the
	 * cache type.
	 */
	arch_cpu_icache_flush();

	/* ERET will ensure context synchronization */
}