/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) ARM Limited, 2014
 *
 * Authors:
 *  Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <jailhouse/control.h>
#include <jailhouse/paging.h>
#include <jailhouse/printk.h>
#include <asm/sysregs.h>
#include <asm/control.h>
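
/*
 * Create the stage 2 mapping for one memory region of a cell: the generic
 * JAILHOUSE_MEM_* flags from the cell configuration are translated into
 * stage 2 PTE attributes, and the special communication region is backed
 * by the hypervisor's per-cell comm_page.
 */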
int arch_map_memory_region(struct cell *cell,
                           const struct jailhouse_memory *mem)
{
        u64 phys_start = mem->phys_start;
        u32 flags = PTE_FLAG_VALID | PTE_ACCESS_FLAG;

        if (mem->flags & JAILHOUSE_MEM_READ)
                flags |= S2_PTE_ACCESS_RO;
        if (mem->flags & JAILHOUSE_MEM_WRITE)
                flags |= S2_PTE_ACCESS_WO;
        /*
         * `DMA' may be a bit misleading here: it is used to define MMIO regions
         */
        if (mem->flags & JAILHOUSE_MEM_DMA)
                flags |= S2_PTE_FLAG_DEVICE;
        else
                flags |= S2_PTE_FLAG_NORMAL;
        if (mem->flags & JAILHOUSE_MEM_COMM_REGION)
                phys_start = paging_hvirt2phys(&cell->comm_page);
        /*
        if (!(mem->flags & JAILHOUSE_MEM_EXECUTE))
                flags |= S2_PAGE_ACCESS_XN;
        */

        return paging_create(&cell->arch.mm, phys_start, mem->size,
                             mem->virt_start, flags, PAGING_NON_COHERENT);
}
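
/*
 * Illustrative example (an assumption for documentation purposes, not taken
 * from any real cell configuration): a PL011-style UART at 0x1c090000 could
 * be described roughly like this, and arch_map_memory_region() would then
 * install a read/write stage 2 device mapping for it:
 *
 *      const struct jailhouse_memory uart = {
 *              .phys_start = 0x1c090000,
 *              .virt_start = 0x1c090000,
 *              .size = 0x1000,
 *              .flags = JAILHOUSE_MEM_READ | JAILHOUSE_MEM_WRITE |
 *                       JAILHOUSE_MEM_DMA,
 *      };
 */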
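
/*
 * Tear down the stage 2 mapping of one memory region, e.g. when a cell is
 * destroyed and its regions are handed back to the root cell.
 */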
int arch_unmap_memory_region(struct cell *cell,
                             const struct jailhouse_memory *mem)
{
        return paging_destroy(&cell->arch.mm, mem->virt_start, mem->size,
                              PAGING_NON_COHERENT);
}
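
/*
 * Walk the cell's stage 2 page tables in software to translate a guest
 * physical address (IPA) into a host physical address. Together with the
 * cell-controlled stage 1 tables, this covers the full two-stage
 * translation: guest VA -> IPA -> PA.
 */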
unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
                                     unsigned long gphys, unsigned long flags)
{
        /* Translate IPA->PA */
        return paging_virt2phys(&cpu_data->cell->arch.mm, gphys, flags);
}
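
/*
 * Allocate the root page table for the cell's stage 2 translation: one page
 * from the hypervisor's memory pool, walked according to the hv_paging
 * descriptors.
 */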
int arch_mmu_cell_init(struct cell *cell)
{
        cell->arch.mm.root_paging = hv_paging;
        cell->arch.mm.root_table = page_alloc(&mem_pool, 1);
        if (!cell->arch.mm.root_table)
                return -ENOMEM;

        return 0;
}
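
/*
 * Point this CPU at the cell's stage 2 translation: VTTBR_EL2 combines the
 * root table base address with the VMID, for which the cell ID is reused
 * (hence the 8-bit limit below), while VTCR_EL2 configures the input
 * address size (T0SZ), the starting lookup level (SL0), and write-back/
 * write-allocate, inner-shareable table walks.
 */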
int arch_mmu_cpu_cell_init(struct per_cpu *cpu_data)
{
        struct cell *cell = cpu_data->cell;
        unsigned long cell_table = paging_hvirt2phys(cell->arch.mm.root_table);
        u64 vttbr = 0;
        u32 vtcr = T0SZ
                | SL0 << TCR_SL0_SHIFT
                | (TCR_RGN_WB_WA << TCR_IRGN0_SHIFT)
                | (TCR_RGN_WB_WA << TCR_ORGN0_SHIFT)
                | (TCR_INNER_SHAREABLE << TCR_SH0_SHIFT)
                | VTCR_RES1;

        if (cell->id > 0xff) {
                panic_printk("No cell ID available\n");
                return -E2BIG;
        }
        vttbr |= (u64)cell->id << VTTBR_VMID_SHIFT;
        vttbr |= (u64)(cell_table & TTBR_MASK);

        arm_write_sysreg(VTTBR_EL2, vttbr);
        arm_write_sysreg(VTCR_EL2, vtcr);

        isb();
        /*
         * Invalidate all stage 1 and stage 2 TLB entries for the current
         * VMID; the ERET back to the cell will ensure completion of these
         * operations.
         */
        arm_write_sysreg(TLBIALL, 1);

        return 0;
}
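
/*
 * Expected call order (a sketch inferred from the signatures above, not a
 * verbatim description of the generic code): arch_mmu_cell_init() runs once
 * when a cell is created, arch_map_memory_region() is called for each region
 * in its configuration, and arch_mmu_cpu_cell_init() runs on every CPU
 * assigned to the cell before it starts executing guest code.
 */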