rtime.felk.cvut.cz Git - jailhouse.git/commitdiff
arm: prepare port for 48 bit PARange support
authorAntonios Motakis <antonios.motakis@huawei.com>
Thu, 12 May 2016 13:00:59 +0000 (15:00 +0200)
committerJan Kiszka <jan.kiszka@siemens.com>
Sun, 26 Jun 2016 07:16:28 +0000 (09:16 +0200)
We currently support 3 levels of page tables for a 39 bits PA range
on ARM. This patch implements support for 4 level page tables,
and 3 level page tables with a concatenated level 1 root page
table.

On AArch32 we stick with the current restriction of building for
a 39 bit physical address space; however this change will allow
us to support a 40 to 48 bit PARange on AArch64.

Signed-off-by: Antonios Motakis <antonios.motakis@huawei.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
hypervisor/arch/arm/include/asm/paging.h
hypervisor/arch/arm/include/asm/paging_modes.h
hypervisor/arch/arm/mmu_cell.c
hypervisor/arch/arm/paging.c

index 28ba3e0aedbac02a146846fd3c84f1e639968836..98fc343e030b506b043107062fdbbb232fc25d92 100644 (file)
  * by IPA[20:12].
  * This would allow us to cover a 4GB memory map by using 4 concatenated level-2
  * page tables and thus provide better table walk performances.
- * For the moment, the core doesn't allow to use concatenated pages, so we will
- * use three levels instead, starting at level 1.
+ * For the moment, we will implement the first level for AArch32 using only
+ * one level.
  *
- * TODO: add a "u32 concatenated" field to the paging struct
+ * TODO: implement larger PARange support for AArch32
  */
+#define ARM_CELL_ROOT_PT_SZ    1
+
 #if MAX_PAGE_TABLE_LEVELS < 3
 #define T0SZ                   0
 #define SL0                    0
 
 typedef u64 *pt_entry_t;
 
+extern unsigned int cpu_parange;
+
+/* return the bits supported for the physical address range for this
+ * machine; in arch_paging_init this value will be kept in
+ * cpu_parange for later reference */
+static inline unsigned int get_cpu_parange(void)
+{
+       /* TODO: implement proper PARange support on AArch32 */
+       return 39;
+}
+
 /* Only executed on hypervisor paging struct changes */
 static inline void arch_paging_flush_page_tlbs(unsigned long page_addr)
 {
index 72950eb848f52951b353a81c0aa8886afed17253..6634f9fcadf774931105ccc4d61318070696295c 100644 (file)
@@ -15,8 +15,7 @@
 #include <jailhouse/paging.h>
 
 /* Long-descriptor paging */
-extern const struct paging arm_paging[];
-
-#define hv_paging      arm_paging
+extern const struct paging *hv_paging;
+extern const struct paging *cell_paging;
 
 #endif /* !__ASSEMBLY__ */
index 4885f8ce21d4c771245858e0c86e2c92d0e485e5..fb5ad835e502d309b0deafb53b38b778343b99b7 100644 (file)
@@ -57,8 +57,10 @@ unsigned long arch_paging_gphys2phys(struct per_cpu *cpu_data,
 
 int arch_mmu_cell_init(struct cell *cell)
 {
-       cell->arch.mm.root_paging = hv_paging;
-       cell->arch.mm.root_table = page_alloc(&mem_pool, 1);
+       cell->arch.mm.root_paging = cell_paging;
+       cell->arch.mm.root_table =
+               page_alloc_aligned(&mem_pool, ARM_CELL_ROOT_PT_SZ);
+
        if (!cell->arch.mm.root_table)
                return -ENOMEM;
 
@@ -67,7 +69,7 @@ int arch_mmu_cell_init(struct cell *cell)
 
 void arch_mmu_cell_destroy(struct cell *cell)
 {
-       page_free(&mem_pool, cell->arch.mm.root_table, 1);
+       page_free(&mem_pool, cell->arch.mm.root_table, ARM_CELL_ROOT_PT_SZ);
 }
 
 int arch_mmu_cpu_cell_init(struct per_cpu *cpu_data)
index 8fdd03405531bef7501c1426ac7f4afc99d23b8f..2ba7da6f710578feaaf6d00dd3ad542203083cdf 100644 (file)
@@ -12,6 +12,8 @@
 
 #include <jailhouse/paging.h>
 
+unsigned int cpu_parange = 0;
+
 static bool arm_entry_valid(pt_entry_t entry, unsigned long flags)
 {
        // FIXME: validate flags!
@@ -40,6 +42,20 @@ static bool arm_page_table_empty(page_table_t page_table)
        return true;
 }
 
+#if MAX_PAGE_TABLE_LEVELS > 3
+static pt_entry_t arm_get_l0_entry(page_table_t page_table, unsigned long virt)
+{
+       return &page_table[(virt & L0_VADDR_MASK) >> 39];
+}
+
+static unsigned long arm_get_l0_phys(pt_entry_t pte, unsigned long virt)
+{
+       if ((*pte & PTE_TABLE_FLAGS) == PTE_TABLE_FLAGS)
+               return INVALID_PHYS_ADDR;
+       return (*pte & PTE_L0_BLOCK_ADDR_MASK) | (virt & BLOCK_512G_VADDR_MASK);
+}
+#endif
+
 #if MAX_PAGE_TABLE_LEVELS > 2
 static pt_entry_t arm_get_l1_entry(page_table_t page_table, unsigned long virt)
 {
@@ -59,6 +75,18 @@ static unsigned long arm_get_l1_phys(pt_entry_t pte, unsigned long virt)
 }
 #endif
 
+static pt_entry_t arm_get_l1_alt_entry(page_table_t page_table, unsigned long virt)
+{
+       return &page_table[(virt & BIT_MASK(48,30)) >> 30];
+}
+
+static unsigned long arm_get_l1_alt_phys(pt_entry_t pte, unsigned long virt)
+{
+       if ((*pte & PTE_TABLE_FLAGS) == PTE_TABLE_FLAGS)
+               return INVALID_PHYS_ADDR;
+       return (*pte & BIT_MASK(48,30)) | (virt & BIT_MASK(29,0));
+}
+
 static pt_entry_t arm_get_l2_entry(page_table_t page_table, unsigned long virt)
 {
        return &page_table[(virt & L2_VADDR_MASK) >> 21];
@@ -109,7 +137,18 @@ static unsigned long arm_get_l3_phys(pt_entry_t pte, unsigned long virt)
                .clear_entry = arm_clear_entry,         \
                .page_table_empty = arm_page_table_empty,
 
-const struct paging arm_paging[] = {
+const static struct paging arm_paging[] = {
+#if MAX_PAGE_TABLE_LEVELS > 3
+       {
+               ARM_PAGING_COMMON
+               /* No block entries for level 0, so no need to set page_size */
+               .get_entry = arm_get_l0_entry,
+               .get_phys = arm_get_l0_phys,
+
+               .set_next_pt = arm_set_l12_table,
+               .get_next_pt = arm_get_l12_table,
+       },
+#endif
 #if MAX_PAGE_TABLE_LEVELS > 2
        {
                ARM_PAGING_COMMON
@@ -144,6 +183,47 @@ const struct paging arm_paging[] = {
        }
 };
 
+const static struct paging arm_s2_paging_alt[] = {
+       {
+               ARM_PAGING_COMMON
+               .get_entry = arm_get_l1_alt_entry,
+               .get_phys = arm_get_l1_alt_phys,
+
+               .set_next_pt = arm_set_l12_table,
+               .get_next_pt = arm_get_l12_table,
+       },
+       {
+               ARM_PAGING_COMMON
+               /* Block entry: 2MB */
+               .page_size = 2 * 1024 * 1024,
+               .get_entry = arm_get_l2_entry,
+               .set_terminal = arm_set_l2_block,
+               .get_phys = arm_get_l2_phys,
+
+               .set_next_pt = arm_set_l12_table,
+               .get_next_pt = arm_get_l12_table,
+       },
+       {
+               ARM_PAGING_COMMON
+               /* Page entry: 4kB */
+               .page_size = 4 * 1024,
+               .get_entry = arm_get_l3_entry,
+               .set_terminal = arm_set_l3_page,
+               .get_phys = arm_get_l3_phys,
+       }
+};
+
+const struct paging *hv_paging = arm_paging;
+const struct paging *cell_paging;
+
 void arch_paging_init(void)
 {
+       cpu_parange = get_cpu_parange();
+
+       if (cpu_parange < 44)
+               /* 4 level page tables not supported for stage 2.
+                * We need to use multiple consecutive pages for L1 */
+               cell_paging = arm_s2_paging_alt;
+       else
+               cell_paging = arm_paging;
 }