* by IPA[20:12].
* This would allows to cover a 4GB memory map by using 4 concatenated level-2
* page tables and thus provide better table walk performances.
- * For the moment, the core doesn't allow to use concatenated pages, so we will
- * use three levels instead, starting at level 1.
+ * For the moment, AArch32 uses a single, non-concatenated level-1
+ * table.
*
- * TODO: add a "u32 concatenated" field to the paging struct
+ * TODO: implement larger PARange support for AArch32
*/
+/* Number of concatenated pages backing a cell's root page table
+ * (allocated in arch_mmu_cell_init, freed in arch_mmu_cell_destroy) */
+#define ARM_CELL_ROOT_PT_SZ 1
+
#if MAX_PAGE_TABLE_LEVELS < 3
#define T0SZ 0
#define SL0 0
typedef u64 *pt_entry_t;
+extern unsigned int cpu_parange;
+
+/* return the bits supported for the physical address range for this
+ * machine; in arch_paging_init this value will be kept in
+ * cpu_parange for later reference */
+static inline unsigned int get_cpu_parange(void)
+{
+	/* TODO: implement proper PARange support on AArch32 */
+	/* NOTE(review): hard-coded; the CPU's real PA range is not
+	 * queried yet, 39 bits are assumed for every machine */
+	return 39;
+}
+
/* Only executed on hypervisor paging struct changes */
static inline void arch_paging_flush_page_tlbs(unsigned long page_addr)
{
int arch_mmu_cell_init(struct cell *cell)
{
- cell->arch.mm.root_paging = hv_paging;
- cell->arch.mm.root_table = page_alloc(&mem_pool, 1);
+ cell->arch.mm.root_paging = cell_paging;
+ cell->arch.mm.root_table =
+ page_alloc_aligned(&mem_pool, ARM_CELL_ROOT_PT_SZ);
+
if (!cell->arch.mm.root_table)
return -ENOMEM;
void arch_mmu_cell_destroy(struct cell *cell)
{
-	page_free(&mem_pool, cell->arch.mm.root_table, 1);
+	/* free the ARM_CELL_ROOT_PT_SZ pages allocated in arch_mmu_cell_init() */
+	page_free(&mem_pool, cell->arch.mm.root_table, ARM_CELL_ROOT_PT_SZ);
}
int arch_mmu_cpu_cell_init(struct per_cpu *cpu_data)
#include <jailhouse/paging.h>
+/* Physical address range of this CPU in bits; written once by
+ * arch_paging_init() via get_cpu_parange(), read-only afterwards */
+unsigned int cpu_parange = 0;
+
static bool arm_entry_valid(pt_entry_t entry, unsigned long flags)
{
+	/* NOTE(review): unconditionally reports valid — neither the entry
+	 * nor the requested flags are inspected yet */
	// FIXME: validate flags!
	return true;
}
+#if MAX_PAGE_TABLE_LEVELS > 3
+static pt_entry_t arm_get_l0_entry(page_table_t page_table, unsigned long virt)
+{
+	/* level-0 index: VA bits above bit 39 (masked by L0_VADDR_MASK) */
+	return &page_table[(virt & L0_VADDR_MASK) >> 39];
+}
+
+static unsigned long arm_get_l0_phys(pt_entry_t pte, unsigned long virt)
+{
+	/* a table descriptor carries no terminal physical address */
+	if ((*pte & PTE_TABLE_FLAGS) == PTE_TABLE_FLAGS)
+		return INVALID_PHYS_ADDR;
+	/* NOTE(review): ARMv8 defines no block descriptors at level 0 —
+	 * confirm this 512GB-block path is intentional / unreachable */
+	return (*pte & PTE_L0_BLOCK_ADDR_MASK) | (virt & BLOCK_512G_VADDR_MASK);
+}
+#endif
+
#if MAX_PAGE_TABLE_LEVELS > 2
static pt_entry_t arm_get_l1_entry(page_table_t page_table, unsigned long virt)
{
}
#endif
+/* Level-1 lookup for the alternate (root-at-level-1) stage-2 layout */
+static pt_entry_t arm_get_l1_alt_entry(page_table_t page_table, unsigned long virt)
+{
+	/* index by VA bits [48:30]; assumes the root table spans enough
+	 * concatenated pages for the IPA size — TODO confirm, since
+	 * ARM_CELL_ROOT_PT_SZ is currently 1 (512 entries, 39-bit IPA) */
+	return &page_table[(virt & BIT_MASK(48,30)) >> 30];
+}
+
+static unsigned long arm_get_l1_alt_phys(pt_entry_t pte, unsigned long virt)
+{
+	/* a table descriptor carries no terminal physical address */
+	if ((*pte & PTE_TABLE_FLAGS) == PTE_TABLE_FLAGS)
+		return INVALID_PHYS_ADDR;
+	/* 1GB block: output address bits [48:30] plus VA offset [29:0] */
+	return (*pte & BIT_MASK(48,30)) | (virt & BIT_MASK(29,0));
+}
+
static pt_entry_t arm_get_l2_entry(page_table_t page_table, unsigned long virt)
{
return &page_table[(virt & L2_VADDR_MASK) >> 21];
.clear_entry = arm_clear_entry, \
.page_table_empty = arm_page_table_empty,
-const struct paging arm_paging[] = {
+/* storage-class specifier first: "const static" is obsolescent (C11 6.11.5) */
+static const struct paging arm_paging[] = {
+#if MAX_PAGE_TABLE_LEVELS > 3
+ {
+ ARM_PAGING_COMMON
+ /* No block entries for level 0, so no need to set page_size */
+ .get_entry = arm_get_l0_entry,
+ .get_phys = arm_get_l0_phys,
+
+ .set_next_pt = arm_set_l12_table,
+ .get_next_pt = arm_get_l12_table,
+ },
+#endif
#if MAX_PAGE_TABLE_LEVELS > 2
{
ARM_PAGING_COMMON
}
};
+/*
+ * Alternate stage-2 structure: three levels starting at level 1,
+ * selected by arch_paging_init() when cpu_parange < 44.
+ * Level 1 sets no page_size, so no 1GB block mappings are created
+ * there (arm_get_l1_alt_phys can still resolve existing ones).
+ * Note: storage-class specifier placed first ("const static" is an
+ * obsolescent form per C11 6.11.5).
+ */
+static const struct paging arm_s2_paging_alt[] = {
+	{
+		ARM_PAGING_COMMON
+		.get_entry = arm_get_l1_alt_entry,
+		.get_phys = arm_get_l1_alt_phys,
+
+		.set_next_pt = arm_set_l12_table,
+		.get_next_pt = arm_get_l12_table,
+	},
+	{
+		ARM_PAGING_COMMON
+		/* Block entry: 2MB */
+		.page_size = 2 * 1024 * 1024,
+		.get_entry = arm_get_l2_entry,
+		.set_terminal = arm_set_l2_block,
+		.get_phys = arm_get_l2_phys,
+
+		.set_next_pt = arm_set_l12_table,
+		.get_next_pt = arm_get_l12_table,
+	},
+	{
+		ARM_PAGING_COMMON
+		/* Page entry: 4kB (terminal level, no next table) */
+		.page_size = 4 * 1024,
+		.get_entry = arm_get_l3_entry,
+		.set_terminal = arm_set_l3_page,
+		.get_phys = arm_get_l3_phys,
+	}
+};
+
+/* paging structure used for the hypervisor's own mappings */
+const struct paging *hv_paging = arm_paging;
+/* paging structure used for cell (stage-2) mappings; chosen at runtime
+ * in arch_paging_init() based on the CPU's PA range */
+const struct paging *cell_paging;
+
void arch_paging_init(void)
{
+	/* cache the PA range once for later reference */
+	cpu_parange = get_cpu_parange();
+
+	if (cpu_parange < 44)
+		/* 4 level page tables not supported for stage 2.
+		 * We need to use multiple consecutive pages for L1
+		 * (NOTE(review): a level-1 root with up to 16 concatenated
+		 * tables covers at most a 43-bit IPA space, hence the 44-bit
+		 * threshold — confirm against VTCR.SL0 rules; also confirm
+		 * concatenation, as ARM_CELL_ROOT_PT_SZ is still 1) */
+		cell_paging = arm_s2_paging_alt;
+	else
+		cell_paging = arm_paging;
}