int arch_mmu_cell_init(struct cell *cell)
{
- cell->arch.mm.root_paging = hv_paging;
- cell->arch.mm.root_table = page_alloc(&mem_pool, 1);
+ cell->arch.mm.root_paging = cell_paging;
+ cell->arch.mm.root_table =
+ page_alloc_aligned(&mem_pool, ARM_CELL_ROOT_PT_SZ);
+
if (!cell->arch.mm.root_table)
return -ENOMEM;
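The switch from page_alloc() to page_alloc_aligned() matters here: with cell_paging, the stage-2 root table may span ARM_CELL_ROOT_PT_SZ concatenated pages, and the architecture requires such a table to be physically contiguous and aligned to its full size, since VTTBR only carries the high bits of the base address. A minimal sketch of that invariant, assuming the usual PAGE_SIZE constant (the helper name is hypothetical; paging_hvirt2phys and ARM_CELL_ROOT_PT_SZ come from the patch itself):

/* Hypothetical check of the contract page_alloc_aligned() must meet:
 * the base has to be aligned to the whole (possibly concatenated)
 * root table, or its low bits would be truncated in VTTBR. */
static inline bool cell_root_table_aligned(struct cell *cell)
{
        unsigned long pa = paging_hvirt2phys(cell->arch.mm.root_table);

        return (pa & (ARM_CELL_ROOT_PT_SZ * PAGE_SIZE - 1)) == 0;
}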
void arch_mmu_cell_destroy(struct cell *cell)
{
- page_free(&mem_pool, cell->arch.mm.root_table, 1);
+ page_free(&mem_pool, cell->arch.mm.root_table, ARM_CELL_ROOT_PT_SZ);
}
int arch_mmu_cpu_cell_init(struct per_cpu *cpu_data)
{
struct cell *cell = cpu_data->cell;
unsigned long cell_table = paging_hvirt2phys(cell->arch.mm.root_table);
u64 vttbr = 0;
- u32 vtcr = T0SZ
- | SL0 << TCR_SL0_SHIFT
- | (TCR_RGN_WB_WA << TCR_IRGN0_SHIFT)
- | (TCR_RGN_WB_WA << TCR_ORGN0_SHIFT)
- | (TCR_INNER_SHAREABLE << TCR_SH0_SHIFT)
- | VTCR_RES1;
+ u32 vtcr = VTCR_CELL;
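VTCR_CELL is presumably a header constant that folds exactly the fields the removed lines composed inline; a definition consistent with this hunk (inferred from it, not quoted from the headers) would be:

#define VTCR_CELL       (T0SZ                                     \
                         | (SL0 << TCR_SL0_SHIFT)                 \
                         | (TCR_RGN_WB_WA << TCR_IRGN0_SHIFT)     \
                         | (TCR_RGN_WB_WA << TCR_ORGN0_SHIFT)     \
                         | (TCR_INNER_SHAREABLE << TCR_SH0_SHIFT) \
                         | VTCR_RES1)

Hoisting the composition into one named constant keeps this function short and guarantees that every CPU programs an identical VTCR.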
if (cell->id > 0xff) {
panic_printk("No cell ID available\n");
return -E2BIG;
}
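The 0xff bound exists because the cell ID doubles as the stage-2 VMID, an 8-bit field in VTTBR (bits [55:48] on ARMv7 with LPAE). A sketch of the composition the function performs next (elided from this excerpt); the shift name and helper are assumptions, not taken from the patch:

#define VTTBR_VMID_SHIFT        48      /* VMID sits in VTTBR[55:48] */

/* Hypothetical illustration: merge the 8-bit VMID with the physical
 * base of the cell's stage-2 root table. */
static inline u64 compose_vttbr(u8 vmid, unsigned long root_table_pa)
{
        return ((u64)vmid << VTTBR_VMID_SHIFT) | (u64)root_table_pa;
}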
/*
 * Invalidate all stage-1 and 2 TLB entries for the current VMID
 * ERET will ensure completion of these ops
 */
- arm_write_sysreg(TLBIALL, 1);
+ tlb_flush_guest();
dsb(nsh);
cpu_data->flush_vcpu_caches = false;
}
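tlb_flush_guest() presumably wraps the very TLBIALL write the removed line issued directly; a thin inline along these lines (an assumption — the hunk only shows the call site) keeps the sysreg access behind one named operation:

static inline void tlb_flush_guest(void)
{
        /* invalidate all TLB entries tagged with the current VMID */
        arm_write_sysreg(TLBIALL, 1);
}

The dsb(nsh) that follows the call then orders the invalidation against this CPU's subsequent accesses before flush_vcpu_caches is cleared.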