flags |= S2_PTE_ACCESS_RO;
if (mem->flags & JAILHOUSE_MEM_WRITE)
flags |= S2_PTE_ACCESS_WO;
- /*
- * `DMA' may be a bit misleading here: it is used to define MMIO regions
- */
- if (mem->flags & JAILHOUSE_MEM_DMA)
+ if (mem->flags & JAILHOUSE_MEM_IO)
flags |= S2_PTE_FLAG_DEVICE;
else
flags |= S2_PTE_FLAG_NORMAL;
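For reference, plausible definitions of the S2_PTE_* constants used above, derived from the ARM long-descriptor stage-2 format (a sketch only; the real values live in the arch paging headers):

/* Stage-2 access permissions, S2AP field at bits [7:6] */
#define S2_PTE_ACCESS_RO	(0x1 << 6)	/* read allowed */
#define S2_PTE_ACCESS_WO	(0x2 << 6)	/* write allowed */
/* Stage-2 memory attributes, MemAttr field at bits [5:2] */
#define S2_PTE_FLAG_DEVICE	(0x1 << 2)	/* Device memory */
#define S2_PTE_FLAG_NORMAL	(0xf << 2)	/* Normal memory, write-back cacheable */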
int arch_mmu_cell_init(struct cell *cell)
{
- cell->arch.mm.root_paging = hv_paging;
- cell->arch.mm.root_table = page_alloc(&mem_pool, 1);
+ cell->arch.mm.root_paging = cell_paging;
+ cell->arch.mm.root_table =
+ page_alloc_aligned(&mem_pool, ARM_CELL_ROOT_PT_SZ);
+
if (!cell->arch.mm.root_table)
return -ENOMEM;
return 0;
}

+void arch_mmu_cell_destroy(struct cell *cell)
+{
+ page_free(&mem_pool, cell->arch.mm.root_table, ARM_CELL_ROOT_PT_SZ);
+}
+
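arch_mmu_cell_destroy() is the counterpart to arch_mmu_cell_init(); a minimal caller sketch of the intended pairing (example_cell_create and example_map_regions are hypothetical names, not part of this patch):

static int example_cell_create(struct cell *cell)
{
	int err = arch_mmu_cell_init(cell);

	if (err)
		return err;

	err = example_map_regions(cell);	/* hypothetical mapping step */
	if (err)
		/* balance the init: return the root-table pages to mem_pool */
		arch_mmu_cell_destroy(cell);

	return err;
}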
int arch_mmu_cpu_cell_init(struct per_cpu *cpu_data)
{
struct cell *cell = cpu_data->cell;
unsigned long cell_table = paging_hvirt2phys(cell->arch.mm.root_table);
u64 vttbr = 0;
- u32 vtcr = T0SZ
- | SL0 << TCR_SL0_SHIFT
- | (TCR_RGN_WB_WA << TCR_IRGN0_SHIFT)
- | (TCR_RGN_WB_WA << TCR_ORGN0_SHIFT)
- | (TCR_INNER_SHAREABLE << TCR_SH0_SHIFT)
- | VTCR_RES1;
+ u32 vtcr = VTCR_CELL;
if (cell->id > 0xff) {
panic_printk("No cell ID available\n");
return -E2BIG;
}
vttbr |= (u64)cell->id << VTTBR_VMID_SHIFT;
vttbr |= (u64)(cell_table & TTBR_MASK);
arm_write_sysreg(VTTBR_EL2, vttbr);
arm_write_sysreg(VTCR_EL2, vtcr);
+ /* Ensure that the new VMID is present before flushing the caches */
isb();
+ /*
+ * At initialisation, arch_config_commit does not act on the other CPUs,
+ * since they register themselves in the root cpu_set afterwards. The
+ * unconditional flush below is therefore redundant only on the master
+ * CPU.
+ */
+ arch_cpu_tlb_flush(cpu_data);
+
+ return 0;
+}
+
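VTCR_CELL is not defined in this hunk; it presumably folds the field-by-field computation removed above into a single constant, along these lines (a sketch reusing exactly the removed fields, not the patch's literal macro):

#define VTCR_CELL	(T0SZ					\
			| (SL0 << TCR_SL0_SHIFT)		\
			| (TCR_RGN_WB_WA << TCR_IRGN0_SHIFT)	\
			| (TCR_RGN_WB_WA << TCR_ORGN0_SHIFT)	\
			| (TCR_INNER_SHAREABLE << TCR_SH0_SHIFT)\
			| VTCR_RES1)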
+void arch_cpu_tlb_flush(struct per_cpu *cpu_data)
+{
/*
* Invalidate all stage-1 and 2 TLB entries for the current VMID
* ERET will ensure completion of these ops
*/
- arm_write_sysreg(TLBIALL, 1);
+ tlb_flush_guest();
+ dsb(nsh);
+ cpu_data->flush_vcpu_caches = false;
+}
- return 0;
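tlb_flush_guest() is likewise not shown here; judging by the TLBIALL write it replaces, a plausible sketch is a thin wrapper (an assumption, not the patch's actual definition):

/* Invalidate all stage-1 and stage-2 TLB entries for the current VMID */
static inline void tlb_flush_guest(void)
{
	arm_write_sysreg(TLBIALL, 1);
}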
+void arch_cell_caches_flush(struct cell *cell)
+{
+ /* Only the first CPU needs to clean the data caches */
+ spin_lock(&cell->arch.caches_lock);
+ if (cell->arch.needs_flush) {
+ /*
+ * Since there is no way to know which virtual addresses have been used
+ * by the root cell to write the new cell's data, a complete clean has
+ * to be performed.
+ */
+ arch_cpu_dcaches_flush(CACHES_CLEAN_INVALIDATE);
+ cell->arch.needs_flush = false;
+ }
+ spin_unlock(&cell->arch.caches_lock);
+
+ /*
+ * New instructions may have been written, so the I-cache needs to be
+ * invalidated even though the VMID is different.
+ * A complete invalidation is the only way to ensure that all virtual
+ * aliases of these memory locations are invalidated, regardless of the
+ * cache type.
+ */
+ arch_cpu_icache_flush();
+
+ /* ERET will ensure context synchronization */
}
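To tie the helpers together: arch_cpu_tlb_flush() clears the per-CPU flush_vcpu_caches flag, while arch_cell_caches_flush() consumes the per-cell needs_flush flag under caches_lock. A hedged sketch of a consumer on the cell-entry path (the function name is an assumption, not part of this patch):

static void example_cell_entry(struct per_cpu *cpu_data)
{
	/* arch_cpu_tlb_flush() clears flush_vcpu_caches itself */
	if (cpu_data->flush_vcpu_caches)
		arch_cpu_tlb_flush(cpu_data);

	/*
	 * The expensive D-cache clean inside arch_cell_caches_flush()
	 * runs only once per cell load, on the first CPU to get here.
	 */
	arch_cell_caches_flush(cpu_data->cell);
}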