static void arch_suspend_self(struct per_cpu *cpu_data)
{
psci_suspend(cpu_data);
+
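+ /* Carry out a TLB flush that was requested while this CPU was suspended */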
+ if (cpu_data->flush_vcpu_caches)
+ arch_cpu_tlb_flush(cpu_data);
}
struct registers* arch_handle_exit(struct per_cpu *cpu_data,
for_each_cpu(cpu, cell->cpu_set)
arch_reset_cpu(cpu);
}
+
+/* Note: only supports synchronous flushing as triggered by config_commit! */
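+/* Remote CPUs handle the deferred request in arch_suspend_self() on resume */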
+void arch_flush_cell_vcpu_caches(struct cell *cell)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, cell->cpu_set)
+ if (cpu == this_cpu_id())
+ arch_cpu_tlb_flush(per_cpu(cpu));
+ else
+ per_cpu(cpu)->flush_vcpu_caches = true;
+}
+
+void arch_config_commit(struct cell *cell_added_removed)
+{
+}
void arch_cpu_dcaches_flush(unsigned int action);
void arch_cpu_icache_flush(void);
+void arch_cpu_tlb_flush(struct per_cpu *cpu_data);
void arch_cell_caches_flush(struct cell *cell);
int arch_mmu_cell_init(struct cell *cell);
void arch_mmu_cell_destroy(struct cell *cell);
#include <jailhouse/types.h>
#include <jailhouse/utils.h>
#include <asm/processor.h>
+#include <asm/sysregs.h>
#define PAGE_SIZE 4096
#define PAGE_MASK ~(PAGE_SIZE - 1)
typedef u64 *pt_entry_t;
+/* Only executed on hypervisor paging struct changes */
static inline void arch_paging_flush_page_tlbs(unsigned long page_addr)
{
+ /*
+ * This instruction is UNDEF at EL1, but the whole TLB is invalidated
+ * before enabling the EL2 stage 1 MMU anyway.
+ */
+ if (is_el2())
+ arm_write_sysreg(TLBIMVAH, page_addr & PAGE_MASK);
}
+extern unsigned int cache_line_size;
+
+/* Used to clean PAGE_MAP_COHERENT page table changes out to the PoC */
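+/* DCCIMVAC affects the whole cache line containing the given MVA */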
static inline void arch_paging_flush_cpu_caches(void *addr, long size)
{
+ do {
+ /* Clean & invalidate by MVA to PoC */
+ arm_write_sysreg(DCCIMVAC, addr);
+ size -= cache_line_size;
+ addr += cache_line_size;
+ } while (size > 0);
}
#endif /* !__ASSEMBLY__ */
/* The mbox will be accessed with a ldrd, which requires alignment */
__attribute__((aligned(8))) struct psci_mbox psci_mbox;
- bool flush_caches;
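+ /* set by arch_flush_cell_vcpu_caches(), cleared by arch_cpu_tlb_flush() */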
+ bool flush_vcpu_caches;
int shutdown_state;
bool failed;
} __attribute__((aligned(PAGE_SIZE)));
#ifndef _JAILHOUSE_ASM_PROCESSOR_H
#define _JAILHOUSE_ASM_PROCESSOR_H
+#include <jailhouse/types.h>
#include <jailhouse/utils.h>
#define PSR_MODE_MASK 0xf
{
}
+static inline bool is_el2(void)
+{
+ u32 psr;
+
+ asm volatile ("mrs %0, cpsr" : "=r" (psr));
+
+ return (psr & PSR_MODE_MASK) == PSR_HYP_MODE;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* !_JAILHOUSE_ASM_PROCESSOR_H */
* 32bit sysregs definitions
* (Use the AArch64 names to ease the compatibility work)
*/
+#define CTR_EL0 SYSREG_32(0, c0, c0, 1)
#define MPIDR_EL1 SYSREG_32(0, c0, c0, 5)
#define ID_PFR0_EL1 SYSREG_32(0, c0, c1, 0)
#define ID_PFR1_EL1 SYSREG_32(0, c0, c1, 1)
#define ICIALLUIS SYSREG_32(0, c7, c1, 0)
#define ICIALLU SYSREG_32(0, c7, c5, 0)
#define DCCSW SYSREG_32(0, c7, c10, 2)
+#define DCCIMVAC SYSREG_32(0, c7, c14, 1)
#define DCCISW SYSREG_32(0, c7, c14, 2)
arm_write_sysreg(VTTBR_EL2, vttbr);
arm_write_sysreg(VTCR_EL2, vtcr);
+ /* Ensure the new VMID is visible before the TLB flush below */
isb();
+ /*
+ * At initialisation, arch_config_commit does not act on other CPUs,
+ * since they register themselves to the root cpu_set afterwards. This
+ * makes the unconditional flush below redundant on the master CPU.
+ */
+ arch_cpu_tlb_flush(cpu_data);
+
+ return 0;
+}
+
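+/* Flush this CPU's TLB for the current VMID and clear any pending request */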
+void arch_cpu_tlb_flush(struct per_cpu *cpu_data)
+{
/*
* Invalidate all stage-1 and 2 TLB entries for the current VMID
* ERET will ensure completion of these ops
*/
arm_write_sysreg(TLBIALL, 1);
-
- return 0;
+ dsb(nsh);
+ cpu_data->flush_vcpu_caches = false;
}
void arch_cell_caches_flush(struct cell *cell)
#include <jailhouse/paging.h>
#include <jailhouse/string.h>
+unsigned int cache_line_size;
+
static int arch_check_features(void)
{
u32 pfr1;
+ u32 ctr;
+
arm_read_sysreg(ID_PFR1_EL1, pfr1);
if (!PFR1_VIRT(pfr1))
return -ENODEV;
+ arm_read_sysreg(CTR_EL0, ctr);
+ /* Smallest data cache line size: CTR.DminLine is log2(words per line) */
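+ /* e.g. DminLine == 4 -> 4 << 4 == 64-byte lines */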
+ cache_line_size = 4 << (ctr >> 16 & 0xf);
+
return 0;
}
// catch missing symbols
void arch_shutdown_cpu(unsigned int cpu_id) {}
-void arch_flush_cell_vcpu_caches(struct cell *cell) {}
-void arch_config_commit(struct cell *cell_added_removed) {}
void arch_shutdown(void) {}
void arch_panic_stop(void) {__builtin_unreachable();}
void arch_panic_park(void) {}