arm: disable caches on cell reset
author    Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
          Wed, 30 Jul 2014 14:13:14 +0000 (15:13 +0100)
committer Jan Kiszka <jan.kiszka@siemens.com>
          Fri, 19 Dec 2014 10:04:07 +0000 (11:04 +0100)
This patch allows entering new guests with caches disabled. By cleaning
the data caches, it makes sure that the recently written guest code and
data are present in memory before returning to an environment with only
a stage-2 MMU.

Signed-off-by: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
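
In short (a condensed, non-literal sketch of the hunks below; the exact
position of arch_reset_el1() in the reset path is not visible in this diff):

    arch_park_cpu(cpu_id):
        cell->arch.needs_flush = true      /* once the CPU is stopped */

    arch_reset_self(cpu_data):
        arch_mmu_cpu_cell_init(cpu_data)
        arch_cell_caches_flush(cell)       /* clean D-cache once per cell,
                                              invalidate I-cache on each CPU */
        ...
        arch_reset_el1(regs)               /* clears only the SCTLR_MASK bits,
                                              so the cell starts with MMU and
                                              caches off */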
hypervisor/arch/arm/control.c
hypervisor/arch/arm/include/asm/cell.h
hypervisor/arch/arm/include/asm/control.h
hypervisor/arch/arm/include/asm/processor.h
hypervisor/arch/arm/mmu_cell.c

index a802669e7ccaa21047e24dd28aa59dfdbdc80074..4d7546ec425c867049fd272f8e1d09f615de9efe 100644 (file)
 #include <jailhouse/string.h>
 #include <asm/control.h>
 #include <asm/irqchip.h>
+#include <asm/processor.h>
 #include <asm/sysregs.h>
 #include <asm/traps.h>
 
 static void arch_reset_el1(struct registers *regs)
 {
+       u32 sctlr;
+
        /* Wipe all banked and usr regs */
        memset(regs, 0, sizeof(struct registers));
 
@@ -49,7 +52,9 @@ static void arch_reset_el1(struct registers *regs)
        arm_write_banked_reg(SPSR_fiq, 0);
 
        /* Wipe the system registers */
-       arm_write_sysreg(SCTLR_EL1, 0);
+       arm_read_sysreg(SCTLR_EL1, sctlr);
+       sctlr &= ~SCTLR_MASK;
+       arm_write_sysreg(SCTLR_EL1, sctlr);
        arm_write_sysreg(ACTLR_EL1, 0);
        arm_write_sysreg(CPACR_EL1, 0);
        arm_write_sysreg(CONTEXTIDR_EL1, 0);
@@ -87,11 +92,19 @@ static void arch_reset_self(struct per_cpu *cpu_data)
 {
        int err;
        unsigned long reset_address;
+       struct cell *cell = cpu_data->cell;
        struct registers *regs = guest_regs(cpu_data);
 
        err = arch_mmu_cpu_cell_init(cpu_data);
        if (err)
                printk("MMU setup failed\n");
+       /*
+        * On the first CPU to reach this, write all cell data back to memory
+        * so that the cell can be started with caches disabled.
+        * On all CPUs, invalidate the instruction caches to pick up the
+        * potentially new instructions.
+        */
+       arch_cell_caches_flush(cell);
 
        /*
         * We come from the IRQ handler, but we won't return there, so the IPI
@@ -156,12 +169,16 @@ void arch_resume_cpu(unsigned int cpu_id)
 /* CPU must be stopped */
 void arch_park_cpu(unsigned int cpu_id)
 {
+       struct per_cpu *cpu_data = per_cpu(cpu_id);
+
        /*
         * Reset always follows park_cpu, so we just need to make sure that the
         * CPU is suspended
         */
        if (psci_wait_cpu_stopped(cpu_id) != 0)
                printk("ERROR: CPU%d is supposed to be stopped\n", cpu_id);
+       else
+               cpu_data->cell->arch.needs_flush = true;
 }
 
 /* CPU must be stopped */
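
For reference, the arm_read_sysreg()/arm_write_sysreg() helpers used in
arch_reset_el1() come from asm/sysregs.h. A minimal sketch of what such an
accessor pair could look like for SCTLR on ARMv7 (the coprocessor encoding is
the architectural one; the sctlr_read/sctlr_write names are illustrative, not
Jailhouse's API):

    /* SCTLR is cp15, opc1=0, CRn=c1, CRm=c0, opc2=0 */
    #define sctlr_read(val) \
            asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r"(val))
    #define sctlr_write(val) \
            asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r"(val) : "memory")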
index dba6f2d8660203fb57053205fbfcf0ac93c01bf8..e52e44e4fe78609dfdf980f20e6b4853ecd372cb 100644 (file)
@@ -14,6 +14,7 @@
 #define _JAILHOUSE_ASM_CELL_H
 
 #include <jailhouse/types.h>
+#include <asm/spinlock.h>
 
 #ifndef __ASSEMBLY__
 
@@ -23,6 +24,9 @@
 
 struct arch_cell {
        struct paging_structures mm;
+
+       spinlock_t caches_lock;
+       bool needs_flush;
 };
 
 struct cell {
index 2ada50d7c3fae44387aa8d3f55d23a8a6f8c788a..592ee29f76a33159e9223df033bdc607af4229d3 100644 (file)
@@ -25,6 +25,8 @@
 #ifndef __ASSEMBLY__
 
 void arch_cpu_dcaches_flush(unsigned int action);
+void arch_cpu_icache_flush(void);
+void arch_cell_caches_flush(struct cell *cell);
 int arch_mmu_cell_init(struct cell *cell);
 void arch_mmu_cell_destroy(struct cell *cell);
 int arch_mmu_cpu_cell_init(struct per_cpu *cpu_data);
index 6285abc99722ce5849c3e479b5ac40825e14733a..451f55c2d71329052f9c437eb7d79485630b3e81 100644 (file)
 #define SCTLR_AFE_BIT  (1 << 29)
 #define SCTLR_TE_BIT   (1 << 30)
 
+/* Bits to wipe on cell reset */
+#define SCTLR_MASK     (SCTLR_M_BIT | SCTLR_A_BIT | SCTLR_C_BIT        \
+                       | SCTLR_I_BIT | SCTLR_V_BIT | SCTLR_WXN_BIT     \
+                       | SCTLR_UWXN_BIT | SCTLR_FI_BIT | SCTLR_EE_BIT  \
+                       | SCTLR_TRE_BIT | SCTLR_AFE_BIT | SCTLR_TE_BIT)
+
 #define HCR_TRVM_BIT   (1 << 30)
 #define HCR_TVM_BIT    (1 << 26)
 #define HCR_HDC_BIT    (1 << 29)
index d1d62611f49f2a2176b7a58d270b6acbf7b1b004..90e06543e6971535520bf34f301ec28d4b6071de 100644 (file)
@@ -104,3 +104,29 @@ int arch_mmu_cpu_cell_init(struct per_cpu *cpu_data)
 
        return 0;
 }
+
+void arch_cell_caches_flush(struct cell *cell)
+{
+       /* Only the first CPU needs to clean the data caches */
+       spin_lock(&cell->arch.caches_lock);
+       if (cell->arch.needs_flush) {
+       /*
+        * Since there is no way to know which virtual addresses have been
+        * used by the root cell to write the new cell's data, a complete
+        * clean has to be performed.
+        */
+               arch_cpu_dcaches_flush(CACHES_CLEAN_INVALIDATE);
+               cell->arch.needs_flush = false;
+       }
+       spin_unlock(&cell->arch.caches_lock);
+
+       /*
+        * New instructions may have been written, so the I-cache needs to be
+        * invalidated even though the new cell runs under a different VMID.
+        * A complete invalidation is the only way to ensure that all virtual
+        * aliases of these memory locations are invalidated, whatever the
+        * cache type.
+        */
+       arch_cpu_icache_flush();
+
+       /* ERET will ensure context synchronization */
+}
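
Neither arch_cpu_icache_flush() nor arch_cpu_dcaches_flush() is defined in
this diff. As a rough sketch of what they might do on ARMv7 (coprocessor
encodings taken from the ARMv7-A manual; function and variable names here are
illustrative, not necessarily Jailhouse's):

    /* Invalidate all instruction caches to PoU (ICIALLU), then synchronize */
    void arch_cpu_icache_flush(void)
    {
            asm volatile("mcr p15, 0, %0, c7, c5, 0\n\t"    /* ICIALLU */
                         "dsb\n\t"
                         "isb\n\t"
                         : : "r"(0) : "memory");
    }

    /*
     * Clean+invalidate the entire data cache by set/way, walking every
     * level up to the Level of Coherency (one plausible shape of the
     * CACHES_CLEAN_INVALIDATE action).
     */
    static void dcaches_clean_invalidate_by_sw(void)
    {
            u32 clidr, ccsidr, loc, level;

            asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r"(clidr));
            loc = (clidr >> 24) & 0x7;      /* Level of Coherency */

            for (level = 0; level < loc; level++) {
                    u32 line_shift, way_shift, ways, sets, way, set;

                    /* Skip levels without a data or unified cache */
                    if (((clidr >> (3 * level)) & 0x7) < 2)
                            continue;

                    /* Select this level's data cache, read its geometry */
                    asm volatile("mcr p15, 2, %0, c0, c0, 0\n\tisb"
                                 : : "r"(level << 1));
                    asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r"(ccsidr));

                    line_shift = (ccsidr & 0x7) + 4;    /* log2(line bytes) */
                    ways = (ccsidr >> 3) & 0x3ff;       /* associativity - 1 */
                    sets = (ccsidr >> 13) & 0x7fff;     /* number of sets - 1 */
                    way_shift = ways ? __builtin_clz(ways) : 0;

                    for (way = 0; way <= ways; way++)
                            for (set = 0; set <= sets; set++)
                                    /* DCCISW: clean+invalidate by set/way */
                                    asm volatile("mcr p15, 0, %0, c7, c14, 2"
                                                 : : "r"((way << way_shift) |
                                                         (set << line_shift) |
                                                         (level << 1)));
            }
            asm volatile("dsb\n\tisb" : : : "memory");
    }

Note that set/way maintenance only affects the executing CPU's caches and is
not broadcast to other cores, which is why the clean above is serialized under
caches_lock and performed by the first CPU to reach the reset path.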