Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
arch/arm64/include/asm/kvm_host.h
index da3fc7324d6826b7b600c4167c77b1f4bd494cf1..a01fe087e022882d63f50e6fb766b13217a38208 100644
 
 #include <linux/types.h>
 #include <linux/kvm_types.h>
+#include <asm/arch_gicv3.h>
 #include <asm/cpufeature.h>
 #include <asm/daifflags.h>
 #include <asm/fpsimd.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
+#include <asm/smp_plat.h>
 #include <asm/thread_info.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -57,16 +59,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
 void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
 
-struct kvm_arch {
+struct kvm_vmid {
        /* The VMID generation used for the virt. memory system */
        u64    vmid_gen;
        u32    vmid;
+};
+
+struct kvm_arch {
+       struct kvm_vmid vmid;
 
        /* stage2 entry level table */
        pgd_t *pgd;
+       phys_addr_t pgd_phys;
 
-       /* VTTBR value associated with above pgd and vmid */
-       u64    vttbr;
        /* VTCR_EL2 value for this VM */
        u64    vtcr;
 
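With the cached vttbr field gone, the VTTBR value is now assembled on demand from pgd_phys and the VMID. A minimal sketch of such a helper, modelled on the kvm_get_vttbr() helper this series adds in asm/kvm_mmu.h (kvm_phys_to_vttbr() and VTTBR_VMID_SHIFT exist upstream, but treat the exact shape below as illustrative):

	/* Sketch only: rebuild the VTTBR from the split fields on demand. */
	static inline u64 example_get_vttbr(struct kvm *kvm)
	{
		struct kvm_vmid *vmid = &kvm->arch.vmid;
		u64 vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;

		/* The base address comes straight from the cached stage2 pgd. */
		return kvm_phys_to_vttbr(kvm->arch.pgd_phys) | vmid_field;
	}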
@@ -381,7 +386,36 @@ void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
 
 u64 __kvm_call_hyp(void *hypfn, ...);
-#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
+
+/*
+ * The couple of isb() below are there to guarantee the same behaviour
+ * on VHE as on !VHE, where the eret to EL1 acts as a context
+ * synchronization event.
+ */
+#define kvm_call_hyp(f, ...)                                           \
+       do {                                                            \
+               if (has_vhe()) {                                        \
+                       f(__VA_ARGS__);                                 \
+                       isb();                                          \
+               } else {                                                \
+                       __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
+               }                                                       \
+       } while(0)
+
+#define kvm_call_hyp_ret(f, ...)                                       \
+       ({                                                              \
+               typeof(f(__VA_ARGS__)) ret;                             \
+                                                                       \
+               if (has_vhe()) {                                        \
+                       ret = f(__VA_ARGS__);                           \
+                       isb();                                          \
+               } else {                                                \
+                       ret = __kvm_call_hyp(kvm_ksym_ref(f),           \
+                                            ##__VA_ARGS__);            \
+               }                                                       \
+                                                                       \
+               ret;                                                    \
+       })
 
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
@@ -400,6 +434,13 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
 DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
 
+static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+                                            int cpu)
+{
+       /* The host's MPIDR is immutable, so let's set it up at boot time */
+       cpu_ctxt->sys_regs[MPIDR_EL1] = cpu_logical_map(cpu);
+}
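The host context is initialised once per possible CPU while KVM is brought up; a sketch of that loop, modelled on the init code in virt/kvm/arm/arm.c (illustrative):

	int cpu;

	/* Record each host CPU's MPIDR in its per-cpu context. */
	for_each_possible_cpu(cpu) {
		kvm_cpu_context_t *cpu_ctxt;

		cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
		kvm_init_host_cpu_context(cpu_ctxt, cpu);
	}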
+
 void __kvm_enable_ssbs(void);
 
 static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
@@ -485,10 +526,25 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 static inline void kvm_arm_vhe_guest_enter(void)
 {
        local_daif_mask();
+
+       /*
+        * Having IRQs masked via PMR when entering the guest means the GIC
+        * will not signal the CPU of interrupts of lower priority, and the
+        * only way to get out will be via guest exceptions.
+        * Naturally, we want to avoid this.
+        */
+       if (system_uses_irq_prio_masking()) {
+               gic_write_pmr(GIC_PRIO_IRQON);
+               dsb(sy);
+       }
 }
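Together with kvm_arm_vhe_guest_exit() just below, this hook brackets the VHE run loop in the generic code. A sketch of the caller, modelled on kvm_arch_vcpu_ioctl_run() (illustrative; details may differ):

	if (has_vhe()) {
		kvm_arm_vhe_guest_enter();
		ret = kvm_vcpu_run_vhe(vcpu);
		kvm_arm_vhe_guest_exit();
	} else {
		ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
	}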
 
 static inline void kvm_arm_vhe_guest_exit(void)
 {
+       /*
+        * local_daif_restore() takes care to properly restore PSTATE.DAIF
+        * and the GIC PMR if the host is using IRQ priorities.
+        */
        local_daif_restore(DAIF_PROCCTX_NOIRQ);
 
        /*