rtime.felk.cvut.cz Git - can-eth-gw-linux.git/commitdiff
Merge remote-tracking branch 'master' into queue
author     Marcelo Tosatti <mtosatti@redhat.com>
           Mon, 29 Oct 2012 21:15:32 +0000 (19:15 -0200)
committer  Marcelo Tosatti <mtosatti@redhat.com>
           Mon, 29 Oct 2012 21:15:32 +0000 (19:15 -0200)
Merge reason: development work has a dependency on kvm patches merged
upstream.

Conflicts:
arch/powerpc/include/asm/Kbuild
arch/powerpc/include/asm/kvm_para.h

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
14 files changed:
arch/powerpc/include/asm/Kbuild
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/include/asm/kvm_para.h
arch/powerpc/include/uapi/asm/Kbuild
arch/powerpc/include/uapi/asm/epapr_hcalls.h
arch/powerpc/include/uapi/asm/kvm.h
arch/powerpc/include/uapi/asm/kvm_para.h
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kvm/book3s_32_mmu_host.c
arch/powerpc/kvm/book3s_64_mmu_host.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/trace.h
arch/x86/kvm/mmu.c
include/uapi/linux/kvm.h

index 13d6b7bf3b69818641705770fed052999389afd5,a4fe15e33c6f0e34e49446d89329d9c5cfd37a61..324495a08c87f9c8b2c3bbe2923f6c6e0fa71e8f
@@@ -1,39 -1,4 +1,3 @@@
- include include/asm-generic/Kbuild.asm
- header-y += auxvec.h
- header-y += bootx.h
- header-y += byteorder.h
- header-y += cputable.h
- header-y += elf.h
- header-y += errno.h
- header-y += fcntl.h
- header-y += ioctl.h
- header-y += ioctls.h
- header-y += ipcbuf.h
- header-y += linkage.h
- header-y += msgbuf.h
- header-y += nvram.h
- header-y += param.h
- header-y += poll.h
- header-y += posix_types.h
- header-y += ps3fb.h
- header-y += resource.h
- header-y += seccomp.h
- header-y += sembuf.h
- header-y += shmbuf.h
- header-y += sigcontext.h
- header-y += siginfo.h
- header-y += signal.h
- header-y += socket.h
- header-y += sockios.h
- header-y += spu_info.h
- header-y += stat.h
- header-y += statfs.h
- header-y += termbits.h
- header-y += termios.h
- header-y += types.h
- header-y += ucontext.h
- header-y += unistd.h
- header-y += epapr_hcalls.h
  
 -
+ generic-y += clkdev.h
  generic-y += rwsem.h
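
These two Kbuild hunks are the powerpc side of the UAPI header split: the header-y export list is deleted from arch/powerpc/include/asm/Kbuild here and reappears in arch/powerpc/include/uapi/asm/Kbuild below, so only kernel-internal directives remain on this side. The surviving generic-y lines ask Kbuild to generate wrapper headers onto the asm-generic versions; e.g. `generic-y += clkdev.h` makes the build use include/asm-generic/clkdev.h rather than an arch-specific copy.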
index ab738005d2eafc57b6bb8bf5661cab3f05502f2f,7aefdb3e1ce405865beb991934dadfac22036724..36fcf41904613cf442bd07338887bb492735213a
@@@ -59,7 -59,7 +59,7 @@@ struct hpte_cache 
        struct hlist_node list_vpte;
        struct hlist_node list_vpte_long;
        struct rcu_head rcu_head;
-       u64 host_va;
+       u64 host_vpn;
        u64 pfn;
        ulong slot;
        struct kvmppc_pte pte;
@@@ -160,7 -160,7 +160,7 @@@ extern long kvmppc_virtmode_h_enter(str
  extern long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                        long pte_index, unsigned long pteh, unsigned long ptel);
  extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
 -                      struct kvm_memory_slot *memslot);
 +                      struct kvm_memory_slot *memslot, unsigned long *map);
  
  extern void kvmppc_entry_trampoline(void);
  extern void kvmppc_hv_entry_trampoline(void);
index a168ce37d85ce35a76116be05f7f3d7befc3de15,9365860fb7f61676a57233eb14de66db0cc0c65b..2b119654b4c1a5fcd2ce568fd83ad3a64bccdd03
   *
   * Authors: Hollis Blanchard <hollisb@us.ibm.com>
   */
  #ifndef __POWERPC_KVM_PARA_H__
  #define __POWERPC_KVM_PARA_H__
  
- #include <linux/types.h>
- /*
-  * Additions to this struct must only occur at the end, and should be
-  * accompanied by a KVM_MAGIC_FEAT flag to advertise that they are present
-  * (albeit not necessarily relevant to the current target hardware platform).
-  *
-  * Struct fields are always 32 or 64 bit aligned, depending on them being 32
-  * or 64 bit wide respectively.
-  *
-  * See Documentation/virtual/kvm/ppc-pv.txt
-  */
- struct kvm_vcpu_arch_shared {
-       __u64 scratch1;
-       __u64 scratch2;
-       __u64 scratch3;
-       __u64 critical;         /* Guest may not get interrupts if == r1 */
-       __u64 sprg0;
-       __u64 sprg1;
-       __u64 sprg2;
-       __u64 sprg3;
-       __u64 srr0;
-       __u64 srr1;
-       __u64 dar;              /* dear on BookE */
-       __u64 msr;
-       __u32 dsisr;
-       __u32 int_pending;      /* Tells the guest if we have an interrupt */
-       __u32 sr[16];
-       __u32 mas0;
-       __u32 mas1;
-       __u64 mas7_3;
-       __u64 mas2;
-       __u32 mas4;
-       __u32 mas6;
-       __u32 esr;
-       __u32 pir;
-       /*
-        * SPRG4-7 are user-readable, so we can only keep these consistent
-        * between the shared area and the real registers when there's an
-        * intervening exit to KVM.  This also applies to SPRG3 on some
-        * chips.
-        *
-        * This suffices for access by guest userspace, since in PR-mode
-        * KVM, an exit must occur when changing the guest's MSR[PR].
-        * If the guest kernel writes to SPRG3-7 via the shared area, it
-        * must also use the shared area for reading while in kernel space.
-        */
-       __u64 sprg4;
-       __u64 sprg5;
-       __u64 sprg6;
-       __u64 sprg7;
- };
- #define KVM_SC_MAGIC_R0               0x4b564d21 /* "KVM!" */
- #define KVM_HCALL_TOKEN(num)     _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
- #include <asm/epapr_hcalls.h>
- #define KVM_FEATURE_MAGIC_PAGE        1
- #define KVM_MAGIC_FEAT_SR             (1 << 0)
- /* MASn, ESR, PIR, and high SPRGs */
- #define KVM_MAGIC_FEAT_MAS0_TO_SPRG7  (1 << 1)
- #ifdef __KERNEL__
+ #include <uapi/asm/kvm_para.h>
  
 -
  #ifdef CONFIG_KVM_GUEST
  
  #include <linux/of.h>
@@@ -122,7 -55,7 +54,7 @@@ static unsigned long kvm_hypercall(unsi
                                   unsigned long *out,
                                   unsigned long nr)
  {
 -      return HC_EV_UNIMPLEMENTED;
 +      return EV_UNIMPLEMENTED;
  }
  
  #endif
@@@ -133,7 -66,7 +65,7 @@@ static inline long kvm_hypercall0_1(uns
        unsigned long out[8];
        unsigned long r;
  
 -      r = kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
 +      r = kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
        *r2 = out[0];
  
        return r;
@@@ -144,7 -77,7 +76,7 @@@ static inline long kvm_hypercall0(unsig
        unsigned long in[8];
        unsigned long out[8];
  
 -      return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
 +      return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
  }
  
  static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
        unsigned long out[8];
  
        in[0] = p1;
 -      return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
 +      return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
  }
  
  static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
  
        in[0] = p1;
        in[1] = p2;
 -      return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
 +      return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
  }
  
  static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
        in[0] = p1;
        in[1] = p2;
        in[2] = p3;
 -      return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
 +      return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
  }
  
  static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
        in[1] = p2;
        in[2] = p3;
        in[3] = p4;
 -      return kvm_hypercall(in, out, nr | HC_VENDOR_KVM);
 +      return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr));
  }
  
  
@@@ -212,6 -145,4 +144,4 @@@ static inline bool kvm_check_and_clear_
        return false;
  }
  
- #endif /* __KERNEL__ */
  #endif /* __POWERPC_KVM_PARA_H__ */
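
Every hypercall wrapper above swaps `nr | HC_VENDOR_KVM` for `KVM_HCALL_TOKEN(nr)`. The two encodings are bit-for-bit identical; a quick check using the `_EV_HCALL_TOKEN()` definition from epapr_hcalls.h and the vendor id 42 visible in the removed `HC_VENDOR_KVM` definition later in this diff:

	/*
	 * old: nr | HC_VENDOR_KVM   = nr | (42 << 16)
	 * new: KVM_HCALL_TOKEN(nr)  = _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, nr)
	 *                           = (42 << 16) | nr
	 *
	 * e.g. nr = 4  ->  0x002a0004 under either scheme.
	 */

So the rework only moves the encoding onto the shared ePAPR token macros; the guest/host ABI is unchanged.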
index baebb3da1d44160fc6f6f259886656e6f494b167,a33c3c03bb2e104693eab939e0ffc3e901508259..9eedfc5a557bb3b583b2d3c6f052c5ae7b30b6af
@@@ -1,3 -1,44 +1,45 @@@
  # UAPI Header export list
  include include/uapi/asm-generic/Kbuild.asm
  
+ header-y += auxvec.h
+ header-y += bitsperlong.h
+ header-y += bootx.h
+ header-y += byteorder.h
+ header-y += cputable.h
+ header-y += elf.h
+ header-y += errno.h
+ header-y += fcntl.h
+ header-y += ioctl.h
+ header-y += ioctls.h
+ header-y += ipcbuf.h
+ header-y += kvm.h
+ header-y += kvm_para.h
+ header-y += linkage.h
+ header-y += mman.h
+ header-y += msgbuf.h
+ header-y += nvram.h
+ header-y += param.h
+ header-y += poll.h
+ header-y += posix_types.h
+ header-y += ps3fb.h
+ header-y += ptrace.h
+ header-y += resource.h
+ header-y += seccomp.h
+ header-y += sembuf.h
+ header-y += setup.h
+ header-y += shmbuf.h
+ header-y += sigcontext.h
+ header-y += siginfo.h
+ header-y += signal.h
+ header-y += socket.h
+ header-y += sockios.h
+ header-y += spu_info.h
+ header-y += stat.h
+ header-y += statfs.h
+ header-y += swab.h
+ header-y += termbits.h
+ header-y += termios.h
+ header-y += types.h
+ header-y += ucontext.h
+ header-y += unistd.h
++header-y += epapr_hcalls.h
index b8d94459a929d6d1c74203bd40d03b6ab8bea9ab,bf2c06c338719e66c20db73bf8bc8d0bb8dabc5d..b8d94459a929d6d1c74203bd40d03b6ab8bea9ab
  #ifndef _EPAPR_HCALLS_H
  #define _EPAPR_HCALLS_H
  
 -#include <linux/types.h>
 -#include <linux/errno.h>
 -#include <asm/byteorder.h>
 -
  #define EV_BYTE_CHANNEL_SEND          1
  #define EV_BYTE_CHANNEL_RECEIVE               2
  #define EV_BYTE_CHANNEL_POLL          3
@@@ -84,8 -88,7 +84,8 @@@
  #define _EV_HCALL_TOKEN(id, num) (((id) << 16) | (num))
  #define EV_HCALL_TOKEN(hcall_num) _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num)
  
 -/* epapr error codes */
 +/* epapr return codes */
 +#define EV_SUCCESS            0
  #define EV_EPERM              1       /* Operation not permitted */
  #define EV_ENOENT             2       /*  Entry Not Found */
  #define EV_EIO                        3       /* I/O error occurred */
  #define EV_UNIMPLEMENTED      12      /* Unimplemented hypercall */
  #define EV_BUFFER_OVERFLOW    13      /* Caller-supplied buffer too small */
  
 +#ifndef __ASSEMBLY__
 +#include <linux/types.h>
 +#include <linux/errno.h>
 +#include <asm/byteorder.h>
 +
  /*
   * Hypercall register clobber list
   *
@@@ -195,7 -193,7 +195,7 @@@ static inline unsigned int ev_int_set_c
        r5  = priority;
        r6  = destination;
  
 -      __asm__ __volatile__ ("sc 1"
 +      asm volatile("bl        epapr_hypercall_start"
                : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6)
                : : EV_HCALL_CLOBBERS4
        );
@@@ -224,7 -222,7 +224,7 @@@ static inline unsigned int ev_int_get_c
        r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG);
        r3 = interrupt;
  
 -      __asm__ __volatile__ ("sc 1"
 +      asm volatile("bl        epapr_hypercall_start"
                : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6)
                : : EV_HCALL_CLOBBERS4
        );
@@@ -254,7 -252,7 +254,7 @@@ static inline unsigned int ev_int_set_m
        r3 = interrupt;
        r4 = mask;
  
 -      __asm__ __volatile__ ("sc 1"
 +      asm volatile("bl        epapr_hypercall_start"
                : "+r" (r11), "+r" (r3), "+r" (r4)
                : : EV_HCALL_CLOBBERS2
        );
@@@ -279,7 -277,7 +279,7 @@@ static inline unsigned int ev_int_get_m
        r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK);
        r3 = interrupt;
  
 -      __asm__ __volatile__ ("sc 1"
 +      asm volatile("bl        epapr_hypercall_start"
                : "+r" (r11), "+r" (r3), "=r" (r4)
                : : EV_HCALL_CLOBBERS2
        );
@@@ -307,7 -305,7 +307,7 @@@ static inline unsigned int ev_int_eoi(u
        r11 = EV_HCALL_TOKEN(EV_INT_EOI);
        r3 = interrupt;
  
 -      __asm__ __volatile__ ("sc 1"
 +      asm volatile("bl        epapr_hypercall_start"
                : "+r" (r11), "+r" (r3)
                : : EV_HCALL_CLOBBERS1
        );
@@@ -346,7 -344,7 +346,7 @@@ static inline unsigned int ev_byte_chan
        r7 = be32_to_cpu(p[2]);
        r8 = be32_to_cpu(p[3]);
  
 -      __asm__ __volatile__ ("sc 1"
 +      asm volatile("bl        epapr_hypercall_start"
                : "+r" (r11), "+r" (r3),
                  "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8)
                : : EV_HCALL_CLOBBERS6
@@@ -385,7 -383,7 +385,7 @@@ static inline unsigned int ev_byte_chan
        r3 = handle;
        r4 = *count;
  
 -      __asm__ __volatile__ ("sc 1"
 +      asm volatile("bl        epapr_hypercall_start"
                : "+r" (r11), "+r" (r3), "+r" (r4),
                  "=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8)
                : : EV_HCALL_CLOBBERS6
@@@ -423,7 -421,7 +423,7 @@@ static inline unsigned int ev_byte_chan
        r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL);
        r3 = handle;
  
 -      __asm__ __volatile__ ("sc 1"
 +      asm volatile("bl        epapr_hypercall_start"
                : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5)
                : : EV_HCALL_CLOBBERS3
        );
@@@ -456,7 -454,7 +456,7 @@@ static inline unsigned int ev_int_iack(
        r11 = EV_HCALL_TOKEN(EV_INT_IACK);
        r3 = handle;
  
 -      __asm__ __volatile__ ("sc 1"
 +      asm volatile("bl        epapr_hypercall_start"
                : "+r" (r11), "+r" (r3), "=r" (r4)
                : : EV_HCALL_CLOBBERS2
        );
@@@ -480,7 -478,7 +480,7 @@@ static inline unsigned int ev_doorbell_
        r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND);
        r3 = handle;
  
 -      __asm__ __volatile__ ("sc 1"
 +      asm volatile("bl        epapr_hypercall_start"
                : "+r" (r11), "+r" (r3)
                : : EV_HCALL_CLOBBERS1
        );
@@@ -500,12 -498,12 +500,12 @@@ static inline unsigned int ev_idle(void
  
        r11 = EV_HCALL_TOKEN(EV_IDLE);
  
 -      __asm__ __volatile__ ("sc 1"
 +      asm volatile("bl        epapr_hypercall_start"
                : "+r" (r11), "=r" (r3)
                : : EV_HCALL_CLOBBERS1
        );
  
        return r3;
  }
 -
 +#endif /* !__ASSEMBLY__ */
  #endif
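
All of the ev_*() wrappers above stop issuing `sc 1` inline and instead branch to a common `epapr_hypercall_start` trampoline, which this same merge exports from ppc_ksyms.c. A stripped-down sketch of the resulting call shape — it simply mirrors ev_idle() above and is illustrative, not a new API; it assumes the surrounding header for uintptr_t and EV_HCALL_CLOBBERS1:

	/* The hypercall token travels in r11 and the status comes back in
	 * r3; the remaining volatile registers are declared clobbered via
	 * EV_HCALL_CLOBBERS1.
	 */
	static inline unsigned int example_nullary_hcall(unsigned int token)
	{
		register uintptr_t r11 __asm__("r11") = token;
		register uintptr_t r3  __asm__("r3");

		asm volatile("bl	epapr_hypercall_start"
			: "+r" (r11), "=r" (r3)
			: : EV_HCALL_CLOBBERS1);

		return r3;
	}

Funnelling every wrapper through one `bl` target means the actual hypercall instruction sequence can be installed at boot rather than hard-coded as `sc 1`, which is presumably why the symbol is exported below.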
index b89ae4db45ced4523e101c18a5477c62c928fd0a,1bea4d8ea6f432d3e3425752aafa156a8fe7710b..b89ae4db45ced4523e101c18a5477c62c928fd0a
@@@ -221,12 -221,6 +221,12 @@@ struct kvm_sregs 
  
                        __u32 dbsr;     /* KVM_SREGS_E_UPDATE_DBSR */
                        __u32 dbcr[3];
 +                      /*
 +                       * iac/dac registers are 64bit wide, while this API
 +                       * interface provides only lower 32 bits on 64 bit
 +                       * processors. ONE_REG interface is added for 64bit
 +                       * iac/dac registers.
 +                       */
                        __u32 iac[4];
                        __u32 dac[2];
                        __u32 dvc[2];
@@@ -332,58 -326,5 +332,58 @@@ struct kvm_book3e_206_tlb_params 
  };
  
  #define KVM_REG_PPC_HIOR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1)
 +#define KVM_REG_PPC_IAC1      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x2)
 +#define KVM_REG_PPC_IAC2      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3)
 +#define KVM_REG_PPC_IAC3      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x4)
 +#define KVM_REG_PPC_IAC4      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x5)
 +#define KVM_REG_PPC_DAC1      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x6)
 +#define KVM_REG_PPC_DAC2      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x7)
 +#define KVM_REG_PPC_DABR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8)
 +#define KVM_REG_PPC_DSCR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9)
 +#define KVM_REG_PPC_PURR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa)
 +#define KVM_REG_PPC_SPURR     (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb)
 +#define KVM_REG_PPC_DAR               (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc)
 +#define KVM_REG_PPC_DSISR     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd)
 +#define KVM_REG_PPC_AMR               (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xe)
 +#define KVM_REG_PPC_UAMOR     (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xf)
 +
 +#define KVM_REG_PPC_MMCR0     (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
 +#define KVM_REG_PPC_MMCR1     (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
 +#define KVM_REG_PPC_MMCRA     (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
 +
 +#define KVM_REG_PPC_PMC1      (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
 +#define KVM_REG_PPC_PMC2      (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
 +#define KVM_REG_PPC_PMC3      (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1a)
 +#define KVM_REG_PPC_PMC4      (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1b)
 +#define KVM_REG_PPC_PMC5      (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1c)
 +#define KVM_REG_PPC_PMC6      (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1d)
 +#define KVM_REG_PPC_PMC7      (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1e)
 +#define KVM_REG_PPC_PMC8      (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1f)
 +
 +/* 32 floating-point registers */
 +#define KVM_REG_PPC_FPR0      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x20)
 +#define KVM_REG_PPC_FPR(n)    (KVM_REG_PPC_FPR0 + (n))
 +#define KVM_REG_PPC_FPR31     (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3f)
 +
 +/* 32 VMX/Altivec vector registers */
 +#define KVM_REG_PPC_VR0               (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x40)
 +#define KVM_REG_PPC_VR(n)     (KVM_REG_PPC_VR0 + (n))
 +#define KVM_REG_PPC_VR31      (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x5f)
 +
 +/* 32 double-width FP registers for VSX */
 +/* High-order halves overlap with FP regs */
 +#define KVM_REG_PPC_VSR0      (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x60)
 +#define KVM_REG_PPC_VSR(n)    (KVM_REG_PPC_VSR0 + (n))
 +#define KVM_REG_PPC_VSR31     (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x7f)
 +
 +/* FP and vector status/control registers */
 +#define KVM_REG_PPC_FPSCR     (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
 +#define KVM_REG_PPC_VSCR      (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)
 +
 +/* Virtual processor areas */
 +/* For SLB & DTL, address in high (first) half, length in low half */
 +#define KVM_REG_PPC_VPA_ADDR  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x82)
 +#define KVM_REG_PPC_VPA_SLB   (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x83)
 +#define KVM_REG_PPC_VPA_DTL   (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x84)
  
  #endif /* __LINUX_KVM_POWERPC_H */
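
The ONE_REG ranges above are laid out so the accessor macros reduce to plain offset arithmetic; a quick consistency check with the constants as defined:

	/* KVM_REG_PPC_FPR0 ends in 0x20, so
	 *   KVM_REG_PPC_FPR(7)  = KVM_REG_PPC_FPR0 + 7
	 *                       = KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x27
	 *   KVM_REG_PPC_FPR(31) = ... | 0x3f == KVM_REG_PPC_FPR31
	 * and likewise KVM_REG_PPC_VR(31) = ... | 0x5f == KVM_REG_PPC_VR31.
	 */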
index 0000000000000000000000000000000000000000,5e04383a1db5867c9c678b8ad3c6cadf760cab58..ed0e0254b47f2b3ae67940e15d151b58dddc7462
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,90 +1,91 @@@
 -#define HC_VENDOR_KVM         (42 << 16)
 -#define HC_EV_SUCCESS         0
 -#define HC_EV_UNIMPLEMENTED   12
+ /*
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License, version 2, as
+  * published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program; if not, write to the Free Software
+  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+  *
+  * Copyright IBM Corp. 2008
+  *
+  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+  */
+ #ifndef _UAPI__POWERPC_KVM_PARA_H__
+ #define _UAPI__POWERPC_KVM_PARA_H__
+ #include <linux/types.h>
+ /*
+  * Additions to this struct must only occur at the end, and should be
+  * accompanied by a KVM_MAGIC_FEAT flag to advertise that they are present
+  * (albeit not necessarily relevant to the current target hardware platform).
+  *
+  * Struct fields are always 32 or 64 bit aligned, depending on them being 32
+  * or 64 bit wide respectively.
+  *
+  * See Documentation/virtual/kvm/ppc-pv.txt
+  */
+ struct kvm_vcpu_arch_shared {
+       __u64 scratch1;
+       __u64 scratch2;
+       __u64 scratch3;
+       __u64 critical;         /* Guest may not get interrupts if == r1 */
+       __u64 sprg0;
+       __u64 sprg1;
+       __u64 sprg2;
+       __u64 sprg3;
+       __u64 srr0;
+       __u64 srr1;
+       __u64 dar;              /* dear on BookE */
+       __u64 msr;
+       __u32 dsisr;
+       __u32 int_pending;      /* Tells the guest if we have an interrupt */
+       __u32 sr[16];
+       __u32 mas0;
+       __u32 mas1;
+       __u64 mas7_3;
+       __u64 mas2;
+       __u32 mas4;
+       __u32 mas6;
+       __u32 esr;
+       __u32 pir;
+       /*
+        * SPRG4-7 are user-readable, so we can only keep these consistent
+        * between the shared area and the real registers when there's an
+        * intervening exit to KVM.  This also applies to SPRG3 on some
+        * chips.
+        *
+        * This suffices for access by guest userspace, since in PR-mode
+        * KVM, an exit must occur when changing the guest's MSR[PR].
+        * If the guest kernel writes to SPRG3-7 via the shared area, it
+        * must also use the shared area for reading while in kernel space.
+        */
+       __u64 sprg4;
+       __u64 sprg5;
+       __u64 sprg6;
+       __u64 sprg7;
+ };
+ #define KVM_SC_MAGIC_R0               0x4b564d21 /* "KVM!" */
++
++#define KVM_HCALL_TOKEN(num)     _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
++
++#include <uapi/asm/epapr_hcalls.h>
+ #define KVM_FEATURE_MAGIC_PAGE        1
+ #define KVM_MAGIC_FEAT_SR             (1 << 0)
+ /* MASn, ESR, PIR, and high SPRGs */
+ #define KVM_MAGIC_FEAT_MAS0_TO_SPRG7  (1 << 1)
+ #endif /* _UAPI__POWERPC_KVM_PARA_H__ */
index e597dde124e8736d09caf15a8f1012c443b820cc,19e4288d84865e7b97f1a22a6fe099037b46aeef..78b8766fd79e4605f96a103c6562ff62caa37c5e
@@@ -43,7 -43,6 +43,7 @@@
  #include <asm/dcr.h>
  #include <asm/ftrace.h>
  #include <asm/switch_to.h>
 +#include <asm/epapr_hcalls.h>
  
  #ifdef CONFIG_PPC32
  extern void transfer_to_handler(void);
@@@ -95,7 -94,6 +95,6 @@@ EXPORT_SYMBOL(pci_dram_offset)
  #endif /* CONFIG_PCI */
  
  EXPORT_SYMBOL(start_thread);
- EXPORT_SYMBOL(kernel_thread);
  
  EXPORT_SYMBOL(giveup_fpu);
  #ifdef CONFIG_ALTIVEC
@@@ -193,7 -191,3 +192,7 @@@ EXPORT_SYMBOL(__arch_hweight64)
  #ifdef CONFIG_PPC_BOOK3S_64
  EXPORT_SYMBOL_GPL(mmu_psize_defs);
  #endif
 +
 +#ifdef CONFIG_EPAPR_PARAVIRT
 +EXPORT_SYMBOL(epapr_hypercall_start);
 +#endif
index 9fac0101ffb98ca99139d5ff8ed547cfc9e2461f,b0f625a33345a6e7cf690d21b041507adae5f61f..d1107a9b5d130d08dc8f4e20ec6e8f697a3c285e
@@@ -141,7 -141,7 +141,7 @@@ extern char etext[]
  int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
  {
        pfn_t hpaddr;
-       u64 va;
+       u64 vpn;
        u64 vsid;
        struct kvmppc_sid_map *map;
        volatile u32 *pteg;
        BUG_ON(!map);
  
        vsid = map->host_vsid;
-       va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);
+       vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
+               ((eaddr & ~ESID_MASK) >> VPN_SHIFT);
  next_pteg:
        if (rr == 16) {
                primary = !primary;
        dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
                    orig_pte->may_write ? 'w' : '-',
                    orig_pte->may_execute ? 'x' : '-',
-                   orig_pte->eaddr, (ulong)pteg, va,
+                   orig_pte->eaddr, (ulong)pteg, vpn,
                    orig_pte->vpage, hpaddr);
  
        pte->slot = (ulong)&pteg[rr];
-       pte->host_va = va;
+       pte->host_vpn = vpn;
        pte->pte = *orig_pte;
        pte->pfn = hpaddr >> PAGE_SHIFT;
  
        kvmppc_mmu_hpte_cache_map(vcpu, pte);
  
 +      kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
  out:
        return r;
  }
index 6b2c80e496813c7a81b11da0b21769b7dd1212e9,4d72f9ebc554bddf331ac9d9201b10cd0d3e484f..d0205a545a81913d1a0698884ee9d6a69ae75075
@@@ -33,7 -33,7 +33,7 @@@
  
  void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
  {
-       ppc_md.hpte_invalidate(pte->slot, pte->host_va,
+       ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
                               MMU_PAGE_4K, MMU_SEGSIZE_256M,
                               false);
  }
@@@ -80,8 -80,9 +80,9 @@@ static struct kvmppc_sid_map *find_sid_
  
  int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
  {
+       unsigned long vpn;
        pfn_t hpaddr;
-       ulong hash, hpteg, va;
+       ulong hash, hpteg;
        u64 vsid;
        int ret;
        int rflags = 0x192;
        }
  
        vsid = map->host_vsid;
-       va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
+       vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);
  
        if (!orig_pte->may_write)
                rflags |= HPTE_R_PP;
        else
                kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
  
-       hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);
+       hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);
  
  map_again:
        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
                        goto out;
                }
  
-       ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M);
+       ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
+                                MMU_PAGE_4K, MMU_SEGSIZE_256M);
  
        if (ret < 0) {
                /* If we couldn't map a primary PTE, try a secondary */
        } else {
                struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
  
-               trace_kvm_book3s_64_mmu_map(rflags, hpteg, va, hpaddr, orig_pte);
+               trace_kvm_book3s_64_mmu_map(rflags, hpteg,
+                                           vpn, hpaddr, orig_pte);
  
                /* The ppc_md code may give us a secondary entry even though we
                   asked for a primary. Fix up. */
                }
  
                pte->slot = hpteg + (ret & 7);
-               pte->host_va = va;
+               pte->host_vpn = vpn;
                pte->pte = *orig_pte;
                pte->pfn = hpaddr >> PAGE_SHIFT;
  
                kvmppc_mmu_hpte_cache_map(vcpu, pte);
        }
 +      kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
  
  out:
        return r;
index 9a15da76e56bed6aa702e5d0682abc1e6983867e,721d4603a23587bf692c887a7c10c9aca73c2ccb..cd8025db3017ec52a95f61e6c6ef97f7df8ea4ce
@@@ -30,7 -30,6 +30,7 @@@
  #include <linux/cpumask.h>
  #include <linux/spinlock.h>
  #include <linux/page-flags.h>
 +#include <linux/srcu.h>
  
  #include <asm/reg.h>
  #include <asm/cputable.h>
@@@ -143,22 -142,6 +143,22 @@@ static void init_vpa(struct kvm_vcpu *v
        vpa->yield_count = 1;
  }
  
 +static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
 +                 unsigned long addr, unsigned long len)
 +{
 +      /* check address is cacheline aligned */
 +      if (addr & (L1_CACHE_BYTES - 1))
 +              return -EINVAL;
 +      spin_lock(&vcpu->arch.vpa_update_lock);
 +      if (v->next_gpa != addr || v->len != len) {
 +              v->next_gpa = addr;
 +              v->len = addr ? len : 0;
 +              v->update_pending = 1;
 +      }
 +      spin_unlock(&vcpu->arch.vpa_update_lock);
 +      return 0;
 +}
 +
  /* Length for a per-processor buffer is passed in at offset 4 in the buffer */
  struct reg_vpa {
        u32 dummy;
@@@ -337,8 -320,7 +337,8 @@@ static void kvmppc_update_vpas(struct k
        spin_lock(&vcpu->arch.vpa_update_lock);
        if (vcpu->arch.vpa.update_pending) {
                kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
 -              init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
 +              if (vcpu->arch.vpa.pinned_addr)
 +                      init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
        }
        if (vcpu->arch.dtl.update_pending) {
                kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
@@@ -384,16 -366,13 +384,16 @@@ int kvmppc_pseries_do_hcall(struct kvm_
        unsigned long req = kvmppc_get_gpr(vcpu, 3);
        unsigned long target, ret = H_SUCCESS;
        struct kvm_vcpu *tvcpu;
 +      int idx;
  
        switch (req) {
        case H_ENTER:
 +              idx = srcu_read_lock(&vcpu->kvm->srcu);
                ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
                                              kvmppc_get_gpr(vcpu, 5),
                                              kvmppc_get_gpr(vcpu, 6),
                                              kvmppc_get_gpr(vcpu, 7));
 +              srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        case H_CEDE:
                break;
@@@ -432,7 -411,6 +432,7 @@@ static int kvmppc_handle_exit(struct kv
                              struct task_struct *tsk)
  {
        int r = RESUME_HOST;
 +      int srcu_idx;
  
        vcpu->stat.sum_exits++;
  
         * have been handled already.
         */
        case BOOK3S_INTERRUPT_H_DATA_STORAGE:
 +              srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = kvmppc_book3s_hv_page_fault(run, vcpu,
                                vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
 +              srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
                break;
        case BOOK3S_INTERRUPT_H_INST_STORAGE:
 +              srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = kvmppc_book3s_hv_page_fault(run, vcpu,
                                kvmppc_get_pc(vcpu), 0);
 +              srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
                break;
        /*
         * This occurs if the guest executes an illegal instruction.
@@@ -561,175 -535,36 +561,175 @@@ int kvm_arch_vcpu_ioctl_set_sregs(struc
        return 0;
  }
  
 -int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 +int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
  {
 -      int r = -EINVAL;
 +      int r = 0;
 +      long int i;
  
 -      switch (reg->id) {
 +      switch (id) {
        case KVM_REG_PPC_HIOR:
 -              r = put_user(0, (u64 __user *)reg->addr);
 +              *val = get_reg_val(id, 0);
 +              break;
 +      case KVM_REG_PPC_DABR:
 +              *val = get_reg_val(id, vcpu->arch.dabr);
 +              break;
 +      case KVM_REG_PPC_DSCR:
 +              *val = get_reg_val(id, vcpu->arch.dscr);
 +              break;
 +      case KVM_REG_PPC_PURR:
 +              *val = get_reg_val(id, vcpu->arch.purr);
 +              break;
 +      case KVM_REG_PPC_SPURR:
 +              *val = get_reg_val(id, vcpu->arch.spurr);
 +              break;
 +      case KVM_REG_PPC_AMR:
 +              *val = get_reg_val(id, vcpu->arch.amr);
 +              break;
 +      case KVM_REG_PPC_UAMOR:
 +              *val = get_reg_val(id, vcpu->arch.uamor);
 +              break;
 +      case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
 +              i = id - KVM_REG_PPC_MMCR0;
 +              *val = get_reg_val(id, vcpu->arch.mmcr[i]);
 +              break;
 +      case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
 +              i = id - KVM_REG_PPC_PMC1;
 +              *val = get_reg_val(id, vcpu->arch.pmc[i]);
 +              break;
 +#ifdef CONFIG_VSX
 +      case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 +              if (cpu_has_feature(CPU_FTR_VSX)) {
 +                      /* VSX => FP reg i is stored in arch.vsr[2*i] */
 +                      long int i = id - KVM_REG_PPC_FPR0;
 +                      *val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
 +              } else {
 +                      /* let generic code handle it */
 +                      r = -EINVAL;
 +              }
 +              break;
 +      case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
 +              if (cpu_has_feature(CPU_FTR_VSX)) {
 +                      long int i = id - KVM_REG_PPC_VSR0;
 +                      val->vsxval[0] = vcpu->arch.vsr[2 * i];
 +                      val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
 +              } else {
 +                      r = -ENXIO;
 +              }
 +              break;
 +#endif /* CONFIG_VSX */
 +      case KVM_REG_PPC_VPA_ADDR:
 +              spin_lock(&vcpu->arch.vpa_update_lock);
 +              *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
 +              spin_unlock(&vcpu->arch.vpa_update_lock);
 +              break;
 +      case KVM_REG_PPC_VPA_SLB:
 +              spin_lock(&vcpu->arch.vpa_update_lock);
 +              val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
 +              val->vpaval.length = vcpu->arch.slb_shadow.len;
 +              spin_unlock(&vcpu->arch.vpa_update_lock);
 +              break;
 +      case KVM_REG_PPC_VPA_DTL:
 +              spin_lock(&vcpu->arch.vpa_update_lock);
 +              val->vpaval.addr = vcpu->arch.dtl.next_gpa;
 +              val->vpaval.length = vcpu->arch.dtl.len;
 +              spin_unlock(&vcpu->arch.vpa_update_lock);
                break;
        default:
 +              r = -EINVAL;
                break;
        }
  
        return r;
  }
  
 -int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 +int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
  {
 -      int r = -EINVAL;
 +      int r = 0;
 +      long int i;
 +      unsigned long addr, len;
  
 -      switch (reg->id) {
 +      switch (id) {
        case KVM_REG_PPC_HIOR:
 -      {
 -              u64 hior;
                /* Only allow this to be set to zero */
 -              r = get_user(hior, (u64 __user *)reg->addr);
 -              if (!r && (hior != 0))
 +              if (set_reg_val(id, *val))
                        r = -EINVAL;
                break;
 -      }
 +      case KVM_REG_PPC_DABR:
 +              vcpu->arch.dabr = set_reg_val(id, *val);
 +              break;
 +      case KVM_REG_PPC_DSCR:
 +              vcpu->arch.dscr = set_reg_val(id, *val);
 +              break;
 +      case KVM_REG_PPC_PURR:
 +              vcpu->arch.purr = set_reg_val(id, *val);
 +              break;
 +      case KVM_REG_PPC_SPURR:
 +              vcpu->arch.spurr = set_reg_val(id, *val);
 +              break;
 +      case KVM_REG_PPC_AMR:
 +              vcpu->arch.amr = set_reg_val(id, *val);
 +              break;
 +      case KVM_REG_PPC_UAMOR:
 +              vcpu->arch.uamor = set_reg_val(id, *val);
 +              break;
 +      case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
 +              i = id - KVM_REG_PPC_MMCR0;
 +              vcpu->arch.mmcr[i] = set_reg_val(id, *val);
 +              break;
 +      case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
 +              i = id - KVM_REG_PPC_PMC1;
 +              vcpu->arch.pmc[i] = set_reg_val(id, *val);
 +              break;
 +#ifdef CONFIG_VSX
 +      case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
 +              if (cpu_has_feature(CPU_FTR_VSX)) {
 +                      /* VSX => FP reg i is stored in arch.vsr[2*i] */
 +                      long int i = id - KVM_REG_PPC_FPR0;
 +                      vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
 +              } else {
 +                      /* let generic code handle it */
 +                      r = -EINVAL;
 +              }
 +              break;
 +      case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
 +              if (cpu_has_feature(CPU_FTR_VSX)) {
 +                      long int i = id - KVM_REG_PPC_VSR0;
 +                      vcpu->arch.vsr[2 * i] = val->vsxval[0];
 +                      vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
 +              } else {
 +                      r = -ENXIO;
 +              }
 +              break;
 +#endif /* CONFIG_VSX */
 +      case KVM_REG_PPC_VPA_ADDR:
 +              addr = set_reg_val(id, *val);
 +              r = -EINVAL;
 +              if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
 +                            vcpu->arch.dtl.next_gpa))
 +                      break;
 +              r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
 +              break;
 +      case KVM_REG_PPC_VPA_SLB:
 +              addr = val->vpaval.addr;
 +              len = val->vpaval.length;
 +              r = -EINVAL;
 +              if (addr && !vcpu->arch.vpa.next_gpa)
 +                      break;
 +              r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
 +              break;
 +      case KVM_REG_PPC_VPA_DTL:
 +              addr = val->vpaval.addr;
 +              len = val->vpaval.length;
 +              r = -EINVAL;
 +              if (len < sizeof(struct dtl_entry))
 +                      break;
 +              if (addr && !vcpu->arch.vpa.next_gpa)
 +                      break;
 +              len -= len % sizeof(struct dtl_entry);
 +              r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 +              break;
        default:
 +              r = -EINVAL;
                break;
        }
  
@@@ -862,11 -697,17 +862,11 @@@ extern void xics_wake_cpu(int cpu)
  static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
                                   struct kvm_vcpu *vcpu)
  {
 -      struct kvm_vcpu *v;
 -
        if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
                return;
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
        --vc->n_runnable;
        ++vc->n_busy;
 -      /* decrement the physical thread id of each following vcpu */
 -      v = vcpu;
 -      list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
 -              --v->arch.ptid;
        list_del(&vcpu->arch.run_list);
  }
  
@@@ -979,7 -820,6 +979,7 @@@ static int kvmppc_run_core(struct kvmpp
        long ret;
        u64 now;
        int ptid, i, need_vpa_update;
 +      int srcu_idx;
  
        /* don't start if any threads have a signal pending */
        need_vpa_update = 0;
        spin_unlock(&vc->lock);
  
        kvm_guest_enter();
 +
 +      srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu);
 +
        __kvmppc_vcore_entry(NULL, vcpu0);
        for (i = 0; i < threads_per_core; ++i)
                kvmppc_release_hwthread(vc->pcpu + i);
        vc->vcore_state = VCORE_EXITING;
        spin_unlock(&vc->lock);
  
 +      srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx);
 +
        /* make sure updates to secondary vcpu structs are visible now */
        smp_mb();
        kvm_guest_exit();
@@@ -1348,7 -1183,7 +1348,7 @@@ static const struct vm_operations_struc
  
  static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
  {
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &kvm_rma_vm_ops;
        return 0;
  }
@@@ -1438,7 -1273,7 +1438,7 @@@ int kvm_vm_ioctl_get_dirty_log(struct k
        n = kvm_dirty_bitmap_bytes(memslot);
        memset(memslot->dirty_bitmap, 0, n);
  
 -      r = kvmppc_hv_get_dirty_log(kvm, memslot);
 +      r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
        if (r)
                goto out;
  
@@@ -1464,86 -1299,53 +1464,86 @@@ static unsigned long slb_pgsize_encodin
        return senc;
  }
  
 -int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 -                              struct kvm_userspace_memory_region *mem)
 +static void unpin_slot(struct kvm_memory_slot *memslot)
  {
 -      unsigned long npages;
 -      unsigned long *phys;
 +      unsigned long *physp;
 +      unsigned long j, npages, pfn;
 +      struct page *page;
  
 -      /* Allocate a slot_phys array */
 -      phys = kvm->arch.slot_phys[mem->slot];
 -      if (!kvm->arch.using_mmu_notifiers && !phys) {
 -              npages = mem->memory_size >> PAGE_SHIFT;
 -              phys = vzalloc(npages * sizeof(unsigned long));
 -              if (!phys)
 -                      return -ENOMEM;
 -              kvm->arch.slot_phys[mem->slot] = phys;
 -              kvm->arch.slot_npages[mem->slot] = npages;
 +      physp = memslot->arch.slot_phys;
 +      npages = memslot->npages;
 +      if (!physp)
 +              return;
 +      for (j = 0; j < npages; j++) {
 +              if (!(physp[j] & KVMPPC_GOT_PAGE))
 +                      continue;
 +              pfn = physp[j] >> PAGE_SHIFT;
 +              page = pfn_to_page(pfn);
 +              SetPageDirty(page);
 +              put_page(page);
        }
 +}
 +
 +void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
 +                            struct kvm_memory_slot *dont)
 +{
 +      if (!dont || free->arch.rmap != dont->arch.rmap) {
 +              vfree(free->arch.rmap);
 +              free->arch.rmap = NULL;
 +      }
 +      if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
 +              unpin_slot(free);
 +              vfree(free->arch.slot_phys);
 +              free->arch.slot_phys = NULL;
 +      }
 +}
 +
 +int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
 +                             unsigned long npages)
 +{
 +      slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
 +      if (!slot->arch.rmap)
 +              return -ENOMEM;
 +      slot->arch.slot_phys = NULL;
  
        return 0;
  }
  
 -static void unpin_slot(struct kvm *kvm, int slot_id)
 +int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 +                                    struct kvm_memory_slot *memslot,
 +                                    struct kvm_userspace_memory_region *mem)
  {
 -      unsigned long *physp;
 -      unsigned long j, npages, pfn;
 -      struct page *page;
 +      unsigned long *phys;
  
 -      physp = kvm->arch.slot_phys[slot_id];
 -      npages = kvm->arch.slot_npages[slot_id];
 -      if (physp) {
 -              spin_lock(&kvm->arch.slot_phys_lock);
 -              for (j = 0; j < npages; j++) {
 -                      if (!(physp[j] & KVMPPC_GOT_PAGE))
 -                              continue;
 -                      pfn = physp[j] >> PAGE_SHIFT;
 -                      page = pfn_to_page(pfn);
 -                      SetPageDirty(page);
 -                      put_page(page);
 -              }
 -              kvm->arch.slot_phys[slot_id] = NULL;
 -              spin_unlock(&kvm->arch.slot_phys_lock);
 -              vfree(physp);
 +      /* Allocate a slot_phys array if needed */
 +      phys = memslot->arch.slot_phys;
 +      if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
 +              phys = vzalloc(memslot->npages * sizeof(unsigned long));
 +              if (!phys)
 +                      return -ENOMEM;
 +              memslot->arch.slot_phys = phys;
        }
 +
 +      return 0;
  }
  
  void kvmppc_core_commit_memory_region(struct kvm *kvm,
 -                              struct kvm_userspace_memory_region *mem)
 +                                    struct kvm_userspace_memory_region *mem,
 +                                    struct kvm_memory_slot old)
  {
 +      unsigned long npages = mem->memory_size >> PAGE_SHIFT;
 +      struct kvm_memory_slot *memslot;
 +
 +      if (npages && old.npages) {
 +              /*
 +               * If modifying a memslot, reset all the rmap dirty bits.
 +               * If this is a new memslot, we don't need to do anything
 +               * since the rmap array starts out as all zeroes,
 +               * i.e. no pages are dirty.
 +               */
 +              memslot = id_to_memslot(kvm->memslots, mem->slot);
 +              kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
 +      }
  }
  
  static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
        unsigned long rmls;
        unsigned long *physp;
        unsigned long i, npages;
 +      int srcu_idx;
  
        mutex_lock(&kvm->lock);
        if (kvm->arch.rma_setup_done)
        }
  
        /* Look up the memslot for guest physical address 0 */
 +      srcu_idx = srcu_read_lock(&kvm->srcu);
        memslot = gfn_to_memslot(kvm, 0);
  
        /* We must have some memory at 0 by now */
        err = -EINVAL;
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
 -              goto out;
 +              goto out_srcu;
  
        /* Look up the VMA for the start of this memory slot */
        hva = memslot->userspace_addr;
                err = -EPERM;
                if (cpu_has_feature(CPU_FTR_ARCH_201)) {
                        pr_err("KVM: CPU requires an RMO\n");
 -                      goto out;
 +                      goto out_srcu;
                }
  
                /* We can handle 4k, 64k or 16M pages in the VRMA */
                err = -EINVAL;
                if (!(psize == 0x1000 || psize == 0x10000 ||
                      psize == 0x1000000))
 -                      goto out;
 +                      goto out_srcu;
  
                /* Update VRMASD field in the LPCR */
                senc = slb_pgsize_encoding(psize);
                err = -EINVAL;
                if (rmls < 0) {
                        pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
 -                      goto out;
 +                      goto out_srcu;
                }
                atomic_inc(&ri->use_count);
                kvm->arch.rma = ri;
                /* Initialize phys addrs of pages in RMO */
                npages = ri->npages;
                porder = __ilog2(npages);
 -              physp = kvm->arch.slot_phys[memslot->id];
 -              spin_lock(&kvm->arch.slot_phys_lock);
 -              for (i = 0; i < npages; ++i)
 -                      physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder;
 -              spin_unlock(&kvm->arch.slot_phys_lock);
 +              physp = memslot->arch.slot_phys;
 +              if (physp) {
 +                      if (npages > memslot->npages)
 +                              npages = memslot->npages;
 +                      spin_lock(&kvm->arch.slot_phys_lock);
 +                      for (i = 0; i < npages; ++i)
 +                              physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
 +                                      porder;
 +                      spin_unlock(&kvm->arch.slot_phys_lock);
 +              }
        }
  
        /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
        smp_wmb();
        kvm->arch.rma_setup_done = 1;
        err = 0;
 + out_srcu:
 +      srcu_read_unlock(&kvm->srcu, srcu_idx);
   out:
        mutex_unlock(&kvm->lock);
        return err;
@@@ -1735,6 -1528,12 +1735,6 @@@ int kvmppc_core_init_vm(struct kvm *kvm
  
  void kvmppc_core_destroy_vm(struct kvm *kvm)
  {
 -      unsigned long i;
 -
 -      if (!kvm->arch.using_mmu_notifiers)
 -              for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
 -                      unpin_slot(kvm, i);
 -
        if (kvm->arch.rma) {
                kvm_release_rma(kvm->arch.rma);
                kvm->arch.rma = NULL;
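
The recurring change in book3s_hv.c above is to bracket every path that consults the memslot array (H_ENTER, the HV page-fault handlers, the core-run loop, RMA setup) in an SRCU read-side section on kvm->srcu. A minimal sketch of the idiom using the stock srcu_read_lock()/srcu_read_unlock() API; the function itself is hypothetical:

	#include <linux/srcu.h>

	static void example_memslot_user(struct kvm *kvm)
	{
		int idx;

		idx = srcu_read_lock(&kvm->srcu);
		/* memslot lookups (e.g. gfn_to_memslot()) are safe here;
		 * writers block in synchronize_srcu() until readers leave. */
		srcu_read_unlock(&kvm->srcu, idx);
	}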
diff --combined arch/powerpc/kvm/trace.h
index 519aba8bb3d38e5cf227c346cc57f4d855a3ae6c,ddb6a2149d4460c067da161de4672f3cbaf4a97a..e326489a54205b30bc8c111c442ca19ab6d5c831
@@@ -31,126 -31,6 +31,126 @@@ TRACE_EVENT(kvm_ppc_instr
                  __entry->inst, __entry->pc, __entry->emulate)
  );
  
 +#ifdef CONFIG_PPC_BOOK3S
 +#define kvm_trace_symbol_exit \
 +      {0x100, "SYSTEM_RESET"}, \
 +      {0x200, "MACHINE_CHECK"}, \
 +      {0x300, "DATA_STORAGE"}, \
 +      {0x380, "DATA_SEGMENT"}, \
 +      {0x400, "INST_STORAGE"}, \
 +      {0x480, "INST_SEGMENT"}, \
 +      {0x500, "EXTERNAL"}, \
 +      {0x501, "EXTERNAL_LEVEL"}, \
 +      {0x502, "EXTERNAL_HV"}, \
 +      {0x600, "ALIGNMENT"}, \
 +      {0x700, "PROGRAM"}, \
 +      {0x800, "FP_UNAVAIL"}, \
 +      {0x900, "DECREMENTER"}, \
 +      {0x980, "HV_DECREMENTER"}, \
 +      {0xc00, "SYSCALL"}, \
 +      {0xd00, "TRACE"}, \
 +      {0xe00, "H_DATA_STORAGE"}, \
 +      {0xe20, "H_INST_STORAGE"}, \
 +      {0xe40, "H_EMUL_ASSIST"}, \
 +      {0xf00, "PERFMON"}, \
 +      {0xf20, "ALTIVEC"}, \
 +      {0xf40, "VSX"}
 +#else
 +#define kvm_trace_symbol_exit \
 +      {0, "CRITICAL"}, \
 +      {1, "MACHINE_CHECK"}, \
 +      {2, "DATA_STORAGE"}, \
 +      {3, "INST_STORAGE"}, \
 +      {4, "EXTERNAL"}, \
 +      {5, "ALIGNMENT"}, \
 +      {6, "PROGRAM"}, \
 +      {7, "FP_UNAVAIL"}, \
 +      {8, "SYSCALL"}, \
 +      {9, "AP_UNAVAIL"}, \
 +      {10, "DECREMENTER"}, \
 +      {11, "FIT"}, \
 +      {12, "WATCHDOG"}, \
 +      {13, "DTLB_MISS"}, \
 +      {14, "ITLB_MISS"}, \
 +      {15, "DEBUG"}, \
 +      {32, "SPE_UNAVAIL"}, \
 +      {33, "SPE_FP_DATA"}, \
 +      {34, "SPE_FP_ROUND"}, \
 +      {35, "PERFORMANCE_MONITOR"}, \
 +      {36, "DOORBELL"}, \
 +      {37, "DOORBELL_CRITICAL"}, \
 +      {38, "GUEST_DBELL"}, \
 +      {39, "GUEST_DBELL_CRIT"}, \
 +      {40, "HV_SYSCALL"}, \
 +      {41, "HV_PRIV"}
 +#endif
 +
 +TRACE_EVENT(kvm_exit,
 +      TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
 +      TP_ARGS(exit_nr, vcpu),
 +
 +      TP_STRUCT__entry(
 +              __field(        unsigned int,   exit_nr         )
 +              __field(        unsigned long,  pc              )
 +              __field(        unsigned long,  msr             )
 +              __field(        unsigned long,  dar             )
 +#ifdef CONFIG_KVM_BOOK3S_PR
 +              __field(        unsigned long,  srr1            )
 +#endif
 +              __field(        unsigned long,  last_inst       )
 +      ),
 +
 +      TP_fast_assign(
 +#ifdef CONFIG_KVM_BOOK3S_PR
 +              struct kvmppc_book3s_shadow_vcpu *svcpu;
 +#endif
 +              __entry->exit_nr        = exit_nr;
 +              __entry->pc             = kvmppc_get_pc(vcpu);
 +              __entry->dar            = kvmppc_get_fault_dar(vcpu);
 +              __entry->msr            = vcpu->arch.shared->msr;
 +#ifdef CONFIG_KVM_BOOK3S_PR
 +              svcpu = svcpu_get(vcpu);
 +              __entry->srr1           = svcpu->shadow_srr1;
 +              svcpu_put(svcpu);
 +#endif
 +              __entry->last_inst      = vcpu->arch.last_inst;
 +      ),
 +
 +      TP_printk("exit=%s"
 +              " | pc=0x%lx"
 +              " | msr=0x%lx"
 +              " | dar=0x%lx"
 +#ifdef CONFIG_KVM_BOOK3S_PR
 +              " | srr1=0x%lx"
 +#endif
 +              " | last_inst=0x%lx"
 +              ,
 +              __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
 +              __entry->pc,
 +              __entry->msr,
 +              __entry->dar,
 +#ifdef CONFIG_KVM_BOOK3S_PR
 +              __entry->srr1,
 +#endif
 +              __entry->last_inst
 +              )
 +);
 +
 +TRACE_EVENT(kvm_unmap_hva,
 +      TP_PROTO(unsigned long hva),
 +      TP_ARGS(hva),
 +
 +      TP_STRUCT__entry(
 +              __field(        unsigned long,  hva             )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->hva            = hva;
 +      ),
 +
 +      TP_printk("unmap hva 0x%lx\n", __entry->hva)
 +);
 +
  TRACE_EVENT(kvm_stlb_inval,
        TP_PROTO(unsigned int stlb_index),
        TP_ARGS(stlb_index),
@@@ -218,31 -98,41 +218,31 @@@ TRACE_EVENT(kvm_gtlb_write
                __entry->word1, __entry->word2)
  );
  
 -
 -/*************************************************************************
 - *                         Book3S trace points                           *
 - *************************************************************************/
 -
 -#ifdef CONFIG_KVM_BOOK3S_PR
 -
 -TRACE_EVENT(kvm_book3s_exit,
 -      TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
 -      TP_ARGS(exit_nr, vcpu),
 +TRACE_EVENT(kvm_check_requests,
 +      TP_PROTO(struct kvm_vcpu *vcpu),
 +      TP_ARGS(vcpu),
  
        TP_STRUCT__entry(
 -              __field(        unsigned int,   exit_nr         )
 -              __field(        unsigned long,  pc              )
 -              __field(        unsigned long,  msr             )
 -              __field(        unsigned long,  dar             )
 -              __field(        unsigned long,  srr1            )
 +              __field(        __u32,  cpu_nr          )
 +              __field(        __u32,  requests        )
        ),
  
        TP_fast_assign(
 -              struct kvmppc_book3s_shadow_vcpu *svcpu;
 -              __entry->exit_nr        = exit_nr;
 -              __entry->pc             = kvmppc_get_pc(vcpu);
 -              __entry->dar            = kvmppc_get_fault_dar(vcpu);
 -              __entry->msr            = vcpu->arch.shared->msr;
 -              svcpu = svcpu_get(vcpu);
 -              __entry->srr1           = svcpu->shadow_srr1;
 -              svcpu_put(svcpu);
 +              __entry->cpu_nr         = vcpu->vcpu_id;
 +              __entry->requests       = vcpu->requests;
        ),
  
 -      TP_printk("exit=0x%x | pc=0x%lx | msr=0x%lx | dar=0x%lx | srr1=0x%lx",
 -                __entry->exit_nr, __entry->pc, __entry->msr, __entry->dar,
 -                __entry->srr1)
 +      TP_printk("vcpu=%x requests=%x",
 +              __entry->cpu_nr, __entry->requests)
  );
  
 +
 +/*************************************************************************
 + *                         Book3S trace points                           *
 + *************************************************************************/
 +
 +#ifdef CONFIG_KVM_BOOK3S_PR
 +
  TRACE_EVENT(kvm_book3s_reenter,
        TP_PROTO(int r, struct kvm_vcpu *vcpu),
        TP_ARGS(r, vcpu),
@@@ -299,7 -189,7 +299,7 @@@ TRACE_EVENT(kvm_book3s_mmu_map
        TP_ARGS(pte),
  
        TP_STRUCT__entry(
-               __field(        u64,            host_v        )
+               __field(        u64,            host_vpn        )
                __field(        u64,            pfn             )
                __field(        ulong,          eaddr           )
                __field(        u64,            vpage           )
        ),
  
        TP_fast_assign(
-               __entry->host_va        = pte->host_va;
+               __entry->host_vpn       = pte->host_vpn;
                __entry->pfn            = pte->pfn;
                __entry->eaddr          = pte->pte.eaddr;
                __entry->vpage          = pte->pte.vpage;
                                          (pte->pte.may_execute ? 0x1 : 0);
        ),
  
-       TP_printk("Map: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
-                 __entry->host_va, __entry->pfn, __entry->eaddr,
+       TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
+                 __entry->host_vpn, __entry->pfn, __entry->eaddr,
                  __entry->vpage, __entry->raddr, __entry->flags)
  );
  
@@@ -328,7 -218,7 +328,7 @@@ TRACE_EVENT(kvm_book3s_mmu_invalidate
        TP_ARGS(pte),
  
        TP_STRUCT__entry(
-               __field(        u64,            host_v        )
+               __field(        u64,            host_vpn        )
                __field(        u64,            pfn             )
                __field(        ulong,          eaddr           )
                __field(        u64,            vpage           )
        ),
  
        TP_fast_assign(
-               __entry->host_va        = pte->host_va;
+               __entry->host_vpn       = pte->host_vpn;
                __entry->pfn            = pte->pfn;
                __entry->eaddr          = pte->pte.eaddr;
                __entry->vpage          = pte->pte.vpage;
        ),
  
        TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
-                 __entry->host_va, __entry->pfn, __entry->eaddr,
+                 __entry->host_vpn, __entry->pfn, __entry->eaddr,
                  __entry->vpage, __entry->raddr, __entry->flags)
  );
  
@@@ -505,44 -395,6 +505,44 @@@ TRACE_EVENT(kvm_booke206_gtlb_write
                __entry->mas2, __entry->mas7_3)
  );
  
 +TRACE_EVENT(kvm_booke206_ref_release,
 +      TP_PROTO(__u64 pfn, __u32 flags),
 +      TP_ARGS(pfn, flags),
 +
 +      TP_STRUCT__entry(
 +              __field(        __u64,  pfn             )
 +              __field(        __u32,  flags           )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->pfn            = pfn;
 +              __entry->flags          = flags;
 +      ),
 +
 +      TP_printk("pfn=%llx flags=%x",
 +              __entry->pfn, __entry->flags)
 +);
 +
 +TRACE_EVENT(kvm_booke_queue_irqprio,
 +      TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority),
 +      TP_ARGS(vcpu, priority),
 +
 +      TP_STRUCT__entry(
 +              __field(        __u32,  cpu_nr          )
 +              __field(        __u32,  priority                )
 +              __field(        unsigned long,  pending         )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->cpu_nr         = vcpu->vcpu_id;
 +              __entry->priority       = priority;
 +              __entry->pending        = vcpu->arch.pending_exceptions;
 +      ),
 +
 +      TP_printk("vcpu=%x prio=%x pending=%lx",
 +              __entry->cpu_nr, __entry->priority, __entry->pending)
 +);
 +
  #endif
  
  #endif /* _TRACE_KVM_H */
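
The kvm_trace_symbol_exit table above feeds __print_symbolic(), so the kvm_exit tracepoint renders the exit reason by name rather than number in its `exit=%s` field — on Book3S, for instance, exit_nr 0x300 prints as "DATA_STORAGE" and 0xc00 as "SYSCALL".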
diff --combined arch/x86/kvm/mmu.c
index 6f78fa3a47066e6cb110de5bcb8bf85d02157e53,6f85fe0bf958987f0275f1fa25e2b9e5240aad02..aabb1289ff04c906b1d3dfa442185f7c3dc7d189
@@@ -2497,8 -2497,7 +2497,7 @@@ static void mmu_set_spte(struct kvm_vcp
                }
        }
  
-       if (!is_error_pfn(pfn))
-               kvm_release_pfn_clean(pfn);
+       kvm_release_pfn_clean(pfn);
  }
  
  static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
        mmu_free_roots(vcpu);
  }
  
 +static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
 +{
 +      int bit7;
 +
 +      bit7 = (gpte >> 7) & 1;
 +      return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
 +}
 +
  static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
                                     bool no_dirty_log)
  {
        return gfn_to_pfn_memslot_atomic(slot, gfn);
  }
  
 +static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
 +                                struct kvm_mmu_page *sp, u64 *spte,
 +                                u64 gpte)
 +{
 +      if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
 +              goto no_present;
 +
 +      if (!is_present_gpte(gpte))
 +              goto no_present;
 +
 +      if (!(gpte & PT_ACCESSED_MASK))
 +              goto no_present;
 +
 +      return false;
 +
 +no_present:
 +      drop_spte(vcpu->kvm, spte);
 +      return true;
 +}
 +
  static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *sp,
                                    u64 *start, u64 *end)
@@@ -2728,6 -2699,11 +2727,6 @@@ static void transparent_hugepage_adjust
        }
  }
  
 -static bool mmu_invalid_pfn(pfn_t pfn)
 -{
 -      return unlikely(is_invalid_pfn(pfn));
 -}
 -
  static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                                pfn_t pfn, unsigned access, int *ret_val)
  {
@@@ -2886,7 -2862,7 +2885,7 @@@ static int nonpaging_map(struct kvm_vcp
                return r;
  
        spin_lock(&vcpu->kvm->mmu_lock);
 -      if (mmu_notifier_retry(vcpu, mmu_seq))
 +      if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;
        kvm_mmu_free_some_pages(vcpu);
        if (likely(!force_pt_level))
@@@ -3355,7 -3331,7 +3354,7 @@@ static int tdp_page_fault(struct kvm_vc
                return r;
  
        spin_lock(&vcpu->kvm->mmu_lock);
 -      if (mmu_notifier_retry(vcpu, mmu_seq))
 +      if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;
        kvm_mmu_free_some_pages(vcpu);
        if (likely(!force_pt_level))
@@@ -3423,6 -3399,14 +3422,6 @@@ static void paging_free(struct kvm_vcp
        nonpaging_free(vcpu);
  }
  
 -static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
 -{
 -      int bit7;
 -
 -      bit7 = (gpte >> 7) & 1;
 -      return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
 -}
 -
  static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
  {
        unsigned mask;
diff --combined include/uapi/linux/kvm.h
index 494a84c37c3eee49b27e657c057096cd2b0ae88d,0a6d6ba44c858959cd2ed6b912751c5f36810989..494a84c37c3eee49b27e657c057096cd2b0ae88d
@@@ -167,15 -167,10 +167,15 @@@ struct kvm_pit_config 
  #define KVM_EXIT_OSI              18
  #define KVM_EXIT_PAPR_HCALL     19
  #define KVM_EXIT_S390_UCONTROL          20
 +#define KVM_EXIT_WATCHDOG         21
  
  /* For KVM_EXIT_INTERNAL_ERROR */
 -#define KVM_INTERNAL_ERROR_EMULATION 1
 -#define KVM_INTERNAL_ERROR_SIMUL_EX 2
 +/* Emulate instruction failed. */
 +#define KVM_INTERNAL_ERROR_EMULATION  1
 +/* Encounter unexpected simultaneous exceptions. */
 +#define KVM_INTERNAL_ERROR_SIMUL_EX   2
 +/* Encounter unexpected vm-exit due to delivery event. */
 +#define KVM_INTERNAL_ERROR_DELIVERY_EV        3
  
  /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
  struct kvm_run {
@@@ -482,8 -477,6 +482,8 @@@ struct kvm_ppc_smmu_info 
        struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
  };
  
 +#define KVM_PPC_PVINFO_FLAGS_EV_IDLE   (1<<0)
 +
  #define KVMIO 0xAE
  
  /* machine type bits, to be used as argument to KVM_CREATE_VM */
  #define KVM_CAP_READONLY_MEM 81
  #endif
  #define KVM_CAP_IRQFD_RESAMPLE 82
 +#define KVM_CAP_PPC_BOOKE_WATCHDOG 83
  
  #ifdef KVM_CAP_IRQ_ROUTING
  
@@@ -856,9 -848,6 +856,9 @@@ struct kvm_s390_ucas_mapping 
  #define KVM_PPC_GET_SMMU_INFO   _IOR(KVMIO,  0xa6, struct kvm_ppc_smmu_info)
  /* Available with KVM_CAP_PPC_ALLOC_HTAB */
  #define KVM_PPC_ALLOCATE_HTAB   _IOWR(KVMIO, 0xa7, __u32)
 +#define KVM_CREATE_SPAPR_TCE    _IOW(KVMIO,  0xa8, struct kvm_create_spapr_tce)
 +/* Available with KVM_CAP_RMA */
 +#define KVM_ALLOCATE_RMA        _IOR(KVMIO,  0xa9, struct kvm_allocate_rma)
  
  /*
   * ioctls for vcpu fds
  /* Available with KVM_CAP_XCRS */
  #define KVM_GET_XCRS            _IOR(KVMIO,  0xa6, struct kvm_xcrs)
  #define KVM_SET_XCRS            _IOW(KVMIO,  0xa7, struct kvm_xcrs)
 -#define KVM_CREATE_SPAPR_TCE    _IOW(KVMIO,  0xa8, struct kvm_create_spapr_tce)
 -/* Available with KVM_CAP_RMA */
 -#define KVM_ALLOCATE_RMA        _IOR(KVMIO,  0xa9, struct kvm_allocate_rma)
  /* Available with KVM_CAP_SW_TLB */
  #define KVM_DIRTY_TLB           _IOW(KVMIO,  0xaa, struct kvm_dirty_tlb)
  /* Available with KVM_CAP_ONE_REG */
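
The final hunks move KVM_CREATE_SPAPR_TCE and KVM_ALLOCATE_RMA out of the vcpu-fd ioctl block and up into the VM-fd block where they are issued; the ioctl numbers (0xa8, 0xa9) are untouched, so the userspace ABI is unchanged. A hedged usage sketch — vm_fd is assumed to be an open KVM VM descriptor, and per the KVM API KVM_ALLOCATE_RMA returns a file descriptor for the allocated RMA on success:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int allocate_rma(int vm_fd, struct kvm_allocate_rma *rma)
	{
		/* Issued on the VM fd, not a vcpu fd. */
		return ioctl(vm_fd, KVM_ALLOCATE_RMA, rma);
	}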