x86-32: Add support for 64bit get_user()
author Ville Syrjälä <ville.syrjala@linux.intel.com>
Wed, 12 Dec 2012 11:34:03 +0000 (13:34 +0200)
committer H. Peter Anvin <hpa@linux.intel.com>
Thu, 7 Feb 2013 23:07:28 +0000 (15:07 -0800)
Implement __get_user_8() for x86-32. It returns the 64-bit
result in the edx:eax register pair; ecx is used to pass in
the address and to return the error value.

For consistency, change the register assignment for all
other __get_user_x() variants, so that the address is passed
in ecx/rcx, the error value is returned in ecx/rcx, and
eax/rax contains the actual value.

[ hpa: I modified the patch so that it does NOT change the calling
  conventions for the existing callsites; this also means that the code
  is completely unchanged for 64 bits.

  Instead, continue to use eax for address input/error output and use
  the ecx:edx register pair for the output. ]

This is a partial refresh of a patch [1] by Jamie Lokier from
2004. Only the minimal changes to implement 64bit get_user()
were picked from the original patch.

[1] http://article.gmane.org/gmane.linux.kernel/198823

Originally-by: Jamie Lokier <jamie@shareable.org>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: http://lkml.kernel.org/r/1355312043-11467-1-git-send-email-ville.syrjala@linux.intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
arch/x86/include/asm/uaccess.h
arch/x86/kernel/i386_ksyms_32.c
arch/x86/lib/getuser.S
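
With this in place, get_user() on x86-32 works directly on 64-bit objects;
previously the sizeof()==8 case expanded to __get_user_x(X, ...), which
references the non-existent __get_user_X and fails at link time. A minimal
caller-side sketch (fetch_user_offset() is hypothetical, for illustration
only):

#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical helper: fetch a user-supplied 64-bit offset with a
 * single get_user() instead of copy_from_user(). On x86-32 the
 * sizeof()==8 case now expands to the __get_user_8 call added below. */
static int fetch_user_offset(const u64 __user *uptr, u64 *offset)
{
	return get_user(*offset, uptr);	/* 0 on success, -EFAULT on fault */
}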

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 1709801d18ecada506e5709a3dd81a879cb4bfc4..1e963267d44eecd2abd62c6989722fad5c7d1883 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -151,8 +151,15 @@ extern int __get_user_bad(void);
  * On error, the variable @x is set to zero.
  */
 #ifdef CONFIG_X86_32
-#define __get_user_8(__ret_gu, __val_gu, ptr)                          \
-               __get_user_x(X, __ret_gu, __val_gu, ptr)
+#define __get_user_8(ret, x, ptr)                    \
+do {                                                 \
+       register unsigned long long __xx asm("%edx"); \
+       asm volatile("call __get_user_8"              \
+                    : "=a" (ret), "=r" (__xx)        \
+                    : "0" (ptr));                    \
+       (x) = __xx;                                   \
+} while (0)
+
 #else
 #define __get_user_8(__ret_gu, __val_gu, ptr)                          \
                __get_user_x(8, __ret_gu, __val_gu, ptr)
@@ -162,6 +169,7 @@ extern int __get_user_bad(void);
 ({                                                                     \
        int __ret_gu;                                                   \
        unsigned long __val_gu;                                         \
+       unsigned long long __val_gu8;                                   \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
        switch (sizeof(*(ptr))) {                                       \
@@ -175,13 +183,16 @@ extern int __get_user_bad(void);
                __get_user_x(4, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 8:                                                         \
-               __get_user_8(__ret_gu, __val_gu, ptr);                  \
+               __get_user_8(__ret_gu, __val_gu8, ptr);                 \
                break;                                                  \
        default:                                                        \
                __get_user_x(X, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        }                                                               \
-       (x) = (__typeof__(*(ptr)))__val_gu;                             \
+       if (sizeof(*(ptr)) == 8)                                        \
+               (x) = (__typeof__(*(ptr)))__val_gu8;                    \
+       else                                                            \
+               (x) = (__typeof__(*(ptr)))__val_gu;                     \
        __ret_gu;                                                       \
 })
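
The macro above leans on GCC's register-pair allocation: binding the 64-bit
__xx variable to %edx on x86-32 places it in the %edx:%ecx pair (low half in
%edx, high half in %ecx), which is exactly where __get_user_8 leaves the
loaded value. A stand-alone user-space sketch of the same trick (the routine
name and constants are made up for illustration; build with gcc -m32):

#include <stdio.h>

/* Illustration only, not kernel code: the asm fills %edx (low half)
 * and %ecx (high half), and GCC reads the combined 64-bit value out
 * of that register pair because the variable is pinned to %edx. */
static unsigned long long read_edx_ecx_pair(void)
{
	register unsigned long long val asm("%edx");

	asm volatile("movl $0x11223344, %%edx\n\t"	/* low 32 bits  */
		     "movl $0x55667788, %%ecx"		/* high 32 bits */
		     : "=r" (val));
	return val;
}

int main(void)
{
	printf("0x%llx\n", read_edx_ecx_pair());	/* 0x5566778811223344 */
	return 0;
}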
 
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 9c3bd4a2050e802a62ff13dfc52e855e641a802b..0fa69127209a4110f14f2e6ff0692a1ae863f1b3 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -26,6 +26,7 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
 
 EXPORT_SYMBOL(__put_user_1);
 EXPORT_SYMBOL(__put_user_2);
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 156b9c8046704d7a3119356e12977578f3e3792e..d3bf9f99ca770fb779a547713ce98a3304a1d34a 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
  * __get_user_X
  *
  * Inputs:     %[r|e]ax contains the address.
- *             The register is modified, but all changes are undone
- *             before returning because the C code doesn't know about it.
  *
  * Outputs:    %[r|e]ax is error code (0 or -EFAULT)
  *             %[r|e]dx contains zero-extended value
+ *             %ecx contains the high half for 32-bit __get_user_8
  *
  *
  * These functions should not modify any other registers,
@@ -79,22 +78,35 @@ ENTRY(__get_user_4)
        CFI_ENDPROC
 ENDPROC(__get_user_4)
 
-#ifdef CONFIG_X86_64
 ENTRY(__get_user_8)
        CFI_STARTPROC
+#ifdef CONFIG_X86_64
        add $7,%_ASM_AX
        jc bad_get_user
        GET_THREAD_INFO(%_ASM_DX)
        cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
-       jae     bad_get_user
+       jae bad_get_user
        ASM_STAC
 4:     movq -7(%_ASM_AX),%_ASM_DX
        xor %eax,%eax
        ASM_CLAC
        ret
+#else
+       add $7,%_ASM_AX
+       jc bad_get_user_8
+       GET_THREAD_INFO(%_ASM_DX)
+       cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+       jae bad_get_user_8
+       ASM_STAC
+4:     mov -7(%_ASM_AX),%edx
+5:     mov -3(%_ASM_AX),%ecx
+       xor %eax,%eax
+       ASM_CLAC
+       ret
+#endif
        CFI_ENDPROC
 ENDPROC(__get_user_8)
-#endif
+
 
 bad_get_user:
        CFI_STARTPROC
@@ -105,9 +117,24 @@ bad_get_user:
        CFI_ENDPROC
 END(bad_get_user)
 
+#ifdef CONFIG_X86_32
+bad_get_user_8:
+       CFI_STARTPROC
+       xor %edx,%edx
+       xor %ecx,%ecx
+       mov $(-EFAULT),%_ASM_AX
+       ASM_CLAC
+       ret
+       CFI_ENDPROC
+END(bad_get_user_8)
+#endif
+
        _ASM_EXTABLE(1b,bad_get_user)
        _ASM_EXTABLE(2b,bad_get_user)
        _ASM_EXTABLE(3b,bad_get_user)
 #ifdef CONFIG_X86_64
        _ASM_EXTABLE(4b,bad_get_user)
+#else
+       _ASM_EXTABLE(4b,bad_get_user_8)
+       _ASM_EXTABLE(5b,bad_get_user_8)
 #endif
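
In rough C terms, the new 32-bit path and its bad_get_user_8 error path
behave like the sketch below. This is illustrative only (get_user_8_sketch()
and USER_LIMIT are made-up stand-ins): the real code checks TI_addr_limit
from the thread_info and relies on the exception table entries at labels 4:
and 5: to reach bad_get_user_8 on a faulting load, rather than using loads
that cannot fault.

#include <errno.h>
#include <stdint.h>

#define USER_LIMIT 0xc0000000UL	/* stand-in for the addr_limit check */

static int get_user_8_sketch(const uint32_t *uaddr, uint64_t *val)
{
	unsigned long a = (unsigned long)uaddr;
	uint32_t lo, hi;

	/* add $7 / jc, then cmp TI_addr_limit / jae: reject wrap-around
	 * and any address whose last byte lies outside the user range. */
	if (a + 7 < a || a + 7 >= USER_LIMIT) {
		*val = 0;		/* bad_get_user_8 zeroes %edx and %ecx */
		return -EFAULT;
	}

	lo = uaddr[0];			/* 4: mov -7(%eax),%edx -- low half  */
	hi = uaddr[1];			/* 5: mov -3(%eax),%ecx -- high half */
	*val = ((uint64_t)hi << 32) | lo;
	return 0;
}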