Merge branch 'locking/atomics' into locking/core, to pick up WIP commits
author Ingo Molnar <mingo@kernel.org>
Mon, 11 Feb 2019 13:27:05 +0000 (14:27 +0100)
committer Ingo Molnar <mingo@kernel.org>
Mon, 11 Feb 2019 13:27:05 +0000 (14:27 +0100)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Kbuild
MAINTAINERS
arch/arm64/include/asm/atomic_ll_sc.h
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/cmpxchg.h

diff --cc Kbuild
Simple merge
diff --cc MAINTAINERS
Simple merge
diff --cc arch/arm64/include/asm/atomic_ll_sc.h
index af7b990054536c0054fe71f8c8cae080f266f502,3b5e28d6458271000d84554e1465ada591b4da57..e321293e0c8955c1bf99a0959e7b8697ae95ed83
@@@ -246,24 -246,15 +246,24 @@@ __LL_SC_PREFIX(arch_atomic64_dec_if_pos
  
        return result;
  }
- __LL_SC_EXPORT(atomic64_dec_if_positive);
+ __LL_SC_EXPORT(arch_atomic64_dec_if_positive);
  
 -#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl)                 \
 -__LL_SC_INLINE unsigned long                                          \
 -__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,              \
 -                                   unsigned long old,                 \
 -                                   unsigned long new))                \
 +#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl)            \
 +__LL_SC_INLINE u##sz                                                  \
 +__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr,          \
 +                                       unsigned long old,             \
 +                                       u##sz new))                    \
  {                                                                     \
 -      unsigned long tmp, oldval;                                      \
 +      unsigned long tmp;                                              \
 +      u##sz oldval;                                                   \
 +                                                                      \
 +      /*                                                              \
 +       * Sub-word sizes require explicit casting so that the compare  \
 +       * part of the cmpxchg doesn't end up interpreting non-zero     \
 +       * upper bits of the register containing "old".                 \
 +       */                                                             \
 +      if (sz < 32)                                                    \
 +              old = (u##sz)old;                                       \
                                                                        \
        asm volatile(                                                   \
        "       prfm    pstl1strm, %[v]\n"                              \
diff --cc arch/arm64/include/asm/atomic_lse.h
Simple merge
diff --cc arch/arm64/include/asm/cmpxchg.h
index 3f9376f1c409f9c29d51828deb0e12b09bbe5542,e825e61bbfe2f34722f691f2ed47642d27db50e7..e6ea0f42e097b2ed880c51edf46c4244b9f9f502
@@@ -177,29 -177,29 +177,29 @@@ __CMPXCHG_GEN(_mb
        VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);      \
  })
  
- #define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
- ({\
-       int __ret;\
-       __cmpxchg_double_check(ptr1, ptr2); \
-       __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
-                                    (unsigned long)(n1), (unsigned long)(n2), \
-                                    ptr1); \
-       __ret; \
+ #define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)                               \
+ ({                                                                            \
+       int __ret;                                                              \
+       __cmpxchg_double_check(ptr1, ptr2);                                     \
+       __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),  \
+                                    (unsigned long)(n1), (unsigned long)(n2),  \
+                                    ptr1);                                     \
+       __ret;                                                                  \
  })
  
- #define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
- ({\
-       int __ret;\
-       __cmpxchg_double_check(ptr1, ptr2); \
-       __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
-                                 (unsigned long)(n1), (unsigned long)(n2), \
-                                 ptr1); \
-       __ret; \
+ #define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)                 \
+ ({                                                                            \
+       int __ret;                                                              \
+       __cmpxchg_double_check(ptr1, ptr2);                                     \
+       __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),     \
+                                 (unsigned long)(n1), (unsigned long)(n2),     \
+                                 ptr1);                                        \
+       __ret;                                                                  \
  })
  
 -#define __CMPWAIT_CASE(w, sz, name)                                   \
 -static inline void __cmpwait_case_##name(volatile void *ptr,          \
 -                                       unsigned long val)             \
 +#define __CMPWAIT_CASE(w, sfx, sz)                                    \
 +static inline void __cmpwait_case_##sz(volatile void *ptr,            \
 +                                     unsigned long val)               \
  {                                                                     \
        unsigned long tmp;                                              \
                                                                        \
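
The cmpxchg_double -> arch_cmpxchg_double rename above keeps the semantics intact: atomically compare-and-swap two adjacent unsigned longs as one unit, returning 1 on success (__cmpxchg_double_mb returns nonzero when the comparison fails, hence the !). A user-space sketch of the same double-word CAS semantics via the GCC/clang __atomic builtins on a 128-bit type; illustration only, not this commit's code, and it assumes a little-endian LP64 target (it may need -latomic depending on toolchain and -march):

#include <stdio.h>

/* Two adjacent words viewed as one 128-bit unit, mirroring the
 * ptr1/ptr2 adjacency that __cmpxchg_double_check() enforces. */
union dw {
        unsigned long w[2];
        unsigned __int128 full;
};

int main(void)
{
        union dw pair = { .w = { 1, 2 } };

        /* Pack (o1, o2) and (n1, n2); on a little-endian target,
         * w[0] is the low half of the 128-bit value. */
        unsigned __int128 expected = ((unsigned __int128)2 << 64) | 1;
        unsigned __int128 desired  = ((unsigned __int128)4 << 64) | 3;

        /* One compare-and-swap covering both words, i.e. the
         * semantics arch_cmpxchg_double() exposes; this likewise
         * reports success as nonzero. */
        int ok = __atomic_compare_exchange_n(&pair.full, &expected,
                                             desired, 0,
                                             __ATOMIC_SEQ_CST,
                                             __ATOMIC_SEQ_CST);

        printf("swap %s: pair = { %lu, %lu }\n",
               ok ? "succeeded" : "failed", pair.w[0], pair.w[1]);
        return 0;
}
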