sparc64: Improve documentation and readability of atomic backoff code.
author     David S. Miller <davem@davemloft.net>
           Sun, 28 Oct 2012 20:04:47 +0000 (13:04 -0700)
committer  David S. Miller <davem@davemloft.net>
           Sun, 28 Oct 2012 20:04:47 +0000 (13:04 -0700)
Document what's going on in asm/backoff.h with a large and descriptive
comment.  Refer to it above the cpu_relax() definition in
asm/processor_64.h.

Rename the pause patching section to have "3insn" in its name like
the other patching sections do.

Based upon feedback from Sam Ravnborg.

Signed-off-by: David S. Miller <davem@davemloft.net>
arch/sparc/include/asm/backoff.h
arch/sparc/include/asm/processor_64.h
arch/sparc/kernel/entry.h
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/vmlinux.lds.S

index 20f01df0871b18eac5e7007ca2db3605847e3d18..4e02086b839cf2146431265ce77dbb984840d33e 100644 (file)
@@ -1,6 +1,46 @@
 #ifndef _SPARC64_BACKOFF_H
 #define _SPARC64_BACKOFF_H
 
+/* The macros in this file implement an exponential backoff facility
+ * for atomic operations.
+ *
+ * When multiple threads compete on an atomic operation, it is
+ * possible for one thread to be continually denied a successful
+ * completion of the compare-and-swap instruction.  Heavily
+ * threaded cpu implementations like Niagara can compound this
+ * problem even further.
+ *
+ * When an atomic operation fails and needs to be retried, we spin a
+ * certain number of times.  At each subsequent failure of the same
+ * operation we double the spin count, realizing an exponential
+ * backoff.
+ *
+ * When we spin, we try to use an operation that will cause the
+ * current cpu strand to block, and therefore make the core fully
+ * available to any other runnable strands.  There are two
+ * options, based upon cpu capabilities.
+ *
+ * On all cpus prior to SPARC-T4 we do three dummy reads of the
+ * condition code register.  Each read blocks the strand for something
+ * between 40 and 50 cpu cycles.
+ *
+ * For SPARC-T4 and later we have a special "pause" instruction
+ * available.  This is implemented using writes to register %asr27.
+ * The cpu will block the number of cycles written into the register,
+ * unless a disrupting trap happens first.  SPARC-T4 specifically
+ * implements pause with a granularity of 8 cycles.  Each strand has
+ * an internal pause counter which decrements every 8 cycles.  So the
+ * chip shifts the %asr27 value down by 3 bits, and writes the result
+ * into the pause counter.  If a value smaller than 8 is written, the
+ * chip blocks for 1 cycle.
+ *
+ * To achieve the same amount of backoff as the three %ccr reads give
+ * on earlier chips, we shift the backoff value up by 7 bits.  (Three
+ * %ccr reads block for about 128 cycles, 1 << 7 == 128) We write the
+ * whole amount we want to block into the pause register, rather than
+ * loop writing 128 each time.
+ */
+
 #define BACKOFF_LIMIT  (4 * 1024)
 
 #ifdef CONFIG_SMP
@@ -16,7 +56,7 @@
 88:    rd              %ccr, %g0;              \
        rd              %ccr, %g0;              \
        rd              %ccr, %g0;              \
-       .section        .pause_patch,"ax";      \
+       .section        .pause_3insn_patch,"ax";\
        .word           88b;                    \
        sllx            tmp, 7, tmp;            \
        wr              tmp, 0, %asr27;         \
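
For readers less familiar with the assembly macros, the retry flow they implement can be sketched in plain C.  This is only an illustration, not the kernel's code: cas_lost_race() and yield_strand() are hypothetical stand-ins for the failed compare-and-swap and for the strand-yield operation (three %ccr reads, or the %asr27 write on SPARC-T4 and later).

	/* Illustrative sketch of the BACKOFF_* retry flow; not kernel code. */
	#define BACKOFF_LIMIT	(4 * 1024)

	static int  cas_lost_race(void) { return 0; }	/* hypothetical: CAS lost the race */
	static void yield_strand(void)  { }		/* hypothetical: 3x rd %ccr, or wr %asr27 */

	static void atomic_op_with_backoff(void)
	{
		unsigned long backoff = 1;

		while (cas_lost_race()) {
			unsigned long spins = backoff;

			/* Pre-T4 style: yield the strand 'backoff' times
			 * (~40-50 cycles each).  T4+ instead writes
			 * backoff << 7 into %asr27 once and blocks for the
			 * whole amount in a single wr. */
			while (spins--)
				yield_strand();

			/* Double the wait after each consecutive failure, capped. */
			if (backoff < BACKOFF_LIMIT)
				backoff <<= 1;
		}
	}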
index 9cdf52eec48ab222b551f5f9c94665c21fb0322d..721e25f0e2ea80f19d4a7868791d4653097a2124 100644 (file)
@@ -196,11 +196,16 @@ extern unsigned long get_wchan(struct task_struct *task);
 #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])
 
+/* Please see the commentary in asm/backoff.h for a description of
+ * what these instructions are doing and how they have been chosen.
+ * To make a long story short, we are trying to yield the current cpu
+ * strand during busy loops.
+ */
 #define cpu_relax()    asm volatile("\n99:\n\t"                        \
                                     "rd        %%ccr, %%g0\n\t"        \
                                     "rd        %%ccr, %%g0\n\t"        \
                                     "rd        %%ccr, %%g0\n\t"        \
-                                    ".section  .pause_patch,\"ax\"\n\t"\
+                                    ".section  .pause_3insn_patch,\"ax\"\n\t"\
                                     ".word     99b\n\t"                \
                                     "wr        %%g0, 128, %%asr27\n\t" \
                                     "nop\n\t"                          \
index 51742df63c757ee3dd017cbae8a42c1100e8f577..cc3c5cb47cdaa4d18e14533f5078b2dbd5f51649 100644 (file)
@@ -63,8 +63,8 @@ struct pause_patch_entry {
        unsigned int    addr;
        unsigned int    insns[3];
 };
-extern struct pause_patch_entry __pause_patch,
-       __pause_patch_end;
+extern struct pause_patch_entry __pause_3insn_patch,
+       __pause_3insn_patch_end;
 
 extern void __init per_cpu_patch(void);
 extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
index b45cff408de307e5ef123393f2fdca9ce9e9a8a9..0eaf0059aaefa483bdbb228bf38c8154a7d9656b 100644 (file)
@@ -320,8 +320,8 @@ static void __init pause_patch(void)
 {
        struct pause_patch_entry *p;
 
-       p = &__pause_patch;
-       while (p < &__pause_patch_end) {
+       p = &__pause_3insn_patch;
+       while (p < &__pause_3insn_patch_end) {
                unsigned long i, addr = p->addr;
 
                for (i = 0; i < 3; i++) {
index 847f9f793618f097eec67a558d4adbb713e46a47..0bacceb19150ebff3a9e9088b11b66d8f5d4490b 100644 (file)
@@ -132,10 +132,10 @@ SECTIONS
                *(.popc_6insn_patch)
                __popc_6insn_patch_end = .;
        }
-       .pause_patch : {
-               __pause_patch = .;
-               *(.pause_patch)
-               __pause_patch_end = .;
+       .pause_3insn_patch : {
+               __pause_3insn_patch = .;
+               *(.pause_3insn_patch)
+               __pause_3insn_patch_end = .;
        }
        PERCPU_SECTION(SMP_CACHE_BYTES)