hw_breakpoint: Simplify list/idx mess in toggle_bp_slot() paths
author		Oleg Nesterov <oleg@redhat.com>
		Thu, 20 Jun 2013 15:50:11 +0000 (17:50 +0200)
committer	Ingo Molnar <mingo@kernel.org>
		Thu, 20 Jun 2013 15:58:55 +0000 (17:58 +0200)
The enable/disable logic in toggle_bp_slot() is not symmetrical
and imho very confusing. On the disable path, list_del() runs
before toggle_bp_task_slot(), so the "old_count" computed there
via task_bp_pinned() is in fact the new count: this bp has
already been removed from the list.

Change toggle_bp_slot() to always call list_add()/list_del() after
toggle_bp_task_slot(). This way old_idx = task_bp_pinned() - 1 is
the entry to decrement, new_idx = old_idx +/- weight is the entry
to increment, and the code/logic becomes obvious.
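
To see the new accounting in isolation, here is a minimal user-space
sketch (hypothetical names: toggle_task_slot(), task_pinned and
MAX_SLOTS stand in for toggle_bp_task_slot(), task_bp_pinned() and
the per-cpu nr_task_bp_pinned array; in the kernel the count comes
from walking bp_task_head, which is exactly why list_add()/list_del()
must run afterwards):

	#include <stdio.h>
	#include <stdbool.h>

	#define MAX_SLOTS 8

	/* tsk_pinned[n-1] is the number of tasks having n > 0 breakpoints */
	static unsigned int tsk_pinned[MAX_SLOTS];

	/* stand-in for task_bp_pinned(): this task's installed bp weight */
	static int task_pinned;

	static void toggle_task_slot(bool enable, int weight)
	{
		int old_idx = task_pinned - 1;	/* entry to decrement */
		int new_idx;			/* entry to increment */

		if (enable)
			new_idx = old_idx + weight;
		else
			new_idx = old_idx - weight;

		if (old_idx >= 0)
			tsk_pinned[old_idx]--;
		if (new_idx >= 0)
			tsk_pinned[new_idx]++;

		/* mimics the list_add()/list_del() done by the caller */
		task_pinned = new_idx + 1;
	}

	int main(void)
	{
		toggle_task_slot(true, 1);  /* 0 -> 1 bp:  tsk_pinned[0]++ */
		toggle_task_slot(true, 1);  /* 1 -> 2 bps: [0]--, [1]++ */
		toggle_task_slot(false, 1); /* 2 -> 1 bp:  [1]--, [0]++ */

		for (int n = 1; n <= 2; n++)
			printf("tasks with %d bp(s): %u\n", n, tsk_pinned[n - 1]);
		return 0;
	}

An enable followed by a disable with the same weight leaves
tsk_pinned[] unchanged; with the old asymmetric ordering this
round-trip property was much harder to see.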

Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20130620155011.GA6330@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/events/hw_breakpoint.c

index ef8ebe560949266fef5d343d2e4b5447b2817338..dee0148dcf543a59c01f575d50d74670915cd0bb 100644
@@ -185,26 +185,20 @@ fetch_this_slot(struct bp_busy_slots *slots, int weight)
 static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
                                enum bp_type_idx type, int weight)
 {
-       unsigned int *tsk_pinned;
-       int old_count = 0;
-       int old_idx = 0;
-       int idx = 0;
-
-       old_count = task_bp_pinned(cpu, bp, type);
-       old_idx = old_count - 1;
-       idx = old_idx + weight;
-
-       /* tsk_pinned[n] is the number of tasks having n breakpoints */
-       tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
-       if (enable) {
-               tsk_pinned[idx]++;
-               if (old_count > 0)
-                       tsk_pinned[old_idx]--;
-       } else {
-               tsk_pinned[idx]--;
-               if (old_count > 0)
-                       tsk_pinned[old_idx]++;
-       }
+       /* tsk_pinned[n-1] is the number of tasks having n>0 breakpoints */
+       unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
+       int old_idx, new_idx;
+
+       old_idx = task_bp_pinned(cpu, bp, type) - 1;
+       if (enable)
+               new_idx = old_idx + weight;
+       else
+               new_idx = old_idx - weight;
+
+       if (old_idx >= 0)
+               tsk_pinned[old_idx]--;
+       if (new_idx >= 0)
+               tsk_pinned[new_idx]++;
 }
 
 /*
@@ -228,10 +222,6 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
        }
 
        /* Pinned counter task profiling */
-
-       if (!enable)
-               list_del(&bp->hw.bp_list);
-
        if (cpu >= 0) {
                toggle_bp_task_slot(bp, cpu, enable, type, weight);
        } else {
@@ -241,6 +231,8 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 
        if (enable)
                list_add_tail(&bp->hw.bp_list, &bp_task_head);
+       else
+               list_del(&bp->hw.bp_list);
 }
 
 /*