hw_breakpoint: Introduce cpumask_of_bp()
author Oleg Nesterov <oleg@redhat.com>
Thu, 20 Jun 2013 15:50:15 +0000 (17:50 +0200)
committer Ingo Molnar <mingo@kernel.org>
Thu, 20 Jun 2013 15:58:56 +0000 (17:58 +0200)
Add a trivial helper which simply returns cpumask_of(bp->cpu) when
bp->cpu >= 0, or cpu_possible_mask otherwise.

Change fetch_bp_busy_slots() and toggle_bp_slot() to always iterate with
for_each_cpu(cpumask_of_bp()), which simplifies the code and avoids
code duplication.

Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20130620155015.GA6340@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
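
For illustration only, here is a rough userspace analogue of the pattern this
patch introduces: a small helper picks the iteration set once (one CPU, or all
of them), and the callers then always run the same loop instead of branching
on bp->cpu. The names mirror cpumask_of_bp() and for_each_cpu() from the diff
below, but the bitmask type, NR_CPUS value and struct bp are simplified
stand-ins, not kernel code.

        #include <stdio.h>

        #define NR_CPUS 4

        /* simplified stand-in for the breakpoint event: only the cpu field matters here */
        struct bp {
                int cpu;        /* >= 0: pinned to one CPU, -1: any CPU */
        };

        static const unsigned long cpu_possible_mask = (1UL << NR_CPUS) - 1;

        /* analogue of cpumask_of_bp(): a one-bit mask if bp->cpu >= 0, else all CPUs */
        static unsigned long cpumask_of_bp(const struct bp *bp)
        {
                if (bp->cpu >= 0)
                        return 1UL << bp->cpu;
                return cpu_possible_mask;
        }

        /* analogue of for_each_cpu(): iterate over the CPUs set in @mask */
        #define for_each_cpu(cpu, mask) \
                for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
                        if ((mask) & (1UL << (cpu)))

        int main(void)
        {
                struct bp pinned = { .cpu = 2 };
                struct bp any    = { .cpu = -1 };
                int cpu;

                /* one loop now covers both the per-CPU and the "all CPUs" case */
                for_each_cpu(cpu, cpumask_of_bp(&pinned))
                        printf("pinned breakpoint: cpu %d\n", cpu);     /* cpu 2 only */

                for_each_cpu(cpu, cpumask_of_bp(&any))
                        printf("any-cpu breakpoint: cpu %d\n", cpu);    /* cpus 0..3 */

                return 0;
        }

In the kernel itself the mask is a struct cpumask and for_each_cpu() comes
from <linux/cpumask.h>; the sketch only demonstrates the control-flow
simplification the commit message describes.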
kernel/events/hw_breakpoint.c

index 5cd4f6d9652c18104b9272ea8deeb9f680dcfe9b..9c71445328aff6cdc6fd5f8dcc5406bfd8ba36f7 100644
@@ -127,6 +127,13 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
        return count;
 }
 
+static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
+{
+       if (bp->cpu >= 0)
+               return cpumask_of(bp->cpu);
+       return cpu_possible_mask;
+}
+
 /*
  * Report the number of pinned/un-pinned breakpoints we have in
  * a given cpu (cpu > -1) or in all of them (cpu = -1).
@@ -135,25 +142,13 @@ static void
 fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                    enum bp_type_idx type)
 {
-       int cpu = bp->cpu;
-       struct task_struct *tsk = bp->hw.bp_target;
-
-       if (cpu >= 0) {
-               slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
-               if (!tsk)
-                       slots->pinned += max_task_bp_pinned(cpu, type);
-               else
-                       slots->pinned += task_bp_pinned(cpu, bp, type);
-               slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
-
-               return;
-       }
+       const struct cpumask *cpumask = cpumask_of_bp(bp);
+       int cpu;
 
-       for_each_possible_cpu(cpu) {
-               unsigned int nr;
+       for_each_cpu(cpu, cpumask) {
+               unsigned int nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
 
-               nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
-               if (!tsk)
+               if (!bp->hw.bp_target)
                        nr += max_task_bp_pinned(cpu, type);
                else
                        nr += task_bp_pinned(cpu, bp, type);
@@ -205,25 +200,21 @@ static void
 toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
               int weight)
 {
-       int cpu = bp->cpu;
-       struct task_struct *tsk = bp->hw.bp_target;
+       const struct cpumask *cpumask = cpumask_of_bp(bp);
+       int cpu;
 
        if (!enable)
                weight = -weight;
 
        /* Pinned counter cpu profiling */
-       if (!tsk) {
-               per_cpu(nr_cpu_bp_pinned[type], cpu) += weight;
+       if (!bp->hw.bp_target) {
+               per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
                return;
        }
 
        /* Pinned counter task profiling */
-       if (cpu >= 0) {
+       for_each_cpu(cpu, cpumask)
                toggle_bp_task_slot(bp, cpu, type, weight);
-       } else {
-               for_each_possible_cpu(cpu)
-                       toggle_bp_task_slot(bp, cpu, type, weight);
-       }
 
        if (enable)
                list_add_tail(&bp->hw.bp_list, &bp_task_head);