rtime.felk.cvut.cz Git - zynq/linux.git/blobdiff - kernel/events/core.c
Apply preempt_rt patch-4.9-rt1.patch.xz
index 6ee1febdf6ff838b21db38277e0760ae0909e91f..3748cb7b2d6e317d2e22a9372923f17fcc08cdb1 100644 (file)
@@ -903,17 +903,14 @@ list_update_cgroup_event(struct perf_event *event,
         */
        cpuctx = __get_cpu_context(ctx);
 
-       /* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */
-       if (perf_cgroup_from_task(current, ctx) != event->cgrp) {
-               /*
-                * We are removing the last cpu event in this context.
-                * If that event is not active in this cpu, cpuctx->cgrp
-                * should've been cleared by perf_cgroup_switch.
-                */
-               WARN_ON_ONCE(!add && cpuctx->cgrp);
-               return;
-       }
-       cpuctx->cgrp = add ? event->cgrp : NULL;
+       /*
+        * cpuctx->cgrp is NULL until a cgroup event is sched in or
+        * ctx->nr_cgroups == 0.
+        */
+       if (add && perf_cgroup_from_task(current, ctx) == event->cgrp)
+               cpuctx->cgrp = event->cgrp;
+       else if (!add)
+               cpuctx->cgrp = NULL;
 }
 
 #else /* !CONFIG_CGROUP_PERF */
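The rewritten branch above narrows when the per-CPU cgroup pointer is published: on add, cpuctx->cgrp is set only if the task currently running on this CPU belongs to the event's cgroup, while removing the last cgroup event always clears it. The old code instead bailed out (with a warning) whenever the current task's cgroup did not match, leaving cpuctx->cgrp untouched on the add path. A minimal user-space model of the new decision, with stand-in types for the kernel's perf structures (all names below are hypothetical, for illustration only):

    #include <stdbool.h>
    #include <stddef.h>

    struct cgroup;                              /* opaque stand-in */
    struct cpu_ctx { struct cgroup *cgrp; };    /* models perf_cpu_context */

    /*
     * Models the patched list_update_cgroup_event(): publish the
     * event's cgroup only when it matches the on-CPU task's cgroup;
     * clear unconditionally when the last cgroup event goes away.
     */
    static void update_cpu_cgrp(struct cpu_ctx *cpuctx,
                                struct cgroup *event_cgrp,
                                struct cgroup *task_cgrp,
                                bool add)
    {
            if (add && task_cgrp == event_cgrp)
                    cpuctx->cgrp = event_cgrp;
            else if (!add)
                    cpuctx->cgrp = NULL;
    }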
@@ -1053,6 +1050,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
        raw_spin_lock_init(&cpuctx->hrtimer_lock);
        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        timer->function = perf_mux_hrtimer_handler;
+       timer->irqsafe = 1;
 }
 
 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
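This one-line addition (and the matching one in the next hunk) is the RT-specific part of the change: struct hrtimer only gains an irqsafe field once the PREEMPT_RT patch is applied. RT defers ordinary hrtimer callbacks to softirq context so they become preemptible; setting irqsafe = 1 opts a timer out of that deferral, keeping its callback in hard interrupt context. That matters here because the multiplexing handler runs under the raw cpuctx->hrtimer_lock initialized just above and must execute on the local CPU with interrupts off. A sketch of the same idiom in a hypothetical RT-patched module (the demo_* names are made up, and the irqsafe field does not exist in mainline):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer demo_timer;

    static enum hrtimer_restart demo_handler(struct hrtimer *t)
    {
            /* Runs in hard-IRQ context because of irqsafe below. */
            return HRTIMER_NORESTART;
    }

    static void demo_timer_setup(void)
    {
            hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            demo_timer.function = demo_handler;
            /* RT-patch-only field: skip softirq deferral on PREEMPT_RT. */
            demo_timer.irqsafe = 1;
    }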
@@ -8338,6 +8336,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
 
        hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hwc->hrtimer.function = perf_swevent_hrtimer;
+       hwc->hrtimer.irqsafe = 1;
 
        /*
         * Since hrtimers have a fixed rate, we can do a static freq->period
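The software-event timer gets the same treatment. perf_swevent_hrtimer takes a sample and re-arms itself at the event's period, so under RT it must likewise stay in hard interrupt context rather than be pushed to a softirq thread. A hedged sketch of such a periodic, self-rearming irqsafe timer (the sample_* names and the 1 ms period are assumptions, not taken from the patch):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer sample_timer;
    static u64 sample_period_ns = 1000000;      /* assumed 1 ms period */

    static enum hrtimer_restart sample_fn(struct hrtimer *t)
    {
            /* ... take a sample here ..., then re-arm for the next period. */
            hrtimer_forward_now(t, ns_to_ktime(sample_period_ns));
            return HRTIMER_RESTART;
    }

    static void sample_timer_init(void)
    {
            hrtimer_init(&sample_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            sample_timer.function = sample_fn;
            sample_timer.irqsafe = 1;   /* RT-patch-only field, as above */
    }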