Apply preempt_rt patch-4.9-rt1.patch.xz
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f3d27a6dee09dfa48dd7b78bc024f4a9809e8f88..31d06d463b2c6e887bda1ace7e73bea45ede23ba 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
-       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
+       swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
 }
 
 /*
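On PREEMPT_RT, a wait_queue_head_t is protected by a spinlock_t, which the patch set turns into a sleeping lock, and wake_up_all() may walk an unbounded list of waiters while holding it. The hunk above therefore moves the freeze wait onto the simple-waitqueue API, whose head is guarded by a raw spinlock. A minimal sketch of that API as it exists in include/linux/swait.h around v4.9 (the demo_* identifiers are hypothetical):

    #include <linux/swait.h>

    static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);   /* raw-spinlock-protected head */
    static bool demo_done;

    static void demo_wait(void)
    {
            /* Sleep until demo_done is observed true; safe on RT. */
            swait_event(demo_wq, demo_done);
    }

    static void demo_signal(void)
    {
            demo_done = true;
            swake_up_all(&demo_wq);             /* wake every waiter */
    }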
@@ -110,7 +110,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
                percpu_ref_reinit(&q->q_usage_counter);
-               wake_up_all(&q->mq_freeze_wq);
+               swake_up_all(&q->mq_freeze_wq);
        }
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
@@ -129,7 +129,7 @@ void blk_mq_wake_waiters(struct request_queue *q)
         * dying, we need to ensure that processes currently waiting on
         * the queue are notified as well.
         */
-       wake_up_all(&q->mq_freeze_wq);
+       swake_up_all(&q->mq_freeze_wq);
 }
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
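The two hunks above convert the wake side of the same waitqueue. For swake_up_all() to compile, q->mq_freeze_wq must itself become a simple waitqueue; the rt patch makes that change elsewhere, in struct request_queue in include/linux/blkdev.h, roughly as follows:

    -       wait_queue_head_t       mq_freeze_wq;
    +       struct swait_queue_head mq_freeze_wq;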
@@ -177,6 +177,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
        rq->resid_len = 0;
        rq->sense = NULL;
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+       INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
        INIT_LIST_HEAD(&rq->timeout_list);
        rq->timeout = 0;
 
@@ -345,6 +348,17 @@ void blk_mq_end_request(struct request *rq, int error)
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+void __blk_mq_complete_request_remote_work(struct work_struct *work)
+{
+       struct request *rq = container_of(work, struct request, work);
+
+       rq->q->softirq_done_fn(rq);
+}
+
+#else
+
 static void __blk_mq_complete_request_remote(void *data)
 {
        struct request *rq = data;
@@ -352,6 +366,8 @@ static void __blk_mq_complete_request_remote(void *data)
        rq->q->softirq_done_fn(rq);
 }
 
+#endif
+
 static void blk_mq_ipi_complete_request(struct request *rq)
 {
        struct blk_mq_ctx *ctx = rq->mq_ctx;
@@ -363,19 +379,23 @@ static void blk_mq_ipi_complete_request(struct request *rq)
                return;
        }
 
-       cpu = get_cpu();
+       cpu = get_cpu_light();
        if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
                shared = cpus_share_cache(cpu, ctx->cpu);
 
        if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+               schedule_work_on(ctx->cpu, &rq->work);
+#else
                rq->csd.func = __blk_mq_complete_request_remote;
                rq->csd.info = rq;
                rq->csd.flags = 0;
                smp_call_function_single_async(ctx->cpu, &rq->csd);
+#endif
        } else {
                rq->q->softirq_done_fn(rq);
        }
-       put_cpu();
+       put_cpu_light();
 }
 
 static void __blk_mq_complete_request(struct request *rq)
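Two related changes in the hunk above. First, smp_call_function_single_async() runs softirq_done_fn() from hard-IRQ context on the target CPU, where sleeping locks are forbidden; the RT branch defers to schedule_work_on() instead, so the completion runs in a preemptible kworker on the submitting CPU. Second, get_cpu() disables preemption outright, while the light variants only forbid migration. The rt patch defines them in include/linux/smp.h, approximately:

    #define get_cpu_light()         ({ migrate_disable(); smp_processor_id(); })
    #define put_cpu_light()         migrate_enable()

Between the two calls the task stays on its current CPU but remains preemptible, so it may still take sleeping locks, including RT-converted spinlocks.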
@@ -917,14 +937,14 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
                return;
 
        if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
-               int cpu = get_cpu();
+               int cpu = get_cpu_light();
                if (cpumask_test_cpu(cpu, hctx->cpumask)) {
                        __blk_mq_run_hw_queue(hctx);
-                       put_cpu();
+                       put_cpu_light();
                        return;
                }
 
-               put_cpu();
+               put_cpu_light();
        }
 
        kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
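The fast path in blk_mq_run_hw_queue() gets the same treatment: the hardware queue is still run on a CPU from hctx->cpumask, but the task is merely pinned, not made non-preemptible. The generic shape of the conversion (a sketch, not from the patch; cpu_is_suitable() and do_per_cpu_work() are hypothetical):

    int cpu = get_cpu_light();      /* pin to this CPU, stay preemptible */
    if (cpu_is_suitable(cpu))
            do_per_cpu_work();      /* may take sleeping locks on RT */
    put_cpu_light();                /* allow migration again */

The slow path via kblockd_schedule_work_on() is untouched, since a workqueue item already runs in a schedulable context.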