Apply preempt_rt patch-4.9-rt1.patch.xz
diff --git a/block/blk-core.c b/block/blk-core.c
index 14d7c0740dc07aa82e6d85b721daa5a7045e14f7..dfd905bea77c2a928779c6d21e84d462db427c7e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 
        INIT_LIST_HEAD(&rq->queuelist);
        INIT_LIST_HEAD(&rq->timeout_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+       INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
        rq->cpu = -1;
        rq->q = q;
        rq->__sector = (sector_t) -1;
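
On PREEMPT_RT_FULL, completing a request on the submitting CPU via IPI is not an option: the IPI callback runs in hard interrupt context, where RT forbids the work the completion handler does. The hunk above therefore gives every request a work item so the completion can run from a kworker instead. A hedged sketch of the other half of this change (the handler lives in blk-mq.c, which this diff does not show; the wiring below is assumed from the function name):

	/* Runs in process context on the target CPU instead of in an IPI. */
	void __blk_mq_complete_request_remote_work(struct work_struct *work)
	{
		struct request *rq = container_of(work, struct request, work);

		/* Invoke the driver's completion callback. */
		rq->q->softirq_done_fn(rq);
	}

	/* Assumed dispatch site in blk_mq_complete_request(): */
	#ifdef CONFIG_PREEMPT_RT_FULL
		schedule_work_on(ctx->cpu, &rq->work);
	#else
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	#endif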
@@ -233,7 +236,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
  **/
 void blk_start_queue(struct request_queue *q)
 {
-       WARN_ON(!irqs_disabled());
+       WARN_ON_NONRT(!irqs_disabled());
 
        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
        __blk_run_queue(q);
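
blk_start_queue() is called with the queue lock held; on mainline that implies interrupts are off, but on RT the queue lock is a sleeping spinlock and interrupts stay enabled, so the assertion would fire constantly. WARN_ON_NONRT() keeps the check for non-RT builds only. A sketch of the macro as the RT patch is assumed to define it, alongside the existing WARN_ON() machinery:

	#ifdef CONFIG_PREEMPT_RT_BASE
	# define WARN_ON_NONRT(condition)	do { } while (0)
	#else
	# define WARN_ON_NONRT(condition)	WARN_ON(condition)
	#endif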
@@ -659,7 +662,7 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
                if (nowait)
                        return -EBUSY;
 
-               ret = wait_event_interruptible(q->mq_freeze_wq,
+               ret = swait_event_interruptible(q->mq_freeze_wq,
                                !atomic_read(&q->mq_freeze_depth) ||
                                blk_queue_dying(q));
                if (blk_queue_dying(q))
@@ -679,7 +682,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
        struct request_queue *q =
                container_of(ref, struct request_queue, q_usage_counter);
 
-       wake_up_all(&q->mq_freeze_wq);
+       swake_up_all(&q->mq_freeze_wq);
 }
 
 static void blk_rq_timed_out_timer(unsigned long data)
@@ -748,7 +751,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        q->bypass_depth = 1;
        __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
 
-       init_waitqueue_head(&q->mq_freeze_wq);
+       init_swait_queue_head(&q->mq_freeze_wq);
 
        /*
         * Init percpu_ref in atomic mode so that it's faster to shutdown.
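
The three hunks above move q->mq_freeze_wq from a regular wait queue to a simple wait queue (swait). A regular waitqueue's internal lock becomes a sleeping spinlock on RT, so wake_up_all() may not be called from the atomic contexts the percpu_ref release callback can run in; swait keeps its internal lock raw. The struct request_queue member correspondingly changes type from wait_queue_head_t to struct swait_queue_head in blkdev.h (not part of this file). A hedged, self-contained sketch of the same wait/wake pattern, with made-up names:

	#include <linux/swait.h>

	static DECLARE_SWAIT_QUEUE_HEAD(freeze_wq);	/* hypothetical */
	static atomic_t freeze_depth = ATOMIC_INIT(0);

	static int wait_until_unfrozen(void)
	{
		/* Sleeps until the condition holds; interruptible. */
		return swait_event_interruptible(freeze_wq,
				!atomic_read(&freeze_depth));
	}

	static void thaw(void)
	{
		if (atomic_dec_and_test(&freeze_depth))
			swake_up_all(&freeze_wq);	/* raw-lock based wakeup */
	}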
@@ -3177,7 +3180,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
                blk_run_queue_async(q);
        else
                __blk_run_queue(q);
-       spin_unlock(q->queue_lock);
+       spin_unlock_irq(q->queue_lock);
 }
 
 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@@ -3225,7 +3228,6 @@ EXPORT_SYMBOL(blk_check_plugged);
 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
        struct request_queue *q;
-       unsigned long flags;
        struct request *rq;
        LIST_HEAD(list);
        unsigned int depth;
@@ -3245,11 +3247,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
        q = NULL;
        depth = 0;
 
-       /*
-        * Save and disable interrupts here, to avoid doing it for every
-        * queue lock we have to take.
-        */
-       local_irq_save(flags);
        while (!list_empty(&list)) {
                rq = list_entry_rq(list.next);
                list_del_init(&rq->queuelist);
@@ -3262,7 +3259,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                                queue_unplugged(q, depth, from_schedule);
                        q = rq->q;
                        depth = 0;
-                       spin_lock(q->queue_lock);
+                       spin_lock_irq(q->queue_lock);
                }
 
                /*
@@ -3289,8 +3286,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
         */
        if (q)
                queue_unplugged(q, depth, from_schedule);
-
-       local_irq_restore(flags);
 }
 
 void blk_finish_plug(struct blk_plug *plug)
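
The last five hunks are one change: blk_flush_plug_list() used to open a single local_irq_save() region so the per-queue spin_lock()/spin_unlock() calls inside it stayed cheap. On RT that is inverted: queue_lock is a sleeping lock and must not be taken with interrupts disabled, so the irq bracket is dropped and each lock operation becomes the _irq variant, which on RT reduces to a plain sleeping lock and on mainline disables interrupts per acquisition. The pattern, side by side (illustrative only):

	/* Before: one irq-off region covering several queue locks. */
	local_irq_save(flags);
	spin_lock(q->queue_lock);
	/* ... dispatch plugged requests ... */
	spin_unlock(q->queue_lock);
	local_irq_restore(flags);

	/* After: interrupt handling folded into the lock itself.  On
	 * PREEMPT_RT_FULL, spin_lock_irq() does not disable interrupts
	 * at all; it just takes the (sleeping) lock.
	 */
	spin_lock_irq(q->queue_lock);
	/* ... dispatch plugged requests ... */
	spin_unlock_irq(q->queue_lock);

Note that queue_unplugged() is entered with the lock held and now exits through spin_unlock_irq(), pairing with the spin_lock_irq() taken in its caller.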