Apply preempt_rt patch-4.9-rt1.patch.xz
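
The hunks below add preempt_check_resched_rt() after each point where irq_poll re-enables interrupts (local_irq_restore()/local_irq_enable()) having raised IRQ_POLL_SOFTIRQ via __raise_softirq_irqoff(). On PREEMPT_RT, softirqs run in preemptible thread context, so raising the softirq with interrupts disabled only marks it pending; the wakeup behind it may set need_resched, and the added calls perform that reschedule check as soon as interrupts are back on. The helper itself is defined elsewhere in the RT patch, not in this file; as a rough sketch of its usual shape (the config symbol and the non-RT fallback below are assumptions based on the 4.9-rt series, not taken from this diff):

    /* Sketch only, not part of this diff: on RT kernels, check for a
     * pending reschedule now that it is safe to switch; on non-RT
     * kernels, compile down to a compiler barrier.
     */
    #ifdef CONFIG_PREEMPT_RT_BASE
    # define preempt_check_resched_rt()	preempt_check_resched()
    #else
    # define preempt_check_resched_rt()	barrier()
    #endif

Placing the call after, rather than before, local_irq_restore()/local_irq_enable() keeps each interrupts-off region unchanged and only adds the check once preemption can actually take place.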
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 1d6565e810309eb710523a814983849d76101c6b..b23a79761df7a6fa77a7631f49d24843de6c9a2d 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -36,6 +36,7 @@ void irq_poll_sched(struct irq_poll *iop)
        list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
        __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
        local_irq_restore(flags);
+       preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(irq_poll_sched);
 
@@ -71,6 +72,7 @@ void irq_poll_complete(struct irq_poll *iop)
        local_irq_save(flags);
        __irq_poll_complete(iop);
        local_irq_restore(flags);
+       preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(irq_poll_complete);
 
@@ -95,6 +97,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
                }
 
                local_irq_enable();
+               preempt_check_resched_rt();
 
                /* Even though interrupts have been re-enabled, this
                 * access is safe because interrupts can only add new
@@ -132,6 +135,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
                __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
 
        local_irq_enable();
+       preempt_check_resched_rt();
 }
 
 /**
@@ -195,6 +199,7 @@ static int irq_poll_cpu_dead(unsigned int cpu)
                         this_cpu_ptr(&blk_cpu_iopoll));
        __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
        local_irq_enable();
+       preempt_check_resched_rt();
 
        return 0;
 }