rtime.felk.cvut.cz Git - zynq/linux.git/commitdiff
seqlock: consolidate spin_lock/unlock waiting with spin_unlock_wait
authorNicholas Mc Guire <der.herr@hofr.at>
Mon, 2 Dec 2013 04:03:52 +0000 (23:03 -0500)
committerMichal Sojka <sojka@merica.cz>
Sun, 13 Sep 2015 07:47:38 +0000 (09:47 +0200)
since c2f21ce ("locking: Implement new raw_spinlock")
include/linux/spinlock.h includes spin_unlock_wait() to wait for a concurrent
holder of a lock. This patch just moves over to that API. spin_unlock_wait
covers both raw_spinlock_t and spinlock_t so it should be safe here as well.
The added RT-variant of read_seqbegin in include/linux/seqlock.h that is being
modified, was introduced by patch:
  seqlock-prevent-rt-starvation.patch

Behavior should be unchanged.

Signed-off-by: Nicholas Mc Guire <der.herr@hofr.at>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
include/linux/seqlock.h

index de343cda4680944cbbff807de3a621b990d1c389..4acd0e2fb5cbfcba5c84f8dbe644fefdeb43048f 100644 (file)
@@ -335,8 +335,7 @@ repeat:
                 * Take the lock and let the writer proceed (i.e. evtl
                 * boost it), otherwise we could loop here forever.
                 */
-               spin_lock(&sl->lock);
-               spin_unlock(&sl->lock);
+               spin_unlock_wait(&sl->lock);
                goto repeat;
        }
        return ret;
@@ -356,7 +355,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 static inline void write_seqlock(seqlock_t *sl)
 {
        spin_lock(&sl->lock);
-       __write_seqcount_begin(&sl->seqcount);
+       __raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock(seqlock_t *sl)
@@ -368,7 +367,7 @@ static inline void write_sequnlock(seqlock_t *sl)
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
        spin_lock_bh(&sl->lock);
-       __write_seqcount_begin(&sl->seqcount);
+       __raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock_bh(seqlock_t *sl)
@@ -380,7 +379,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
        spin_lock_irq(&sl->lock);
-       __write_seqcount_begin(&sl->seqcount);
+       __raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock_irq(seqlock_t *sl)
@@ -394,7 +393,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
        unsigned long flags;
 
        spin_lock_irqsave(&sl->lock, flags);
-       __write_seqcount_begin(&sl->seqcount);
+       __raw_write_seqcount_begin(&sl->seqcount);
        return flags;
 }