rtime.felk.cvut.cz Git - lincan.git/commitdiff
Correction of spinlock acquisition order.
author ppisa <ppisa>
Thu, 24 Jun 2004 13:10:44 +0000 (13:10 +0000)
committer ppisa <ppisa>
Thu, 24 Jun 2004 13:10:44 +0000 (13:10 +0000)
BUGFIX of a possible deadlock in the canqueue functions
on real SMP systems. A significantly faster implementation
of canque_edge_decref() is provided for systems
defining CAN_HAVE_ARCH_CMPXCHG.

lincan/include/can_queue.h
lincan/include/can_sysdep.h
lincan/src/can_quekern.c
lincan/src/can_queue.c
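
The deadlock addressed here is a lock-ordering one: before this change, paths
such as canque_activate_edge() and canque_edge_decref() took the per-end
spinlocks as inends first, then outends, and two edges running between the
same pair of ends in opposite directions see those ends swapped, so two CPUs
could each hold one lock while waiting for the other. The new
canque_edge_lock_both_ends() helper in the can_queue.h hunk below always
acquires the two locks in one fixed global order, by ascending address. Below
is a minimal user-space sketch of the same discipline, using POSIX mutexes
instead of the driver's can_spin_* wrappers (struct edge_ends, lock_both_ends()
and unlock_both_ends() are illustrative names only):

#include <pthread.h>

struct end {
        pthread_mutex_t lock;
};

/* Illustrative pair of ends, analogous to edge->inends / edge->outends. */
struct edge_ends {
        struct end *in;
        struct end *out;
};

/* Always take the lower-addressed lock first, so every path agrees on one
 * global order and no two CPUs can end up waiting on each other's lock. */
static void lock_both_ends(struct edge_ends *e)
{
        if (e->in < e->out) {
                pthread_mutex_lock(&e->in->lock);
                pthread_mutex_lock(&e->out->lock);
        } else {
                pthread_mutex_lock(&e->out->lock);
                if (e->out != e->in)
                        pthread_mutex_lock(&e->in->lock);
        }
}

static void unlock_both_ends(struct edge_ends *e)
{
        if (e->out != e->in)
                pthread_mutex_unlock(&e->out->lock);
        pthread_mutex_unlock(&e->in->lock);
}

The kernel helper additionally saves and restores the interrupt flags on the
first lock, which this sketch leaves out.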

index bcfa624adccf1a92487fdbf4c1dd4935b81eeada..3be84691cce7d0647c78051cbfc7bedd0911c002 100644 (file)
@@ -453,7 +453,8 @@ void canque_notify_bothends(struct canque_edge_t *qedge, int what)
  * @inends: input side of the edge
  *
  * Function call moves output side of the edge from idle onto active edges
- * list.
+ * list. This function has to be called with the edge reference count held,
+ * the same as for most other edge functions.
  */
 static inline
 void canque_activate_edge(struct canque_ends_t *inends, struct canque_edge_t *qedge)
@@ -462,19 +463,16 @@ void canque_activate_edge(struct canque_ends_t *inends, struct canque_edge_t *qe
        struct canque_ends_t *outends;
        if(qedge->edge_prio>=CANQUEUE_PRIO_NR)
                qedge->edge_prio=CANQUEUE_PRIO_NR-1;
-       can_spin_lock_irqsave(&inends->ends_lock, flags);
        if((outends=qedge->outends)){
-               can_spin_lock(&outends->ends_lock);
+               can_spin_lock_irqsave(&outends->ends_lock, flags);
                can_spin_lock(&qedge->fifo.fifo_lock);
                if(!canque_fifo_test_fl(&qedge->fifo,EMPTY)){
                        list_del(&qedge->activepeers);
                        list_add_tail(&qedge->activepeers,&outends->active[qedge->edge_prio]);
                }
                can_spin_unlock(&qedge->fifo.fifo_lock);
-               can_spin_unlock(&outends->ends_lock);
-
+               can_spin_unlock_irqrestore(&outends->ends_lock, flags);
        }
-       can_spin_unlock_irqrestore(&inends->ends_lock, flags);
 }
 
 /**
@@ -543,8 +541,12 @@ int canqueue_ends_flush_outlist(struct canque_ends_t *qends);
 
 /* edge reference and traversal functions */
 
-void canque_edge_do_dead(struct canque_edge_t *edge, int dead_fl);
+void canque_edge_do_dead(struct canque_edge_t *edge);
 
+/**
+ * canque_edge_incref - increments edge reference count
+ * @edge: pointer to the edge structure
+ */
 static inline
 void canque_edge_incref(struct canque_edge_t *edge)
 {
@@ -552,28 +554,79 @@ void canque_edge_incref(struct canque_edge_t *edge)
 }
 
 static inline
-void canque_edge_decref(struct canque_edge_t *edge)
+can_spin_irqflags_t canque_edge_lock_both_ends(struct canque_edge_t *edge)
+{
+       can_spin_irqflags_t  flags;
+       if(edge->inends<edge->outends) {
+               can_spin_lock_irqsave(&edge->inends->ends_lock, flags);
+               can_spin_lock(&edge->outends->ends_lock);
+       }else{
+               can_spin_lock_irqsave(&edge->outends->ends_lock, flags);
+               if(edge->outends!=edge->inends) can_spin_lock(&edge->inends->ends_lock);
+       }
+       return flags;   
+}
+
+static inline
+void canque_edge_unlock_both_ends(struct canque_edge_t *edge, can_spin_irqflags_t flags)
+{
+       if(edge->outends!=edge->inends) can_spin_unlock(&edge->outends->ends_lock);
+       can_spin_unlock_irqrestore(&edge->inends->ends_lock, flags);
+}
+
+/* Non-inlined version of edge reference decrement */
+void __canque_edge_decref(struct canque_edge_t *edge);
+
+static inline
+void __canque_edge_decref_body(struct canque_edge_t *edge)
 {
        can_spin_irqflags_t flags;
-       struct canque_ends_t *inends=edge->inends;
-       struct canque_ends_t *outends=edge->outends;
-       int dead_fl;
+       int dead_fl=0;
        
-       can_spin_lock_irqsave(&inends->ends_lock, flags);
-       can_spin_lock(&outends->ends_lock);
+       flags=canque_edge_lock_both_ends(edge);
        if(atomic_dec_and_test(&edge->edge_used)) {
-               dead_fl=canque_fifo_test_and_set_fl(&edge->fifo,DEAD);
+               dead_fl=!canque_fifo_test_and_set_fl(&edge->fifo,DEAD);
                /* Because of former evolution of edge references 
                   management notify of CANQUEUE_NOTIFY_NOUSR could
                   be moved to canque_edge_do_dead :-) */
-               can_spin_unlock(&outends->ends_lock);
-               can_spin_unlock_irqrestore(&inends->ends_lock, flags);
-               canque_edge_do_dead(edge, dead_fl);
-       } else {
-               can_spin_unlock(&outends->ends_lock);
-               can_spin_unlock_irqrestore(&inends->ends_lock, flags);
        }
+       canque_edge_unlock_both_ends(edge, flags);
+       if(dead_fl) canque_edge_do_dead(edge);
+}
+
+#ifndef CAN_HAVE_ARCH_CMPXCHG
+/**
+ * canque_edge_decref - decrements edge reference count
+ * @edge: pointer to the edge structure
+ *
+ * This function has to be called without the locks of either edge end held.
+ * If the reference count drops to 0, the function canque_edge_do_dead()
+ * is called.
+ */
+static inline
+void canque_edge_decref(struct canque_edge_t *edge)
+{
+       __canque_edge_decref_body(edge);
+}
+#else
+static inline
+void canque_edge_decref(struct canque_edge_t *edge)
+{
+       int x, y;
+       
+        x = atomic_read(&edge->edge_used);
+        do{
+               if(x<=1)
+                       return __canque_edge_decref(edge);
+               y=x;
+               /* This code strongly depends on the definition of atomic_t !!!! */
+               /* x=cmpxchg(&edge->edge_used, x, x-1); */
+               /* Next alternative could be more portable */
+               x=__cmpxchg(&edge->edge_used, x, x-1, sizeof(atomic_t));
+               /* If even this does not help, comment out CAN_HAVE_ARCH_CMPXCHG in can_sysdep.h */
+       } while(x!=y);
 }
+#endif
 
 static inline
 struct canque_edge_t *canque_first_inedge(struct canque_ends_t *qends)
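
The CAN_HAVE_ARCH_CMPXCHG variant of canque_edge_decref() above avoids taking
both end locks in the common case where the reference count is still above
one; it only drops into the locked __canque_edge_decref() slow path when the
count might reach zero. Below is a rough sketch of that decrement-unless-last
pattern on a plain int counter, using the GCC __sync_val_compare_and_swap()
builtin rather than the kernel cmpxchg() (decref_fast() is an illustrative
name, not part of the driver):

#include <stdbool.h>

/* Returns true when the count may reach zero and the caller has to run the
 * locked slow path (in the driver: __canque_edge_decref()); returns false
 * when the count was decremented lock-free. */
static bool decref_fast(int *refcnt)
{
        int old = *refcnt;      /* seed value for the compare-and-swap loop */

        while (old > 1) {
                /* Try to replace old with old-1; on failure 'cur' holds the
                 * value some other CPU installed in the meantime. */
                int cur = __sync_val_compare_and_swap(refcnt, old, old - 1);
                if (cur == old)
                        return false;   /* decremented without any lock */
                old = cur;
        }
        return true;    /* count is 1 or less: let the locked path decide */
}

The in-tree version operates on atomic_t, which is why the hunk warns that it
depends on the definition of atomic_t and why the plain locked body is kept
as the fallback when CAN_HAVE_ARCH_CMPXCHG is not defined.
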
index 07e6021a615884f8d871ffb78ea3f607fa14c525..53e2ce1cde76dcde50420cc0987dce0499949c95 100644 (file)
   #define del_timer_sync del_timer
 #endif /* <2.4.0 */
 
+#ifdef __HAVE_ARCH_CMPXCHG
+  #define CAN_HAVE_ARCH_CMPXCHG
+#endif
+
 #ifndef CAN_WITH_RTL
 /* Standard LINUX kernel */
 
index 140a0850f78f0def2aa76a7b416b94c9b1df0a4a..ac1554c7cb52b08beed9fc4380e5eada9f9ce5eb 100644 (file)
@@ -122,12 +122,10 @@ static inline void canque_dead_tasklet_schedule(void)
 }
 
 
-void canque_edge_do_dead(struct canque_edge_t *edge, int dead_fl)
+void canque_edge_do_dead(struct canque_edge_t *edge)
 {
        can_spin_irqflags_t flags;
        
-       if(dead_fl) return;
-       
        canque_notify_bothends(edge,CANQUEUE_NOTIFY_NOUSR);
     #ifdef CAN_WITH_RTL
        /* The problem of the above call is, that in RT-Linux to Linux notify
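
With the dead_fl parameter gone, canque_edge_do_dead() no longer decides for
itself whether to run: the decref path calls it only when it was the context
that flipped the FIFO DEAD flag from clear to set, so the tear-down runs
exactly once however the last references are dropped. A compact sketch of
this "first setter does the cleanup" idiom with a C11 atomic_flag (teardown()
and drop_last_reference() are illustrative stand-ins, not driver functions):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag dead = ATOMIC_FLAG_INIT;

/* Stand-in for canque_edge_do_dead(): must run at most once. */
static void teardown(void)
{
        puts("tear-down runs exactly once");
}

static void drop_last_reference(void)
{
        /* atomic_flag_test_and_set() returns the previous flag value, so it
         * is false for exactly one caller; only that caller tears the object
         * down, every other caller sees DEAD already set and does nothing. */
        if (!atomic_flag_test_and_set(&dead))
                teardown();
}
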
index dfdafbb132268b7665729121d3d29d925153b217..a75bb467899dba2291d09dd25a81187bc5bc7701 100644 (file)
@@ -97,6 +97,11 @@ int canque_fifo_init_slots(struct canque_fifo_t *fifo)
  list_entry(ptr, type, member)
 */
 
+void __canque_edge_decref(struct canque_edge_t *edge)
+{
+       __canque_edge_decref_body(edge);
+}
+
 /**
  * canque_get_inslot - finds one outgoing edge and allocates slot from it
  * @qends: ends structure belonging to calling communication object
@@ -519,8 +524,7 @@ int canqueue_connect_edge(struct canque_edge_t *qedge, struct canque_ends_t *ine
        if(qedge == NULL) return -1;
        DEBUGQUE("canqueue_connect_edge %d\n",qedge->edge_num);
        canque_edge_incref(qedge);
-       can_spin_lock_irqsave(&inends->ends_lock, flags);
-       can_spin_lock(&outends->ends_lock);
+       flags=canque_edge_lock_both_ends(qedge);
        can_spin_lock(&qedge->fifo.fifo_lock);
        qedge->inends=inends;
        list_add(&qedge->inpeers,&inends->inlist);
@@ -528,8 +532,7 @@ int canqueue_connect_edge(struct canque_edge_t *qedge, struct canque_ends_t *ine
        list_add(&qedge->outpeers,&outends->outlist);
        list_add(&qedge->activepeers,&outends->idle);
        can_spin_unlock(&qedge->fifo.fifo_lock);
-       can_spin_unlock(&outends->ends_lock);
-       can_spin_unlock_irqrestore(&inends->ends_lock, flags);
+       canque_edge_unlock_both_ends(qedge, flags);
        canque_notify_bothends(qedge, CANQUEUE_NOTIFY_ATTACH);
 
        if(canque_fifo_test_and_set_fl(&qedge->fifo, READY))
@@ -551,9 +554,17 @@ int canqueue_disconnect_edge(struct canque_edge_t *qedge)
        struct canque_ends_t *inends, *outends;
 
        inends=qedge->inends;
-       if(inends) can_spin_lock_irqsave(&inends->ends_lock,flags);
        outends=qedge->outends;
-       if(outends) can_spin_lock(&outends->ends_lock);
+
+       if(inends && outends) {
+               flags=canque_edge_lock_both_ends(qedge);
+       } else {
+               DEBUGQUE("canqueue_disconnect_edge called for a not fully connected edge\n");
+               flags=0;
+               if(inends) can_spin_lock_irqsave(&inends->ends_lock,flags);
+               if(outends) can_spin_lock(&outends->ends_lock);
+       }
+       
        can_spin_lock(&qedge->fifo.fifo_lock);
        if(atomic_read(&qedge->edge_used)==0) {
                if(qedge->outends){
@@ -569,8 +580,14 @@ int canqueue_disconnect_edge(struct canque_edge_t *qedge)
                ret=1;
        } else ret=-1;
        can_spin_unlock(&qedge->fifo.fifo_lock);
-       if(outends) can_spin_unlock(&outends->ends_lock);
-       if(inends) can_spin_unlock_irqrestore(&inends->ends_lock,flags);
+
+       if(inends && outends) {
+               canque_edge_unlock_both_ends(qedge, flags);
+       } else {
+               if(outends) can_spin_unlock(&outends->ends_lock);
+               if(inends) can_spin_unlock_irqrestore(&inends->ends_lock,flags);
+       }
+
        DEBUGQUE("canqueue_disconnect_edge %d returned %d\n",qedge->edge_num,ret);
        return ret;
 }
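
canqueue_disconnect_edge() above unlinks the edge only when edge_used has
already dropped to zero and returns -1 otherwise, so the caller can retry once
the remaining references are gone. A reduced sketch of that guard on a C11
atomic counter (struct node and try_unlink() are illustrative names; the real
function of course performs the check under the end and fifo locks):

#include <stdatomic.h>
#include <stdbool.h>

struct node {
        atomic_int users;       /* analogous to edge->edge_used */
        bool linked;            /* stands in for membership on the edge lists */
};

/* Unlink only when nobody holds a reference any more; otherwise report
 * failure so the caller can retry after the last user has gone away. */
static int try_unlink(struct node *n)
{
        if (atomic_load(&n->users) != 0)
                return -1;      /* still referenced, leave the lists alone */
        n->linked = false;      /* in the driver: the list_del() calls */
        return 1;
}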