CAN driver infrastructure redesign to LinCAN-0.2 version
diff --git a/lincan/src/can_queue.c b/lincan/src/can_queue.c
new file mode 100644
index 0000000..ad6160c
--- /dev/null
+++ b/lincan/src/can_queue.c
@@ -0,0 +1,650 @@
+/* can_queue.c - CAN message queues
+ * Linux CAN-bus device driver.
+ * New CAN queues by Pavel Pisa - OCERA team member
+ * email:pisa@cmp.felk.cvut.cz
+ * This software is released under the GPL-License.
+ * Version lincan-0.2  9 Jul 2003
+ */
+
+#define __NO_VERSION__
+#include <linux/module.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
+#include <linux/malloc.h>
+#else
+#include <linux/slab.h>
+#endif
+#include <linux/wait.h>
+#include "../include/can.h"
+#include "../include/can_queue.h"
+
+#ifdef CAN_DEBUG
+       #define DEBUGQUE(fmt,args...) printk(KERN_ERR "can_queue (debug): " fmt,\
+       ##args)
+       
+  atomic_t edge_num_cnt;
+#else
+       #define DEBUGQUE(fmt,args...)
+#endif
+
+#define CANQUE_ROUNDROB 1
+
+/**
+ * canque_fifo_flush_slots - free all ready slots from the FIFO
+ * @fifo: pointer to the FIFO structure
+ *
+ * The caller should be prepared to handle situations when some
+ * slots are still held by input or output side slot processing.
+ * Such slots cannot be flushed, nor can their processing be interrupted.
+ *
+ * Return Value: A nonzero value indicates that the queue was not
+ *     empty before the function call.
+ */
+int canque_fifo_flush_slots(struct canque_fifo_t *fifo)
+{
+       int ret;
+       unsigned long flags;
+       struct canque_slot_t *slot;
+       spin_lock_irqsave(&fifo->fifo_lock, flags);
+       slot=fifo->head;
+       *fifo->tail=fifo->flist;
+       fifo->flist=slot;
+       fifo->head=NULL;
+       fifo->tail=&fifo->head;
+       ret=!canque_fifo_test_and_set_fl(fifo,EMPTY);
+       spin_unlock_irqrestore(&fifo->fifo_lock, flags);
+       return ret;
+}
+
+
+/**
+ * canque_fifo_init_slots - initialize one CAN FIFO
+ * @fifo: pointer to the FIFO structure
+ * @slotsnr: number of requested slots
+ *
+ * Return Value: A negative value indicates that there is not enough memory
+ *     to allocate space for the requested number of slots.
+ */
+int canque_fifo_init_slots(struct canque_fifo_t *fifo, int slotsnr)
+{
+       int size;
+       struct canque_slot_t *slot;
+       if(!slotsnr) slotsnr=MAX_BUF_LENGTH;
+       size=sizeof(struct canque_slot_t)*slotsnr;
+       fifo->entry=kmalloc(size,GFP_KERNEL);
+       if(!fifo->entry) return -1;
+       slot=fifo->entry;
+       fifo->flist=slot;
+       while(--slotsnr){
+               slot->next=slot+1;
+               slot++;
+       }
+       slot->next=NULL;
+       fifo->head=NULL;
+       fifo->tail=&fifo->head;
+       canque_fifo_set_fl(fifo,EMPTY);
+       return 1;
+}
+
+/**
+ * canque_fifo_done - frees the slots allocated for the CAN FIFO
+ * @fifo: pointer to the FIFO structure
+ */
+int canque_fifo_done(struct canque_fifo_t *fifo)
+{
+       if(fifo->entry)
+               kfree(fifo->entry);
+       fifo->entry=NULL;
+       return 1;
+}
+
+/* Reference of the list and atomic primitives used below:
+   atomic_dec_and_test(&qedge->edge_used);
+   atomic_inc(&qedge->edge_used);
+   list_add_tail(struct list_head *new, struct list_head *head);
+   list_for_each(edge, qends->inlist);
+   list_entry(ptr, type, member);
+*/
+
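+/**
+ * canque_get_inslot - finds one outgoing edge and allocates a slot from it
+ * @qends: ends structure belonging to the calling communication object
+ * @qedgep: place to store the pointer to the found edge
+ * @slotp: place to store the pointer to the allocated slot
+ * @cmd: command type stored into the slot
+ *
+ * The first edge of the input list which is neither blocked nor dead is used.
+ *
+ * Return Value: A positive value reports success; a nonpositive value
+ *     informs that no usable edge or no free slot is available.
+ */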
+int canque_get_inslot(struct canque_ends_t *qends,
+       struct canque_edge_t **qedgep, struct canque_slot_t **slotp, int cmd)
+{
+       int ret=-2;
+       unsigned long flags;
+       struct canque_edge_t *edge;
+       
+       spin_lock_irqsave(&qends->ends_lock, flags);
+       if(!list_empty(&qends->inlist)){
+               edge=list_entry(qends->inlist.next,struct canque_edge_t,inpeers);
+               if(!canque_fifo_test_fl(&edge->fifo,BLOCK)&&!canque_fifo_test_fl(&edge->fifo,DEAD)){
+                       atomic_inc(&edge->edge_used);
+                       spin_unlock_irqrestore(&qends->ends_lock, flags);
+                       ret=canque_fifo_get_inslot(&edge->fifo, slotp, cmd);
+                       if(ret>0){
+                               *qedgep=edge;
+                               DEBUGQUE("canque_get_inslot cmd=%d found edge %d\n",cmd,edge->edge_num);
+                               return ret;
+
+                       }
+                       spin_lock_irqsave(&qends->ends_lock, flags);
+                       if(atomic_dec_and_test(&edge->edge_used))
+                               canque_notify_bothends(edge,CANQUEUE_NOTIFY_NOUSR);
+               }
+       }
+       spin_unlock_irqrestore(&qends->ends_lock, flags);
+       *qedgep=NULL;
+       DEBUGQUE("canque_get_inslot cmd=%d failed\n",cmd);
+       return ret;
+}
+
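+/**
+ * canque_get_inslot4id - finds the best outgoing edge and slot for the given ID
+ * @qends: ends structure belonging to the calling communication object
+ * @qedgep: place to store the pointer to the found edge
+ * @slotp: place to store the pointer to the allocated slot
+ * @cmd: command type stored into the slot
+ * @id: communication ID of the message to send into the edge
+ * @prio: requested priority of the message
+ *
+ * Only edges whose filter matches @id are considered; edges with a more
+ * specific filter mask and a suitable priority are preferred.
+ *
+ * Return Value: A positive value reports success; a nonpositive value
+ *     informs that no usable edge or no free slot is available.
+ */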
+int canque_get_inslot4id(struct canque_ends_t *qends,
+       struct canque_edge_t **qedgep, struct canque_slot_t **slotp,
+       int cmd, unsigned long id, int prio)
+{
+       int ret=-2;
+       unsigned long flags;
+       struct canque_edge_t *edge, *bestedge=NULL;
+       struct list_head *entry;
+       
+       spin_lock_irqsave(&qends->ends_lock, flags);
+       list_for_each(entry,&qends->inlist){
+               edge=list_entry(entry,struct canque_edge_t,inpeers);
+               if(canque_fifo_test_fl(&edge->fifo,BLOCK)||canque_fifo_test_fl(&edge->fifo,DEAD))
+                       continue;
+               if((id^edge->filtid)&edge->filtmask)
+                       continue;
+               if(bestedge){
+                       if(bestedge->filtmask){
+                               if (!edge->filtmask) continue;
+                       } else {
+                               if(edge->filtmask){
+                                       bestedge=edge;
+                                       continue;
+                               }
+                       }
+                       if(bestedge->edge_prio<edge->edge_prio){
+                               if(edge->edge_prio>prio) continue;
+                       } else {
+                               if(bestedge->edge_prio<=prio) continue;
+                       }
+               }
+               bestedge=edge;
+       }
+       if((edge=bestedge)!=NULL){
+               atomic_inc(&edge->edge_used);
+               spin_unlock_irqrestore(&qends->ends_lock, flags);
+               ret=canque_fifo_get_inslot(&edge->fifo, slotp, cmd);
+               if(ret>0){
+                       *qedgep=edge;
+                       DEBUGQUE("canque_get_inslot4id cmd=%d id=%ld prio=%d found edge %d\n",cmd,id,prio,edge->edge_num);
+                       return ret;
+               }
+               spin_lock_irqsave(&qends->ends_lock, flags);
+               if(atomic_dec_and_test(&edge->edge_used))
+                       canque_notify_bothends(edge,CANQUEUE_NOTIFY_NOUSR);
+       }
+       spin_unlock_irqrestore(&qends->ends_lock, flags);
+       *qedgep=NULL;
+       DEBUGQUE("canque_get_inslot4id cmd=%d id=%ld prio=%d failed\n",cmd,id,prio);
+       return ret;
+}
+
+
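+/**
+ * canque_put_inslot - schedules a filled slot for processing
+ * @qends: ends structure belonging to the calling communication object
+ * @qedge: edge the slot has been allocated from
+ * @slot: pointer to the filled slot
+ *
+ * The slot must have been acquired by canque_get_inslot() or
+ * canque_get_inslot4id(). A nonzero result of the FIFO operation
+ * activates the edge and notifies its output ends.
+ *
+ * Return Value: The value returned by canque_fifo_put_inslot().
+ */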
+int canque_put_inslot(struct canque_ends_t *qends,
+       struct canque_edge_t *qedge, struct canque_slot_t *slot)
+{
+       int ret;
+       unsigned long flags;
+       ret=canque_fifo_put_inslot(&qedge->fifo,slot);
+       if(ret) {
+               canque_activate_edge(qends,qedge);
+               canque_notify_outends(qedge,CANQUEUE_NOTIFY_PROC);
+       }
+       spin_lock_irqsave(&qends->ends_lock, flags);
+       if(atomic_dec_and_test(&qedge->edge_used))
+               canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
+       spin_unlock_irqrestore(&qends->ends_lock, flags);
+       DEBUGQUE("canque_put_inslot for edge %d returned %d\n",qedge->edge_num,ret);
+       return ret;
+}
+
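+/**
+ * canque_abort_inslot - releases a previously acquired slot without sending
+ * @qends: ends structure belonging to the calling communication object
+ * @qedge: edge the slot has been allocated from
+ * @slot: pointer to the slot to abort
+ *
+ * A nonzero result of the FIFO operation notifies the output ends
+ * that space has been freed in the edge FIFO.
+ *
+ * Return Value: The value returned by canque_fifo_abort_inslot().
+ */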
+int canque_abort_inslot(struct canque_ends_t *qends,
+       struct canque_edge_t *qedge, struct canque_slot_t *slot)
+{
+       int ret;
+       unsigned long flags;
+       ret=canque_fifo_abort_inslot(&qedge->fifo,slot);
+       if(ret) {
+               canque_notify_outends(qedge,CANQUEUE_NOTIFY_SPACE);
+       }
+       spin_lock_irqsave(&qends->ends_lock, flags);
+       if(atomic_dec_and_test(&qedge->edge_used))
+               canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
+       spin_unlock_irqrestore(&qends->ends_lock, flags);
+       DEBUGQUE("canque_abort_inslot for edge %d returned %d\n",qedge->edge_num,ret);
+       return ret;
+}
+
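+/**
+ * canque_filter_msg2edges - sends a message into all edges accepting its ID
+ * @qends: ends structure belonging to the calling communication object
+ * @msg: pointer to the message to distribute
+ *
+ * The message is copied into one slot of every edge on the input list
+ * which is neither blocked nor dead and whose filter matches the message ID.
+ *
+ * Return Value: The number of edges the message has been delivered to.
+ */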
+int canque_filter_msg2edges(struct canque_ends_t *qends, struct canmsg_t *msg)
+{
+       int destnr=0;
+       int ret;
+       unsigned long flags;
+       struct canque_edge_t *edge;
+       struct list_head *entry;
+       struct canque_slot_t *slot;
+       
+       DEBUGQUE("canque_filter_msg2edges for msg ID %ld\n",msg->id);
+       spin_lock_irqsave(&qends->ends_lock, flags);
+       list_for_each(entry,&qends->inlist){
+               edge=list_entry(entry,struct canque_edge_t,inpeers);
+               if(canque_fifo_test_fl(&edge->fifo,BLOCK)||canque_fifo_test_fl(&edge->fifo,DEAD))
+                       continue;
+               if((msg->id^edge->filtid)&edge->filtmask)
+                       continue;
+               atomic_inc(&edge->edge_used);
+               spin_unlock_irqrestore(&qends->ends_lock, flags);
+               ret=canque_fifo_get_inslot(&edge->fifo, &slot, 0);
+               if(ret>0){
+                       slot->msg=*msg;
+                       destnr++;
+                       ret=canque_fifo_put_inslot(&edge->fifo,slot);
+                       if(ret) {
+                               canque_activate_edge(qends,edge);
+                               canque_notify_outends(edge,CANQUEUE_NOTIFY_PROC);
+                       }
+
+               }
+               spin_lock_irqsave(&qends->ends_lock, flags);
+               if(atomic_dec_and_test(&edge->edge_used))
+                       canque_notify_bothends(edge,CANQUEUE_NOTIFY_NOUSR);
+       }
+       spin_unlock_irqrestore(&qends->ends_lock, flags);
+       DEBUGQUE("canque_filter_msg2edges sent msg ID %ld to %d edges\n",msg->id,destnr);
+       return destnr;
+}
+
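+/**
+ * canque_test_outslot - tests and retrieves a ready slot for the output ends
+ * @qends: ends structure belonging to the calling communication object
+ * @qedgep: place to store the pointer to the active edge
+ * @slotp: place to store the pointer to the ready slot
+ *
+ * Edges are scanned from the highest to the lowest active priority class.
+ *
+ * Return Value: A negative value informs that there is no ready slot;
+ *     otherwise the value of canque_fifo_test_outslot() is returned.
+ */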
+int canque_test_outslot(struct canque_ends_t *qends,
+       struct canque_edge_t **qedgep, struct canque_slot_t **slotp)
+{
+       unsigned long flags;
+       int prio;
+       struct canque_edge_t *edge;
+       
+       spin_lock_irqsave(&qends->ends_lock, flags);
+       for(prio=CANQUEUE_PRIO_NR;--prio>=0;){
+               if(!list_empty(&qends->active[prio])){
+                       edge=list_entry(qends->active[prio].next,struct canque_edge_t,outpeers);
+                       atomic_inc(&edge->edge_used);
+                       spin_unlock_irqrestore(&qends->ends_lock, flags);
+                       *qedgep=edge;
+                       DEBUGQUE("canque_test_outslot found edge %d\n",edge->edge_num);
+                       return canque_fifo_test_outslot(&edge->fifo, slotp);
+               }
+       }
+       spin_unlock_irqrestore(&qends->ends_lock, flags);
+       *qedgep=NULL;
+       DEBUGQUE("canque_test_outslot no ready slot\n");
+       return -1;
+}
+
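+/**
+ * canque_free_outslot - releases a slot processed by the output side
+ * @qends: ends structure belonging to the calling communication object
+ * @qedge: edge the slot belongs to
+ * @slot: pointer to the processed slot
+ *
+ * The input ends are notified when the edge FIFO becomes empty or when
+ * space is freed. An emptied edge is moved to the idle list; with
+ * CANQUE_ROUNDROB enabled a non-empty edge is requeued at the tail of
+ * its active list.
+ *
+ * Return Value: The FIFO state flags returned by canque_fifo_free_outslot().
+ */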
+int canque_free_outslot(struct canque_ends_t *qends,
+       struct canque_edge_t *qedge, struct canque_slot_t *slot)
+{
+       int ret;
+       unsigned long flags;
+       ret=canque_fifo_free_outslot(&qedge->fifo, slot);
+       if(ret&CAN_FIFOF_EMPTY){
+               canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
+       }
+       if(ret&CAN_FIFOF_FULL)
+               canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE);
+       spin_lock_irqsave(&qends->ends_lock, flags);
+       if((ret&CAN_FIFOF_EMPTY) || CANQUE_ROUNDROB){
+               spin_lock(&qedge->fifo.fifo_lock);
+               if(canque_fifo_test_fl(&qedge->fifo,EMPTY)){
+                       list_del(&qedge->outpeers);
+                       list_add(&qedge->outpeers,&qends->idle);
+               }
+           #if CANQUE_ROUNDROB
+               else{
+                       list_del(&qedge->outpeers);
+                       list_add_tail(&qedge->outpeers,&qends->active[qedge->edge_prio]);
+               }
+           #endif /*CANQUE_ROUNDROB*/
+               spin_unlock(&qedge->fifo.fifo_lock);
+       }
+       if(atomic_dec_and_test(&qedge->edge_used))
+               canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
+       spin_unlock_irqrestore(&qends->ends_lock, flags);
+       DEBUGQUE("canque_free_outslot for edge %d returned %d\n",qedge->edge_num,ret);
+       return ret;
+}
+
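+/**
+ * canque_again_outslot - reschedules a slot for a new processing attempt
+ * @qends: ends structure belonging to the calling communication object
+ * @qedge: edge the slot belongs to
+ * @slot: pointer to the slot to process again
+ *
+ * Return Value: The FIFO state flags returned by canque_fifo_again_outslot().
+ */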
+int canque_again_outslot(struct canque_ends_t *qends,
+       struct canque_edge_t *qedge, struct canque_slot_t *slot)
+{
+       int ret;
+       unsigned long flags;
+       ret=canque_fifo_again_outslot(&qedge->fifo, slot);
+       if(ret&CAN_FIFOF_EMPTY){
+               canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
+       }
+       if(ret&CAN_FIFOF_FULL)
+               canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE);
+       spin_lock_irqsave(&qends->ends_lock, flags);
+       if(atomic_dec_and_test(&qedge->edge_used))
+               canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
+       spin_unlock_irqrestore(&qends->ends_lock, flags);
+       DEBUGQUE("canque_again_outslot for edge %d returned %d\n",qedge->edge_num,ret);
+       return ret;
+}
+
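+/**
+ * canque_set_filt - sets a new ID filter for the edge
+ * @qedge: pointer to the edge
+ * @filtid: ID to set for the edge
+ * @filtmask: mask used for the ID filtering
+ *
+ * Both ends are notified about the filter change unless the edge is dead.
+ *
+ * Return Value: A negative value indicates a dead edge, zero that the edge
+ *     was temporarily blocked for the update, and 1 that it was already
+ *     blocked by somebody else.
+ */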
+int canque_set_filt(struct canque_edge_t *qedge,
+       unsigned long filtid, unsigned long filtmask)
+{
+       int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
+       atomic_inc(&qedge->edge_used);
+       qedge->filtid=filtid;
+       qedge->filtmask=filtmask;
+       if(canque_fifo_test_fl(&qedge->fifo,DEAD)) ret=-1;
+       else ret=canque_fifo_test_and_set_fl(&qedge->fifo,BLOCK)?1:0;
+
+       spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
+       if(ret>=0){
+               canque_notify_bothends(qedge,CANQUEUE_NOTIFY_FILTCH);
+       }
+       spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
+       if(!ret)canque_fifo_clear_fl(&qedge->fifo,BLOCK);
+       if(atomic_dec_and_test(&qedge->edge_used))
+               canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
+       spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
+       
+       DEBUGQUE("canque_set_filt for edge %d, ID %ld and mask %ld returned %d\n",qedge->edge_num,filtid,filtmask,ret);
+       return ret;
+}
+
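+/**
+ * canque_flush - flushes all ready slots in the edge FIFO
+ * @qedge: pointer to the edge
+ *
+ * The input ends are notified and the emptied edge is moved to the idle
+ * list of its output ends.
+ *
+ * Return Value: A nonzero value indicates that the FIFO was not empty
+ *     before the call.
+ */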
+int canque_flush(struct canque_edge_t *qedge)
+{
+       int ret;
+       unsigned long flags;
+
+       atomic_inc(&qedge->edge_used);
+       ret=canque_fifo_flush_slots(&qedge->fifo);
+       if(ret){
+               canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
+               canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE);
+               spin_lock_irqsave(&qedge->outends->ends_lock, flags);
+               spin_lock(&qedge->fifo.fifo_lock);
+               if(canque_fifo_test_fl(&qedge->fifo,EMPTY)){
+                       list_del(&qedge->outpeers);
+                       list_add(&qedge->outpeers,&qedge->outends->idle);
+               }
+               if(atomic_dec_and_test(&qedge->edge_used))
+                       canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
+               spin_unlock(&qedge->fifo.fifo_lock);
+               spin_unlock_irqrestore(&qedge->outends->ends_lock, flags);
+       }
+       DEBUGQUE("canque_flush for edge %d returned %d\n",qedge->edge_num,ret);
+       return ret;
+}
+
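+/**
+ * canqueue_ends_init_gen - subsystem-independent initialization of the ends structure
+ * @qends: pointer to the ends structure
+ *
+ * Initializes the active, idle and input lists and the ends lock.
+ */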
+int canqueue_ends_init_gen(struct canque_ends_t *qends)
+{
+       int i;
+       for(i=CANQUEUE_PRIO_NR;--i>=0;){
+               INIT_LIST_HEAD(&qends->active[i]);
+       }
+       INIT_LIST_HEAD(&qends->idle);
+       INIT_LIST_HEAD(&qends->inlist);
+       spin_lock_init(&qends->ends_lock);
+       return 0;
+}
+
+
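+/**
+ * canqueue_notify_kern - notification callback for kernel-space ends
+ * @qends: pointer to the notified ends structure
+ * @qedge: edge the notification is related to
+ * @what: notification event (CANQUEUE_NOTIFY_xxx)
+ *
+ * Wakes the read, write and empty wait queues of the ends as appropriate
+ * and maintains the edge use count for the DEAD and ATACH events.
+ */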
+void canqueue_notify_kern(struct canque_ends_t *qends, struct canque_edge_t *qedge, int what)
+{
+       DEBUGQUE("canqueue_notify_kern for edge %d and event %d\n",qedge->edge_num,what);
+       switch(what){
+               case CANQUEUE_NOTIFY_EMPTY:
+                       wake_up_interruptible(&qends->endinfo.fileinfo.emptyq);
+                       break;
+               case CANQUEUE_NOTIFY_SPACE:
+                       wake_up_interruptible(&qends->endinfo.fileinfo.writeq);
+                       break;
+               case CANQUEUE_NOTIFY_PROC:
+                       wake_up_interruptible(&qends->endinfo.fileinfo.readq);
+                       break;
+               case CANQUEUE_NOTIFY_NOUSR:
+                       wake_up_interruptible(&qends->endinfo.fileinfo.readq);
+                       wake_up_interruptible(&qends->endinfo.fileinfo.writeq);
+                       wake_up_interruptible(&qends->endinfo.fileinfo.emptyq);
+                       break;
+               case CANQUEUE_NOTIFY_DEAD:
+                       if(atomic_read(&qedge->edge_used)>0)
+                               atomic_dec(&qedge->edge_used);
+                       break;
+               case CANQUEUE_NOTIFY_ATACH:
+                       atomic_inc(&qedge->edge_used);
+                       break;
+       }
+}
+
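+/**
+ * canqueue_ends_init_kern - initializes the ends structure for kernel-space use
+ * @qends: pointer to the ends structure
+ *
+ * Performs the generic initialization, clears the context, initializes
+ * the wait queues and installs canqueue_notify_kern() as the notification
+ * callback.
+ */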
+int canqueue_ends_init_kern(struct canque_ends_t *qends)
+{
+       canqueue_ends_init_gen(qends);
+       qends->context=NULL;
+       init_waitqueue_head(&qends->endinfo.fileinfo.readq);
+       init_waitqueue_head(&qends->endinfo.fileinfo.writeq);
+       init_waitqueue_head(&qends->endinfo.fileinfo.emptyq);
+       qends->notify=canqueue_notify_kern;
+       DEBUGQUE("canqueue_ends_init_kern\n");
+       return 0;
+}
+
+
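+/**
+ * canque_get_inslot4id_wait_kern - waiting version of canque_get_inslot4id()
+ * @qends: ends structure belonging to the calling communication object
+ * @qedgep: place to store the pointer to the found edge
+ * @slotp: place to store the pointer to the allocated slot
+ * @cmd: command type stored into the slot
+ * @id: communication ID of the message to send into the edge
+ * @prio: requested priority of the message
+ *
+ * Sleeps interruptibly on the write wait queue while the result is -1.
+ */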
+int canque_get_inslot4id_wait_kern(struct canque_ends_t *qends,
+       struct canque_edge_t **qedgep, struct canque_slot_t **slotp,
+       int cmd, unsigned long id, int prio)
+{
+       int ret=-1;
+       DEBUGQUE("canque_get_inslot4id_wait_kern for cmd %d, id %ld, prio %d\n",cmd,id,prio);
+       wait_event_interruptible((qends->endinfo.fileinfo.writeq), 
+               (ret=canque_get_inslot4id(qends,qedgep,slotp,cmd,id,prio))!=-1);
+       return ret;
+}
+
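+/**
+ * canque_get_outslot_wait_kern - waiting version of canque_test_outslot()
+ * @qends: ends structure belonging to the calling communication object
+ * @qedgep: place to store the pointer to the active edge
+ * @slotp: place to store the pointer to the ready slot
+ *
+ * Sleeps interruptibly on the read wait queue until a ready slot is found.
+ */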
+int canque_get_outslot_wait_kern(struct canque_ends_t *qends,
+       struct canque_edge_t **qedgep, struct canque_slot_t **slotp)
+{
+       int ret=-1;
+       DEBUGQUE("canque_get_outslot_wait_kern\n");
+       wait_event_interruptible((qends->endinfo.fileinfo.readq), 
+               (ret=canque_test_outslot(qends,qedgep,slotp))!=-1);
+       return ret;
+}
+
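+/**
+ * canque_sync_wait_kern - waits until the edge FIFO becomes empty
+ * @qends: ends structure the caller sleeps on
+ * @qedge: pointer to the edge to synchronize with
+ *
+ * Sleeps interruptibly on the empty wait queue of @qends.
+ */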
+int canque_sync_wait_kern(struct canque_ends_t *qends, struct canque_edge_t *qedge)
+{
+       int ret=-1;
+       DEBUGQUE("canque_sync_wait_kern\n");
+       wait_event_interruptible((qends->endinfo.fileinfo.emptyq), 
+               (ret=canque_fifo_test_fl(&qedge->fifo,EMPTY)));
+       return ret;
+}
+
+
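+/**
+ * canque_new_edge_kern - allocates a new edge for kernel-space use
+ * @slotsnr: required number of slots in the edge FIFO
+ *
+ * The edge structure is zero-initialized and, when CAN_DEBUG is defined,
+ * a debugging edge number is assigned to it.
+ *
+ * Return Value: A pointer to the new edge, or %NULL when the edge or
+ *     its FIFO slots cannot be allocated.
+ */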
+struct canque_edge_t *canque_new_edge_kern(int slotsnr)
+{
+       struct canque_edge_t *qedge;
+       qedge = (struct canque_edge_t *)kmalloc(sizeof(struct canque_edge_t), GFP_KERNEL);
+       if(qedge == NULL) return NULL;
+
+       memset(qedge,0,sizeof(struct canque_edge_t));
+       if(canque_fifo_init_slots(&qedge->fifo, slotsnr)<0){
+               kfree(qedge);
+               DEBUGQUE("canque_new_edge_kern failed\n");
+               return NULL;
+       }
+       atomic_set(&qedge->edge_used,0);
+       qedge->filtid = 0;
+       qedge->filtmask = 0;
+       qedge->edge_prio = 0;
+    #ifdef CAN_DEBUG
+       /* not exactly clean, but enough for debugging */
+       atomic_inc(&edge_num_cnt);
+       qedge->edge_num=atomic_read(&edge_num_cnt);
+    #endif /* CAN_DEBUG */
+       DEBUGQUE("canque_new_edge_kern %d\n",qedge->edge_num);
+       return qedge;
+}
+
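+/**
+ * canqueue_connect_edge - connects an edge between two communication ends
+ * @qedge: pointer to the edge
+ * @inends: ends the input side of the edge is connected to
+ * @outends: ends the output side of the edge is connected to
+ *
+ * The edge is added to the input list of @inends and to the idle list of
+ * @outends and both ends are notified with CANQUEUE_NOTIFY_ATACH.
+ *
+ * Return Value: A negative value on failure, zero on success.
+ */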
+int canqueue_connect_edge(struct canque_edge_t *qedge, struct canque_ends_t *inends, struct canque_ends_t *outends)
+{
+       unsigned long flags;
+       if(qedge == NULL) return -1;
+       DEBUGQUE("canqueue_connect_edge %d\n",qedge->edge_num);
+       atomic_inc(&qedge->edge_used);
+       spin_lock_irqsave(&inends->ends_lock, flags);
+       spin_lock(&outends->ends_lock);
+       spin_lock(&qedge->fifo.fifo_lock);
+       qedge->inends=inends;
+       list_add(&qedge->inpeers,&inends->inlist);
+       qedge->outends=outends;
+       list_add(&qedge->outpeers,&outends->idle);
+       spin_unlock(&qedge->fifo.fifo_lock);
+       spin_unlock(&outends->ends_lock);
+       spin_unlock_irqrestore(&inends->ends_lock, flags);
+       canque_notify_bothends(qedge, CANQUEUE_NOTIFY_ATACH);
+       
+       spin_lock_irqsave(&qedge->fifo.fifo_lock, flags);
+       if(atomic_dec_and_test(&qedge->edge_used))
+               canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
+       spin_unlock_irqrestore(&qedge->fifo.fifo_lock, flags);
+       return 0;
+}
+
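+/**
+ * canqueue_disconnect_edge - removes an unused edge from both ends
+ * @qedge: pointer to the edge
+ *
+ * The edge is removed from the lists of its ends only when its use count
+ * has dropped to zero.
+ *
+ * Return Value: 1 when the edge has been disconnected, -1 when it is
+ *     still in use.
+ */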
+int canqueue_disconnect_edge(struct canque_edge_t *qedge)
+{
+       int ret;
+       unsigned long flags;
+       struct canque_ends_t *inends=qedge->inends;
+       struct canque_ends_t *outends=qedge->outends;
+       spin_lock_irqsave(&inends->ends_lock,flags);
+       spin_lock(&outends->ends_lock);
+       spin_lock(&qedge->fifo.fifo_lock);
+       if(atomic_read(&qedge->edge_used)==0) {
+               if(qedge->outends){
+                       list_del(&qedge->outpeers);
+                       qedge->outends=NULL;
+               }
+               if(qedge->inends){
+                       list_del(&qedge->inpeers);
+                       qedge->inends=NULL;
+               }
+               ret=1;
+       } else ret=-1;
+       spin_unlock(&qedge->fifo.fifo_lock);
+       /* unlock through the saved pointers, qedge->inends/outends may be NULL now */
+       spin_unlock(&outends->ends_lock);
+       spin_unlock_irqrestore(&inends->ends_lock,flags);
+       DEBUGQUE("canqueue_disconnect_edge %d returned %d\n",qedge->edge_num,ret);
+       return ret;
+}
+
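+/**
+ * canqueue_disconnect_edge_kern - blocking disconnect of an edge
+ * @qends: ends structure the caller sleeps on
+ * @qedge: pointer to the edge
+ *
+ * Blocks the edge, marks it dead and waits until its use count drops
+ * so that it can be disconnected from both ends.
+ *
+ * Return Value: Zero on success, -1 when the edge is already dead.
+ */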
+int canqueue_disconnect_edge_kern(struct canque_ends_t *qends, struct canque_edge_t *qedge)
+{
+       canque_fifo_set_fl(&qedge->fifo,BLOCK);
+       DEBUGQUE("canqueue_disconnect_edge_kern %d called\n",qedge->edge_num);
+       if(!canque_fifo_test_and_set_fl(&qedge->fifo,DEAD)){
+               canque_notify_bothends(qedge, CANQUEUE_NOTIFY_DEAD);
+               if(atomic_read(&qedge->edge_used)>0)
+                       atomic_dec(&qedge->edge_used);
+               DEBUGQUE("canqueue_disconnect_edge_kern %d waiting\n",qedge->edge_num);
+               wait_event_interruptible((qends->endinfo.fileinfo.emptyq), 
+                       (canqueue_disconnect_edge(qedge)>=0));
+               return 0;
+       } else {
+               DEBUGQUE("canqueue_disconnect_edge_kern failed\n");
+               return -1;
+       }
+}
+
+
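+/**
+ * canqueue_disconnect_list_kern - disconnects and frees all edges on a list
+ * @qends: ends structure the list belongs to
+ * @list: one of the lists (active, idle or input) of the ends structure
+ *
+ * Each disconnected edge has its FIFO slots released and its memory freed.
+ *
+ * Return Value: Zero when the list has been emptied, -1 when an edge
+ *     cannot be disconnected.
+ */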
+int canqueue_disconnect_list_kern(struct canque_ends_t *qends, struct list_head *list)
+{
+       struct canque_edge_t *edge;
+       unsigned long flags;
+       for(;;){
+               spin_lock_irqsave(&qends->ends_lock,flags);
+               if(list_empty(list)){
+                       spin_unlock_irqrestore(&qends->ends_lock,flags);
+                       return 0;
+               }
+               if(list == &qends->inlist)
+                       edge=list_entry(list->next,struct canque_edge_t,inpeers);
+               else
+                       edge=list_entry(list->next,struct canque_edge_t,outpeers);
+               atomic_inc(&edge->edge_used);
+               spin_unlock_irqrestore(&qends->ends_lock,flags);
+               if(canqueue_disconnect_edge_kern(qends, edge)>=0) {
+                       /* Free edge memory */
+                       canque_fifo_done(&edge->fifo);
+                       kfree(edge);
+               }else{
+                       DEBUGQUE("canqueue_disconnect_list_kern in troubles\n");
+                       DEBUGQUE("the edge %d has usage count %d and flags %ld\n",edge->edge_num,atomic_read(&edge->edge_used),edge->fifo.fifo_flags);
+                       return -1;
+               }
+       }
+}
+
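+/**
+ * canqueue_block_list - blocks all edges on the given list
+ * @qends: ends structure the list belongs to
+ * @list: one of the lists (active, idle or input) of the ends structure
+ *
+ * Sets the BLOCK flag on every edge of the list so that no new messages
+ * enter its FIFO.
+ */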
+void canqueue_block_list(struct canque_ends_t *qends, struct list_head *list)
+{
+       struct canque_edge_t *edge;
+       struct list_head *entry;
+       unsigned long flags;
+       
+       spin_lock_irqsave(&qends->ends_lock, flags);
+       list_for_each(entry,list){
+               if(list == &qends->inlist)
+                       edge=list_entry(entry,struct canque_edge_t,inpeers);
+               else
+                       edge=list_entry(entry,struct canque_edge_t,outpeers);
+               canque_fifo_set_fl(&edge->fifo,BLOCK);
+               /*spin_unlock_irqrestore(&qends->ends_lock, flags);*/
+               /* the loop could be interrupted and preempted here */
+               /*spin_lock_irqsave(&qends->ends_lock, flags);*/
+       }
+       spin_unlock_irqrestore(&qends->ends_lock, flags);
+}
+
+
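+/**
+ * canqueue_ends_done_kern - finalizes the kernel-space ends structure
+ * @qends: pointer to the ends structure
+ * @sync: currently unused synchronization request flag
+ *
+ * Blocks and disconnects all edges connected to the ends and wakes
+ * all processes waiting on its wait queues.
+ */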
+int canqueue_ends_done_kern(struct canque_ends_t *qends, int sync)
+{
+       int i;
+
+       DEBUGQUE("canqueue_ends_done_kern\n");
+       /* canqueue_block_list() takes qends->ends_lock itself,
+          so the lock must not be held around these calls */
+       for(i=CANQUEUE_PRIO_NR;--i>=0;){
+               canqueue_block_list(qends, &qends->active[i]);
+       }
+       canqueue_block_list(qends, &qends->idle);
+       canqueue_block_list(qends, &qends->inlist);
+
+       for(i=CANQUEUE_PRIO_NR;--i>=0;){
+               canqueue_disconnect_list_kern(qends, &qends->active[i]);
+       }
+       canqueue_disconnect_list_kern(qends, &qends->idle);
+       canqueue_disconnect_list_kern(qends, &qends->inlist);
+
+       wake_up_interruptible(&qends->endinfo.fileinfo.readq);
+       wake_up_interruptible(&qends->endinfo.fileinfo.writeq);
+       wake_up_interruptible(&qends->endinfo.fileinfo.emptyq);
+       
+
+       return 0;
+}
+