atomic_t edge_num_cnt;
//#define CAN_DEBUG
+#undef CAN_DEBUG
#ifdef CAN_DEBUG
- #define DEBUGQUE(fmt,args...) printk(KERN_ERR "can_queue (debug): " fmt,\
+ #define DEBUGQUE(fmt,args...) can_printk(KERN_ERR "can_queue (debug): " fmt,\
##args)
#else
+/* canque_fifo_flush_slots - returns all ready slots of the FIFO to its
+ * free list under fifo_lock.  Returns 1 when the FIFO state actually
+ * changed (EMPTY flag was newly set), 0 when it was already empty.
+ * NOTE(review): this hunk converts the raw Linux spinlock calls to the
+ * portable can_spin_* wrappers; the splice below appears truncated in
+ * this view (flist/head/tail reset lines not visible) — confirm against
+ * the full file. */
int canque_fifo_flush_slots(struct canque_fifo_t *fifo)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_slot_t *slot;
- spin_lock_irqsave(&fifo->fifo_lock, flags);
+ can_spin_lock_irqsave(&fifo->fifo_lock, flags);
slot=fifo->head;
if(slot){
*fifo->tail=fifo->flist;
}
canque_fifo_clear_fl(fifo,FULL);
ret=canque_fifo_test_and_set_fl(fifo,EMPTY)?0:1;
- spin_unlock_irqrestore(&fifo->fifo_lock, flags);
+ can_spin_unlock_irqrestore(&fifo->fifo_lock, flags);
return ret;
}
/**
- * canque_fifo_init_slots - initialize one CAN FIFO
+ * canque_fifo_init_slots - initializes slot chain of one CAN FIFO
* @fifo: pointer to the FIFO structure
- * @slotsnr: number of requested slots
*
- * Return Value: The negative value indicates, that there is no memory
- * to allocate space for the requested number of the slots.
+ * Return Value: The negative value indicates that no slot array has
+ * been preallocated for the FIFO or that its slot count is zero.
*/
-int canque_fifo_init_slots(struct canque_fifo_t *fifo, int slotsnr)
+int canque_fifo_init_slots(struct canque_fifo_t *fifo)
{
- int size;
struct canque_slot_t *slot;
- if(!slotsnr) slotsnr=MAX_BUF_LENGTH;
- size=sizeof(struct canque_slot_t)*slotsnr;
- fifo->entry=kmalloc(size,GFP_KERNEL);
- if(!fifo->entry) return -1;
+ int slotsnr=fifo->slotsnr;
+ if(!fifo->entry || !slotsnr) return -1;
slot=fifo->entry;
fifo->flist=slot;
while(--slotsnr){
return 1;
}
-/**
- * canque_fifo_done - frees slots allocated for CAN FIFO
- * @fifo: pointer to the FIFO structure
- */
-int canque_fifo_done(struct canque_fifo_t *fifo)
-{
- if(fifo->entry)
- kfree(fifo->entry);
- fifo->entry=NULL;
- return 1;
-}
-
/* atomic_dec_and_test(&qedge->edge_used);
void atomic_inc(&qedge->edge_used);
list_add_tail(struct list_head *new, struct list_head *head)
int canque_test_outslot(struct canque_ends_t *qends,
struct canque_edge_t **qedgep, struct canque_slot_t **slotp)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
int prio;
struct canque_edge_t *edge;
int ret;
- spin_lock_irqsave(&qends->ends_lock, flags);
+ can_spin_lock_irqsave(&qends->ends_lock, flags);
for(prio=CANQUEUE_PRIO_NR;--prio>=0;){
while(!list_empty(&qends->active[prio])){
- edge=list_entry(qends->active[prio].next,struct canque_edge_t,outpeers);
+ edge=list_entry(qends->active[prio].next,struct canque_edge_t,activepeers);
if(!canque_fifo_test_fl(&edge->fifo,DEAD)) {
canque_edge_incref(edge);
- spin_unlock_irqrestore(&qends->ends_lock, flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock, flags);
*qedgep=edge;
DEBUGQUE("canque_test_outslot found edge %d\n",edge->edge_num);
ret=canque_fifo_test_outslot(&edge->fifo, slotp);
if(ret>=0)
return ret;
- spin_lock_irqsave(&qends->ends_lock, flags);
+ can_spin_lock_irqsave(&qends->ends_lock, flags);
}
- spin_lock(&edge->fifo.fifo_lock);
+ can_spin_lock(&edge->fifo.fifo_lock);
if(canque_fifo_test_and_set_fl(&edge->fifo,INACTIVE)) {
- list_del(&edge->outpeers);
- list_add(&edge->outpeers,&qends->idle);
+ list_del(&edge->activepeers);
+ list_add(&edge->activepeers,&qends->idle);
}
- spin_unlock(&edge->fifo.fifo_lock);
+ can_spin_unlock(&edge->fifo.fifo_lock);
}
}
- spin_unlock_irqrestore(&qends->ends_lock, flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock, flags);
*qedgep=NULL;
DEBUGQUE("canque_test_outslot no ready slot\n");
return -1;
struct canque_edge_t *qedge, struct canque_slot_t *slot)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
ret=canque_fifo_free_outslot(&qedge->fifo, slot);
if(ret&CAN_FIFOF_EMPTY){
canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
}
if(ret&CAN_FIFOF_FULL)
canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE);
- spin_lock_irqsave(&qends->ends_lock, flags);
+ can_spin_lock_irqsave(&qends->ends_lock, flags);
if((ret&CAN_FIFOF_EMPTY) || CANQUE_ROUNDROB ){
- spin_lock(&qedge->fifo.fifo_lock);
+ can_spin_lock(&qedge->fifo.fifo_lock);
if(canque_fifo_test_fl(&qedge->fifo,EMPTY)){
canque_fifo_set_fl(&qedge->fifo,INACTIVE);
- list_del(&qedge->outpeers);
- list_add(&qedge->outpeers,&qends->idle);
+ list_del(&qedge->activepeers);
+ list_add(&qedge->activepeers,&qends->idle);
} else{
- list_del(&qedge->outpeers);
- list_add_tail(&qedge->outpeers,&qends->active[qedge->edge_prio]);
+ list_del(&qedge->activepeers);
+ list_add_tail(&qedge->activepeers,&qends->active[qedge->edge_prio]);
}
- spin_unlock(&qedge->fifo.fifo_lock);
+ can_spin_unlock(&qedge->fifo.fifo_lock);
}
- spin_unlock_irqrestore(&qends->ends_lock, flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock, flags);
canque_edge_decref(qedge);
DEBUGQUE("canque_free_outslot for edge %d returned %d\n",qedge->edge_num,ret);
return ret;
unsigned long filtid, unsigned long filtmask, int filtflags)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
- spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
+ can_spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
if(!(filtflags&MSG_PROCESSLOCAL) && (processlocal<2))
filtflags |= MSG_LOCAL_MASK;
if(canque_fifo_test_fl(&qedge->fifo,DEAD)) ret=-1;
else ret=canque_fifo_test_and_set_fl(&qedge->fifo,BLOCK)?1:0;
- spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
+ can_spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
if(ret>=0){
canque_notify_bothends(qedge,CANQUEUE_NOTIFY_FILTCH);
}
- spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
+ can_spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
if(!ret) canque_fifo_clear_fl(&qedge->fifo,BLOCK);
- spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
+ can_spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
DEBUGQUE("canque_set_filt for edge %d, ID %ld, mask %ld, flags %d returned %d\n",
qedge->edge_num,filtid,filtmask,filtflags,ret);
+/* canque_flush - flushes all waiting messages queued on the edge.
+ * Propagates EMPTY/SPACE notifications to the input ends when the
+ * flush changed FIFO state, and (re)parks the edge on the output ends'
+ * idle list while holding ends_lock then fifo_lock in that order.
+ * Returns the result of canque_fifo_flush_slots(). */
int canque_flush(struct canque_edge_t *qedge)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
ret=canque_fifo_flush_slots(&qedge->fifo);
if(ret){
+ /* input side can queue new messages again */
canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE);
- spin_lock_irqsave(&qedge->outends->ends_lock, flags);
- spin_lock(&qedge->fifo.fifo_lock);
+ can_spin_lock_irqsave(&qedge->outends->ends_lock, flags);
+ can_spin_lock(&qedge->fifo.fifo_lock);
+ /* re-check EMPTY under the lock: a concurrent producer may have
+ refilled the FIFO between the flush and this point */
if(canque_fifo_test_fl(&qedge->fifo,EMPTY)){
- list_del(&qedge->outpeers);
- list_add(&qedge->outpeers,&qedge->outends->idle);
+ list_del(&qedge->activepeers);
+ list_add(&qedge->activepeers,&qedge->outends->idle);
}
- spin_unlock(&qedge->fifo.fifo_lock);
- spin_unlock_irqrestore(&qedge->outends->ends_lock, flags);
+ can_spin_unlock(&qedge->fifo.fifo_lock);
+ can_spin_unlock_irqrestore(&qedge->outends->ends_lock, flags);
}
DEBUGQUE("canque_flush for edge %d returned %d\n",qedge->edge_num,ret);
return ret;
}
INIT_LIST_HEAD(&qends->idle);
INIT_LIST_HEAD(&qends->inlist);
- spin_lock_init(&qends->ends_lock);
+ INIT_LIST_HEAD(&qends->outlist);
+ can_spin_lock_init(&qends->ends_lock);
return 0;
}
*/
int canqueue_connect_edge(struct canque_edge_t *qedge, struct canque_ends_t *inends, struct canque_ends_t *outends)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
if(qedge == NULL) return -1;
DEBUGQUE("canqueue_connect_edge %d\n",qedge->edge_num);
canque_edge_incref(qedge);
- spin_lock_irqsave(&inends->ends_lock, flags);
- spin_lock(&outends->ends_lock);
- spin_lock(&qedge->fifo.fifo_lock);
+ can_spin_lock_irqsave(&inends->ends_lock, flags);
+ can_spin_lock(&outends->ends_lock);
+ can_spin_lock(&qedge->fifo.fifo_lock);
qedge->inends=inends;
list_add(&qedge->inpeers,&inends->inlist);
qedge->outends=outends;
- list_add(&qedge->outpeers,&outends->idle);
- spin_unlock(&qedge->fifo.fifo_lock);
- spin_unlock(&outends->ends_lock);
- spin_unlock_irqrestore(&inends->ends_lock, flags);
+ list_add(&qedge->outpeers,&outends->outlist);
+ list_add(&qedge->activepeers,&outends->idle);
+ can_spin_unlock(&qedge->fifo.fifo_lock);
+ can_spin_unlock(&outends->ends_lock);
+ can_spin_unlock_irqrestore(&inends->ends_lock, flags);
canque_notify_bothends(qedge, CANQUEUE_NOTIFY_ATTACH);
if(canque_fifo_test_and_set_fl(&qedge->fifo, READY))
int canqueue_disconnect_edge(struct canque_edge_t *qedge)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_ends_t *inends, *outends;
inends=qedge->inends;
- if(inends) spin_lock_irqsave(&inends->ends_lock,flags);
+ if(inends) can_spin_lock_irqsave(&inends->ends_lock,flags);
outends=qedge->outends;
- if(outends) spin_lock(&outends->ends_lock);
- spin_lock(&qedge->fifo.fifo_lock);
+ if(outends) can_spin_lock(&outends->ends_lock);
+ can_spin_lock(&qedge->fifo.fifo_lock);
if(atomic_read(&qedge->edge_used)==0) {
if(qedge->outends){
+ list_del(&qedge->activepeers);
+ mb(); /* memory barrier for list_empty use in canque_dead_func */
list_del(&qedge->outpeers);
qedge->outends=NULL;
}
}
ret=1;
} else ret=-1;
- spin_unlock(&qedge->fifo.fifo_lock);
- if(outends) spin_unlock(&outends->ends_lock);
- if(inends) spin_unlock_irqrestore(&inends->ends_lock,flags);
+ can_spin_unlock(&qedge->fifo.fifo_lock);
+ if(outends) can_spin_unlock(&outends->ends_lock);
+ if(inends) can_spin_unlock_irqrestore(&inends->ends_lock,flags);
DEBUGQUE("canqueue_disconnect_edge %d returned %d\n",qedge->edge_num,ret);
return ret;
}
+
+/**
+ * canqueue_block_inlist - block slot allocation of all outgoing edges of specified ends
+ * @qends: pointer to ends structure
+ *
+ * Sets the BLOCK flag on the FIFO of every edge whose input side is
+ * @qends; presumably the flag refuses further input-slot reservations
+ * on those edges — confirm against canque_fifo_get_inslot.
+ */
+void canqueue_block_inlist(struct canque_ends_t *qends)
+{
+ struct canque_edge_t *edge;
+
+ canque_for_each_inedge(qends, edge) {
+ canque_fifo_set_fl(&edge->fifo,BLOCK);
+ }
+}
+
+
+/**
+ * canqueue_block_outlist - block slot allocation of all incoming edges of specified ends
+ * @qends: pointer to ends structure
+ *
+ * Mirror of canqueue_block_inlist for the output side: sets BLOCK on
+ * the FIFO of every edge whose output side is @qends.
+ */
+void canqueue_block_outlist(struct canque_ends_t *qends)
+{
+ struct canque_edge_t *edge;
+
+ canque_for_each_outedge(qends, edge) {
+ canque_fifo_set_fl(&edge->fifo,BLOCK);
+ }
+}
+
+
+/**
+ * canqueue_ends_kill_inlist - sends request to die to all outgoing edges
+ * @qends: pointer to ends structure
+ * @send_rest: select, whether already allocated slots should be processed
+ * by FIFO output side
+ *
+ * Return Value: Non-zero value means, that not all edges could be immediately
+ * disconnected and that ends structure memory release has to be delayed
+ */
+int canqueue_ends_kill_inlist(struct canque_ends_t *qends, int send_rest)
+{
+ struct canque_edge_t *edge;
+
+ canque_for_each_inedge(qends, edge){
+ canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
+ if(send_rest){
+ /* hold an extra reference while the FIFO drains */
+ canque_edge_incref(edge);
+ if(!canque_fifo_test_and_set_fl(&edge->fifo, FREEONEMPTY)){
+ /* we newly set FREEONEMPTY: the drain path is expected to
+ drop our reference when the FIFO empties, so keep it
+ (continue) unless the FIFO is already empty AND we win
+ the race to clear the flag back ourselves */
+ if(!canque_fifo_test_fl(&edge->fifo, EMPTY))
+ continue;
+ if(!canque_fifo_test_and_clear_fl(&edge->fifo, FREEONEMPTY))
+ continue;
+ }
+ /* flag was already set, or we reclaimed it: drop our reference */
+ canque_edge_decref(edge);
+ }
+ }
+ return list_empty(&qends->inlist)?0:1;
+}
+
+
+/**
+ * canqueue_ends_kill_outlist - sends request to die to all incoming edges
+ * @qends: pointer to ends structure
+ *
+ * Return Value: Non-zero value means, that not all edges could be immediately
+ * disconnected and that ends structure memory release has to be delayed
+ */
+int canqueue_ends_kill_outlist(struct canque_ends_t *qends)
+{
+ struct canque_edge_t *edge;
+
+ canque_for_each_outedge(qends, edge){
+ /* no send_rest variant here: output edges are only asked to die */
+ canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
+ }
+ return list_empty(&qends->outlist)?0:1;
+}
+
+
+