* New CAN queues by Pavel Pisa - OCERA team member
* email:pisa@cmp.felk.cvut.cz
* This software is released under the GPL-License.
- * Version lincan-0.2 9 Jul 2003
+ * Version lincan-0.3 17 Jun 2004
*/
#include "../include/can.h"
atomic_t edge_num_cnt;
//#define CAN_DEBUG
+#undef CAN_DEBUG
#ifdef CAN_DEBUG
- #define DEBUGQUE(fmt,args...) printk(KERN_ERR "can_queue (debug): " fmt,\
+ #define DEBUGQUE(fmt,args...) can_printk(KERN_ERR "can_queue (debug): " fmt,\
##args)
#else
int canque_fifo_flush_slots(struct canque_fifo_t *fifo)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_slot_t *slot;
- spin_lock_irqsave(&fifo->fifo_lock, flags);
+ can_spin_lock_irqsave(&fifo->fifo_lock, flags);
+ /* Discard pending messages: chain the ready slots back onto the
+ free list (old free list is appended after the tail slot) */
slot=fifo->head;
if(slot){
*fifo->tail=fifo->flist;
}
canque_fifo_clear_fl(fifo,FULL);
+ /* return 1 only when the FIFO was not already marked EMPTY,
+ i.e. the flush actually changed the FIFO state */
ret=canque_fifo_test_and_set_fl(fifo,EMPTY)?0:1;
- spin_unlock_irqrestore(&fifo->fifo_lock, flags);
+ can_spin_unlock_irqrestore(&fifo->fifo_lock, flags);
return ret;
}
/**
- * canque_fifo_init_slots - initialize one CAN FIFO
+ * canque_fifo_init_slots - initializes slot chain of one CAN FIFO
* @fifo: pointer to the FIFO structure
- * @slotsnr: number of requested slots
*
* Return Value: The negative value indicates that there is no memory
* to allocate space for the requested number of the slots.
*/
-int canque_fifo_init_slots(struct canque_fifo_t *fifo, int slotsnr)
+int canque_fifo_init_slots(struct canque_fifo_t *fifo)
{
- int size;
struct canque_slot_t *slot;
- if(!slotsnr) slotsnr=MAX_BUF_LENGTH;
- size=sizeof(struct canque_slot_t)*slotsnr;
- fifo->entry=kmalloc(size,GFP_KERNEL);
- if(!fifo->entry) return -1;
+ int slotsnr=fifo->slotsnr;
+ if(!fifo->entry || !slotsnr) return -1;
slot=fifo->entry;
fifo->flist=slot;
while(--slotsnr){
return 1;
}
-/**
- * canque_fifo_done - frees slots allocated for CAN FIFO
- * @fifo: pointer to the FIFO structure
- */
-int canque_fifo_done(struct canque_fifo_t *fifo)
-{
- if(fifo->entry)
- kfree(fifo->entry);
- fifo->entry=NULL;
- return 1;
-}
-
/* atomic_dec_and_test(&qedge->edge_used);
void atomic_inc(&qedge->edge_used);
list_add_tail(struct list_head *new, struct list_head *head)
list_entry(ptr, type, member)
*/
+/* Out-of-line wrapper that drops one reference to an edge; it simply
+ * delegates to the __canque_edge_decref_body() implementation so callers
+ * outside this translation unit can use it. */
+void __canque_edge_decref(struct canque_edge_t *edge)
+{
+ __canque_edge_decref_body(edge);
+}
+
/**
* canque_get_inslot - finds one outgoing edge and allocates slot from it
* @qends: ends structure belonging to calling communication object
int canque_test_outslot(struct canque_ends_t *qends,
struct canque_edge_t **qedgep, struct canque_slot_t **slotp)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
int prio;
struct canque_edge_t *edge;
int ret;
- spin_lock_irqsave(&qends->ends_lock, flags);
+ can_spin_lock_irqsave(&qends->ends_lock, flags);
for(prio=CANQUEUE_PRIO_NR;--prio>=0;){
while(!list_empty(&qends->active[prio])){
- edge=list_entry(qends->active[prio].next,struct canque_edge_t,outpeers);
+ edge=list_entry(qends->active[prio].next,struct canque_edge_t,activepeers);
if(!canque_fifo_test_fl(&edge->fifo,DEAD)) {
+ /* The first test on unlocked FIFO */
+ if(canque_fifo_test_fl(&edge->fifo,EMPTY)) {
+ can_spin_lock(&edge->fifo.fifo_lock);
+ /* Test has to be repeated to ensure that EMPTY
+ state has not been negated when locking FIFO */
+ if(canque_fifo_test_fl(&edge->fifo,EMPTY)) {
+ canque_fifo_set_fl(&edge->fifo,INACTIVE);
+ list_del(&edge->activepeers);
+ list_add(&edge->activepeers,&qends->idle);
+ can_spin_unlock(&edge->fifo.fifo_lock);
+ continue;
+ }
+ can_spin_unlock(&edge->fifo.fifo_lock);
+ }
canque_edge_incref(edge);
- spin_unlock_irqrestore(&qends->ends_lock, flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock, flags);
*qedgep=edge;
DEBUGQUE("canque_test_outslot found edge %d\n",edge->edge_num);
ret=canque_fifo_test_outslot(&edge->fifo, slotp);
if(ret>=0)
return ret;
- spin_lock_irqsave(&qends->ends_lock, flags);
- }
- spin_lock(&edge->fifo.fifo_lock);
- if(canque_fifo_test_and_set_fl(&edge->fifo,INACTIVE)) {
- list_del(&edge->outpeers);
- list_add(&edge->outpeers,&qends->idle);
+
+ canque_edge_decref(edge);
+ can_spin_lock_irqsave(&qends->ends_lock, flags);
+ } else {
+ can_spin_lock(&edge->fifo.fifo_lock);
+ canque_fifo_set_fl(&edge->fifo,INACTIVE);
+ list_del(&edge->activepeers);
+ list_add(&edge->activepeers,&qends->idle);
+ can_spin_unlock(&edge->fifo.fifo_lock);
}
- spin_unlock(&edge->fifo.fifo_lock);
}
}
- spin_unlock_irqrestore(&qends->ends_lock, flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock, flags);
*qedgep=NULL;
DEBUGQUE("canque_test_outslot no ready slot\n");
return -1;
struct canque_edge_t *qedge, struct canque_slot_t *slot)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
ret=canque_fifo_free_outslot(&qedge->fifo, slot);
if(ret&CAN_FIFOF_EMPTY){
canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
}
if(ret&CAN_FIFOF_FULL)
canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE);
- spin_lock_irqsave(&qends->ends_lock, flags);
+ can_spin_lock_irqsave(&qends->ends_lock, flags);
if((ret&CAN_FIFOF_EMPTY) || CANQUE_ROUNDROB ){
- spin_lock(&qedge->fifo.fifo_lock);
+ can_spin_lock(&qedge->fifo.fifo_lock);
if(canque_fifo_test_fl(&qedge->fifo,EMPTY)){
canque_fifo_set_fl(&qedge->fifo,INACTIVE);
- list_del(&qedge->outpeers);
- list_add(&qedge->outpeers,&qends->idle);
+ list_del(&qedge->activepeers);
+ list_add(&qedge->activepeers,&qends->idle);
} else{
- list_del(&qedge->outpeers);
- list_add_tail(&qedge->outpeers,&qends->active[qedge->edge_prio]);
+ list_del(&qedge->activepeers);
+ list_add_tail(&qedge->activepeers,&qends->active[qedge->edge_prio]);
}
- spin_unlock(&qedge->fifo.fifo_lock);
+ can_spin_unlock(&qedge->fifo.fifo_lock);
}
- spin_unlock_irqrestore(&qends->ends_lock, flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock, flags);
canque_edge_decref(qedge);
DEBUGQUE("canque_free_outslot for edge %d returned %d\n",qedge->edge_num,ret);
return ret;
unsigned long filtid, unsigned long filtmask, int filtflags)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
- spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
+ can_spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
if(!(filtflags&MSG_PROCESSLOCAL) && (processlocal<2))
filtflags |= MSG_LOCAL_MASK;
if(canque_fifo_test_fl(&qedge->fifo,DEAD)) ret=-1;
else ret=canque_fifo_test_and_set_fl(&qedge->fifo,BLOCK)?1:0;
- spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
+ can_spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
if(ret>=0){
canque_notify_bothends(qedge,CANQUEUE_NOTIFY_FILTCH);
}
- spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
+ can_spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
if(!ret) canque_fifo_clear_fl(&qedge->fifo,BLOCK);
- spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
+ can_spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
DEBUGQUE("canque_set_filt for edge %d, ID %ld, mask %ld, flags %d returned %d\n",
qedge->edge_num,filtid,filtmask,filtflags,ret);
int canque_flush(struct canque_edge_t *qedge)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
ret=canque_fifo_flush_slots(&qedge->fifo);
if(ret){
canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE);
- spin_lock_irqsave(&qedge->outends->ends_lock, flags);
- spin_lock(&qedge->fifo.fifo_lock);
+ can_spin_lock_irqsave(&qedge->outends->ends_lock, flags);
+ can_spin_lock(&qedge->fifo.fifo_lock);
if(canque_fifo_test_fl(&qedge->fifo,EMPTY)){
- list_del(&qedge->outpeers);
- list_add(&qedge->outpeers,&qedge->outends->idle);
+ list_del(&qedge->activepeers);
+ list_add(&qedge->activepeers,&qedge->outends->idle);
}
- spin_unlock(&qedge->fifo.fifo_lock);
- spin_unlock_irqrestore(&qedge->outends->ends_lock, flags);
+ can_spin_unlock(&qedge->fifo.fifo_lock);
+ can_spin_unlock_irqrestore(&qedge->outends->ends_lock, flags);
}
DEBUGQUE("canque_flush for edge %d returned %d\n",qedge->edge_num,ret);
return ret;
int canqueue_ends_init_gen(struct canque_ends_t *qends)
{
int i;
+ qends->ends_flags=0;
+ /* one active edge list per priority level */
for(i=CANQUEUE_PRIO_NR;--i>=0;){
INIT_LIST_HEAD(&qends->active[i]);
}
INIT_LIST_HEAD(&qends->idle);
INIT_LIST_HEAD(&qends->inlist);
- spin_lock_init(&qends->ends_lock);
+ /* outlist holds edges for which these ends are the output side
+ (filled in canqueue_connect_edge) */
+ INIT_LIST_HEAD(&qends->outlist);
+ can_spin_lock_init(&qends->ends_lock);
return 0;
}
*/
int canqueue_connect_edge(struct canque_edge_t *qedge, struct canque_ends_t *inends, struct canque_ends_t *outends)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
if(qedge == NULL) return -1;
DEBUGQUE("canqueue_connect_edge %d\n",qedge->edge_num);
canque_edge_incref(qedge);
- spin_lock_irqsave(&inends->ends_lock, flags);
- spin_lock(&outends->ends_lock);
- spin_lock(&qedge->fifo.fifo_lock);
+ flags=canque_edge_lock_both_ends(inends, outends);
+ can_spin_lock(&qedge->fifo.fifo_lock);
qedge->inends=inends;
list_add(&qedge->inpeers,&inends->inlist);
qedge->outends=outends;
- list_add(&qedge->outpeers,&outends->idle);
- spin_unlock(&qedge->fifo.fifo_lock);
- spin_unlock(&outends->ends_lock);
- spin_unlock_irqrestore(&inends->ends_lock, flags);
+ list_add(&qedge->outpeers,&outends->outlist);
+ list_add(&qedge->activepeers,&outends->idle);
+ can_spin_unlock(&qedge->fifo.fifo_lock);
+ canque_edge_unlock_both_ends(inends, outends, flags);
canque_notify_bothends(qedge, CANQUEUE_NOTIFY_ATTACH);
if(canque_fifo_test_and_set_fl(&qedge->fifo, READY))
int canqueue_disconnect_edge(struct canque_edge_t *qedge)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_ends_t *inends, *outends;
inends=qedge->inends;
- if(inends) spin_lock_irqsave(&inends->ends_lock,flags);
outends=qedge->outends;
- if(outends) spin_lock(&outends->ends_lock);
- spin_lock(&qedge->fifo.fifo_lock);
+
+ if(inends && outends) {
+ flags=canque_edge_lock_both_ends(inends, outends);
+ } else {
+ DEBUGQUE("canqueue_disconnect_edge called with not fully connected edge");
+ if(inends) can_spin_lock_irqsave(&inends->ends_lock,flags);
+ if(outends) can_spin_lock(&outends->ends_lock);
+ flags=0;
+ }
+
+ can_spin_lock(&qedge->fifo.fifo_lock);
if(atomic_read(&qedge->edge_used)==0) {
if(qedge->outends){
+ list_del(&qedge->activepeers);
+ mb(); /* memory barrier for list_empty use in canque_dead_func */
list_del(&qedge->outpeers);
qedge->outends=NULL;
}
}
ret=1;
} else ret=-1;
- spin_unlock(&qedge->fifo.fifo_lock);
- if(outends) spin_unlock(&outends->ends_lock);
- if(inends) spin_unlock_irqrestore(&inends->ends_lock,flags);
+ can_spin_unlock(&qedge->fifo.fifo_lock);
+
+ if(inends && outends) {
+ canque_edge_unlock_both_ends(inends, outends, flags);
+ } else {
+ if(outends) can_spin_unlock(&outends->ends_lock);
+ if(inends) can_spin_unlock_irqrestore(&inends->ends_lock,flags);
+ }
+
DEBUGQUE("canqueue_disconnect_edge %d returned %d\n",qedge->edge_num,ret);
return ret;
}
+
+/**
+ * canqueue_block_inlist - block slot allocation of all outgoing edges of specified ends
+ * @qends: pointer to ends structure
+ */
+void canqueue_block_inlist(struct canque_ends_t *qends)
+{
+ struct canque_edge_t *edge;
+
+ canque_for_each_inedge(qends, edge) {
+ /* setting BLOCK disables new slot allocation on this edge */
+ canque_fifo_set_fl(&edge->fifo,BLOCK);
+ }
+}
+
+
+/**
+ * canqueue_block_outlist - block slot allocation of all incoming edges of specified ends
+ * @qends: pointer to ends structure
+ */
+void canqueue_block_outlist(struct canque_ends_t *qends)
+{
+ struct canque_edge_t *edge;
+
+ canque_for_each_outedge(qends, edge) {
+ /* setting BLOCK disables new slot allocation on this edge */
+ canque_fifo_set_fl(&edge->fifo,BLOCK);
+ }
+}
+
+
+/**
+ * canqueue_ends_kill_inlist - sends request to die to all outgoing edges
+ * @qends: pointer to ends structure
+ * @send_rest: select, whether already allocated slots should be processed
+ * by FIFO output side
+ *
+ * Return Value: Non-zero value means that not all edges could be immediately
+ * disconnected and that ends structure memory release has to be delayed
+ */
+int canqueue_ends_kill_inlist(struct canque_ends_t *qends, int send_rest)
+{
+ struct canque_edge_t *edge;
+
+ canque_for_each_inedge(qends, edge){
+ canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
+ if(send_rest){
+ /* hold an extra reference so the edge outlives its pending
+ FIFO content */
+ canque_edge_incref(edge);
+ if(!canque_fifo_test_and_set_fl(&edge->fifo, FREEONEMPTY)){
+ /* FREEONEMPTY newly set by us: if the FIFO still holds
+ slots, keep the reference — it is released when the
+ flag is processed on empty FIFO (NOTE(review):
+ confirm against the output-side handler) */
+ if(!canque_fifo_test_fl(&edge->fifo, EMPTY))
+ continue;
+ /* FIFO already empty — try to take FREEONEMPTY back;
+ if another path consumed it, that path drops the ref */
+ if(!canque_fifo_test_and_clear_fl(&edge->fifo, FREEONEMPTY))
+ continue;
+ }
+ canque_edge_decref(edge);
+ }
+ }
+ return list_empty(&qends->inlist)?0:1;
+}
+
+
+/**
+ * canqueue_ends_kill_outlist - sends request to die to all incoming edges
+ * @qends: pointer to ends structure
+ *
+ * Return Value: Non-zero value means that not all edges could be immediately
+ * disconnected and that ends structure memory release has to be delayed
+ */
+int canqueue_ends_kill_outlist(struct canque_ends_t *qends)
+{
+ struct canque_edge_t *edge;
+
+ canque_for_each_outedge(qends, edge){
+ canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
+ }
+ /* a non-empty outlist tells the caller to delay ends release */
+ return list_empty(&qends->outlist)?0:1;
+}
+
+
+/**
+ * canqueue_ends_filt_conjuction - computes conjunction of incoming edges filters
+ * @qends: pointer to ends structure
+ * @filt: pointer to the filter structure filled by computed filters conjunction
+ *
+ * Return Value: Number of incoming edges
+ */
+int canqueue_ends_filt_conjuction(struct canque_ends_t *qends, struct canfilt_t *filt)
+{
+ struct canque_edge_t *edge;
+ int cnt=0;
+ unsigned long filtid=0;
+ unsigned long filtmask=~0;
+ unsigned long local_only=canque_filtid2internal(0,MSG_LOCAL);
+
+ canque_for_each_inedge(qends, edge){
+ /* skip edges processing only local messages */
+ if(edge->filtid & edge->filtmask & local_only)
+ continue;
+
+ if(!cnt++)
+ filtid = edge->filtid; /* first edge seeds the common ID bits */
+ else
+ filtmask &= ~(filtid ^ edge->filtid); /* clear bits where IDs disagree */
+
+ filtmask &= edge->filtmask;
+ }
+
+ filt->id = filtid & MSG_ID_MASK;
+ filt->mask = filtmask & MSG_ID_MASK;
+ /* flag bits (EXT/RTR) live above bit 28 of the internal representation;
+ NOTE(review): must stay in sync with canque_filtid2internal() */
+ filtid >>= 28;
+ filtmask >>= 28;
+ filt->flags = filtid & MSG_EXT;
+ if(filtmask & (MSG_EXT))
+ filt->flags |= MSG_EXT_MASK;
+ if(filtid & (MSG_RTR<<1))
+ filt->flags |= MSG_RTR<<1;
+ if(filtmask & (MSG_RTR<<1))
+ filt->flags |= MSG_RTR_MASK;
+ return cnt;
+}
+
+
+/**
+ * canqueue_ends_flush_inlist - flushes all messages in incoming edges
+ * @qends: pointer to ends structure
+ *
+ * Return Value: Negative value informs about unsuccessful result
+ */
+int canqueue_ends_flush_inlist(struct canque_ends_t *qends)
+{
+ struct canque_edge_t *edge;
+
+ canque_for_each_inedge(qends, edge){
+ /* NOTE(review): canque_flush() result is ignored, so this
+ function currently always reports success (0) */
+ canque_flush(edge);
+ }
+ return 0;
+}
+
+
+/**
+ * canqueue_ends_flush_outlist - flushes all messages in outgoing edges
+ * @qends: pointer to ends structure
+ *
+ * Return Value: Negative value informs about unsuccessful result
+ */
+int canqueue_ends_flush_outlist(struct canque_ends_t *qends)
+{
+ struct canque_edge_t *edge;
+
+ canque_for_each_outedge(qends, edge){
+ /* NOTE(review): canque_flush() result is ignored, so this
+ function currently always reports success (0) */
+ canque_flush(edge);
+ }
+ return 0;
+}
+
+
+
+