1 /* can_queue.c - CAN message queues
2 * Linux CAN-bus device driver.
3 * New CAN queues by Pavel Pisa - OCERA team member
4 * email:pisa@cmp.felk.cvut.cz
5 * This software is released under the GPL-License.
6 * Version lincan-0.2 9 Jul 2003
10 #include <linux/module.h>
11 #include <linux/version.h>
12 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
13 #include <linux/malloc.h>
15 #include <linux/slab.h>
17 #include <linux/wait.h>
18 #include "../include/can.h"
19 #include "../include/can_queue.h"
22 #define DEBUGQUE(fmt,args...) printk(KERN_ERR "can_queue (debug): " fmt,\
25 atomic_t edge_num_cnt;
27 #define DEBUGQUE(fmt,args...)
30 #define CANQUE_ROUNDROB 1
33 * canque_fifo_flush_slots - free all ready slots from the FIFO
34 * @fifo: pointer to the FIFO structure
36 * The caller should be prepared to handle situations where some
37 * slots are held by input- or output-side slot processing.
38 * These slots cannot be flushed or their processing interrupted.
40 * Return Value: A nonzero value indicates that the queue was not
41 * empty before the function call.
43 int canque_fifo_flush_slots(struct canque_fifo_t *fifo)
47 struct canque_slot_t *slot;
48 spin_lock_irqsave(&fifo->fifo_lock, flags);
50 *fifo->tail=fifo->flist;
53 fifo->tail=&fifo->head;
54 ret=canque_fifo_test_and_set_fl(fifo,EMPTY);
55 spin_unlock_irqrestore(&fifo->fifo_lock, flags);
61 * canque_fifo_init_slots - initialize one CAN FIFO
62 * @fifo: pointer to the FIFO structure
63 * @slotsnr: number of requested slots
65 * Return Value: A negative value indicates that there is not enough
66 * memory to allocate space for the requested number of slots.
68 int canque_fifo_init_slots(struct canque_fifo_t *fifo, int slotsnr)
71 struct canque_slot_t *slot;
72 if(!slotsnr) slotsnr=MAX_BUF_LENGTH;
73 size=sizeof(struct canque_slot_t)*slotsnr;
74 fifo->entry=kmalloc(size,GFP_KERNEL);
75 if(!fifo->entry) return -1;
84 fifo->tail=&fifo->head;
85 canque_fifo_set_fl(fifo,EMPTY);
90 * canque_fifo_done - frees slots allocated for CAN FIFO
91 * @fifo: pointer to the FIFO structure
93 int canque_fifo_done(struct canque_fifo_t *fifo)
101 /* atomic_dec_and_test(&qedge->edge_used);
102 void atomic_inc(&qedge->edge_used);
103 list_add_tail(struct list_head *new, struct list_head *head)
104 list_for_each(edge,qends->inlist);
105 list_entry(ptr, type, member)
108 int canque_get_inslot(struct canque_ends_t *qends,
109 struct canque_edge_t **qedgep, struct canque_slot_t **slotp, int cmd)
113 struct canque_edge_t *edge;
115 spin_lock_irqsave(&qends->ends_lock, flags);
116 if(!list_empty(&qends->inlist)){
117 edge=list_entry(qends->inlist.next,struct canque_edge_t,inpeers);
118 if(!canque_fifo_test_fl(&edge->fifo,BLOCK)&&!canque_fifo_test_fl(&edge->fifo,DEAD)){
119 atomic_inc(&edge->edge_used);
120 spin_unlock_irqrestore(&qends->ends_lock, flags);
121 ret=canque_fifo_get_inslot(&edge->fifo, slotp, cmd);
124 DEBUGQUE("canque_get_inslot cmd=%d found edge %d\n",cmd,edge->edge_num);
128 spin_lock_irqsave(&qends->ends_lock, flags);
129 if(atomic_dec_and_test(&edge->edge_used))
130 canque_notify_bothends(edge,CANQUEUE_NOTIFY_NOUSR);
133 spin_unlock_irqrestore(&qends->ends_lock, flags);
135 DEBUGQUE("canque_get_inslot cmd=%d failed\n",cmd);
139 int canque_get_inslot4id(struct canque_ends_t *qends,
140 struct canque_edge_t **qedgep, struct canque_slot_t **slotp,
141 int cmd, unsigned long id, int prio)
145 struct canque_edge_t *edge, *bestedge=NULL;
146 struct list_head *entry;
148 spin_lock_irqsave(&qends->ends_lock, flags);
149 list_for_each(entry,&qends->inlist){
150 edge=list_entry(entry,struct canque_edge_t,inpeers);
151 if(canque_fifo_test_fl(&edge->fifo,BLOCK)||canque_fifo_test_fl(&edge->fifo,DEAD))
153 if((id^edge->filtid)&edge->filtmask)
156 if(bestedge->filtmask){
157 if (!edge->filtmask) continue;
164 if(bestedge->edge_prio<edge->edge_prio){
165 if(edge->edge_prio>prio) continue;
167 if(bestedge->edge_prio<=prio) continue;
172 if((edge=bestedge)!=NULL){
173 atomic_inc(&edge->edge_used);
174 spin_unlock_irqrestore(&qends->ends_lock, flags);
175 ret=canque_fifo_get_inslot(&edge->fifo, slotp, cmd);
178 DEBUGQUE("canque_get_inslot4id cmd=%d id=%ld prio=%d found edge %d\n",cmd,id,prio,edge->edge_num);
181 spin_lock_irqsave(&qends->ends_lock, flags);
182 if(atomic_dec_and_test(&edge->edge_used))
183 canque_notify_bothends(edge,CANQUEUE_NOTIFY_NOUSR);
185 spin_unlock_irqrestore(&qends->ends_lock, flags);
187 DEBUGQUE("canque_get_inslot4id cmd=%d id=%ld prio=%d failed\n",cmd,id,prio);
192 int canque_put_inslot(struct canque_ends_t *qends,
193 struct canque_edge_t *qedge, struct canque_slot_t *slot)
197 ret=canque_fifo_put_inslot(&qedge->fifo,slot);
199 canque_activate_edge(qends,qedge);
200 canque_notify_outends(qedge,CANQUEUE_NOTIFY_PROC);
202 spin_lock_irqsave(&qends->ends_lock, flags);
203 if(atomic_dec_and_test(&qedge->edge_used))
204 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
205 spin_unlock_irqrestore(&qends->ends_lock, flags);
206 DEBUGQUE("canque_put_inslot for edge %d returned %d\n",qedge->edge_num,ret);
210 int canque_abort_inslot(struct canque_ends_t *qends,
211 struct canque_edge_t *qedge, struct canque_slot_t *slot)
215 ret=canque_fifo_abort_inslot(&qedge->fifo,slot);
217 canque_notify_outends(qedge,CANQUEUE_NOTIFY_SPACE);
219 spin_lock_irqsave(&qends->ends_lock, flags);
220 if(atomic_dec_and_test(&qedge->edge_used))
221 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
222 spin_unlock_irqrestore(&qends->ends_lock, flags);
223 DEBUGQUE("canque_abort_inslot for edge %d returned %d\n",qedge->edge_num,ret);
227 int canque_filter_msg2edges(struct canque_ends_t *qends, struct canmsg_t *msg)
232 struct canque_edge_t *edge;
233 struct list_head *entry;
234 struct canque_slot_t *slot;
236 DEBUGQUE("canque_filter_msg2edges for msg ID %ld\n",msg->id);
237 spin_lock_irqsave(&qends->ends_lock, flags);
238 list_for_each(entry,&qends->inlist){
239 edge=list_entry(entry,struct canque_edge_t,inpeers);
240 if(canque_fifo_test_fl(&edge->fifo,BLOCK)||canque_fifo_test_fl(&edge->fifo,DEAD))
242 if((msg->id^edge->filtid)&edge->filtmask)
244 atomic_inc(&edge->edge_used);
245 spin_unlock_irqrestore(&qends->ends_lock, flags);
246 ret=canque_fifo_get_inslot(&edge->fifo, &slot, 0);
250 ret=canque_fifo_put_inslot(&edge->fifo,slot);
252 canque_activate_edge(qends,edge);
253 canque_notify_outends(edge,CANQUEUE_NOTIFY_PROC);
257 spin_lock_irqsave(&qends->ends_lock, flags);
258 if(atomic_dec_and_test(&edge->edge_used))
259 canque_notify_bothends(edge,CANQUEUE_NOTIFY_NOUSR);
261 spin_unlock_irqrestore(&qends->ends_lock, flags);
262 DEBUGQUE("canque_filter_msg2edges sent msg ID %ld to %d edges\n",msg->id,destnr);
266 int canque_test_outslot(struct canque_ends_t *qends,
267 struct canque_edge_t **qedgep, struct canque_slot_t **slotp)
271 struct canque_edge_t *edge;
273 spin_lock_irqsave(&qends->ends_lock, flags);
274 for(prio=CANQUEUE_PRIO_NR;--prio>=0;){
275 if(!list_empty(&qends->active[prio])){
276 edge=list_entry(qends->active[prio].next,struct canque_edge_t,outpeers);
277 atomic_inc(&edge->edge_used);
278 spin_unlock_irqrestore(&qends->ends_lock, flags);
280 DEBUGQUE("canque_test_outslot found edge %d\n",edge->edge_num);
281 return canque_fifo_test_outslot(&edge->fifo, slotp);
284 spin_unlock_irqrestore(&qends->ends_lock, flags);
286 DEBUGQUE("canque_test_outslot no ready slot\n");
290 int canque_free_outslot(struct canque_ends_t *qends,
291 struct canque_edge_t *qedge, struct canque_slot_t *slot)
295 ret=canque_fifo_free_outslot(&qedge->fifo, slot);
296 if(ret&CAN_FIFOF_EMPTY){
297 canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
299 if(ret&CAN_FIFOF_FULL)
300 canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE);
301 spin_lock_irqsave(&qends->ends_lock, flags);
302 if((ret&CAN_FIFOF_EMPTY) || CANQUE_ROUNDROB){
303 spin_lock(&qedge->fifo.fifo_lock);
304 if(canque_fifo_test_fl(&qedge->fifo,EMPTY)){
305 list_del(&qedge->outpeers);
306 list_add(&qedge->outpeers,&qends->idle);
310 list_del(&qedge->outpeers);
311 list_add_tail(&qedge->outpeers,&qends->active[qedge->edge_prio]);
313 #endif /*CANQUE_ROUNDROB*/
314 spin_unlock(&qedge->fifo.fifo_lock);
316 if(atomic_dec_and_test(&qedge->edge_used))
317 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
318 spin_unlock_irqrestore(&qends->ends_lock, flags);
319 DEBUGQUE("canque_free_outslot for edge %d returned %d\n",qedge->edge_num,ret);
323 int canque_again_outslot(struct canque_ends_t *qends,
324 struct canque_edge_t *qedge, struct canque_slot_t *slot)
328 ret=canque_fifo_again_outslot(&qedge->fifo, slot);
329 if(ret&CAN_FIFOF_EMPTY){
330 canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
332 if(ret&CAN_FIFOF_FULL)
333 canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE);
334 spin_lock_irqsave(&qends->ends_lock, flags);
335 if(atomic_dec_and_test(&qedge->edge_used))
336 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
337 spin_unlock_irqrestore(&qends->ends_lock, flags);
338 DEBUGQUE("canque_again_outslot for edge %d returned %d\n",qedge->edge_num,ret);
342 int canque_set_filt(struct canque_edge_t *qedge,
343 unsigned long filtid, unsigned long filtmask)
348 spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
349 atomic_inc(&qedge->edge_used);
350 qedge->filtid=filtid;
351 qedge->filtmask=filtmask;
352 if(canque_fifo_test_fl(&qedge->fifo,DEAD)) ret=-1;
353 else ret=canque_fifo_test_and_set_fl(&qedge->fifo,BLOCK)?1:0;
355 spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
357 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_FILTCH);
359 spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
360 if(!ret)canque_fifo_clear_fl(&qedge->fifo,BLOCK);
361 if(atomic_dec_and_test(&qedge->edge_used))
362 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
363 spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
365 DEBUGQUE("canque_set_filt for edge %d, ID %ld and mask %ld returned %d\n",qedge->edge_num,filtid,filtmask,ret);
369 int canque_flush(struct canque_edge_t *qedge)
374 atomic_inc(&qedge->edge_used);
375 ret=canque_fifo_flush_slots(&qedge->fifo);
377 canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
378 canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE);
379 spin_lock_irqsave(&qedge->outends->ends_lock, flags);
380 spin_lock(&qedge->fifo.fifo_lock);
381 if(canque_fifo_test_fl(&qedge->fifo,EMPTY)){
382 list_del(&qedge->outpeers);
383 list_add(&qedge->outpeers,&qedge->outends->idle);
385 if(atomic_dec_and_test(&qedge->edge_used))
386 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
387 spin_unlock(&qedge->fifo.fifo_lock);
388 spin_unlock_irqrestore(&qedge->outends->ends_lock, flags);
390 DEBUGQUE("canque_flush for edge %d returned %d\n",qedge->edge_num,ret);
394 int canqueue_ends_init_gen(struct canque_ends_t *qends)
397 for(i=CANQUEUE_PRIO_NR;--i>=0;){
398 INIT_LIST_HEAD(&qends->active[i]);
400 INIT_LIST_HEAD(&qends->idle);
401 INIT_LIST_HEAD(&qends->inlist);
402 spin_lock_init(&qends->ends_lock);
407 void canqueue_notify_kern(struct canque_ends_t *qends, struct canque_edge_t *qedge, int what)
409 DEBUGQUE("canqueue_notify_kern for edge %d and event %d\n",qedge->edge_num,what);
411 case CANQUEUE_NOTIFY_EMPTY:
412 wake_up_interruptible(&qends->endinfo.fileinfo.emptyq);
414 case CANQUEUE_NOTIFY_SPACE:
415 wake_up_interruptible(&qends->endinfo.fileinfo.writeq);
417 case CANQUEUE_NOTIFY_PROC:
418 wake_up_interruptible(&qends->endinfo.fileinfo.readq);
420 case CANQUEUE_NOTIFY_NOUSR:
421 wake_up_interruptible(&qends->endinfo.fileinfo.readq);
422 wake_up_interruptible(&qends->endinfo.fileinfo.writeq);
423 wake_up_interruptible(&qends->endinfo.fileinfo.emptyq);
425 case CANQUEUE_NOTIFY_DEAD:
426 if(atomic_read(&qedge->edge_used)>0)
427 atomic_dec(&qedge->edge_used);
429 case CANQUEUE_NOTIFY_ATACH:
430 atomic_inc(&qedge->edge_used);
435 int canqueue_ends_init_kern(struct canque_ends_t *qends)
437 canqueue_ends_init_gen(qends);
439 init_waitqueue_head(&qends->endinfo.fileinfo.readq);
440 init_waitqueue_head(&qends->endinfo.fileinfo.writeq);
441 init_waitqueue_head(&qends->endinfo.fileinfo.emptyq);
442 qends->notify=canqueue_notify_kern;
443 DEBUGQUE("canqueue_ends_init_kern\n");
448 int canque_get_inslot4id_wait_kern(struct canque_ends_t *qends,
449 struct canque_edge_t **qedgep, struct canque_slot_t **slotp,
450 int cmd, unsigned long id, int prio)
453 DEBUGQUE("canque_get_inslot4id_wait_kern for cmd %d, id %ld, prio %d\n",cmd,id,prio);
454 wait_event_interruptible((qends->endinfo.fileinfo.writeq),
455 (ret=canque_get_inslot4id(qends,qedgep,slotp,cmd,id,prio))!=-1);
459 int canque_get_outslot_wait_kern(struct canque_ends_t *qends,
460 struct canque_edge_t **qedgep, struct canque_slot_t **slotp)
463 DEBUGQUE("canque_get_outslot_wait_kern\n");
464 wait_event_interruptible((qends->endinfo.fileinfo.readq),
465 (ret=canque_test_outslot(qends,qedgep,slotp))!=-1);
469 int canque_sync_wait_kern(struct canque_ends_t *qends, struct canque_edge_t *qedge)
472 DEBUGQUE("canque_sync_wait_kern\n");
473 wait_event_interruptible((qends->endinfo.fileinfo.emptyq),
474 (ret=canque_fifo_test_fl(&qedge->fifo,EMPTY)));
479 struct canque_edge_t *canque_new_edge_kern(int slotsnr)
481 struct canque_edge_t *qedge;
482 qedge = (struct canque_edge_t *)kmalloc(sizeof(struct canque_edge_t), GFP_KERNEL);
483 if(qedge == NULL) return NULL;
485 memset(qedge,0,sizeof(struct canque_edge_t));
486 if(canque_fifo_init_slots(&qedge->fifo, slotsnr)<0){
488 DEBUGQUE("canque_new_edge_kern failed\n");
491 atomic_set(&qedge->edge_used,0);
494 qedge->edge_prio = 0;
496 /* not exactly clean, but enough for debugging */
497 atomic_inc(&edge_num_cnt);
498 qedge->edge_num=atomic_read(&edge_num_cnt);
499 #endif /* CAN_DEBUG */
500 DEBUGQUE("canque_new_edge_kern %d\n",qedge->edge_num);
504 int canqueue_connect_edge(struct canque_edge_t *qedge, struct canque_ends_t *inends, struct canque_ends_t *outends)
507 if(qedge == NULL) return -1;
508 DEBUGQUE("canqueue_connect_edge %d\n",qedge->edge_num);
509 atomic_inc(&qedge->edge_used);
510 spin_lock_irqsave(&inends->ends_lock, flags);
511 spin_lock(&outends->ends_lock);
512 spin_lock(&qedge->fifo.fifo_lock);
513 qedge->inends=inends;
514 list_add(&qedge->inpeers,&inends->inlist);
515 qedge->outends=outends;
516 list_add(&qedge->outpeers,&outends->idle);
517 spin_unlock(&qedge->fifo.fifo_lock);
518 spin_unlock(&outends->ends_lock);
519 spin_unlock_irqrestore(&inends->ends_lock, flags);
520 canque_notify_bothends(qedge, CANQUEUE_NOTIFY_ATACH);
522 spin_lock_irqsave(&qedge->fifo.fifo_lock, flags);
523 if(atomic_dec_and_test(&qedge->edge_used))
524 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
525 spin_unlock_irqrestore(&qedge->fifo.fifo_lock, flags);
529 int canqueue_disconnect_edge(struct canque_edge_t *qedge)
533 spin_lock_irqsave(&qedge->inends->ends_lock,flags);
534 spin_lock(&qedge->outends->ends_lock);
535 spin_lock(&qedge->fifo.fifo_lock);
536 if(atomic_read(&qedge->edge_used)==0) {
538 list_del(&qedge->outpeers);
542 list_del(&qedge->inpeers);
547 spin_unlock(&qedge->fifo.fifo_lock);
548 spin_unlock(&qedge->outends->ends_lock);
549 spin_unlock_irqrestore(&qedge->inends->ends_lock,flags);
550 DEBUGQUE("canqueue_disconnect_edge %d returned %d\n",qedge->edge_num,ret);
554 int canqueue_disconnect_edge_kern(struct canque_ends_t *qends, struct canque_edge_t *qedge)
556 canque_fifo_set_fl(&qedge->fifo,BLOCK);
557 DEBUGQUE("canqueue_disconnect_edge_kern %d called\n",qedge->edge_num);
558 if(!canque_fifo_test_and_set_fl(&qedge->fifo,DEAD)){
559 canque_notify_bothends(qedge, CANQUEUE_NOTIFY_DEAD);
560 if(atomic_read(&qedge->edge_used)>0)
561 atomic_dec(&qedge->edge_used);
562 DEBUGQUE("canqueue_disconnect_edge_kern %d waiting\n",qedge->edge_num);
563 wait_event_interruptible((qends->endinfo.fileinfo.emptyq),
564 (canqueue_disconnect_edge(qedge)>=0));
567 DEBUGQUE("canqueue_disconnect_edge_kern failed\n");
573 int canqueue_disconnect_list_kern(struct canque_ends_t *qends, struct list_head *list)
575 struct canque_edge_t *edge;
578 spin_lock_irqsave(&qends->ends_lock,flags);
579 if(list_empty(list)){
580 spin_unlock_irqrestore(&qends->ends_lock,flags);
583 if(list == &qends->inlist)
584 edge=list_entry(list->next,struct canque_edge_t,inpeers);
586 edge=list_entry(list->next,struct canque_edge_t,outpeers);
587 atomic_inc(&edge->edge_used);
588 spin_unlock_irqrestore(&qends->ends_lock,flags);
589 if(canqueue_disconnect_edge_kern(qends, edge)>=0) {
590 /* Free edge memory */
591 canque_fifo_done(&edge->fifo);
594 DEBUGQUE("canqueue_disconnect_list_kern in troubles\n");
595 DEBUGQUE("the edge %d has usage count %d and flags %ld\n",edge->edge_num,atomic_read(&edge->edge_used),edge->fifo.fifo_flags);
601 void canqueue_block_list(struct canque_ends_t *qends, struct list_head *list)
603 struct canque_edge_t *edge;
604 struct list_head *entry;
607 spin_lock_irqsave(&qends->ends_lock, flags);
608 list_for_each(entry,&qends->inlist){
609 if(list == &qends->inlist)
610 edge=list_entry(list->next,struct canque_edge_t,inpeers);
612 edge=list_entry(list->next,struct canque_edge_t,outpeers);
613 canque_fifo_set_fl(&edge->fifo,BLOCK);
614 /*spin_unlock_irqrestore(&qends->ends_lock, flags);*/
615 /* Loop can be break by interrupts and preempts there */
616 /*spin_lock_irqsave(&qends->ends_lock, flags);*/
618 spin_unlock_irqrestore(&qends->ends_lock, flags);
622 int canqueue_ends_done_kern(struct canque_ends_t *qends, int sync)
627 DEBUGQUE("canqueue_ends_done_kern\n");
628 spin_lock_irqsave(&qends->ends_lock,flags);
629 canqueue_block_list(qends, &qends->idle);
630 for(i=CANQUEUE_PRIO_NR;--i>=0;){
631 canqueue_block_list(qends, &qends->active[i]);
633 canqueue_block_list(qends, &qends->idle);
634 canqueue_block_list(qends, &qends->inlist);
635 spin_unlock_irqrestore(&qends->ends_lock,flags);
637 for(i=CANQUEUE_PRIO_NR;--i>=0;){
638 canqueue_disconnect_list_kern(qends, &qends->active[i]);
640 canqueue_disconnect_list_kern(qends, &qends->idle);
641 canqueue_disconnect_list_kern(qends, &qends->inlist);
643 wake_up_interruptible(&qends->endinfo.fileinfo.readq);
644 wake_up_interruptible(&qends->endinfo.fileinfo.writeq);
645 wake_up_interruptible(&qends->endinfo.fileinfo.emptyq);