1 /* can_queue.c - CAN message queues
2 * Linux CAN-bus device driver.
3 * New CAN queues by Pavel Pisa - OCERA team member
4 * email:pisa@cmp.felk.cvut.cz
5 * This software is released under the GPL-License.
6 * Version lincan-0.2 9 Jul 2003
10 #include <linux/module.h>
11 #include <linux/version.h>
12 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
13 #include <linux/malloc.h>
15 #include <linux/slab.h>
17 #include <linux/wait.h>
18 #include "../include/can.h"
19 #include "../include/can_queue.h"
22 #define DEBUGQUE(fmt,args...) printk(KERN_ERR "can_queue (debug): " fmt,\
25 atomic_t edge_num_cnt;
27 #define DEBUGQUE(fmt,args...)
30 #define CANQUE_ROUNDROB 1
33 * canque_fifo_flush_slots - free all ready slots from the FIFO
34 * @fifo: pointer to the FIFO structure
36 * The caller should be prepared to handle situations in which some
37 * slots are still held by input- or output-side processing.
38 * Such slots cannot be flushed, nor can their processing be interrupted.
40 * Return Value: A nonzero value indicates that the queue was not
41 * empty before the function call.
/* canque_fifo_flush_slots - return all ready slots of @fifo to its free list
 * and mark the FIFO empty, under the FIFO spinlock.
 * NOTE(review): this excerpt is truncated -- the declarations of ret/flags,
 * part of the list relinking and the return statement are not visible here.
 */
43 int canque_fifo_flush_slots(struct canque_fifo_t *fifo)
47 struct canque_slot_t *slot;
48 spin_lock_irqsave(&fifo->fifo_lock, flags);
50 *fifo->tail=fifo->flist; /* splice the queued slot chain onto the free list */
53 fifo->tail=&fifo->head; /* queue is now empty; tail points back at head */
54 ret=canque_fifo_test_and_set_fl(fifo,EMPTY); /* previous EMPTY state -> ret (exact return mapping not visible) */
55 spin_unlock_irqrestore(&fifo->fifo_lock, flags);
61 * canque_fifo_init_slots - initialize one CAN FIFO
62 * @fifo: pointer to the FIFO structure
63 * @slotsnr: number of requested slots
65 * Return Value: A negative value indicates that there is not enough
66 * memory to allocate space for the requested number of slots.
/* canque_fifo_init_slots - allocate and initialize @slotsnr slots for @fifo.
 * A slotsnr of 0 selects the default depth MAX_BUF_LENGTH.
 * Returns -1 when the kmalloc of the slot array fails.
 * NOTE(review): truncated excerpt -- the loop that links the allocated slots
 * into fifo->flist (original lines between 75 and 84) is not visible.
 */
68 int canque_fifo_init_slots(struct canque_fifo_t *fifo, int slotsnr)
71 struct canque_slot_t *slot;
72 if(!slotsnr) slotsnr=MAX_BUF_LENGTH; /* 0 => use default queue depth */
73 size=sizeof(struct canque_slot_t)*slotsnr;
74 fifo->entry=kmalloc(size,GFP_KERNEL);
75 if(!fifo->entry) return -1; /* allocation failure => negative return */
84 fifo->tail=&fifo->head; /* empty queue: tail points at head */
85 canque_fifo_set_fl(fifo,EMPTY);
90 * canque_fifo_done - frees slots allocated for CAN FIFO
91 * @fifo: pointer to the FIFO structure
/* canque_fifo_done - release the slot storage allocated by
 * canque_fifo_init_slots.
 * NOTE(review): the function body is not visible in this excerpt; the lines
 * below are a commented-out reference of the list/atomic helpers used
 * throughout this file, kept verbatim.
 */
93 int canque_fifo_done(struct canque_fifo_t *fifo)
101 /* atomic_dec_and_test(&qedge->edge_used);
102 void atomic_inc(&qedge->edge_used);
103 list_add_tail(struct list_head *new, struct list_head *head)
104 list_for_each(edge,qends->inlist);
105 list_entry(ptr, type, member)
/* canque_get_inslot - reserve an input slot on the first usable input edge.
 * @qends:  input side ends structure
 * @qedgep: output -- selected edge (on success)
 * @slotp:  output -- reserved slot (on success)
 * @cmd:    command value stored into the slot
 * Skips edges flagged BLOCK or DEAD.  While the slot is held, the edge's
 * edge_used count stays elevated; the failure path below drops it again and
 * sends NOTIFY_NOUSR when it reaches zero.
 * NOTE(review): truncated excerpt -- the success return path and closing
 * braces are not visible.
 */
108 int canque_get_inslot(struct canque_ends_t *qends,
109 struct canque_edge_t **qedgep, struct canque_slot_t **slotp, int cmd)
113 struct canque_edge_t *edge;
115 spin_lock_irqsave(&qends->ends_lock, flags);
116 if(!list_empty(&qends->inlist)){
117 edge=list_entry(qends->inlist.next,struct canque_edge_t,inpeers);
118 if(!canque_fifo_test_fl(&edge->fifo,BLOCK)&&!canque_fifo_test_fl(&edge->fifo,DEAD)){
119 atomic_inc(&edge->edge_used); /* pin the edge before dropping ends_lock */
120 spin_unlock_irqrestore(&qends->ends_lock, flags);
121 ret=canque_fifo_get_inslot(&edge->fifo, slotp, cmd);
124 DEBUGQUE("canque_get_inslot cmd=%d found edge %d\n",cmd,edge->edge_num);
128 spin_lock_irqsave(&qends->ends_lock, flags); /* failure path: re-acquire and unpin */
129 if(atomic_dec_and_test(&edge->edge_used))
130 canque_notify_bothends(edge,CANQUEUE_NOTIFY_NOUSR);
133 spin_unlock_irqrestore(&qends->ends_lock, flags);
135 DEBUGQUE("canque_get_inslot cmd=%d failed\n",cmd);
/* canque_get_inslot4id - reserve an input slot on the best edge matching
 * a CAN message @id at priority @prio.
 * An edge matches when ((id ^ edge->filtid) & edge->filtmask) == 0; among
 * matches, an edge with a non-zero filtmask (more specific filter) and a
 * suitable edge_prio relative to @prio is preferred.  BLOCK/DEAD edges are
 * skipped.  Usage counting mirrors canque_get_inslot.
 * NOTE(review): truncated excerpt -- bestedge assignment lines, the success
 * return path and closing braces are not visible; the selection rules above
 * are inferred from the visible comparisons and should be confirmed against
 * the full source.
 */
139 int canque_get_inslot4id(struct canque_ends_t *qends,
140 struct canque_edge_t **qedgep, struct canque_slot_t **slotp,
141 int cmd, unsigned long id, int prio)
145 struct canque_edge_t *edge, *bestedge=NULL;
146 struct list_head *entry;
148 spin_lock_irqsave(&qends->ends_lock, flags);
149 list_for_each(entry,&qends->inlist){
150 edge=list_entry(entry,struct canque_edge_t,inpeers);
151 if(canque_fifo_test_fl(&edge->fifo,BLOCK)||canque_fifo_test_fl(&edge->fifo,DEAD))
153 if((id^edge->filtid)&edge->filtmask) /* filter mismatch => skip edge */
156 if(bestedge->filtmask){
157 if (!edge->filtmask) continue; /* keep the more specific filter */
164 if(bestedge->edge_prio<edge->edge_prio){
165 if(edge->edge_prio>prio) continue;
167 if(bestedge->edge_prio<=prio) continue;
172 if((edge=bestedge)!=NULL){
173 atomic_inc(&edge->edge_used); /* pin the chosen edge before unlocking */
174 spin_unlock_irqrestore(&qends->ends_lock, flags);
175 ret=canque_fifo_get_inslot(&edge->fifo, slotp, cmd);
178 DEBUGQUE("canque_get_inslot4id cmd=%d id=%ld prio=%d found edge %d\n",cmd,id,prio,edge->edge_num);
181 spin_lock_irqsave(&qends->ends_lock, flags); /* failure path: unpin edge */
182 if(atomic_dec_and_test(&edge->edge_used))
183 canque_notify_bothends(edge,CANQUEUE_NOTIFY_NOUSR);
185 spin_unlock_irqrestore(&qends->ends_lock, flags);
187 DEBUGQUE("canque_get_inslot4id cmd=%d id=%ld prio=%d failed\n",cmd,id,prio);
/* canque_put_inslot - commit a previously reserved slot into @qedge's FIFO.
 * Activates the edge on the output side and notifies the output ends that a
 * message is ready (NOTIFY_PROC), then drops the edge_used reference taken
 * when the slot was reserved.
 * NOTE(review): truncated excerpt -- ret/flags declarations, the conditional
 * around the activate/notify pair and the return are not visible.
 */
192 int canque_put_inslot(struct canque_ends_t *qends,
193 struct canque_edge_t *qedge, struct canque_slot_t *slot)
197 ret=canque_fifo_put_inslot(&qedge->fifo,slot);
199 canque_activate_edge(qends,qedge); /* move edge onto the active list */
200 canque_notify_outends(qedge,CANQUEUE_NOTIFY_PROC); /* wake consumers */
202 spin_lock_irqsave(&qends->ends_lock, flags);
203 if(atomic_dec_and_test(&qedge->edge_used)) /* drop reservation reference */
204 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
205 spin_unlock_irqrestore(&qends->ends_lock, flags);
206 DEBUGQUE("canque_put_inslot for edge %d returned %d\n",qedge->edge_num,ret);
/* canque_abort_inslot - cancel a previously reserved slot and return it to
 * the free list.  Notifies the output ends that space is available
 * (NOTIFY_SPACE), then drops the edge_used reference taken at reservation.
 * NOTE(review): truncated excerpt -- ret/flags declarations, the conditional
 * around the notify and the return are not visible.
 */
210 int canque_abort_inslot(struct canque_ends_t *qends,
211 struct canque_edge_t *qedge, struct canque_slot_t *slot)
215 ret=canque_fifo_abort_inslot(&qedge->fifo,slot);
217 canque_notify_outends(qedge,CANQUEUE_NOTIFY_SPACE); /* freed one slot */
219 spin_lock_irqsave(&qends->ends_lock, flags);
220 if(atomic_dec_and_test(&qedge->edge_used)) /* drop reservation reference */
221 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
222 spin_unlock_irqrestore(&qends->ends_lock, flags);
223 DEBUGQUE("canque_abort_inslot for edge %d returned %d\n",qedge->edge_num,ret);
/* canque_filter_msg2edges - deliver @msg to every input edge whose
 * filtid/filtmask matches msg->id, skipping BLOCK/DEAD edges.
 * For each match: take an edge reference, reserve a slot, copy the message
 * (copy not visible here), commit the slot, activate the edge and notify
 * the output side.  The closing DEBUGQUE suggests the matched-edge count is
 * accumulated in destnr (declaration not visible).
 * NOTE(review): truncated excerpt -- ret/flags/destnr declarations, the
 * message copy into the slot and the loop-closing braces are not visible.
 */
227 int canque_filter_msg2edges(struct canque_ends_t *qends, struct canmsg_t *msg)
232 struct canque_edge_t *edge;
233 struct list_head *entry;
234 struct canque_slot_t *slot;
236 DEBUGQUE("canque_filter_msg2edges for msg ID %ld\n",msg->id);
237 spin_lock_irqsave(&qends->ends_lock, flags);
238 list_for_each(entry,&qends->inlist){
239 edge=list_entry(entry,struct canque_edge_t,inpeers);
240 if(canque_fifo_test_fl(&edge->fifo,BLOCK)||canque_fifo_test_fl(&edge->fifo,DEAD))
242 /* FIXME: the next comparison should be outside of ends lock */
243 if((msg->id^edge->filtid)&edge->filtmask) /* filter mismatch => skip */
245 atomic_inc(&edge->edge_used); /* pin edge while ends_lock is dropped */
246 spin_unlock_irqrestore(&qends->ends_lock, flags);
247 ret=canque_fifo_get_inslot(&edge->fifo, &slot, 0);
251 ret=canque_fifo_put_inslot(&edge->fifo,slot); /* commit copied message */
253 canque_activate_edge(qends,edge);
254 canque_notify_outends(edge,CANQUEUE_NOTIFY_PROC);
258 spin_lock_irqsave(&qends->ends_lock, flags); /* re-acquire and unpin */
259 if(atomic_dec_and_test(&edge->edge_used))
260 canque_notify_bothends(edge,CANQUEUE_NOTIFY_NOUSR);
262 spin_unlock_irqrestore(&qends->ends_lock, flags);
263 DEBUGQUE("canque_filter_msg2edges sent msg ID %ld to %d edges\n",msg->id,destnr);
/* canque_test_outslot - fetch the next ready output slot, scanning active
 * lists from the highest priority (CANQUEUE_PRIO_NR-1) downwards.
 * On success *slotp is filled and the found edge keeps an elevated
 * edge_used reference that the caller must release via
 * canque_free_outslot/canque_again_outslot.  Returns via
 * canque_fifo_test_outslot on a hit; the no-slot return value is not
 * visible in this excerpt.
 * NOTE(review): truncated -- prio/flags declarations, *qedgep assignment and
 * closing braces are missing from this view.
 */
267 int canque_test_outslot(struct canque_ends_t *qends,
268 struct canque_edge_t **qedgep, struct canque_slot_t **slotp)
272 struct canque_edge_t *edge;
274 spin_lock_irqsave(&qends->ends_lock, flags);
275 for(prio=CANQUEUE_PRIO_NR;--prio>=0;){ /* highest priority first */
276 if(!list_empty(&qends->active[prio])){
277 edge=list_entry(qends->active[prio].next,struct canque_edge_t,outpeers);
278 atomic_inc(&edge->edge_used); /* reference released by caller later */
279 spin_unlock_irqrestore(&qends->ends_lock, flags);
281 DEBUGQUE("canque_test_outslot found edge %d\n",edge->edge_num);
282 return canque_fifo_test_outslot(&edge->fifo, slotp);
285 spin_unlock_irqrestore(&qends->ends_lock, flags);
287 DEBUGQUE("canque_test_outslot no ready slot\n");
/* canque_free_outslot - release a processed output slot back to @qedge's
 * FIFO and rebalance the edge between the active and idle lists.
 * Notifies the input side when the FIFO became empty (NOTIFY_EMPTY) or when
 * space was freed after a full condition (NOTIFY_SPACE).  With
 * CANQUE_ROUNDROB set, a still-nonempty edge is rotated to the tail of its
 * active list so edges of equal priority share service round-robin.
 * Finally drops the edge_used reference taken by canque_test_outslot.
 * NOTE(review): truncated excerpt -- ret/flags declarations, the else/#if
 * branch structure around the list moves and the return are not visible.
 */
291 int canque_free_outslot(struct canque_ends_t *qends,
292 struct canque_edge_t *qedge, struct canque_slot_t *slot)
296 ret=canque_fifo_free_outslot(&qedge->fifo, slot);
297 if(ret&CAN_FIFOF_EMPTY){
298 canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
300 if(ret&CAN_FIFOF_FULL)
301 canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE); /* was full, now has room */
302 spin_lock_irqsave(&qends->ends_lock, flags);
303 if((ret&CAN_FIFOF_EMPTY) || CANQUE_ROUNDROB){
304 spin_lock(&qedge->fifo.fifo_lock); /* nested: ends_lock then fifo_lock */
305 if(canque_fifo_test_fl(&qedge->fifo,EMPTY)){
306 list_del(&qedge->outpeers);
307 list_add(&qedge->outpeers,&qends->idle); /* drained edge goes idle */
311 list_del(&qedge->outpeers);
312 list_add_tail(&qedge->outpeers,&qends->active[qedge->edge_prio]); /* round-robin rotate */
314 #endif /*CANQUE_ROUNDROB*/
315 spin_unlock(&qedge->fifo.fifo_lock);
317 if(atomic_dec_and_test(&qedge->edge_used)) /* drop test_outslot reference */
318 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
319 spin_unlock_irqrestore(&qends->ends_lock, flags);
320 DEBUGQUE("canque_free_outslot for edge %d returned %d\n",qedge->edge_num,ret);
/* canque_again_outslot - requeue a not-yet-processed output slot so it will
 * be delivered again (e.g. after a failed transmit attempt).
 * Sends the same input-side notifications as canque_free_outslot and drops
 * the edge_used reference taken by canque_test_outslot.
 * NOTE(review): truncated excerpt -- ret/flags declarations, brace structure
 * and the return are not visible.
 */
324 int canque_again_outslot(struct canque_ends_t *qends,
325 struct canque_edge_t *qedge, struct canque_slot_t *slot)
329 ret=canque_fifo_again_outslot(&qedge->fifo, slot);
330 if(ret&CAN_FIFOF_EMPTY){
331 canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
333 if(ret&CAN_FIFOF_FULL)
334 canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE);
335 spin_lock_irqsave(&qends->ends_lock, flags);
336 if(atomic_dec_and_test(&qedge->edge_used)) /* drop test_outslot reference */
337 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
338 spin_unlock_irqrestore(&qends->ends_lock, flags);
339 DEBUGQUE("canque_again_outslot for edge %d returned %d\n",qedge->edge_num,ret);
/* canque_set_filt - change the ID filter of @qedge to @filtid/@filtmask.
 * The edge is temporarily BLOCKed (unless it already was) while both ends
 * are told about the filter change (NOTIFY_FILTCH); the BLOCK flag is
 * cleared again only if this call set it.  Returns -1 for a DEAD edge,
 * otherwise 0/1 reflecting the prior BLOCK state (exact mapping of the
 * final return value is not visible in this excerpt).
 * NOTE(review): truncated -- ret/flags declarations, brace structure and the
 * return statement are missing from this view.
 */
343 int canque_set_filt(struct canque_edge_t *qedge,
344 unsigned long filtid, unsigned long filtmask)
349 spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
350 atomic_inc(&qedge->edge_used); /* keep edge alive across the unlocked notify */
351 qedge->filtid=filtid;
352 qedge->filtmask=filtmask;
353 if(canque_fifo_test_fl(&qedge->fifo,DEAD)) ret=-1; /* cannot filter a dead edge */
354 else ret=canque_fifo_test_and_set_fl(&qedge->fifo,BLOCK)?1:0; /* remember prior BLOCK */
356 spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
358 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_FILTCH);
360 spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
361 if(!ret)canque_fifo_clear_fl(&qedge->fifo,BLOCK); /* unblock only if we blocked it */
362 if(atomic_dec_and_test(&qedge->edge_used))
363 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
364 spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
366 DEBUGQUE("canque_set_filt for edge %d, ID %ld and mask %ld returned %d\n",qedge->edge_num,filtid,filtmask,ret);
/* canque_flush - discard all messages queued on @qedge.
 * Flushes the FIFO slots, tells the input side the queue is empty and has
 * space, and moves the (now empty) edge from the output ends' active list
 * to the idle list.  Holds outends->ends_lock then fifo_lock, matching the
 * nesting order used in canque_free_outslot.
 * NOTE(review): truncated excerpt -- ret/flags declarations, brace structure
 * and the return are not visible.
 */
370 int canque_flush(struct canque_edge_t *qedge)
375 atomic_inc(&qedge->edge_used); /* keep edge alive during flush */
376 ret=canque_fifo_flush_slots(&qedge->fifo);
378 canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
379 canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE);
380 spin_lock_irqsave(&qedge->outends->ends_lock, flags);
381 spin_lock(&qedge->fifo.fifo_lock);
382 if(canque_fifo_test_fl(&qedge->fifo,EMPTY)){
383 list_del(&qedge->outpeers);
384 list_add(&qedge->outpeers,&qedge->outends->idle); /* retire to idle list */
386 if(atomic_dec_and_test(&qedge->edge_used))
387 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
388 spin_unlock(&qedge->fifo.fifo_lock);
389 spin_unlock_irqrestore(&qedge->outends->ends_lock, flags);
391 DEBUGQUE("canque_flush for edge %d returned %d\n",qedge->edge_num,ret);
/* canqueue_ends_init_gen - generic initialization of a canque_ends_t:
 * empty active list per priority level, empty idle and input lists, and a
 * fresh ends_lock spinlock.
 * NOTE(review): truncated excerpt -- the declaration of i, braces and the
 * return are not visible.
 */
395 int canqueue_ends_init_gen(struct canque_ends_t *qends)
398 for(i=CANQUEUE_PRIO_NR;--i>=0;){
399 INIT_LIST_HEAD(&qends->active[i]);
401 INIT_LIST_HEAD(&qends->idle);
402 INIT_LIST_HEAD(&qends->inlist);
403 spin_lock_init(&qends->ends_lock);
/* canqueue_notify_kern - notification callback for kernel-file ends.
 * Translates queue events into wake-ups on the end's wait queues:
 *   EMPTY -> emptyq, SPACE -> writeq, PROC -> readq,
 *   NOUSR -> all three, DEAD -> drop one edge_used reference,
 *   ATACH -> take one edge_used reference.
 * NOTE(review): truncated excerpt -- the switch statement's braces and the
 * break statements between cases are not visible; the grouping above
 * assumes the usual one-break-per-case layout (confirm in full source).
 */
408 void canqueue_notify_kern(struct canque_ends_t *qends, struct canque_edge_t *qedge, int what)
410 DEBUGQUE("canqueue_notify_kern for edge %d and event %d\n",qedge->edge_num,what);
412 case CANQUEUE_NOTIFY_EMPTY:
413 wake_up_interruptible(&qends->endinfo.fileinfo.emptyq);
415 case CANQUEUE_NOTIFY_SPACE:
416 wake_up_interruptible(&qends->endinfo.fileinfo.writeq);
418 case CANQUEUE_NOTIFY_PROC:
419 wake_up_interruptible(&qends->endinfo.fileinfo.readq);
421 case CANQUEUE_NOTIFY_NOUSR:
422 wake_up_interruptible(&qends->endinfo.fileinfo.readq);
423 wake_up_interruptible(&qends->endinfo.fileinfo.writeq);
424 wake_up_interruptible(&qends->endinfo.fileinfo.emptyq);
426 case CANQUEUE_NOTIFY_DEAD:
427 if(atomic_read(&qedge->edge_used)>0) /* guard against double release */
428 atomic_dec(&qedge->edge_used);
430 case CANQUEUE_NOTIFY_ATACH:
431 atomic_inc(&qedge->edge_used);
/* canqueue_ends_init_kern - initialize ends for kernel-file use: generic
 * init plus the three wait queues (read/write/empty) and installation of
 * canqueue_notify_kern as the notify callback.
 * NOTE(review): truncated excerpt -- braces and return are not visible.
 */
436 int canqueue_ends_init_kern(struct canque_ends_t *qends)
438 canqueue_ends_init_gen(qends);
440 init_waitqueue_head(&qends->endinfo.fileinfo.readq);
441 init_waitqueue_head(&qends->endinfo.fileinfo.writeq);
442 init_waitqueue_head(&qends->endinfo.fileinfo.emptyq);
443 qends->notify=canqueue_notify_kern;
444 DEBUGQUE("canqueue_ends_init_kern\n");
/* canque_get_inslot4id_wait_kern - blocking wrapper around
 * canque_get_inslot4id: sleeps interruptibly on writeq until a slot is
 * reserved (result != -1).  wait_event_interruptible may also return early
 * on a signal (its return value handling is not visible here).
 * NOTE(review): truncated excerpt -- ret declaration, braces and return are
 * not visible.
 */
449 int canque_get_inslot4id_wait_kern(struct canque_ends_t *qends,
450 struct canque_edge_t **qedgep, struct canque_slot_t **slotp,
451 int cmd, unsigned long id, int prio)
454 DEBUGQUE("canque_get_inslot4id_wait_kern for cmd %d, id %ld, prio %d\n",cmd,id,prio);
455 wait_event_interruptible((qends->endinfo.fileinfo.writeq),
456 (ret=canque_get_inslot4id(qends,qedgep,slotp,cmd,id,prio))!=-1);
/* canque_get_outslot_wait_kern - blocking wrapper around
 * canque_test_outslot: sleeps interruptibly on readq until an output slot
 * is available (result != -1).
 * NOTE(review): truncated excerpt -- ret declaration, braces and return are
 * not visible.
 */
460 int canque_get_outslot_wait_kern(struct canque_ends_t *qends,
461 struct canque_edge_t **qedgep, struct canque_slot_t **slotp)
464 DEBUGQUE("canque_get_outslot_wait_kern\n");
465 wait_event_interruptible((qends->endinfo.fileinfo.readq),
466 (ret=canque_test_outslot(qends,qedgep,slotp))!=-1);
/* canque_sync_wait_kern - sleep interruptibly on emptyq until @qedge's FIFO
 * reports the EMPTY flag, i.e. all queued messages have been processed.
 * NOTE(review): truncated excerpt -- ret declaration, braces and return are
 * not visible.
 */
470 int canque_sync_wait_kern(struct canque_ends_t *qends, struct canque_edge_t *qedge)
473 DEBUGQUE("canque_sync_wait_kern\n");
474 wait_event_interruptible((qends->endinfo.fileinfo.emptyq),
475 (ret=canque_fifo_test_fl(&qedge->fifo,EMPTY)));
/* canque_new_edge_kern - allocate and initialize a new edge with @slotsnr
 * FIFO slots (0 => default).  Returns NULL on allocation failure.  Under
 * CAN_DEBUG each edge gets a sequential edge_num for tracing.
 * NOTE(review): truncated excerpt -- the kfree/NULL-return inside the
 * init-failure branch, the remaining field initialization and the final
 * return qedge are not visible.
 */
480 struct canque_edge_t *canque_new_edge_kern(int slotsnr)
482 struct canque_edge_t *qedge;
483 qedge = (struct canque_edge_t *)kmalloc(sizeof(struct canque_edge_t), GFP_KERNEL);
484 if(qedge == NULL) return NULL;
486 memset(qedge,0,sizeof(struct canque_edge_t)); /* zero all fields/flags */
487 spin_lock_init(&qedge->fifo.fifo_lock);
488 if(canque_fifo_init_slots(&qedge->fifo, slotsnr)<0){
490 DEBUGQUE("canque_new_edge_kern failed\n"); /* slot allocation failed */
493 atomic_set(&qedge->edge_used,0);
496 qedge->edge_prio = 0;
498 /* not exactly clean, but enough for debugging */
499 atomic_inc(&edge_num_cnt); /* non-atomic inc+read pair: numbers may race */
500 qedge->edge_num=atomic_read(&edge_num_cnt);
501 #endif /* CAN_DEBUG */
502 DEBUGQUE("canque_new_edge_kern %d\n",qedge->edge_num);
/* canqueue_connect_edge - attach @qedge between @inends and @outends.
 * Registers the edge on inends->inlist and outends->idle under the nested
 * locks inends->ends_lock -> outends->ends_lock -> fifo_lock, then notifies
 * both ends with NOTIFY_ATACH (which itself increments edge_used).  The
 * temporary reference taken at entry is dropped at the end.
 * NOTE(review): truncated excerpt -- braces and the return statement are
 * not visible.
 */
506 int canqueue_connect_edge(struct canque_edge_t *qedge, struct canque_ends_t *inends, struct canque_ends_t *outends)
509 if(qedge == NULL) return -1;
510 DEBUGQUE("canqueue_connect_edge %d\n",qedge->edge_num);
511 atomic_inc(&qedge->edge_used); /* temporary reference over the critical section */
512 spin_lock_irqsave(&inends->ends_lock, flags);
513 spin_lock(&outends->ends_lock);
514 spin_lock(&qedge->fifo.fifo_lock);
515 qedge->inends=inends;
516 list_add(&qedge->inpeers,&inends->inlist);
517 qedge->outends=outends;
518 list_add(&qedge->outpeers,&outends->idle); /* new edge starts idle */
519 spin_unlock(&qedge->fifo.fifo_lock);
520 spin_unlock(&outends->ends_lock);
521 spin_unlock_irqrestore(&inends->ends_lock, flags);
522 canque_notify_bothends(qedge, CANQUEUE_NOTIFY_ATACH);
524 spin_lock_irqsave(&qedge->fifo.fifo_lock, flags);
525 if(atomic_dec_and_test(&qedge->edge_used)) /* drop temporary reference */
526 canque_notify_bothends(qedge,CANQUEUE_NOTIFY_NOUSR);
527 spin_unlock_irqrestore(&qedge->fifo.fifo_lock, flags);
/* canqueue_disconnect_edge - detach @qedge from both ends, but only when
 * its use count has dropped to zero; the closing DEBUGQUE suggests ret
 * encodes success/failure (assignments not visible).  Takes the ends locks
 * in the same in->out->fifo order as canqueue_connect_edge.
 * NOTE(review): truncated excerpt -- ret declaration, the else branch for a
 * still-used edge, clearing of inends/outends pointers and the return are
 * not visible.
 */
531 int canqueue_disconnect_edge(struct canque_edge_t *qedge)
535 struct canque_ends_t *inends, *outends;
537 inends=qedge->inends;
538 if(inends) spin_lock_irqsave(&inends->ends_lock,flags);
539 outends=qedge->outends;
540 if(outends) spin_lock(&outends->ends_lock);
541 spin_lock(&qedge->fifo.fifo_lock);
542 if(atomic_read(&qedge->edge_used)==0) { /* safe to unlink only when unused */
544 list_del(&qedge->outpeers);
548 list_del(&qedge->inpeers);
553 spin_unlock(&qedge->fifo.fifo_lock);
554 if(outends) spin_unlock(&outends->ends_lock);
555 if(inends) spin_unlock_irqrestore(&inends->ends_lock,flags);
556 DEBUGQUE("canqueue_disconnect_edge %d returned %d\n",qedge->edge_num,ret);
/* canqueue_disconnect_edge_kern - kernel-side edge teardown: BLOCK the
 * edge, mark it DEAD exactly once (test_and_set), notify both ends, drop
 * one reference and then sleep on emptyq until canqueue_disconnect_edge
 * succeeds in unlinking it.
 * NOTE(review): truncated excerpt -- braces and return statements (for both
 * the success and the already-DEAD path) are not visible.
 */
560 int canqueue_disconnect_edge_kern(struct canque_ends_t *qends, struct canque_edge_t *qedge)
562 canque_fifo_set_fl(&qedge->fifo,BLOCK); /* stop new slot reservations */
563 DEBUGQUE("canqueue_disconnect_edge_kern %d called\n",qedge->edge_num);
564 if(!canque_fifo_test_and_set_fl(&qedge->fifo,DEAD)){ /* first caller only */
565 canque_notify_bothends(qedge, CANQUEUE_NOTIFY_DEAD);
566 if(atomic_read(&qedge->edge_used)>0)
567 atomic_dec(&qedge->edge_used);
568 DEBUGQUE("canqueue_disconnect_edge_kern %d waiting\n",qedge->edge_num);
569 wait_event_interruptible((qends->endinfo.fileinfo.emptyq),
570 (canqueue_disconnect_edge(qedge)>=0));
573 DEBUGQUE("canqueue_disconnect_edge_kern failed\n");
/* canqueue_disconnect_list_kern - disconnect and free edges found on
 * @list (one of qends' inlist/idle/active lists).  Takes the first entry
 * under ends_lock, pins it, then disconnects it outside the lock and frees
 * its FIFO storage on success.  The member used for list_entry depends on
 * whether @list is the input-peer list or an output-peer list.
 * NOTE(review): truncated excerpt -- flags declaration, the loop/retry
 * structure around this body, kfree of the edge itself and the returns are
 * not visible.
 */
579 int canqueue_disconnect_list_kern(struct canque_ends_t *qends, struct list_head *list)
581 struct canque_edge_t *edge;
584 spin_lock_irqsave(&qends->ends_lock,flags);
585 if(list_empty(list)){
586 spin_unlock_irqrestore(&qends->ends_lock,flags); /* nothing left to disconnect */
589 if(list == &qends->inlist)
590 edge=list_entry(list->next,struct canque_edge_t,inpeers);
592 edge=list_entry(list->next,struct canque_edge_t,outpeers);
593 atomic_inc(&edge->edge_used); /* pin edge before dropping ends_lock */
594 spin_unlock_irqrestore(&qends->ends_lock,flags);
595 if(canqueue_disconnect_edge_kern(qends, edge)>=0) {
596 /* Free edge memory */
597 canque_fifo_done(&edge->fifo);
600 DEBUGQUE("canqueue_disconnect_list_kern in troubles\n");
601 DEBUGQUE("the edge %d has usage count %d and flags %ld\n",edge->edge_num,atomic_read(&edge->edge_used),edge->fifo.fifo_flags);
607 void canqueue_block_list(struct canque_ends_t *qends, struct list_head *list)
609 struct canque_edge_t *edge;
610 struct list_head *entry;
612 /* has to be called with qends->ends_lock already locked */
613 list_for_each(entry,&qends->inlist){
614 if(list == &qends->inlist)
615 edge=list_entry(list->next,struct canque_edge_t,inpeers);
617 edge=list_entry(list->next,struct canque_edge_t,outpeers);
618 canque_fifo_set_fl(&edge->fifo,BLOCK);
/* canqueue_ends_done_kern - finalize kernel-file ends: BLOCK every edge on
 * every list under ends_lock, then (without the lock) disconnect and free
 * the edges list by list, and finally wake all sleepers so they can notice
 * the teardown.  The @sync parameter is not used in the visible portion.
 * NOTE(review): this excerpt is truncated and the function continues past
 * the last visible line (the use of @sync and the return are not shown).
 * The idle list is blocked twice (before and inside/after the priority
 * loop) -- presumably harmless redundancy; confirm against full source.
 */
623 int canqueue_ends_done_kern(struct canque_ends_t *qends, int sync)
628 DEBUGQUE("canqueue_ends_done_kern\n");
629 spin_lock_irqsave(&qends->ends_lock,flags);
630 canqueue_block_list(qends, &qends->idle);
631 for(i=CANQUEUE_PRIO_NR;--i>=0;){
632 canqueue_block_list(qends, &qends->active[i]);
634 canqueue_block_list(qends, &qends->idle);
635 canqueue_block_list(qends, &qends->inlist);
636 spin_unlock_irqrestore(&qends->ends_lock,flags);
638 for(i=CANQUEUE_PRIO_NR;--i>=0;){
639 canqueue_disconnect_list_kern(qends, &qends->active[i]);
641 canqueue_disconnect_list_kern(qends, &qends->idle);
642 canqueue_disconnect_list_kern(qends, &qends->inlist);
644 wake_up_interruptible(&qends->endinfo.fileinfo.readq);
645 wake_up_interruptible(&qends->endinfo.fileinfo.writeq);
646 wake_up_interruptible(&qends->endinfo.fileinfo.emptyq);