From: ppisa Date: Wed, 19 Nov 2003 00:31:18 +0000 (+0000) Subject: Added support for fasync system call and replacement of spinXXX by can_spinXXX X-Git-Tag: CLT_COMM_CAN_pre_canmsg_change~15 X-Git-Url: http://rtime.felk.cvut.cz/gitweb/lincan.git/commitdiff_plain/91dfab84182937b62ef64cb12d8e4f89e6d5c3ec Added support for fasync system call and replacement of spinXXX by can_spinXXX --- diff --git a/lincan/include/can_queue.h b/lincan/include/can_queue.h index 264fce5..2fc422f 100644 --- a/lincan/include/can_queue.h +++ b/lincan/include/can_queue.h @@ -51,7 +51,7 @@ struct canque_fifo_t { struct canque_slot_t **tail; /* points to NULL pointer for chaining */ struct canque_slot_t *flist; /* points the first entry in the free list */ struct canque_slot_t *entry; /* points to first allocated entry */ - spinlock_t fifo_lock; /* spin_lock_irqsave / spin_lock_irqrestore */ + can_spinlock_t fifo_lock; /* can_spin_lock_irqsave / can_spin_unlock_irqrestore */ }; #define CAN_FIFOF_DESTROY_b 15 @@ -102,21 +102,21 @@ struct canque_fifo_t { static inline int canque_fifo_get_inslot(struct canque_fifo_t *fifo, struct canque_slot_t **slotp, int cmd) { - unsigned long flags; + can_spin_irqflags_t flags; struct canque_slot_t *slot; - spin_lock_irqsave(&fifo->fifo_lock, flags); + can_spin_lock_irqsave(&fifo->fifo_lock, flags); /* get the first free slot slot from flist */ if(!(slot=fifo->flist)) { canque_fifo_set_fl(fifo,OVERRUN); canque_fifo_set_fl(fifo,FULL); - spin_unlock_irqrestore(&fifo->fifo_lock, flags); + can_spin_unlock_irqrestore(&fifo->fifo_lock, flags); *slotp=NULL; return -1; } /* adjust free slot list */ if(!(fifo->flist=slot->next)) canque_fifo_set_fl(fifo,FULL); - spin_unlock_irqrestore(&fifo->fifo_lock, flags); + can_spin_unlock_irqrestore(&fifo->fifo_lock, flags); *slotp=slot; slot->slot_flags=cmd&CAN_SLOTF_CMD; return 1; @@ -134,9 +134,9 @@ static inline int canque_fifo_put_inslot(struct canque_fifo_t *fifo, struct canque_slot_t *slot) { int ret; - unsigned long 
flags; + can_spin_irqflags_t flags; slot->next=NULL; - spin_lock_irqsave(&fifo->fifo_lock, flags); + can_spin_lock_irqsave(&fifo->fifo_lock, flags); if(*fifo->tail) printk(KERN_CRIT "canque_fifo_put_inslot: fifo->tail != NULL\n"); *fifo->tail=slot; fifo->tail=&slot->next; @@ -145,7 +145,7 @@ int canque_fifo_put_inslot(struct canque_fifo_t *fifo, struct canque_slot_t *slo ret=CAN_FIFOF_EMPTY; /* Fifo has been empty before put */ if(canque_fifo_test_and_clear_fl(fifo,INACTIVE)) ret=CAN_FIFOF_INACTIVE; /* Fifo has been empty before put */ - spin_unlock_irqrestore(&fifo->fifo_lock, flags); + can_spin_unlock_irqrestore(&fifo->fifo_lock, flags); return ret; } @@ -160,13 +160,13 @@ static inline int canque_fifo_abort_inslot(struct canque_fifo_t *fifo, struct canque_slot_t *slot) { int ret=0; - unsigned long flags; - spin_lock_irqsave(&fifo->fifo_lock, flags); + can_spin_irqflags_t flags; + can_spin_lock_irqsave(&fifo->fifo_lock, flags); slot->next=fifo->flist; fifo->flist=slot; if(canque_fifo_test_and_clear_fl(fifo,FULL)) ret=CAN_FIFOF_FULL; - spin_unlock_irqrestore(&fifo->fifo_lock, flags); + can_spin_unlock_irqrestore(&fifo->fifo_lock, flags); return ret; } @@ -184,19 +184,19 @@ int canque_fifo_abort_inslot(struct canque_fifo_t *fifo, struct canque_slot_t *s static inline int canque_fifo_test_outslot(struct canque_fifo_t *fifo, struct canque_slot_t **slotp) { - unsigned long flags; + can_spin_irqflags_t flags; int cmd; struct canque_slot_t *slot; - spin_lock_irqsave(&fifo->fifo_lock, flags); + can_spin_lock_irqsave(&fifo->fifo_lock, flags); if(!(slot=fifo->head)){; canque_fifo_set_fl(fifo,EMPTY); - spin_unlock_irqrestore(&fifo->fifo_lock, flags); + can_spin_unlock_irqrestore(&fifo->fifo_lock, flags); *slotp=NULL; return -1; } if(!(fifo->head=slot->next)) fifo->tail=&fifo->head; - spin_unlock_irqrestore(&fifo->fifo_lock, flags); + can_spin_unlock_irqrestore(&fifo->fifo_lock, flags); *slotp=slot; cmd=slot->slot_flags; @@ -218,8 +218,8 @@ static inline int 
canque_fifo_free_outslot(struct canque_fifo_t *fifo, struct canque_slot_t *slot) { int ret=0; - unsigned long flags; - spin_lock_irqsave(&fifo->fifo_lock, flags); + can_spin_irqflags_t flags; + can_spin_lock_irqsave(&fifo->fifo_lock, flags); slot->next=fifo->flist; fifo->flist=slot; if(canque_fifo_test_and_clear_fl(fifo,FULL)) @@ -228,7 +228,7 @@ int canque_fifo_free_outslot(struct canque_fifo_t *fifo, struct canque_slot_t *s canque_fifo_set_fl(fifo,EMPTY); ret|=CAN_FIFOF_EMPTY; } - spin_unlock_irqrestore(&fifo->fifo_lock, flags); + can_spin_unlock_irqrestore(&fifo->fifo_lock, flags); return ret; } @@ -242,12 +242,12 @@ int canque_fifo_free_outslot(struct canque_fifo_t *fifo, struct canque_slot_t *s static inline int canque_fifo_again_outslot(struct canque_fifo_t *fifo, struct canque_slot_t *slot) { - unsigned long flags; - spin_lock_irqsave(&fifo->fifo_lock, flags); + can_spin_irqflags_t flags; + can_spin_lock_irqsave(&fifo->fifo_lock, flags); if(!(slot->next=fifo->head)) fifo->tail=&slot->next; fifo->head=slot; - spin_unlock_irqrestore(&fifo->fifo_lock, flags); + can_spin_unlock_irqrestore(&fifo->fifo_lock, flags); return 1; } @@ -333,7 +333,7 @@ struct canque_ends_t { struct list_head idle; struct list_head inlist; struct list_head outlist; - spinlock_t ends_lock; /* spin_lock_irqsave / spin_lock_irqrestore */ + can_spinlock_t ends_lock; /* can_spin_lock_irqsave / can_spin_unlock_irqrestore */ void (*notify)(struct canque_ends_t *qends, struct canque_edge_t *qedge, int what); void *context; union { @@ -341,6 +341,9 @@ struct canque_ends_t { wait_queue_head_t readq; wait_queue_head_t writeq; wait_queue_head_t emptyq; + #ifdef CAN_ENABLE_KERN_FASYNC + struct fasync_struct *fasync; + #endif /*CAN_ENABLE_KERN_FASYNC*/ } fileinfo; struct { wait_queue_head_t daemonq; @@ -415,23 +418,23 @@ void canque_notify_bothends(struct canque_edge_t *qedge, int what) static inline void canque_activate_edge(struct canque_ends_t *inends, struct canque_edge_t *qedge) { - unsigned 
long flags; + can_spin_irqflags_t flags; struct canque_ends_t *outends; if(qedge->edge_prio>=CANQUEUE_PRIO_NR) qedge->edge_prio=CANQUEUE_PRIO_NR-1; - spin_lock_irqsave(&inends->ends_lock, flags); + can_spin_lock_irqsave(&inends->ends_lock, flags); if((outends=qedge->outends)){ - spin_lock(&outends->ends_lock); - spin_lock(&qedge->fifo.fifo_lock); + can_spin_lock(&outends->ends_lock); + can_spin_lock(&qedge->fifo.fifo_lock); if(!canque_fifo_test_fl(&qedge->fifo,EMPTY)){ list_del(&qedge->activepeers); list_add_tail(&qedge->activepeers,&outends->active[qedge->edge_prio]); } - spin_unlock(&qedge->fifo.fifo_lock); - spin_unlock(&outends->ends_lock); + can_spin_unlock(&qedge->fifo.fifo_lock); + can_spin_unlock(&outends->ends_lock); } - spin_unlock_irqrestore(&inends->ends_lock, flags); + can_spin_unlock_irqrestore(&inends->ends_lock, flags); } /** @@ -488,6 +491,10 @@ void canqueue_block_inlist(struct canque_ends_t *qends); void canqueue_block_outlist(struct canque_ends_t *qends); +int canqueue_ends_kill_inlist(struct canque_ends_t *qends, int send_rest); + +int canqueue_ends_kill_outlist(struct canque_ends_t *qends); + /* edge reference and traversal functions */ void canque_edge_do_dead(struct canque_edge_t *edge, int dead_fl); @@ -501,34 +508,34 @@ void canque_edge_incref(struct canque_edge_t *edge) static inline void canque_edge_decref(struct canque_edge_t *edge) { - unsigned long flags; + can_spin_irqflags_t flags; struct canque_ends_t *inends=edge->inends; struct canque_ends_t *outends=edge->outends; int dead_fl; - spin_lock_irqsave(&inends->ends_lock, flags); - spin_lock(&outends->ends_lock); + can_spin_lock_irqsave(&inends->ends_lock, flags); + can_spin_lock(&outends->ends_lock); if(atomic_dec_and_test(&edge->edge_used)) { dead_fl=canque_fifo_test_and_set_fl(&edge->fifo,DEAD); /*This should not be there, but it cannot be outside of the lock :-(*/ canque_notify_bothends(edge,CANQUEUE_NOTIFY_NOUSR); - spin_unlock(&outends->ends_lock); - 
spin_unlock_irqrestore(&inends->ends_lock, flags); + can_spin_unlock(&outends->ends_lock); + can_spin_unlock_irqrestore(&inends->ends_lock, flags); canque_edge_do_dead(edge, dead_fl); } else { - spin_unlock(&outends->ends_lock); - spin_unlock_irqrestore(&inends->ends_lock, flags); + can_spin_unlock(&outends->ends_lock); + can_spin_unlock_irqrestore(&inends->ends_lock, flags); } } static inline struct canque_edge_t *canque_first_inedge(struct canque_ends_t *qends) { - unsigned long flags; + can_spin_irqflags_t flags; struct list_head *entry; struct canque_edge_t *edge; - spin_lock_irqsave(&qends->ends_lock, flags); + can_spin_lock_irqsave(&qends->ends_lock, flags); entry=qends->inlist.next; skip_dead: if(entry != &qends->inlist) { @@ -541,7 +548,7 @@ struct canque_edge_t *canque_first_inedge(struct canque_ends_t *qends) } else { edge=NULL; } - spin_unlock_irqrestore(&qends->ends_lock, flags); + can_spin_unlock_irqrestore(&qends->ends_lock, flags); return edge; } @@ -549,11 +556,11 @@ struct canque_edge_t *canque_first_inedge(struct canque_ends_t *qends) static inline struct canque_edge_t *canque_next_inedge(struct canque_ends_t *qends, struct canque_edge_t *edge) { - unsigned long flags; + can_spin_irqflags_t flags; struct list_head *entry; struct canque_edge_t *next; - spin_lock_irqsave(&qends->ends_lock, flags); + can_spin_lock_irqsave(&qends->ends_lock, flags); entry=edge->inpeers.next; skip_dead: if(entry != &qends->inlist) { @@ -566,7 +573,7 @@ struct canque_edge_t *canque_next_inedge(struct canque_ends_t *qends, struct can } else { next=NULL; } - spin_unlock_irqrestore(&qends->ends_lock, flags); + can_spin_unlock_irqrestore(&qends->ends_lock, flags); canque_edge_decref(edge); return next; } @@ -577,11 +584,11 @@ struct canque_edge_t *canque_next_inedge(struct canque_ends_t *qends, struct can static inline struct canque_edge_t *canque_first_outedge(struct canque_ends_t *qends) { - unsigned long flags; + can_spin_irqflags_t flags; struct list_head *entry; struct 
canque_edge_t *edge; - spin_lock_irqsave(&qends->ends_lock, flags); + can_spin_lock_irqsave(&qends->ends_lock, flags); entry=qends->outlist.next; skip_dead: if(entry != &qends->outlist) { @@ -594,7 +601,7 @@ struct canque_edge_t *canque_first_outedge(struct canque_ends_t *qends) } else { edge=NULL; } - spin_unlock_irqrestore(&qends->ends_lock, flags); + can_spin_unlock_irqrestore(&qends->ends_lock, flags); return edge; } @@ -602,11 +609,11 @@ struct canque_edge_t *canque_first_outedge(struct canque_ends_t *qends) static inline struct canque_edge_t *canque_next_outedge(struct canque_ends_t *qends, struct canque_edge_t *edge) { - unsigned long flags; + can_spin_irqflags_t flags; struct list_head *entry; struct canque_edge_t *next; - spin_lock_irqsave(&qends->ends_lock, flags); + can_spin_lock_irqsave(&qends->ends_lock, flags); entry=edge->outpeers.next; skip_dead: if(entry != &qends->outlist) { @@ -619,7 +626,7 @@ struct canque_edge_t *canque_next_outedge(struct canque_ends_t *qends, struct ca } else { next=NULL; } - spin_unlock_irqrestore(&qends->ends_lock, flags); + can_spin_unlock_irqrestore(&qends->ends_lock, flags); canque_edge_decref(edge); return next; } diff --git a/lincan/include/can_sysdep.h b/lincan/include/can_sysdep.h index 9e6adf5..0297e21 100644 --- a/lincan/include/can_sysdep.h +++ b/lincan/include/can_sysdep.h @@ -72,4 +72,28 @@ #define del_timer_sync del_timer #endif /* <2.4.0 */ +#define CAN_ENABLE_KERN_FASYNC + +#ifndef CAN_WITH_RTL + +#define can_spinlock_t spinlock_t +#define can_spin_irqflags_t unsigned long +#define can_spin_lock spin_lock +#define can_spin_unlock spin_unlock +#define can_spin_lock_irqsave spin_lock_irqsave +#define can_spin_unlock_irqrestore spin_unlock_irqrestore +#define can_spin_lock_init spin_lock_init + +#else /*CAN_WITH_RTL*/ + +#define can_spinlock_t rtl_spinlock_t +#define can_spin_irqflags_t unsigned long +#define can_spin_lock rtl_spin_lock +#define can_spin_unlock rtl_spin_unlock +#define can_spin_lock_irqsave 
rtl_spin_lock_irqsave +#define can_spin_unlock_irqrestore rtl_spin_unlock_irqrestore +#define can_spin_lock_init rtl_spin_lock_init + +#endif /*CAN_WITH_RTL*/ + #endif /*_CAN_SYSDEP_H*/ diff --git a/lincan/include/fasync.h b/lincan/include/fasync.h new file mode 100644 index 0000000..9f479db --- /dev/null +++ b/lincan/include/fasync.h @@ -0,0 +1,11 @@ +/* fasync.h + * Header file for the Linux CAN-bus driver. + * Written by Arnaud Westenberg email:arnaud@wanadoo.nl + * Rewritten for new CAN queues by Pavel Pisa - OCERA team member + * email:pisa@cmp.felk.cvut.cz + * This software is released under the GPL-License. + * Version lincan-0.2 9 Jul 2003 + */ + +int can_fasync(int fd, struct file *filp, int on); + diff --git a/lincan/include/main.h b/lincan/include/main.h index 03ee976..4bb0e0e 100644 --- a/lincan/include/main.h +++ b/lincan/include/main.h @@ -32,7 +32,7 @@ struct canhardware_t { int nr_boards; struct rtr_id *rtr_queue; - spinlock_t rtr_lock; + can_spinlock_t rtr_lock; struct candevice_t *candevice[MAX_HW_CARDS]; }; diff --git a/lincan/src/Makefile b/lincan/src/Makefile index 7d87097..ae8bd4d 100644 --- a/lincan/src/Makefile +++ b/lincan/src/Makefile @@ -128,7 +128,8 @@ O_OBJS += $(SUPPORTED_CARDS:%=%.o) O_OBJS += can_queue.o can_quekern.o devcommon.o main.o modparms.o \ setup.o finish.o irq.o boardlist.o \ sja1000p.o sja1000.o i82527.o \ - open.o proc.o close.o write.o read.o ioctl.o select.o + open.o proc.o close.o write.o read.o \ + ioctl.o select.o fasync.o # Objects with exported symbols (-DEXPORT_SYMTAB) OX_OBJS = # Module objects diff --git a/lincan/src/bfadcan.c b/lincan/src/bfadcan.c index 0f34958..21de5c7 100644 --- a/lincan/src/bfadcan.c +++ b/lincan/src/bfadcan.c @@ -29,7 +29,7 @@ MODULE_PARM(clock_freq,"i"); /* cli and sti are not allowed in 2.5.5x SMP kernels */ #ifdef WINDOWED_ACCESS -spinlock_t bfadcan_win_lock=SPIN_LOCK_UNLOCKED; +can_spinlock_t bfadcan_win_lock=SPIN_LOCK_UNLOCKED; #endif /* @@ -274,11 +274,11 @@ int 
bfadcan_program_irq(struct candevice_t *candev) void bfadcan_write_register(unsigned char data, unsigned long address) { #ifdef WINDOWED_ACCESS - unsigned long flags; - spin_lock_irqsave(&bfadcan_win_lock,flags); + can_spin_irqflags_t flags; + can_spin_lock_irqsave(&bfadcan_win_lock,flags); outb(address&0x00ff,0x200); outb(data, 0x201); - spin_unlock_irqrestore(&bfadcan_win_lock,flags); + can_spin_unlock_irqrestore(&bfadcan_win_lock,flags); #else outb(data,address); #endif @@ -297,12 +297,12 @@ void bfadcan_write_register(unsigned char data, unsigned long address) unsigned bfadcan_read_register(unsigned long address) { #ifdef WINDOWED_ACCESS - unsigned long flags; + can_spin_irqflags_t flags; int ret; - spin_lock_irqsave(&bfadcan_win_lock,flags); + can_spin_lock_irqsave(&bfadcan_win_lock,flags); outb(address&0x00ff,0x200); ret = inb(0x201); - spin_unlock_irqrestore(&bfadcan_win_lock,flags); + can_spin_unlock_irqrestore(&bfadcan_win_lock,flags); return ret; #else return inb(address); diff --git a/lincan/src/can_quekern.c b/lincan/src/can_quekern.c index 55e8063..fa7e33c 100644 --- a/lincan/src/can_quekern.c +++ b/lincan/src/can_quekern.c @@ -37,7 +37,7 @@ extern int processlocal; void canque_dead_func(unsigned long data); /* Support for dead ends structures left after client close */ -spinlock_t canque_dead_func_lock; +can_spinlock_t canque_dead_func_lock; LIST_HEAD(canque_dead_ends); /* retrieved by list_entry(canque_dead_ends.next,struct canque_ends_t,dead_peers) */ LIST_HEAD(canque_dead_edges); @@ -49,22 +49,22 @@ DECLARE_TASKLET(canque_dead_tl, canque_dead_func, 0); static inline struct canque_edge_t *canque_dead_edges_cut_first(void) { - unsigned long flags; + can_spin_irqflags_t flags; struct canque_edge_t *edge; - spin_lock_irqsave(&canque_dead_func_lock, flags); + can_spin_lock_irqsave(&canque_dead_func_lock, flags); if(list_empty(&canque_dead_edges)) edge=NULL; else{ edge=list_entry(canque_dead_edges.next,struct canque_edge_t,inpeers); 
list_del(&edge->inpeers); } - spin_unlock_irqrestore(&canque_dead_func_lock, flags); + can_spin_unlock_irqrestore(&canque_dead_func_lock, flags); return edge; } void canque_dead_func(unsigned long data) { - unsigned long flags; + can_spin_irqflags_t flags; struct canque_edge_t *qedge; struct canque_ends_t *qends; struct list_head *entry; @@ -74,9 +74,9 @@ void canque_dead_func(unsigned long data) kfree(qedge); } - spin_lock_irqsave(&canque_dead_func_lock, flags); + can_spin_lock_irqsave(&canque_dead_func_lock, flags); entry=canque_dead_ends.next; - spin_unlock_irqrestore(&canque_dead_func_lock,flags); + can_spin_unlock_irqrestore(&canque_dead_func_lock,flags); while(entry!=&canque_dead_ends){ qends=list_entry(canque_dead_ends.next,struct canque_ends_t,dead_peers); entry=entry->next; @@ -84,9 +84,9 @@ void canque_dead_func(unsigned long data) continue; if(!list_empty(&qends->outlist)) continue; - spin_lock_irqsave(&canque_dead_func_lock, flags); + can_spin_lock_irqsave(&canque_dead_func_lock, flags); list_del(&qends->dead_peers); - spin_unlock_irqrestore(&canque_dead_func_lock,flags); + can_spin_unlock_irqrestore(&canque_dead_func_lock,flags); DEBUGQUE("ends structure disposed\n"); kfree(qends); } @@ -96,7 +96,7 @@ void canque_dead_func(unsigned long data) void canque_edge_do_dead(struct canque_edge_t *edge, int dead_fl) { - unsigned long flags; + can_spin_irqflags_t flags; if(dead_fl) return; @@ -105,19 +105,19 @@ void canque_edge_do_dead(struct canque_edge_t *edge, int dead_fl) return; } - spin_lock_irqsave(&canque_dead_func_lock, flags); + can_spin_lock_irqsave(&canque_dead_func_lock, flags); list_add(&edge->inpeers,&canque_dead_edges); - spin_unlock_irqrestore(&canque_dead_func_lock, flags); + can_spin_unlock_irqrestore(&canque_dead_func_lock, flags); tasklet_schedule(&canque_dead_tl); } /*if(qends->ends_flags & CAN_ENDSF_DEAD){ - spin_lock_irqsave(&canque_dead_func_lock, flags); + can_spin_lock_irqsave(&canque_dead_func_lock, flags); 
list_del(&qends->dead_peers); list_add(&qends->dead_peers,&canque_dead_ends); - spin_unlock_irqrestore(&canque_dead_func_lock, flags); + can_spin_unlock_irqrestore(&canque_dead_func_lock, flags); tasklet_schedule(&canque_dead_tl); }*/ @@ -140,9 +140,17 @@ void canqueue_notify_kern(struct canque_ends_t *qends, struct canque_edge_t *qed break; case CANQUEUE_NOTIFY_SPACE: wake_up(&qends->endinfo.fileinfo.writeq); + #ifdef CAN_ENABLE_KERN_FASYNC + /* Asynchronous I/O processing */ + kill_fasync(&qends->endinfo.fileinfo.fasync, SIGIO, POLL_OUT); + #endif /*CAN_ENABLE_KERN_FASYNC*/ break; case CANQUEUE_NOTIFY_PROC: wake_up(&qends->endinfo.fileinfo.readq); + #ifdef CAN_ENABLE_KERN_FASYNC + /* Asynchronous I/O processing */ + kill_fasync(&qends->endinfo.fileinfo.fasync, SIGIO, POLL_IN); + #endif /*CAN_ENABLE_KERN_FASYNC*/ break; case CANQUEUE_NOTIFY_NOUSR: wake_up(&qends->endinfo.fileinfo.readq); @@ -170,6 +178,10 @@ int canqueue_ends_init_kern(struct canque_ends_t *qends) init_waitqueue_head(&qends->endinfo.fileinfo.readq); init_waitqueue_head(&qends->endinfo.fileinfo.writeq); init_waitqueue_head(&qends->endinfo.fileinfo.emptyq); + #ifdef CAN_ENABLE_KERN_FASYNC + qends->endinfo.fileinfo.fasync=NULL; + #endif /*CAN_ENABLE_KERN_FASYNC*/ + qends->notify=canqueue_notify_kern; DEBUGQUE("canqueue_ends_init_kern\n"); return 0; @@ -255,7 +267,7 @@ struct canque_edge_t *canque_new_edge_kern(int slotsnr) if(qedge == NULL) return NULL; memset(qedge,0,sizeof(struct canque_edge_t)); - spin_lock_init(&qedge->fifo.fifo_lock); + can_spin_lock_init(&qedge->fifo.fifo_lock); if(canque_fifo_init_slots(&qedge->fifo, slotsnr)<0){ kfree(qedge); DEBUGQUE("canque_new_edge_kern failed\n"); @@ -274,7 +286,7 @@ struct canque_edge_t *canque_new_edge_kern(int slotsnr) return qedge; } -#ifdef USE_DELAYED_DISCONNECT_EDGE_KERN +#ifdef USE_SYNC_DISCONNECT_EDGE_KERN /** * canqueue_disconnect_edge_kern - disconnect edge from communicating entities with wait @@ -313,11 +325,11 @@ int 
canqueue_disconnect_edge_kern(struct canque_ends_t *qends, struct canque_edg int canqueue_disconnect_list_kern(struct canque_ends_t *qends, struct list_head *list) { struct canque_edge_t *edge; - unsigned long flags; + can_spin_irqflags_t flags; for(;;){ - spin_lock_irqsave(&qends->ends_lock,flags); + can_spin_lock_irqsave(&qends->ends_lock,flags); if(list_empty(list)){ - spin_unlock_irqrestore(&qends->ends_lock,flags); + can_spin_unlock_irqrestore(&qends->ends_lock,flags); return 0; } if(list == &qends->inlist) @@ -325,7 +337,7 @@ int canqueue_disconnect_list_kern(struct canque_ends_t *qends, struct list_head else edge=list_entry(list->next,struct canque_edge_t,outpeers); atomic_inc(&edge->edge_used); - spin_unlock_irqrestore(&qends->ends_lock,flags); + can_spin_unlock_irqrestore(&qends->ends_lock,flags); if(canqueue_disconnect_edge_kern(qends, edge)>=0) { /* Free edge memory */ canque_fifo_done(&edge->fifo); @@ -340,7 +352,7 @@ int canqueue_disconnect_list_kern(struct canque_ends_t *qends, struct list_head } } -#endif /*USE_DELAYED_DISCONNECT_EDGE_KERN*/ +#endif /*USE_SYNC_DISCONNECT_EDGE_KERN*/ int canqueue_ends_sync_all_kern(struct canque_ends_t *qends) @@ -354,38 +366,6 @@ int canqueue_ends_sync_all_kern(struct canque_ends_t *qends) return 0; } -int canqueue_ends_done_inends(struct canque_ends_t *qends, int send_rest) -{ - struct canque_edge_t *edge; - - canque_for_each_inedge(qends, edge){ - canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED); - if(send_rest){ - canque_edge_incref(edge); - if(!canque_fifo_test_and_set_fl(&edge->fifo, FREEONEMPTY)){ - if(!canque_fifo_test_fl(&edge->fifo, EMPTY)) - continue; - if(!canque_fifo_test_and_clear_fl(&edge->fifo, FREEONEMPTY)) - continue; - } - canque_edge_decref(edge); - } - } - return list_empty(&qends->inlist)?0:1; -} - - -int canqueue_ends_done_outends(struct canque_ends_t *qends) -{ - struct canque_edge_t *edge; - - canque_for_each_outedge(qends, edge){ - canque_notify_bothends(edge, 
CANQUEUE_NOTIFY_DEAD_WANTED); - } - return list_empty(&qends->outlist)?0:1; -} - - /** * canqueue_ends_dispose_kern - finalizing of the ends structure for Linux kernel clients * @qends: pointer to ends structure @@ -396,7 +376,7 @@ int canqueue_ends_done_outends(struct canque_ends_t *qends) */ int canqueue_ends_dispose_kern(struct canque_ends_t *qends, int sync) { - unsigned long flags; + can_spin_irqflags_t flags; int delayed; DEBUGQUE("canqueue_ends_dispose_kern\n"); @@ -407,20 +387,20 @@ int canqueue_ends_dispose_kern(struct canque_ends_t *qends, int sync) if(sync) canqueue_ends_sync_all_kern(qends); - /* Finish all outgoing edges listed in inends */ - delayed=canqueue_ends_done_inends(qends, 1); - - delayed|=canqueue_ends_done_outends(qends); + /* Finish or kill all outgoing edges listed in inends */ + delayed=canqueue_ends_kill_inlist(qends, 1); + /* Kill all incoming edges listed in outends */ + delayed|=canqueue_ends_kill_outlist(qends); wake_up(&qends->endinfo.fileinfo.readq); wake_up(&qends->endinfo.fileinfo.writeq); wake_up(&qends->endinfo.fileinfo.emptyq); if(delayed){ - spin_lock_irqsave(&canque_dead_func_lock, flags); + can_spin_lock_irqsave(&canque_dead_func_lock, flags); qends->ends_flags |= CAN_ENDSF_DEAD; list_add(&qends->dead_peers,&canque_dead_ends); - spin_unlock_irqrestore(&canque_dead_func_lock, flags); + can_spin_unlock_irqrestore(&canque_dead_func_lock, flags); tasklet_schedule(&canque_dead_tl); DEBUGQUE("canqueue_ends_dispose_kern delayed\n"); diff --git a/lincan/src/can_queue.c b/lincan/src/can_queue.c index ef06005..c0f1a6d 100644 --- a/lincan/src/can_queue.c +++ b/lincan/src/can_queue.c @@ -47,9 +47,9 @@ atomic_t edge_num_cnt; int canque_fifo_flush_slots(struct canque_fifo_t *fifo) { int ret; - unsigned long flags; + can_spin_irqflags_t flags; struct canque_slot_t *slot; - spin_lock_irqsave(&fifo->fifo_lock, flags); + can_spin_lock_irqsave(&fifo->fifo_lock, flags); slot=fifo->head; if(slot){ *fifo->tail=fifo->flist; @@ -59,7 +59,7 @@ int 
canque_fifo_flush_slots(struct canque_fifo_t *fifo) } canque_fifo_clear_fl(fifo,FULL); ret=canque_fifo_test_and_set_fl(fifo,EMPTY)?0:1; - spin_unlock_irqrestore(&fifo->fifo_lock, flags); + can_spin_unlock_irqrestore(&fifo->fifo_lock, flags); return ret; } @@ -318,34 +318,34 @@ int canque_filter_msg2edges(struct canque_ends_t *qends, struct canmsg_t *msg) int canque_test_outslot(struct canque_ends_t *qends, struct canque_edge_t **qedgep, struct canque_slot_t **slotp) { - unsigned long flags; + can_spin_irqflags_t flags; int prio; struct canque_edge_t *edge; int ret; - spin_lock_irqsave(&qends->ends_lock, flags); + can_spin_lock_irqsave(&qends->ends_lock, flags); for(prio=CANQUEUE_PRIO_NR;--prio>=0;){ while(!list_empty(&qends->active[prio])){ edge=list_entry(qends->active[prio].next,struct canque_edge_t,activepeers); if(!canque_fifo_test_fl(&edge->fifo,DEAD)) { canque_edge_incref(edge); - spin_unlock_irqrestore(&qends->ends_lock, flags); + can_spin_unlock_irqrestore(&qends->ends_lock, flags); *qedgep=edge; DEBUGQUE("canque_test_outslot found edge %d\n",edge->edge_num); ret=canque_fifo_test_outslot(&edge->fifo, slotp); if(ret>=0) return ret; - spin_lock_irqsave(&qends->ends_lock, flags); + can_spin_lock_irqsave(&qends->ends_lock, flags); } - spin_lock(&edge->fifo.fifo_lock); + can_spin_lock(&edge->fifo.fifo_lock); if(canque_fifo_test_and_set_fl(&edge->fifo,INACTIVE)) { list_del(&edge->activepeers); list_add(&edge->activepeers,&qends->idle); } - spin_unlock(&edge->fifo.fifo_lock); + can_spin_unlock(&edge->fifo.fifo_lock); } } - spin_unlock_irqrestore(&qends->ends_lock, flags); + can_spin_unlock_irqrestore(&qends->ends_lock, flags); *qedgep=NULL; DEBUGQUE("canque_test_outslot no ready slot\n"); return -1; @@ -366,16 +366,16 @@ int canque_free_outslot(struct canque_ends_t *qends, struct canque_edge_t *qedge, struct canque_slot_t *slot) { int ret; - unsigned long flags; + can_spin_irqflags_t flags; ret=canque_fifo_free_outslot(&qedge->fifo, slot); if(ret&CAN_FIFOF_EMPTY){ 
canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY); } if(ret&CAN_FIFOF_FULL) canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE); - spin_lock_irqsave(&qends->ends_lock, flags); + can_spin_lock_irqsave(&qends->ends_lock, flags); if((ret&CAN_FIFOF_EMPTY) || CANQUE_ROUNDROB ){ - spin_lock(&qedge->fifo.fifo_lock); + can_spin_lock(&qedge->fifo.fifo_lock); if(canque_fifo_test_fl(&qedge->fifo,EMPTY)){ canque_fifo_set_fl(&qedge->fifo,INACTIVE); list_del(&qedge->activepeers); @@ -384,9 +384,9 @@ int canque_free_outslot(struct canque_ends_t *qends, list_del(&qedge->activepeers); list_add_tail(&qedge->activepeers,&qends->active[qedge->edge_prio]); } - spin_unlock(&qedge->fifo.fifo_lock); + can_spin_unlock(&qedge->fifo.fifo_lock); } - spin_unlock_irqrestore(&qends->ends_lock, flags); + can_spin_unlock_irqrestore(&qends->ends_lock, flags); canque_edge_decref(qedge); DEBUGQUE("canque_free_outslot for edge %d returned %d\n",qedge->edge_num,ret); return ret; @@ -425,9 +425,9 @@ int canque_set_filt(struct canque_edge_t *qedge, unsigned long filtid, unsigned long filtmask, int filtflags) { int ret; - unsigned long flags; + can_spin_irqflags_t flags; - spin_lock_irqsave(&qedge->fifo.fifo_lock,flags); + can_spin_lock_irqsave(&qedge->fifo.fifo_lock,flags); if(!(filtflags&MSG_PROCESSLOCAL) && (processlocal<2)) filtflags |= MSG_LOCAL_MASK; @@ -438,13 +438,13 @@ int canque_set_filt(struct canque_edge_t *qedge, if(canque_fifo_test_fl(&qedge->fifo,DEAD)) ret=-1; else ret=canque_fifo_test_and_set_fl(&qedge->fifo,BLOCK)?1:0; - spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags); + can_spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags); if(ret>=0){ canque_notify_bothends(qedge,CANQUEUE_NOTIFY_FILTCH); } - spin_lock_irqsave(&qedge->fifo.fifo_lock,flags); + can_spin_lock_irqsave(&qedge->fifo.fifo_lock,flags); if(!ret) canque_fifo_clear_fl(&qedge->fifo,BLOCK); - spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags); + can_spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags); 
DEBUGQUE("canque_set_filt for edge %d, ID %ld, mask %ld, flags %d returned %d\n", qedge->edge_num,filtid,filtmask,filtflags,ret); @@ -464,20 +464,20 @@ int canque_set_filt(struct canque_edge_t *qedge, int canque_flush(struct canque_edge_t *qedge) { int ret; - unsigned long flags; + can_spin_irqflags_t flags; ret=canque_fifo_flush_slots(&qedge->fifo); if(ret){ canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY); canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE); - spin_lock_irqsave(&qedge->outends->ends_lock, flags); - spin_lock(&qedge->fifo.fifo_lock); + can_spin_lock_irqsave(&qedge->outends->ends_lock, flags); + can_spin_lock(&qedge->fifo.fifo_lock); if(canque_fifo_test_fl(&qedge->fifo,EMPTY)){ list_del(&qedge->activepeers); list_add(&qedge->activepeers,&qedge->outends->idle); } - spin_unlock(&qedge->fifo.fifo_lock); - spin_unlock_irqrestore(&qedge->outends->ends_lock, flags); + can_spin_unlock(&qedge->fifo.fifo_lock); + can_spin_unlock_irqrestore(&qedge->outends->ends_lock, flags); } DEBUGQUE("canque_flush for edge %d returned %d\n",qedge->edge_num,ret); return ret; @@ -498,7 +498,7 @@ int canqueue_ends_init_gen(struct canque_ends_t *qends) INIT_LIST_HEAD(&qends->idle); INIT_LIST_HEAD(&qends->inlist); INIT_LIST_HEAD(&qends->outlist); - spin_lock_init(&qends->ends_lock); + can_spin_lock_init(&qends->ends_lock); return 0; } @@ -513,21 +513,21 @@ int canqueue_ends_init_gen(struct canque_ends_t *qends) */ int canqueue_connect_edge(struct canque_edge_t *qedge, struct canque_ends_t *inends, struct canque_ends_t *outends) { - unsigned long flags; + can_spin_irqflags_t flags; if(qedge == NULL) return -1; DEBUGQUE("canqueue_connect_edge %d\n",qedge->edge_num); canque_edge_incref(qedge); - spin_lock_irqsave(&inends->ends_lock, flags); - spin_lock(&outends->ends_lock); - spin_lock(&qedge->fifo.fifo_lock); + can_spin_lock_irqsave(&inends->ends_lock, flags); + can_spin_lock(&outends->ends_lock); + can_spin_lock(&qedge->fifo.fifo_lock); qedge->inends=inends; 
list_add(&qedge->inpeers,&inends->inlist); qedge->outends=outends; list_add(&qedge->outpeers,&outends->outlist); list_add(&qedge->activepeers,&outends->idle); - spin_unlock(&qedge->fifo.fifo_lock); - spin_unlock(&outends->ends_lock); - spin_unlock_irqrestore(&inends->ends_lock, flags); + can_spin_unlock(&qedge->fifo.fifo_lock); + can_spin_unlock(&outends->ends_lock); + can_spin_unlock_irqrestore(&inends->ends_lock, flags); canque_notify_bothends(qedge, CANQUEUE_NOTIFY_ATTACH); if(canque_fifo_test_and_set_fl(&qedge->fifo, READY)) @@ -545,14 +545,14 @@ int canqueue_connect_edge(struct canque_edge_t *qedge, struct canque_ends_t *ine int canqueue_disconnect_edge(struct canque_edge_t *qedge) { int ret; - unsigned long flags; + can_spin_irqflags_t flags; struct canque_ends_t *inends, *outends; inends=qedge->inends; - if(inends) spin_lock_irqsave(&inends->ends_lock,flags); + if(inends) can_spin_lock_irqsave(&inends->ends_lock,flags); outends=qedge->outends; - if(outends) spin_lock(&outends->ends_lock); - spin_lock(&qedge->fifo.fifo_lock); + if(outends) can_spin_lock(&outends->ends_lock); + can_spin_lock(&qedge->fifo.fifo_lock); if(atomic_read(&qedge->edge_used)==0) { if(qedge->outends){ list_del(&qedge->activepeers); @@ -566,9 +566,9 @@ int canqueue_disconnect_edge(struct canque_edge_t *qedge) } ret=1; } else ret=-1; - spin_unlock(&qedge->fifo.fifo_lock); - if(outends) spin_unlock(&outends->ends_lock); - if(inends) spin_unlock_irqrestore(&inends->ends_lock,flags); + can_spin_unlock(&qedge->fifo.fifo_lock); + if(outends) can_spin_unlock(&outends->ends_lock); + if(inends) can_spin_unlock_irqrestore(&inends->ends_lock,flags); DEBUGQUE("canqueue_disconnect_edge %d returned %d\n",qedge->edge_num,ret); return ret; } @@ -602,3 +602,52 @@ void canqueue_block_outlist(struct canque_ends_t *qends) } +/** + * canqueue_ends_kill_inlist - sends request to die to all outgoing edges + * @qends: pointer to ends structure + * @send_rest: select, whether already allocated slots should be 
processed + * by FIFO output side + * + * Return Value: Non-zero value means, that not all edges could be immediately + * disconnected and that ends structure memory release has to be delayed + */ +int canqueue_ends_kill_inlist(struct canque_ends_t *qends, int send_rest) +{ + struct canque_edge_t *edge; + + canque_for_each_inedge(qends, edge){ + canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED); + if(send_rest){ + canque_edge_incref(edge); + if(!canque_fifo_test_and_set_fl(&edge->fifo, FREEONEMPTY)){ + if(!canque_fifo_test_fl(&edge->fifo, EMPTY)) + continue; + if(!canque_fifo_test_and_clear_fl(&edge->fifo, FREEONEMPTY)) + continue; + } + canque_edge_decref(edge); + } + } + return list_empty(&qends->inlist)?0:1; +} + + +/** + * canqueue_ends_kill_outlist - sends request to die to all incoming edges + * @qends: pointer to ends structure + * + * Return Value: Non-zero value means, that not all edges could be immediately + * disconnected and that ends structure memory release has to be delayed + */ +int canqueue_ends_kill_outlist(struct canque_ends_t *qends) +{ + struct canque_edge_t *edge; + + canque_for_each_outedge(qends, edge){ + canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED); + } + return list_empty(&qends->outlist)?0:1; +} + + + diff --git a/lincan/src/close.c b/lincan/src/close.c index c42869e..4df1533 100644 --- a/lincan/src/close.c +++ b/lincan/src/close.c @@ -13,6 +13,7 @@ #include "../include/close.h" #include "../include/i82527.h" #include "../include/setup.h" +#include "../include/fasync.h" #define __NO_VERSION__ #include @@ -31,6 +32,12 @@ int can_close(struct inode *inode, struct file *file) obj = canuser->msgobj; qends = canuser->qends; + #ifdef CAN_ENABLE_KERN_FASYNC + + can_fasync(-1, file, 0); + + #endif /*CAN_ENABLE_KERN_FASYNC*/ + list_del(&canuser->peers); canuser->qends = NULL; canqueue_ends_dispose_kern(qends, file->f_flags & O_SYNC); @@ -42,8 +49,8 @@ int can_close(struct inode *inode, struct file *file) /* FIXME: what about 
clearing chip HW status, stopping sending messages etc? */ }; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,50)) + #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,50)) MOD_DEC_USE_COUNT; -#endif + #endif return 0; } diff --git a/lincan/src/devcommon.c b/lincan/src/devcommon.c index dce205a..c26a36f 100644 --- a/lincan/src/devcommon.c +++ b/lincan/src/devcommon.c @@ -58,5 +58,12 @@ int canqueue_ends_init_chip(struct canque_ends_t *qends, struct chip_t *chip, st int canqueue_ends_done_chip(struct canque_ends_t *qends) { - return 0; + int delayed; + + /* Finish or kill all outgoing edges listed in inends */ + delayed=canqueue_ends_kill_inlist(qends, 1); + /* Kill all incoming edges listed in outends */ + delayed|=canqueue_ends_kill_outlist(qends); + + return delayed; } diff --git a/lincan/src/fasync.c b/lincan/src/fasync.c new file mode 100644 index 0000000..bca32e0 --- /dev/null +++ b/lincan/src/fasync.c @@ -0,0 +1,40 @@ +/* fasync.c + * Linux CAN-bus device driver. + * Written by Arnaud Westenberg email:arnaud@wanadoo.nl + * Rewritten for new CAN queues by Pavel Pisa - OCERA team member + * email:pisa@cmp.felk.cvut.cz + * This software is released under the GPL-License. 
+ * Version lincan-0.2 9 Jul 2003 + */ + +#include "../include/can.h" +#include "../include/can_sysdep.h" +#include "../include/main.h" +#include "../include/fasync.h" + +#ifdef CAN_ENABLE_KERN_FASYNC + +int can_fasync(int fd, struct file *file, int on) +{ + struct canuser_t *canuser = (struct canuser_t*)(file->private_data); + struct canque_ends_t *qends; + + if(!canuser || (canuser->magic != CAN_USER_MAGIC)){ + CANMSG("can_fasync: bad canuser magic\n"); + return -ENODEV; + } + + qends = canuser->qends; + + int retval = fasync_helper(fd, file, on, &qends->endinfo.fileinfo.fasync); + + if (retval < 0) + return retval; + return 0; +} + + + + + +#endif /*CAN_ENABLE_KERN_FASYNC*/ diff --git a/lincan/src/finish.c b/lincan/src/finish.c index a5cd8d1..fdca959 100644 --- a/lincan/src/finish.c +++ b/lincan/src/finish.c @@ -9,8 +9,10 @@ void msgobj_done(struct msgobj_t *obj) { + int delayed=0; if(obj->qends) { - if(canqueue_ends_done_chip(obj->qends) < 0) + delayed=canqueue_ends_done_chip(obj->qends); + if(delayed < 0) CANMSG("msgobj_done: problem with chip queue ends\n"); } @@ -32,7 +34,11 @@ void msgobj_done(struct msgobj_t *obj) del_timer_sync(&obj->tx_timeout); if(obj->qends) { + /*delayed free could be required there in the future, + actual use pattern cannot generate such situation*/ - can_checked_free(obj->qends); + if(!delayed) { + can_checked_free(obj->qends); + } } obj->qends=NULL; } diff --git a/lincan/src/i82527.c b/lincan/src/i82527.c index cbd7870..7a57f45 100644 --- a/lincan/src/i82527.c +++ b/lincan/src/i82527.c @@ -520,14 +520,14 @@ irqreturn_t i82527_irq_handler(int irq, void *dev_id, struct pt_regs *regs) message_id=(id0|id1)>>5; } - spin_lock(&hardware_p->rtr_lock); + can_spin_lock(&hardware_p->rtr_lock); rtr_search=hardware_p->rtr_queue; while (rtr_search != NULL) { if (rtr_search->id == message_id) break; rtr_search=rtr_search->next; } - spin_unlock(&hardware_p->rtr_lock); + can_spin_unlock(&hardware_p->rtr_lock); if ((rtr_search!=NULL) && 
(rtr_search->id==message_id)) i82527_irq_rtr_handler(chip, obj, rtr_search, message_id); else @@ -547,14 +547,14 @@ void i82527_irq_rtr_handler(struct chip_t *chip, struct msgobj_t *obj, canobj_write_reg(chip,obj,(MVAL_RES|TXIE_RES|RXIE_RES|INTPD_RES),iMSGCTL0); canobj_write_reg(chip,obj,(RMPD_RES|TXRQ_RES|MLST_RES|NEWD_RES),iMSGCTL1); - spin_lock(&hardware_p->rtr_lock); + can_spin_lock(&hardware_p->rtr_lock); rtr_search->rtr_message->id=message_id; rtr_search->rtr_message->length=(canobj_read_reg(chip,obj,iMSGCFG) & 0xf0)>>4; for (i=0; irtr_message->length; i++) rtr_search->rtr_message->data[i]=canobj_read_reg(chip,obj,iMSGDAT0+i); - spin_unlock(&hardware_p->rtr_lock); + can_spin_unlock(&hardware_p->rtr_lock); if (waitqueue_active(&rtr_search->rtr_wq)) wake_up(&rtr_search->rtr_wq); @@ -563,11 +563,11 @@ void i82527_irq_rtr_handler(struct chip_t *chip, struct msgobj_t *obj, int i82527_wakeup_tx(struct chip_t *chip, struct msgobj_t *obj) { /* dummy lock to prevent preemption fully portable way */ - spinlock_t dummy_lock; + can_spinlock_t dummy_lock; /* preempt_disable() */ - spin_lock_init(&dummy_lock); - spin_lock(&dummy_lock); + can_spin_lock_init(&dummy_lock); + can_spin_lock(&dummy_lock); set_bit(OBJ_TX_REQUEST,&obj->flags); while(!test_and_set_bit(OBJ_TX_LOCK,&obj->flags)){ @@ -581,7 +581,7 @@ int i82527_wakeup_tx(struct chip_t *chip, struct msgobj_t *obj) } /* preempt_enable(); */ - spin_unlock(&dummy_lock); + can_spin_unlock(&dummy_lock); return 0; } diff --git a/lincan/src/main.c b/lincan/src/main.c index a1b6631..918c2f7 100644 --- a/lincan/src/main.c +++ b/lincan/src/main.c @@ -54,6 +54,7 @@ #include "../include/ioctl.h" #include "../include/write.h" #include "../include/finish.h" +#include "../include/fasync.h" #define EXPORT_SYMTAB @@ -111,33 +112,21 @@ devfs_handle_t devfs_handles[MAX_TOT_MSGOBJS]; */ struct mem_addr *mem_head=NULL; -#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,3,0)) -struct file_operations can_fops= -{ - NULL, /* llseek */ - read: 
can_read, - write: can_write, - NULL, /* readdir */ - poll: can_poll, - ioctl: can_ioctl, - NULL, /* mmap */ - open: can_open, - NULL, /* flush */ - release: can_close, - NULL, /* fsync */ -}; -#else struct file_operations can_fops= { + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)) owner: THIS_MODULE, + #endif read: can_read, write: can_write, poll: can_poll, ioctl: can_ioctl, open: can_open, release: can_close, + #ifdef CAN_ENABLE_KERN_FASYNC + fasync: can_fasync + #endif /*CAN_ENABLE_KERN_FASYNC*/ }; -#endif EXPORT_SYMBOL(can_fops); @@ -178,7 +167,7 @@ int init_module(void) goto reset_error; } - spin_lock_init(&hardware_p->rtr_lock); + can_spin_lock_init(&hardware_p->rtr_lock); hardware_p->rtr_queue=NULL; for (i=0; inr_boards; i++) { diff --git a/lincan/src/read.c b/lincan/src/read.c index 56cb1d5..d203f0f 100644 --- a/lincan/src/read.c +++ b/lincan/src/read.c @@ -63,16 +63,16 @@ inline ssize_t can_std_read(struct file *file, struct canque_ends_t *qends, inline ssize_t can_rtr_read(struct chip_t *chip, struct msgobj_t *obj, char *buffer) { - unsigned long flags; + can_spin_irqflags_t flags; struct rtr_id *rtr_current, *new_rtr_entry; struct canmsg_t read_msg; DEBUGMSG("Remote transmission request\n"); - spin_lock_irqsave(&hardware_p->rtr_lock, flags); + can_spin_lock_irqsave(&hardware_p->rtr_lock, flags); if (hardware_p->rtr_queue == NULL) { //No remote messages pending new_rtr_entry=(struct rtr_id *)kmalloc(sizeof(struct rtr_id),GFP_ATOMIC); if (new_rtr_entry == NULL) { - spin_unlock_irqrestore(&hardware_p->rtr_lock, + can_spin_unlock_irqrestore(&hardware_p->rtr_lock, flags); return -ENOMEM; } @@ -90,14 +90,14 @@ inline ssize_t can_rtr_read(struct chip_t *chip, struct msgobj_t *obj, new_rtr_entry->rtr_message = &read_msg; new_rtr_entry->next=NULL; - spin_unlock_irqrestore(&hardware_p->rtr_lock, flags); + can_spin_unlock_irqrestore(&hardware_p->rtr_lock, flags); /* Send remote transmission request */ chip->chipspecops->remote_request(chip,obj); obj->ret = 0; 
interruptible_sleep_on(&new_rtr_entry->rtr_wq); - spin_lock_irqsave(&hardware_p->rtr_lock, flags); + can_spin_lock_irqsave(&hardware_p->rtr_lock, flags); copy_to_user(buffer, &read_msg, sizeof(struct canmsg_t)); if (hardware_p->rtr_queue == new_rtr_entry) { if (new_rtr_entry->next != NULL) @@ -114,7 +114,7 @@ inline ssize_t can_rtr_read(struct chip_t *chip, struct msgobj_t *obj, else rtr_current->next=NULL; } - spin_unlock_irqrestore(&hardware_p->rtr_lock, flags); + can_spin_unlock_irqrestore(&hardware_p->rtr_lock, flags); kfree(new_rtr_entry); return obj->ret; diff --git a/lincan/src/select.c b/lincan/src/select.c index 79d97fc..894214d 100644 --- a/lincan/src/select.c +++ b/lincan/src/select.c @@ -20,9 +20,7 @@ unsigned int can_poll(struct file *file, poll_table *wait) struct canque_ends_t *qends; struct msgobj_t *obj; unsigned int mask = 0; - unsigned long flags; struct canque_edge_t *edge; - struct list_head *entry; int full=0; int i; @@ -45,13 +43,10 @@ unsigned int can_poll(struct file *file, poll_table *wait) if ((file->f_mode & FMODE_WRITE) && !(file->f_flags & O_SYNC)) { poll_wait(file, &qends->endinfo.fileinfo.writeq, wait); - spin_lock_irqsave(&qends->ends_lock, flags); - list_for_each(entry,&qends->inlist){ - edge=list_entry(entry,struct canque_edge_t,inpeers); + canque_for_each_inedge(qends, edge) { if(canque_fifo_test_fl(&edge->fifo,FULL)) full=1; } - spin_unlock_irqrestore(&qends->ends_lock, flags); if(!full) mask |= POLLOUT | POLLWRNORM; @@ -60,13 +55,10 @@ unsigned int can_poll(struct file *file, poll_table *wait) if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_SYNC)) { poll_wait(file, &qends->endinfo.fileinfo.emptyq, wait); - spin_lock_irqsave(&qends->ends_lock, flags); - list_for_each(entry,&qends->inlist){ - edge=list_entry(entry,struct canque_edge_t,inpeers); + canque_for_each_inedge(qends, edge) { if(!canque_fifo_test_fl(&edge->fifo,EMPTY)) full=1; } - spin_unlock_irqrestore(&qends->ends_lock, flags); if(!full) mask |= POLLOUT | 
POLLWRNORM; diff --git a/lincan/src/sja1000.c b/lincan/src/sja1000.c index 9211511..7b1f88d 100644 --- a/lincan/src/sja1000.c +++ b/lincan/src/sja1000.c @@ -447,11 +447,11 @@ void sja1000_irq_write_handler(struct chip_t *chip, struct msgobj_t *obj) int sja1000_wakeup_tx(struct chip_t *chip, struct msgobj_t *obj) { /* dummy lock to prevent preemption fully portable way */ - spinlock_t dummy_lock; + can_spinlock_t dummy_lock; /* preempt_disable() */ - spin_lock_init(&dummy_lock); - spin_lock(&dummy_lock); + can_spin_lock_init(&dummy_lock); + can_spin_lock(&dummy_lock); set_bit(OBJ_TX_REQUEST,&obj->flags); while(!test_and_set_bit(OBJ_TX_LOCK,&obj->flags)){ @@ -465,7 +465,7 @@ int sja1000_wakeup_tx(struct chip_t *chip, struct msgobj_t *obj) } /* preempt_enable(); */ - spin_unlock(&dummy_lock); + can_spin_unlock(&dummy_lock); return 0; } diff --git a/lincan/src/sja1000p.c b/lincan/src/sja1000p.c index bc2fcc3..b34e984 100644 --- a/lincan/src/sja1000p.c +++ b/lincan/src/sja1000p.c @@ -691,11 +691,11 @@ irqreturn_t sja1000p_irq_handler(int irq, void *dev_id, struct pt_regs *regs) int sja1000p_wakeup_tx(struct chip_t *chip, struct msgobj_t *obj) { /* dummy lock to prevent preemption fully portable way */ - spinlock_t dummy_lock; + can_spinlock_t dummy_lock; /* preempt_disable() */ - spin_lock_init(&dummy_lock); - spin_lock(&dummy_lock); + can_spin_lock_init(&dummy_lock); + can_spin_lock(&dummy_lock); set_bit(OBJ_TX_REQUEST,&obj->flags); while(!test_and_set_bit(OBJ_TX_LOCK,&obj->flags)){ @@ -712,7 +712,7 @@ int sja1000p_wakeup_tx(struct chip_t *chip, struct msgobj_t *obj) } /* preempt_enable(); */ - spin_unlock(&dummy_lock); + can_spin_unlock(&dummy_lock); return 0; } diff --git a/lincan/src/virtual.c b/lincan/src/virtual.c index d51a5cb..e4b9c17 100644 --- a/lincan/src/virtual.c +++ b/lincan/src/virtual.c @@ -290,11 +290,11 @@ void virtual_schedule_next(struct msgobj_t *obj) { int cmd; /* dummy lock to prevent preemption fully portable way */ - spinlock_t dummy_lock; + 
can_spinlock_t dummy_lock; /* preempt_disable() */ - spin_lock_init(&dummy_lock); - spin_lock(&dummy_lock); + can_spin_lock_init(&dummy_lock); + can_spin_lock(&dummy_lock); set_bit(OBJ_TX_REQUEST,&obj->flags); @@ -316,7 +316,7 @@ void virtual_schedule_next(struct msgobj_t *obj) } /* preempt_enable(); */ - spin_unlock(&dummy_lock); + can_spin_unlock(&dummy_lock); }