struct canque_slot_t **tail; /* points to NULL pointer for chaining */
struct canque_slot_t *flist; /* points the first entry in the free list */
struct canque_slot_t *entry; /* points to first allocated entry */
- spinlock_t fifo_lock; /* spin_lock_irqsave / spin_lock_irqrestore */
+ can_spinlock_t fifo_lock; /* can_spin_lock_irqsave / can_spin_unlock_irqrestore */
};
#define CAN_FIFOF_DESTROY_b 15
static inline
int canque_fifo_get_inslot(struct canque_fifo_t *fifo, struct canque_slot_t **slotp, int cmd)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_slot_t *slot;
- spin_lock_irqsave(&fifo->fifo_lock, flags);
+ can_spin_lock_irqsave(&fifo->fifo_lock, flags);
/* get the first free slot slot from flist */
if(!(slot=fifo->flist)) {
canque_fifo_set_fl(fifo,OVERRUN);
canque_fifo_set_fl(fifo,FULL);
- spin_unlock_irqrestore(&fifo->fifo_lock, flags);
+ can_spin_unlock_irqrestore(&fifo->fifo_lock, flags);
*slotp=NULL;
return -1;
}
/* adjust free slot list */
if(!(fifo->flist=slot->next))
canque_fifo_set_fl(fifo,FULL);
- spin_unlock_irqrestore(&fifo->fifo_lock, flags);
+ can_spin_unlock_irqrestore(&fifo->fifo_lock, flags);
*slotp=slot;
slot->slot_flags=cmd&CAN_SLOTF_CMD;
return 1;
int canque_fifo_put_inslot(struct canque_fifo_t *fifo, struct canque_slot_t *slot)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
slot->next=NULL;
- spin_lock_irqsave(&fifo->fifo_lock, flags);
+ can_spin_lock_irqsave(&fifo->fifo_lock, flags);
if(*fifo->tail) printk(KERN_CRIT "canque_fifo_put_inslot: fifo->tail != NULL\n");
*fifo->tail=slot;
fifo->tail=&slot->next;
ret=CAN_FIFOF_EMPTY; /* Fifo has been empty before put */
if(canque_fifo_test_and_clear_fl(fifo,INACTIVE))
ret=CAN_FIFOF_INACTIVE; /* Fifo has been empty before put */
- spin_unlock_irqrestore(&fifo->fifo_lock, flags);
+ can_spin_unlock_irqrestore(&fifo->fifo_lock, flags);
return ret;
}
int canque_fifo_abort_inslot(struct canque_fifo_t *fifo, struct canque_slot_t *slot)
{
int ret=0;
- unsigned long flags;
- spin_lock_irqsave(&fifo->fifo_lock, flags);
+ can_spin_irqflags_t flags;
+ can_spin_lock_irqsave(&fifo->fifo_lock, flags);
slot->next=fifo->flist;
fifo->flist=slot;
if(canque_fifo_test_and_clear_fl(fifo,FULL))
ret=CAN_FIFOF_FULL;
- spin_unlock_irqrestore(&fifo->fifo_lock, flags);
+ can_spin_unlock_irqrestore(&fifo->fifo_lock, flags);
return ret;
}
static inline
int canque_fifo_test_outslot(struct canque_fifo_t *fifo, struct canque_slot_t **slotp)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
int cmd;
struct canque_slot_t *slot;
- spin_lock_irqsave(&fifo->fifo_lock, flags);
+ can_spin_lock_irqsave(&fifo->fifo_lock, flags);
if(!(slot=fifo->head)){;
canque_fifo_set_fl(fifo,EMPTY);
- spin_unlock_irqrestore(&fifo->fifo_lock, flags);
+ can_spin_unlock_irqrestore(&fifo->fifo_lock, flags);
*slotp=NULL;
return -1;
}
if(!(fifo->head=slot->next))
fifo->tail=&fifo->head;
- spin_unlock_irqrestore(&fifo->fifo_lock, flags);
+ can_spin_unlock_irqrestore(&fifo->fifo_lock, flags);
*slotp=slot;
cmd=slot->slot_flags;
int canque_fifo_free_outslot(struct canque_fifo_t *fifo, struct canque_slot_t *slot)
{
int ret=0;
- unsigned long flags;
- spin_lock_irqsave(&fifo->fifo_lock, flags);
+ can_spin_irqflags_t flags;
+ can_spin_lock_irqsave(&fifo->fifo_lock, flags);
slot->next=fifo->flist;
fifo->flist=slot;
if(canque_fifo_test_and_clear_fl(fifo,FULL))
canque_fifo_set_fl(fifo,EMPTY);
ret|=CAN_FIFOF_EMPTY;
}
- spin_unlock_irqrestore(&fifo->fifo_lock, flags);
+ can_spin_unlock_irqrestore(&fifo->fifo_lock, flags);
return ret;
}
static inline
int canque_fifo_again_outslot(struct canque_fifo_t *fifo, struct canque_slot_t *slot)
{
- unsigned long flags;
- spin_lock_irqsave(&fifo->fifo_lock, flags);
+ can_spin_irqflags_t flags;
+ can_spin_lock_irqsave(&fifo->fifo_lock, flags);
if(!(slot->next=fifo->head))
fifo->tail=&slot->next;
fifo->head=slot;
- spin_unlock_irqrestore(&fifo->fifo_lock, flags);
+ can_spin_unlock_irqrestore(&fifo->fifo_lock, flags);
return 1;
}
struct list_head idle;
struct list_head inlist;
struct list_head outlist;
- spinlock_t ends_lock; /* spin_lock_irqsave / spin_lock_irqrestore */
+ can_spinlock_t ends_lock; /* can_spin_lock_irqsave / can_spin_unlock_irqrestore */
void (*notify)(struct canque_ends_t *qends, struct canque_edge_t *qedge, int what);
void *context;
union {
wait_queue_head_t readq;
wait_queue_head_t writeq;
wait_queue_head_t emptyq;
+ #ifdef CAN_ENABLE_KERN_FASYNC
+ struct fasync_struct *fasync;
+ #endif /*CAN_ENABLE_KERN_FASYNC*/
} fileinfo;
struct {
wait_queue_head_t daemonq;
static inline
void canque_activate_edge(struct canque_ends_t *inends, struct canque_edge_t *qedge)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_ends_t *outends;
if(qedge->edge_prio>=CANQUEUE_PRIO_NR)
qedge->edge_prio=CANQUEUE_PRIO_NR-1;
- spin_lock_irqsave(&inends->ends_lock, flags);
+ can_spin_lock_irqsave(&inends->ends_lock, flags);
if((outends=qedge->outends)){
- spin_lock(&outends->ends_lock);
- spin_lock(&qedge->fifo.fifo_lock);
+ can_spin_lock(&outends->ends_lock);
+ can_spin_lock(&qedge->fifo.fifo_lock);
if(!canque_fifo_test_fl(&qedge->fifo,EMPTY)){
list_del(&qedge->activepeers);
list_add_tail(&qedge->activepeers,&outends->active[qedge->edge_prio]);
}
- spin_unlock(&qedge->fifo.fifo_lock);
- spin_unlock(&outends->ends_lock);
+ can_spin_unlock(&qedge->fifo.fifo_lock);
+ can_spin_unlock(&outends->ends_lock);
}
- spin_unlock_irqrestore(&inends->ends_lock, flags);
+ can_spin_unlock_irqrestore(&inends->ends_lock, flags);
}
/**
void canqueue_block_outlist(struct canque_ends_t *qends);
+int canqueue_ends_kill_inlist(struct canque_ends_t *qends, int send_rest);
+
+int canqueue_ends_kill_outlist(struct canque_ends_t *qends);
+
/* edge reference and traversal functions */
void canque_edge_do_dead(struct canque_edge_t *edge, int dead_fl);
static inline
void canque_edge_decref(struct canque_edge_t *edge)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_ends_t *inends=edge->inends;
struct canque_ends_t *outends=edge->outends;
int dead_fl;
- spin_lock_irqsave(&inends->ends_lock, flags);
- spin_lock(&outends->ends_lock);
+ can_spin_lock_irqsave(&inends->ends_lock, flags);
+ can_spin_lock(&outends->ends_lock);
if(atomic_dec_and_test(&edge->edge_used)) {
dead_fl=canque_fifo_test_and_set_fl(&edge->fifo,DEAD);
/*This should not be there, but it cannot be outside of the lock :-(*/
canque_notify_bothends(edge,CANQUEUE_NOTIFY_NOUSR);
- spin_unlock(&outends->ends_lock);
- spin_unlock_irqrestore(&inends->ends_lock, flags);
+ can_spin_unlock(&outends->ends_lock);
+ can_spin_unlock_irqrestore(&inends->ends_lock, flags);
canque_edge_do_dead(edge, dead_fl);
} else {
- spin_unlock(&outends->ends_lock);
- spin_unlock_irqrestore(&inends->ends_lock, flags);
+ can_spin_unlock(&outends->ends_lock);
+ can_spin_unlock_irqrestore(&inends->ends_lock, flags);
}
}
static inline
struct canque_edge_t *canque_first_inedge(struct canque_ends_t *qends)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct list_head *entry;
struct canque_edge_t *edge;
- spin_lock_irqsave(&qends->ends_lock, flags);
+ can_spin_lock_irqsave(&qends->ends_lock, flags);
entry=qends->inlist.next;
skip_dead:
if(entry != &qends->inlist) {
} else {
edge=NULL;
}
- spin_unlock_irqrestore(&qends->ends_lock, flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock, flags);
return edge;
}
static inline
struct canque_edge_t *canque_next_inedge(struct canque_ends_t *qends, struct canque_edge_t *edge)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct list_head *entry;
struct canque_edge_t *next;
- spin_lock_irqsave(&qends->ends_lock, flags);
+ can_spin_lock_irqsave(&qends->ends_lock, flags);
entry=edge->inpeers.next;
skip_dead:
if(entry != &qends->inlist) {
} else {
next=NULL;
}
- spin_unlock_irqrestore(&qends->ends_lock, flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock, flags);
canque_edge_decref(edge);
return next;
}
static inline
struct canque_edge_t *canque_first_outedge(struct canque_ends_t *qends)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct list_head *entry;
struct canque_edge_t *edge;
- spin_lock_irqsave(&qends->ends_lock, flags);
+ can_spin_lock_irqsave(&qends->ends_lock, flags);
entry=qends->outlist.next;
skip_dead:
if(entry != &qends->outlist) {
} else {
edge=NULL;
}
- spin_unlock_irqrestore(&qends->ends_lock, flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock, flags);
return edge;
}
static inline
struct canque_edge_t *canque_next_outedge(struct canque_ends_t *qends, struct canque_edge_t *edge)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct list_head *entry;
struct canque_edge_t *next;
- spin_lock_irqsave(&qends->ends_lock, flags);
+ can_spin_lock_irqsave(&qends->ends_lock, flags);
entry=edge->outpeers.next;
skip_dead:
if(entry != &qends->outlist) {
} else {
next=NULL;
}
- spin_unlock_irqrestore(&qends->ends_lock, flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock, flags);
canque_edge_decref(edge);
return next;
}
#define del_timer_sync del_timer
#endif /* <2.4.0 */
+#define CAN_ENABLE_KERN_FASYNC
+
+#ifndef CAN_WITH_RTL
+
+#define can_spinlock_t spinlock_t
+#define can_spin_irqflags_t unsigned long
+#define can_spin_lock spin_lock
+#define can_spin_unlock spin_unlock
+#define can_spin_lock_irqsave spin_lock_irqsave
+#define can_spin_unlock_irqrestore spin_unlock_irqrestore
+#define can_spin_lock_init spin_lock_init
+
+#else /*CAN_WITH_RTL*/
+
+#define can_spinlock_t rtl_spinlock_t
+#define can_spin_irqflags_t unsigned long
+#define can_spin_lock rtl_spin_lock
+#define can_spin_unlock rtl_spin_unlock
+#define can_spin_lock_irqsave rtl_spin_lock_irqsave
+#define can_spin_unlock_irqrestore rtl_spin_unlock_irqrestore
+#define can_spin_lock_init rtl_spin_lock_init
+
+#endif /*CAN_WITH_RTL*/
+
#endif /*_CAN_SYSDEP_H*/
--- /dev/null
+/* fasync.h
+ * Header file for the Linux CAN-bus driver.
+ * Written by Arnaud Westenberg email:arnaud@wanadoo.nl
+ * Rewritten for new CAN queues by Pavel Pisa - OCERA team member
+ * email:pisa@cmp.felk.cvut.cz
+ * This software is released under the GPL-License.
+ * Version lincan-0.2 9 Jul 2003
+ */
+
+int can_fasync(int fd, struct file *filp, int on);
+
struct canhardware_t {
int nr_boards;
struct rtr_id *rtr_queue;
- spinlock_t rtr_lock;
+ can_spinlock_t rtr_lock;
struct candevice_t *candevice[MAX_HW_CARDS];
};
O_OBJS += can_queue.o can_quekern.o devcommon.o main.o modparms.o \
setup.o finish.o irq.o boardlist.o \
sja1000p.o sja1000.o i82527.o \
- open.o proc.o close.o write.o read.o ioctl.o select.o
+ open.o proc.o close.o write.o read.o \
+ ioctl.o select.o fasync.o
# Objects with exported symbols (-DEXPORT_SYMTAB)
OX_OBJS =
# Module objects
/* cli and sti are not allowed in 2.5.5x SMP kernels */
#ifdef WINDOWED_ACCESS
-spinlock_t bfadcan_win_lock=SPIN_LOCK_UNLOCKED;
+can_spinlock_t bfadcan_win_lock=SPIN_LOCK_UNLOCKED;
#endif
/*
void bfadcan_write_register(unsigned char data, unsigned long address)
{
#ifdef WINDOWED_ACCESS
- unsigned long flags;
- spin_lock_irqsave(&bfadcan_win_lock,flags);
+ can_spin_irqflags_t flags;
+ can_spin_lock_irqsave(&bfadcan_win_lock,flags);
outb(address&0x00ff,0x200);
outb(data, 0x201);
- spin_unlock_irqrestore(&bfadcan_win_lock,flags);
+ can_spin_unlock_irqrestore(&bfadcan_win_lock,flags);
#else
outb(data,address);
#endif
unsigned bfadcan_read_register(unsigned long address)
{
#ifdef WINDOWED_ACCESS
- unsigned long flags;
+ can_spin_irqflags_t flags;
int ret;
- spin_lock_irqsave(&bfadcan_win_lock,flags);
+ can_spin_lock_irqsave(&bfadcan_win_lock,flags);
outb(address&0x00ff,0x200);
ret = inb(0x201);
- spin_unlock_irqrestore(&bfadcan_win_lock,flags);
+ can_spin_unlock_irqrestore(&bfadcan_win_lock,flags);
return ret;
#else
return inb(address);
void canque_dead_func(unsigned long data);
/* Support for dead ends structures left after client close */
-spinlock_t canque_dead_func_lock;
+can_spinlock_t canque_dead_func_lock;
LIST_HEAD(canque_dead_ends);
/* retrieved by list_entry(canque_dead_ends.next,struct canque_ends_t,dead_peers) */
LIST_HEAD(canque_dead_edges);
static inline
struct canque_edge_t *canque_dead_edges_cut_first(void)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_edge_t *edge;
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
if(list_empty(&canque_dead_edges))
edge=NULL;
else{
edge=list_entry(canque_dead_edges.next,struct canque_edge_t,inpeers);
list_del(&edge->inpeers);
}
- spin_unlock_irqrestore(&canque_dead_func_lock, flags);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock, flags);
return edge;
}
void canque_dead_func(unsigned long data)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_edge_t *qedge;
struct canque_ends_t *qends;
struct list_head *entry;
kfree(qedge);
}
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
entry=canque_dead_ends.next;
- spin_unlock_irqrestore(&canque_dead_func_lock,flags);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock,flags);
while(entry!=&canque_dead_ends){
qends=list_entry(canque_dead_ends.next,struct canque_ends_t,dead_peers);
entry=entry->next;
continue;
if(!list_empty(&qends->outlist))
continue;
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
list_del(&qends->dead_peers);
- spin_unlock_irqrestore(&canque_dead_func_lock,flags);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock,flags);
DEBUGQUE("ends structure disposed\n");
kfree(qends);
}
void canque_edge_do_dead(struct canque_edge_t *edge, int dead_fl)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
if(dead_fl) return;
return;
}
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
list_add(&edge->inpeers,&canque_dead_edges);
- spin_unlock_irqrestore(&canque_dead_func_lock, flags);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock, flags);
tasklet_schedule(&canque_dead_tl);
}
/*if(qends->ends_flags & CAN_ENDSF_DEAD){
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
list_del(&qends->dead_peers);
list_add(&qends->dead_peers,&canque_dead_ends);
- spin_unlock_irqrestore(&canque_dead_func_lock, flags);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock, flags);
tasklet_schedule(&canque_dead_tl);
}*/
break;
case CANQUEUE_NOTIFY_SPACE:
wake_up(&qends->endinfo.fileinfo.writeq);
+ #ifdef CAN_ENABLE_KERN_FASYNC
+ /* Asynchronous I/O processing */
+ kill_fasync(&qends->endinfo.fileinfo.fasync, SIGIO, POLL_OUT);
+ #endif /*CAN_ENABLE_KERN_FASYNC*/
break;
case CANQUEUE_NOTIFY_PROC:
wake_up(&qends->endinfo.fileinfo.readq);
+ #ifdef CAN_ENABLE_KERN_FASYNC
+ /* Asynchronous I/O processing */
+ kill_fasync(&qends->endinfo.fileinfo.fasync, SIGIO, POLL_IN);
+ #endif /*CAN_ENABLE_KERN_FASYNC*/
break;
case CANQUEUE_NOTIFY_NOUSR:
wake_up(&qends->endinfo.fileinfo.readq);
init_waitqueue_head(&qends->endinfo.fileinfo.readq);
init_waitqueue_head(&qends->endinfo.fileinfo.writeq);
init_waitqueue_head(&qends->endinfo.fileinfo.emptyq);
+ #ifdef CAN_ENABLE_KERN_FASYNC
+ qends->endinfo.fileinfo.fasync=NULL;
+ #endif /*CAN_ENABLE_KERN_FASYNC*/
+
qends->notify=canqueue_notify_kern;
DEBUGQUE("canqueue_ends_init_kern\n");
return 0;
if(qedge == NULL) return NULL;
memset(qedge,0,sizeof(struct canque_edge_t));
- spin_lock_init(&qedge->fifo.fifo_lock);
+ can_spin_lock_init(&qedge->fifo.fifo_lock);
if(canque_fifo_init_slots(&qedge->fifo, slotsnr)<0){
kfree(qedge);
DEBUGQUE("canque_new_edge_kern failed\n");
return qedge;
}
-#ifdef USE_DELAYED_DISCONNECT_EDGE_KERN
+#ifdef USE_SYNC_DISCONNECT_EDGE_KERN
/**
* canqueue_disconnect_edge_kern - disconnect edge from communicating entities with wait
int canqueue_disconnect_list_kern(struct canque_ends_t *qends, struct list_head *list)
{
struct canque_edge_t *edge;
- unsigned long flags;
+ can_spin_irqflags_t flags;
for(;;){
- spin_lock_irqsave(&qends->ends_lock,flags);
+ can_spin_lock_irqsave(&qends->ends_lock,flags);
if(list_empty(list)){
- spin_unlock_irqrestore(&qends->ends_lock,flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock,flags);
return 0;
}
if(list == &qends->inlist)
else
edge=list_entry(list->next,struct canque_edge_t,outpeers);
atomic_inc(&edge->edge_used);
- spin_unlock_irqrestore(&qends->ends_lock,flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock,flags);
if(canqueue_disconnect_edge_kern(qends, edge)>=0) {
/* Free edge memory */
canque_fifo_done(&edge->fifo);
}
}
-#endif /*USE_DELAYED_DISCONNECT_EDGE_KERN*/
+#endif /*USE_SYNC_DISCONNECT_EDGE_KERN*/
int canqueue_ends_sync_all_kern(struct canque_ends_t *qends)
return 0;
}
-int canqueue_ends_done_inends(struct canque_ends_t *qends, int send_rest)
-{
- struct canque_edge_t *edge;
-
- canque_for_each_inedge(qends, edge){
- canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
- if(send_rest){
- canque_edge_incref(edge);
- if(!canque_fifo_test_and_set_fl(&edge->fifo, FREEONEMPTY)){
- if(!canque_fifo_test_fl(&edge->fifo, EMPTY))
- continue;
- if(!canque_fifo_test_and_clear_fl(&edge->fifo, FREEONEMPTY))
- continue;
- }
- canque_edge_decref(edge);
- }
- }
- return list_empty(&qends->inlist)?0:1;
-}
-
-
-int canqueue_ends_done_outends(struct canque_ends_t *qends)
-{
- struct canque_edge_t *edge;
-
- canque_for_each_outedge(qends, edge){
- canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
- }
- return list_empty(&qends->outlist)?0:1;
-}
-
-
/**
* canqueue_ends_dispose_kern - finalizing of the ends structure for Linux kernel clients
* @qends: pointer to ends structure
*/
int canqueue_ends_dispose_kern(struct canque_ends_t *qends, int sync)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
int delayed;
DEBUGQUE("canqueue_ends_dispose_kern\n");
if(sync)
canqueue_ends_sync_all_kern(qends);
- /* Finish all outgoing edges listed in inends */
- delayed=canqueue_ends_done_inends(qends, 1);
-
- delayed|=canqueue_ends_done_outends(qends);
+ /* Finish or kill all outgoing edges listed in inends */
+ delayed=canqueue_ends_kill_inlist(qends, 1);
+ /* Kill all incoming edges listed in outends */
+ delayed|=canqueue_ends_kill_outlist(qends);
wake_up(&qends->endinfo.fileinfo.readq);
wake_up(&qends->endinfo.fileinfo.writeq);
wake_up(&qends->endinfo.fileinfo.emptyq);
if(delayed){
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
qends->ends_flags |= CAN_ENDSF_DEAD;
list_add(&qends->dead_peers,&canque_dead_ends);
- spin_unlock_irqrestore(&canque_dead_func_lock, flags);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock, flags);
tasklet_schedule(&canque_dead_tl);
DEBUGQUE("canqueue_ends_dispose_kern delayed\n");
int canque_fifo_flush_slots(struct canque_fifo_t *fifo)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_slot_t *slot;
- spin_lock_irqsave(&fifo->fifo_lock, flags);
+ can_spin_lock_irqsave(&fifo->fifo_lock, flags);
slot=fifo->head;
if(slot){
*fifo->tail=fifo->flist;
}
canque_fifo_clear_fl(fifo,FULL);
ret=canque_fifo_test_and_set_fl(fifo,EMPTY)?0:1;
- spin_unlock_irqrestore(&fifo->fifo_lock, flags);
+ can_spin_unlock_irqrestore(&fifo->fifo_lock, flags);
return ret;
}
int canque_test_outslot(struct canque_ends_t *qends,
struct canque_edge_t **qedgep, struct canque_slot_t **slotp)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
int prio;
struct canque_edge_t *edge;
int ret;
- spin_lock_irqsave(&qends->ends_lock, flags);
+ can_spin_lock_irqsave(&qends->ends_lock, flags);
for(prio=CANQUEUE_PRIO_NR;--prio>=0;){
while(!list_empty(&qends->active[prio])){
edge=list_entry(qends->active[prio].next,struct canque_edge_t,activepeers);
if(!canque_fifo_test_fl(&edge->fifo,DEAD)) {
canque_edge_incref(edge);
- spin_unlock_irqrestore(&qends->ends_lock, flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock, flags);
*qedgep=edge;
DEBUGQUE("canque_test_outslot found edge %d\n",edge->edge_num);
ret=canque_fifo_test_outslot(&edge->fifo, slotp);
if(ret>=0)
return ret;
- spin_lock_irqsave(&qends->ends_lock, flags);
+ can_spin_lock_irqsave(&qends->ends_lock, flags);
}
- spin_lock(&edge->fifo.fifo_lock);
+ can_spin_lock(&edge->fifo.fifo_lock);
if(canque_fifo_test_and_set_fl(&edge->fifo,INACTIVE)) {
list_del(&edge->activepeers);
list_add(&edge->activepeers,&qends->idle);
}
- spin_unlock(&edge->fifo.fifo_lock);
+ can_spin_unlock(&edge->fifo.fifo_lock);
}
}
- spin_unlock_irqrestore(&qends->ends_lock, flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock, flags);
*qedgep=NULL;
DEBUGQUE("canque_test_outslot no ready slot\n");
return -1;
struct canque_edge_t *qedge, struct canque_slot_t *slot)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
ret=canque_fifo_free_outslot(&qedge->fifo, slot);
if(ret&CAN_FIFOF_EMPTY){
canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
}
if(ret&CAN_FIFOF_FULL)
canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE);
- spin_lock_irqsave(&qends->ends_lock, flags);
+ can_spin_lock_irqsave(&qends->ends_lock, flags);
if((ret&CAN_FIFOF_EMPTY) || CANQUE_ROUNDROB ){
- spin_lock(&qedge->fifo.fifo_lock);
+ can_spin_lock(&qedge->fifo.fifo_lock);
if(canque_fifo_test_fl(&qedge->fifo,EMPTY)){
canque_fifo_set_fl(&qedge->fifo,INACTIVE);
list_del(&qedge->activepeers);
list_del(&qedge->activepeers);
list_add_tail(&qedge->activepeers,&qends->active[qedge->edge_prio]);
}
- spin_unlock(&qedge->fifo.fifo_lock);
+ can_spin_unlock(&qedge->fifo.fifo_lock);
}
- spin_unlock_irqrestore(&qends->ends_lock, flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock, flags);
canque_edge_decref(qedge);
DEBUGQUE("canque_free_outslot for edge %d returned %d\n",qedge->edge_num,ret);
return ret;
unsigned long filtid, unsigned long filtmask, int filtflags)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
- spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
+ can_spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
if(!(filtflags&MSG_PROCESSLOCAL) && (processlocal<2))
filtflags |= MSG_LOCAL_MASK;
if(canque_fifo_test_fl(&qedge->fifo,DEAD)) ret=-1;
else ret=canque_fifo_test_and_set_fl(&qedge->fifo,BLOCK)?1:0;
- spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
+ can_spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
if(ret>=0){
canque_notify_bothends(qedge,CANQUEUE_NOTIFY_FILTCH);
}
- spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
+ can_spin_lock_irqsave(&qedge->fifo.fifo_lock,flags);
if(!ret) canque_fifo_clear_fl(&qedge->fifo,BLOCK);
- spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
+ can_spin_unlock_irqrestore(&qedge->fifo.fifo_lock,flags);
DEBUGQUE("canque_set_filt for edge %d, ID %ld, mask %ld, flags %d returned %d\n",
qedge->edge_num,filtid,filtmask,filtflags,ret);
int canque_flush(struct canque_edge_t *qedge)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
ret=canque_fifo_flush_slots(&qedge->fifo);
if(ret){
canque_notify_inends(qedge,CANQUEUE_NOTIFY_EMPTY);
canque_notify_inends(qedge,CANQUEUE_NOTIFY_SPACE);
- spin_lock_irqsave(&qedge->outends->ends_lock, flags);
- spin_lock(&qedge->fifo.fifo_lock);
+ can_spin_lock_irqsave(&qedge->outends->ends_lock, flags);
+ can_spin_lock(&qedge->fifo.fifo_lock);
if(canque_fifo_test_fl(&qedge->fifo,EMPTY)){
list_del(&qedge->activepeers);
list_add(&qedge->activepeers,&qedge->outends->idle);
}
- spin_unlock(&qedge->fifo.fifo_lock);
- spin_unlock_irqrestore(&qedge->outends->ends_lock, flags);
+ can_spin_unlock(&qedge->fifo.fifo_lock);
+ can_spin_unlock_irqrestore(&qedge->outends->ends_lock, flags);
}
DEBUGQUE("canque_flush for edge %d returned %d\n",qedge->edge_num,ret);
return ret;
INIT_LIST_HEAD(&qends->idle);
INIT_LIST_HEAD(&qends->inlist);
INIT_LIST_HEAD(&qends->outlist);
- spin_lock_init(&qends->ends_lock);
+ can_spin_lock_init(&qends->ends_lock);
return 0;
}
*/
int canqueue_connect_edge(struct canque_edge_t *qedge, struct canque_ends_t *inends, struct canque_ends_t *outends)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
if(qedge == NULL) return -1;
DEBUGQUE("canqueue_connect_edge %d\n",qedge->edge_num);
canque_edge_incref(qedge);
- spin_lock_irqsave(&inends->ends_lock, flags);
- spin_lock(&outends->ends_lock);
- spin_lock(&qedge->fifo.fifo_lock);
+ can_spin_lock_irqsave(&inends->ends_lock, flags);
+ can_spin_lock(&outends->ends_lock);
+ can_spin_lock(&qedge->fifo.fifo_lock);
qedge->inends=inends;
list_add(&qedge->inpeers,&inends->inlist);
qedge->outends=outends;
list_add(&qedge->outpeers,&outends->outlist);
list_add(&qedge->activepeers,&outends->idle);
- spin_unlock(&qedge->fifo.fifo_lock);
- spin_unlock(&outends->ends_lock);
- spin_unlock_irqrestore(&inends->ends_lock, flags);
+ can_spin_unlock(&qedge->fifo.fifo_lock);
+ can_spin_unlock(&outends->ends_lock);
+ can_spin_unlock_irqrestore(&inends->ends_lock, flags);
canque_notify_bothends(qedge, CANQUEUE_NOTIFY_ATTACH);
if(canque_fifo_test_and_set_fl(&qedge->fifo, READY))
int canqueue_disconnect_edge(struct canque_edge_t *qedge)
{
int ret;
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_ends_t *inends, *outends;
inends=qedge->inends;
- if(inends) spin_lock_irqsave(&inends->ends_lock,flags);
+ if(inends) can_spin_lock_irqsave(&inends->ends_lock,flags);
outends=qedge->outends;
- if(outends) spin_lock(&outends->ends_lock);
- spin_lock(&qedge->fifo.fifo_lock);
+ if(outends) can_spin_lock(&outends->ends_lock);
+ can_spin_lock(&qedge->fifo.fifo_lock);
if(atomic_read(&qedge->edge_used)==0) {
if(qedge->outends){
list_del(&qedge->activepeers);
}
ret=1;
} else ret=-1;
- spin_unlock(&qedge->fifo.fifo_lock);
- if(outends) spin_unlock(&outends->ends_lock);
- if(inends) spin_unlock_irqrestore(&inends->ends_lock,flags);
+ can_spin_unlock(&qedge->fifo.fifo_lock);
+ if(outends) can_spin_unlock(&outends->ends_lock);
+ if(inends) can_spin_unlock_irqrestore(&inends->ends_lock,flags);
DEBUGQUE("canqueue_disconnect_edge %d returned %d\n",qedge->edge_num,ret);
return ret;
}
}
+/**
+ * canqueue_ends_kill_inlist - sends request to die to all outgoing edges
+ * @qends: pointer to ends structure
+ * @send_rest: select, whether already allocated slots should be processed
+ * by FIFO output side
+ *
+ * Return Value: Non-zero value means, that not all edges could be immediately
+ * disconnected and that ends structure memory release has to be delayed
+ */
+int canqueue_ends_kill_inlist(struct canque_ends_t *qends, int send_rest)
+{
+ struct canque_edge_t *edge;
+
+ canque_for_each_inedge(qends, edge){
+ canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
+ if(send_rest){
+ canque_edge_incref(edge);
+ if(!canque_fifo_test_and_set_fl(&edge->fifo, FREEONEMPTY)){
+ if(!canque_fifo_test_fl(&edge->fifo, EMPTY))
+ continue;
+ if(!canque_fifo_test_and_clear_fl(&edge->fifo, FREEONEMPTY))
+ continue;
+ }
+ canque_edge_decref(edge);
+ }
+ }
+ return list_empty(&qends->inlist)?0:1;
+}
+
+
+/**
+ * canqueue_ends_kill_outlist - sends request to die to all incoming edges
+ * @qends: pointer to ends structure
+ *
+ * Return Value: Non-zero value means, that not all edges could be immediately
+ * disconnected and that ends structure memory release has to be delayed
+ */
+int canqueue_ends_kill_outlist(struct canque_ends_t *qends)
+{
+ struct canque_edge_t *edge;
+
+ canque_for_each_outedge(qends, edge){
+ canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
+ }
+ return list_empty(&qends->outlist)?0:1;
+}
+
+
+
#include "../include/close.h"
#include "../include/i82527.h"
#include "../include/setup.h"
+#include "../include/fasync.h"
#define __NO_VERSION__
#include <linux/module.h>
obj = canuser->msgobj;
qends = canuser->qends;
+ #ifdef CAN_ENABLE_KERN_FASYNC
+
+ can_fasync(-1, file, 0);
+
+ #endif /*CAN_ENABLE_KERN_FASYNC*/
+
list_del(&canuser->peers);
canuser->qends = NULL;
canqueue_ends_dispose_kern(qends, file->f_flags & O_SYNC);
/* FIXME: what about clearing chip HW status, stopping sending messages etc? */
};
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,50))
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,50))
MOD_DEC_USE_COUNT;
-#endif
+ #endif
return 0;
}
int canqueue_ends_done_chip(struct canque_ends_t *qends)
{
- return 0;
+ int delayed;
+
+ /* Finish or kill all outgoing edges listed in inends */
+ delayed=canqueue_ends_kill_inlist(qends, 1);
+ /* Kill all incoming edges listed in outends */
+ delayed|=canqueue_ends_kill_outlist(qends);
+
+ return delayed;
}
--- /dev/null
+/* open.c
+ * Linux CAN-bus device driver.
+ * Written by Arnaud Westenberg email:arnaud@wanadoo.nl
+ * Rewritten for new CAN queues by Pavel Pisa - OCERA team member
+ * email:pisa@cmp.felk.cvut.cz
+ * This software is released under the GPL-License.
+ * Version lincan-0.2 9 Jul 2003
+ */
+
+#include "../include/can.h"
+#include "../include/can_sysdep.h"
+#include "../include/main.h"
+#include "../include/fasync.h"
+
+#ifdef CAN_ENABLE_KERN_FASYNC
+
+int can_fasync(int fd, struct file *file, int on)
+{
+ struct canuser_t *canuser = (struct canuser_t*)(file->private_data);
+ struct canque_ends_t *qends;
+ int retval;
+
+ if(!canuser || (canuser->magic != CAN_USER_MAGIC)){
+ CANMSG("can_fasync: bad canuser magic\n");
+ return -ENODEV;
+ }
+
+ qends = canuser->qends;
+
+ retval = fasync_helper(fd, file, on, &qends->endinfo.fileinfo.fasync);
+
+ if (retval < 0)
+ return retval;
+ return 0;
+}
+
+
+
+
+
+#endif /*CAN_ENABLE_KERN_FASYNC*/
void msgobj_done(struct msgobj_t *obj)
{
+ int delayed=0;
if(obj->qends) {
- if(canqueue_ends_done_chip(obj->qends) < 0)
+ delayed=canqueue_ends_done_chip(obj->qends);
+ if(delayed < 0)
CANMSG("msgobj_done: problem with chip queue ends\n");
}
del_timer_sync(&obj->tx_timeout);
if(obj->qends) {
- can_checked_free(obj->qends);
+ /*delayed free could be required there in the future,
+ actual use patter cannot generate such situation*/
+ if(!delayed) {
+ can_checked_free(obj->qends);
+ }
}
obj->qends=NULL;
}
message_id=(id0|id1)>>5;
}
- spin_lock(&hardware_p->rtr_lock);
+ can_spin_lock(&hardware_p->rtr_lock);
rtr_search=hardware_p->rtr_queue;
while (rtr_search != NULL) {
if (rtr_search->id == message_id)
break;
rtr_search=rtr_search->next;
}
- spin_unlock(&hardware_p->rtr_lock);
+ can_spin_unlock(&hardware_p->rtr_lock);
if ((rtr_search!=NULL) && (rtr_search->id==message_id))
i82527_irq_rtr_handler(chip, obj, rtr_search, message_id);
else
canobj_write_reg(chip,obj,(MVAL_RES|TXIE_RES|RXIE_RES|INTPD_RES),iMSGCTL0);
canobj_write_reg(chip,obj,(RMPD_RES|TXRQ_RES|MLST_RES|NEWD_RES),iMSGCTL1);
- spin_lock(&hardware_p->rtr_lock);
+ can_spin_lock(&hardware_p->rtr_lock);
rtr_search->rtr_message->id=message_id;
rtr_search->rtr_message->length=(canobj_read_reg(chip,obj,iMSGCFG) & 0xf0)>>4;
for (i=0; i<rtr_search->rtr_message->length; i++)
rtr_search->rtr_message->data[i]=canobj_read_reg(chip,obj,iMSGDAT0+i);
- spin_unlock(&hardware_p->rtr_lock);
+ can_spin_unlock(&hardware_p->rtr_lock);
if (waitqueue_active(&rtr_search->rtr_wq))
wake_up(&rtr_search->rtr_wq);
int i82527_wakeup_tx(struct chip_t *chip, struct msgobj_t *obj)
{
/* dummy lock to prevent preemption fully portable way */
- spinlock_t dummy_lock;
+ can_spinlock_t dummy_lock;
/* preempt_disable() */
- spin_lock_init(&dummy_lock);
- spin_lock(&dummy_lock);
+ can_spin_lock_init(&dummy_lock);
+ can_spin_lock(&dummy_lock);
set_bit(OBJ_TX_REQUEST,&obj->flags);
while(!test_and_set_bit(OBJ_TX_LOCK,&obj->flags)){
}
/* preempt_enable(); */
- spin_unlock(&dummy_lock);
+ can_spin_unlock(&dummy_lock);
return 0;
}
#include "../include/ioctl.h"
#include "../include/write.h"
#include "../include/finish.h"
+#include "../include/fasync.h"
#define EXPORT_SYMTAB
*/
struct mem_addr *mem_head=NULL;
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,3,0))
-struct file_operations can_fops=
-{
- NULL, /* llseek */
- read: can_read,
- write: can_write,
- NULL, /* readdir */
- poll: can_poll,
- ioctl: can_ioctl,
- NULL, /* mmap */
- open: can_open,
- NULL, /* flush */
- release: can_close,
- NULL, /* fsync */
-};
-#else
struct file_operations can_fops=
{
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0))
owner: THIS_MODULE,
+ #endif
read: can_read,
write: can_write,
poll: can_poll,
ioctl: can_ioctl,
open: can_open,
release: can_close,
+ #ifdef CAN_ENABLE_KERN_FASYNC
+ fasync: can_fasync
+ #endif /*CAN_ENABLE_KERN_FASYNC*/
};
-#endif
EXPORT_SYMBOL(can_fops);
goto reset_error;
}
- spin_lock_init(&hardware_p->rtr_lock);
+ can_spin_lock_init(&hardware_p->rtr_lock);
hardware_p->rtr_queue=NULL;
for (i=0; i<hardware_p->nr_boards; i++) {
inline ssize_t can_rtr_read(struct chip_t *chip, struct msgobj_t *obj,
char *buffer)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct rtr_id *rtr_current, *new_rtr_entry;
struct canmsg_t read_msg;
DEBUGMSG("Remote transmission request\n");
- spin_lock_irqsave(&hardware_p->rtr_lock, flags);
+ can_spin_lock_irqsave(&hardware_p->rtr_lock, flags);
if (hardware_p->rtr_queue == NULL) { // no remote messages pending
new_rtr_entry=(struct rtr_id *)kmalloc(sizeof(struct rtr_id),GFP_ATOMIC);
if (new_rtr_entry == NULL) {
- spin_unlock_irqrestore(&hardware_p->rtr_lock,
+ can_spin_unlock_irqrestore(&hardware_p->rtr_lock,
flags);
return -ENOMEM;
}
new_rtr_entry->rtr_message = &read_msg;
new_rtr_entry->next=NULL;
- spin_unlock_irqrestore(&hardware_p->rtr_lock, flags);
+ can_spin_unlock_irqrestore(&hardware_p->rtr_lock, flags);
/* Send remote transmission request */
chip->chipspecops->remote_request(chip,obj);
obj->ret = 0;
interruptible_sleep_on(&new_rtr_entry->rtr_wq);
- spin_lock_irqsave(&hardware_p->rtr_lock, flags);
+ can_spin_lock_irqsave(&hardware_p->rtr_lock, flags);
copy_to_user(buffer, &read_msg, sizeof(struct canmsg_t));
if (hardware_p->rtr_queue == new_rtr_entry) {
if (new_rtr_entry->next != NULL)
else
rtr_current->next=NULL;
}
- spin_unlock_irqrestore(&hardware_p->rtr_lock, flags);
+ can_spin_unlock_irqrestore(&hardware_p->rtr_lock, flags);
kfree(new_rtr_entry);
return obj->ret;
struct canque_ends_t *qends;
struct msgobj_t *obj;
unsigned int mask = 0;
- unsigned long flags;
struct canque_edge_t *edge;
- struct list_head *entry;
int full=0;
int i;
if ((file->f_mode & FMODE_WRITE) && !(file->f_flags & O_SYNC)) {
poll_wait(file, &qends->endinfo.fileinfo.writeq, wait);
- spin_lock_irqsave(&qends->ends_lock, flags);
- list_for_each(entry,&qends->inlist){
- edge=list_entry(entry,struct canque_edge_t,inpeers);
+ canque_for_each_inedge(qends, edge) {
if(canque_fifo_test_fl(&edge->fifo,FULL))
full=1;
}
- spin_unlock_irqrestore(&qends->ends_lock, flags);
if(!full)
mask |= POLLOUT | POLLWRNORM;
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_SYNC)) {
poll_wait(file, &qends->endinfo.fileinfo.emptyq, wait);
- spin_lock_irqsave(&qends->ends_lock, flags);
- list_for_each(entry,&qends->inlist){
- edge=list_entry(entry,struct canque_edge_t,inpeers);
+ canque_for_each_inedge(qends, edge) {
if(!canque_fifo_test_fl(&edge->fifo,EMPTY))
full=1;
}
- spin_unlock_irqrestore(&qends->ends_lock, flags);
if(!full)
mask |= POLLOUT | POLLWRNORM;
int sja1000_wakeup_tx(struct chip_t *chip, struct msgobj_t *obj)
{
/* dummy lock to prevent preemption in a fully portable way */
- spinlock_t dummy_lock;
+ can_spinlock_t dummy_lock;
/* preempt_disable() */
- spin_lock_init(&dummy_lock);
- spin_lock(&dummy_lock);
+ can_spin_lock_init(&dummy_lock);
+ can_spin_lock(&dummy_lock);
set_bit(OBJ_TX_REQUEST,&obj->flags);
while(!test_and_set_bit(OBJ_TX_LOCK,&obj->flags)){
}
/* preempt_enable(); */
- spin_unlock(&dummy_lock);
+ can_spin_unlock(&dummy_lock);
return 0;
}
int sja1000p_wakeup_tx(struct chip_t *chip, struct msgobj_t *obj)
{
/* dummy lock to prevent preemption in a fully portable way */
- spinlock_t dummy_lock;
+ can_spinlock_t dummy_lock;
/* preempt_disable() */
- spin_lock_init(&dummy_lock);
- spin_lock(&dummy_lock);
+ can_spin_lock_init(&dummy_lock);
+ can_spin_lock(&dummy_lock);
set_bit(OBJ_TX_REQUEST,&obj->flags);
while(!test_and_set_bit(OBJ_TX_LOCK,&obj->flags)){
}
/* preempt_enable(); */
- spin_unlock(&dummy_lock);
+ can_spin_unlock(&dummy_lock);
return 0;
}
{
int cmd;
/* dummy lock to prevent preemption in a fully portable way */
- spinlock_t dummy_lock;
+ can_spinlock_t dummy_lock;
/* preempt_disable() */
- spin_lock_init(&dummy_lock);
- spin_lock(&dummy_lock);
+ can_spin_lock_init(&dummy_lock);
+ can_spin_lock(&dummy_lock);
set_bit(OBJ_TX_REQUEST,&obj->flags);
}
/* preempt_enable(); */
- spin_unlock(&dummy_lock);
+ can_spin_unlock(&dummy_lock);
}