extern atomic_t edge_num_cnt;
#ifdef CAN_DEBUG
- #define DEBUGQUE(fmt,args...) printk(KERN_ERR "can_queue (debug): " fmt,\
+ #define DEBUGQUE(fmt,args...) can_printk(KERN_ERR "can_queue (debug): " fmt,\
##args)
#else
#define DEBUGQUE(fmt,args...)
#endif
-#define ERRMSGQUE(fmt,args...) printk(KERN_ERR "can_queue: " fmt,\
+#define ERRMSGQUE(fmt,args...) can_printk(KERN_ERR "can_queue: " fmt,\
##args)
void canque_dead_func(unsigned long data);
/* Support for dead ends structures left after client close */
-spinlock_t canque_dead_func_lock;
+can_spinlock_t canque_dead_func_lock;
LIST_HEAD(canque_dead_ends);
/* retrieved by list_entry(canque_dead_ends.next,struct canque_ends_t,dead_peers) */
LIST_HEAD(canque_dead_edges);
/* Detach and return the first edge on the global canque_dead_edges list,
 * or NULL when no dead edge is pending. Access to the list is serialized
 * by canque_dead_func_lock; the caller disposes of the returned edge with
 * no lock held. The -/+ lines migrate the Linux spinlock calls to the
 * system-independent can_spin_* abstraction. */
static inline
struct canque_edge_t *canque_dead_edges_cut_first(void)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_edge_t *edge;
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
if(list_empty(&canque_dead_edges))
edge=NULL;
else{
edge=list_entry(canque_dead_edges.next,struct canque_edge_t,inpeers);
list_del(&edge->inpeers);
}
- spin_unlock_irqrestore(&canque_dead_func_lock, flags);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock, flags);
return edge;
}
void canque_dead_func(unsigned long data)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_edge_t *qedge;
struct canque_ends_t *qends;
struct list_head *entry;
while((qedge=canque_dead_edges_cut_first())){
DEBUGQUE("edge %d disposed\n",qedge->edge_num);
+ #ifdef CAN_WITH_RTL
+ if(canque_fifo_test_fl(&qedge->fifo,RTL_MEM)){
+ canque_dispose_edge_rtl(qedge);
+ continue;
+ }
+ #endif /*CAN_WITH_RTL*/
+ canque_fifo_done_kern(&qedge->fifo);
kfree(qedge);
}
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
entry=canque_dead_ends.next;
- spin_unlock_irqrestore(&canque_dead_func_lock,flags);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock,flags);
while(entry!=&canque_dead_ends){
qends=list_entry(canque_dead_ends.next,struct canque_ends_t,dead_peers);
entry=entry->next;
continue;
if(!list_empty(&qends->outlist))
continue;
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
list_del(&qends->dead_peers);
- spin_unlock_irqrestore(&canque_dead_func_lock,flags);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock,flags);
DEBUGQUE("ends structure disposed\n");
+ #ifdef CAN_WITH_RTL
+ if(qends->ends_flags&CAN_ENDSF_MEM_RTL){
+ canque_ends_free_rtl(qends);
+ continue;
+ }
+ #endif /*CAN_WITH_RTL*/
kfree(qends);
}
void canque_edge_do_dead(struct canque_edge_t *edge, int dead_fl)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
if(dead_fl) return;
return;
}
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
list_add(&edge->inpeers,&canque_dead_edges);
- spin_unlock_irqrestore(&canque_dead_func_lock, flags);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock, flags);
tasklet_schedule(&canque_dead_tl);
}
/*if(qends->ends_flags & CAN_ENDSF_DEAD){
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
list_del(&qends->dead_peers);
list_add(&qends->dead_peers,&canque_dead_ends);
- spin_unlock_irqrestore(&canque_dead_func_lock, flags);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock, flags);
tasklet_schedule(&canque_dead_tl);
}*/
{
DEBUGQUE("canqueue_notify_kern for edge %d, use %d and event %d\n",
qedge->edge_num,(int)atomic_read(&qedge->edge_used),what);
+
+ /* delay event delivery for RT-Linux -> kernel notifications */
+ if(canqueue_rtl2lin_check_and_pend(qends,qedge,what)){
+ DEBUGQUE("canqueue_notify_kern postponed\n");
+ return;
+ }
+
switch(what){
case CANQUEUE_NOTIFY_EMPTY:
wake_up(&qends->endinfo.fileinfo.emptyq);
break;
case CANQUEUE_NOTIFY_SPACE:
wake_up(&qends->endinfo.fileinfo.writeq);
+ #ifdef CAN_ENABLE_KERN_FASYNC
+ /* Asynchronous I/O processing */
+ kill_fasync(&qends->endinfo.fileinfo.fasync, SIGIO, POLL_OUT);
+ #endif /*CAN_ENABLE_KERN_FASYNC*/
break;
case CANQUEUE_NOTIFY_PROC:
wake_up(&qends->endinfo.fileinfo.readq);
+ #ifdef CAN_ENABLE_KERN_FASYNC
+ /* Asynchronous I/O processing */
+ kill_fasync(&qends->endinfo.fileinfo.fasync, SIGIO, POLL_IN);
+ #endif /*CAN_ENABLE_KERN_FASYNC*/
break;
case CANQUEUE_NOTIFY_NOUSR:
wake_up(&qends->endinfo.fileinfo.readq);
init_waitqueue_head(&qends->endinfo.fileinfo.readq);
init_waitqueue_head(&qends->endinfo.fileinfo.writeq);
init_waitqueue_head(&qends->endinfo.fileinfo.emptyq);
+ #ifdef CAN_ENABLE_KERN_FASYNC
+ qends->endinfo.fileinfo.fasync=NULL;
+ #endif /*CAN_ENABLE_KERN_FASYNC*/
+
qends->notify=canqueue_notify_kern;
DEBUGQUE("canqueue_ends_init_kern\n");
return 0;
}
+/**
+ * canque_fifo_init_kern - initialize one CAN FIFO
+ * @fifo: pointer to the FIFO structure
+ * @slotsnr: number of requested slots; 0 selects the default MAX_BUF_LENGTH
+ *
+ * Allocates the slot array for @fifo from the kernel heap (GFP_KERNEL)
+ * and chains the slots through canque_fifo_init_slots().
+ *
+ * Return Value: A negative value indicates that there is no memory
+ * to allocate space for the requested number of slots.
+ */
+int canque_fifo_init_kern(struct canque_fifo_t *fifo, int slotsnr)
+{
+ int size;
+ if(!slotsnr) slotsnr=MAX_BUF_LENGTH;
+ size=sizeof(struct canque_slot_t)*slotsnr;
+ fifo->entry=kmalloc(size,GFP_KERNEL);
+ if(!fifo->entry) return -1;
+ fifo->slotsnr=slotsnr;
+ return canque_fifo_init_slots(fifo);
+}
+
+/**
+ * canque_fifo_done_kern - frees slots allocated for CAN FIFO
+ * @fifo: pointer to the FIFO structure
+ *
+ * Releases the slot array allocated by canque_fifo_init_kern() and clears
+ * @fifo->entry, so a repeated call on the same FIFO is harmless.
+ *
+ * Return Value: Always 1.
+ */
+int canque_fifo_done_kern(struct canque_fifo_t *fifo)
+{
+ if(fifo->entry)
+ kfree(fifo->entry);
+ fifo->entry=NULL;
+ return 1;
+}
+
+
/**
* canque_new_edge_kern - allocate new edge structure in the Linux kernel context
* @slotsnr: required number of slots in the newly allocated edge structure
if(qedge == NULL) return NULL;
memset(qedge,0,sizeof(struct canque_edge_t));
- spin_lock_init(&qedge->fifo.fifo_lock);
- if(canque_fifo_init_slots(&qedge->fifo, slotsnr)<0){
+ can_spin_lock_init(&qedge->fifo.fifo_lock);
+ if(canque_fifo_init_kern(&qedge->fifo, slotsnr)<0){
kfree(qedge);
DEBUGQUE("canque_new_edge_kern failed\n");
return NULL;
return qedge;
}
-#ifdef USE_DELAYED_DISCONNECT_EDGE_KERN
+#ifdef USE_SYNC_DISCONNECT_EDGE_KERN
/**
* canqueue_disconnect_edge_kern - disconnect edge from communicating entities with wait
int canqueue_disconnect_list_kern(struct canque_ends_t *qends, struct list_head *list)
{
struct canque_edge_t *edge;
- unsigned long flags;
+ can_spin_irqflags_t flags;
for(;;){
- spin_lock_irqsave(&qends->ends_lock,flags);
+ can_spin_lock_irqsave(&qends->ends_lock,flags);
if(list_empty(list)){
- spin_unlock_irqrestore(&qends->ends_lock,flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock,flags);
return 0;
}
if(list == &qends->inlist)
else
edge=list_entry(list->next,struct canque_edge_t,outpeers);
atomic_inc(&edge->edge_used);
- spin_unlock_irqrestore(&qends->ends_lock,flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock,flags);
if(canqueue_disconnect_edge_kern(qends, edge)>=0) {
/* Free edge memory */
- canque_fifo_done(&edge->fifo);
+ canque_fifo_done_kern(&edge->fifo);
kfree(edge);
}else{
canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
}
}
-#endif /*USE_DELAYED_DISCONNECT_EDGE_KERN*/
+#endif /*USE_SYNC_DISCONNECT_EDGE_KERN*/
int canqueue_ends_sync_all_kern(struct canque_ends_t *qends)
return 0;
}
/* This hunk replaces the two per-edge polling helpers below with a single
 * postpone routine: instead of repeatedly notifying edges from the caller,
 * the ends structure is marked dead and queued on canque_dead_ends for the
 * canque_dead_tl tasklet to dispose of later in canque_dead_func(). */
-int canqueue_ends_done_inends(struct canque_ends_t *qends, int send_rest)
-{
- struct canque_edge_t *edge;
-
- canque_for_each_inedge(qends, edge){
- canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
- if(send_rest){
- canque_edge_incref(edge);
- if(!canque_fifo_test_and_set_fl(&edge->fifo, FREEONEMPTY)){
- if(!canque_fifo_test_fl(&edge->fifo, EMPTY))
- continue;
- if(!canque_fifo_test_and_clear_fl(&edge->fifo, FREEONEMPTY))
- continue;
- }
- canque_edge_decref(edge);
- }
- }
- return list_empty(&qends->inlist)?0:1;
-}
-
-int canqueue_ends_done_outends(struct canque_ends_t *qends)
+void canqueue_ends_dispose_postpone(struct canque_ends_t *qends)
{
- struct canque_edge_t *edge;
-
- canque_for_each_outedge(qends, edge){
- canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
- }
- return list_empty(&qends->outlist)?0:1;
+ can_spin_irqflags_t flags;
+
+ /* Mark the ends structure dead and park it on the global dead list
+  * under canque_dead_func_lock, then kick the disposal tasklet. */
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
+ qends->ends_flags |= CAN_ENDSF_DEAD;
+ list_add(&qends->dead_peers,&canque_dead_ends);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock, flags);
+ tasklet_schedule(&canque_dead_tl);
}
*/
int canqueue_ends_dispose_kern(struct canque_ends_t *qends, int sync)
{
- unsigned long flags;
int delayed;
DEBUGQUE("canqueue_ends_dispose_kern\n");
if(sync)
canqueue_ends_sync_all_kern(qends);
- /* Finish all outgoing edges listed in inends */
- delayed=canqueue_ends_done_inends(qends, 1);
-
- delayed|=canqueue_ends_done_outends(qends);
+ /* Finish or kill all outgoing edges listed in inends */
+ delayed=canqueue_ends_kill_inlist(qends, 1);
+ /* Kill all incoming edges listed in outends */
+ delayed|=canqueue_ends_kill_outlist(qends);
wake_up(&qends->endinfo.fileinfo.readq);
wake_up(&qends->endinfo.fileinfo.writeq);
wake_up(&qends->endinfo.fileinfo.emptyq);
if(delayed){
- spin_lock_irqsave(&canque_dead_func_lock, flags);
- qends->ends_flags |= CAN_ENDSF_DEAD;
- list_add(&qends->dead_peers,&canque_dead_ends);
- spin_unlock_irqrestore(&canque_dead_func_lock, flags);
- tasklet_schedule(&canque_dead_tl);
+ canqueue_ends_dispose_postpone(qends);
DEBUGQUE("canqueue_ends_dispose_kern delayed\n");
return 1;
/* One-time setup of the kernel-side queue support: initializes the lock
 * guarding the canque_dead_edges / canque_dead_ends lists.
 * NOTE(review): presumably called once at module init before any edge or
 * ends can be queued for delayed disposal — confirm against the caller. */
void canqueue_kern_initialize()
{
-
-
+ can_spin_lock_init(&canque_dead_func_lock);
}