 * New CAN queues by Pavel Pisa - OCERA team member
 * email:pisa@cmp.felk.cvut.cz
 * This software is released under the GPL-License.
 * Version lincan-0.3  17 Jun 2004
 */
#include "../include/can.h"
extern atomic_t edge_num_cnt;
#ifdef CAN_DEBUG
/* Debug tracing - compiled out completely when CAN_DEBUG is not defined */
#define DEBUGQUE(fmt,args...) can_printk(KERN_ERR "can_quekern (debug): " fmt,\
	##args)
#else
#define DEBUGQUE(fmt,args...)
#endif

/* Unconditional error reporting for the kernel-side queue code */
#define ERRMSGQUE(fmt,args...) can_printk(KERN_ERR "can_quekern: " fmt,\
	##args)
void canque_dead_func(unsigned long data);
/* Support for dead ends structures left after client close */
-spinlock_t canque_dead_func_lock;
+can_spinlock_t canque_dead_func_lock;
LIST_HEAD(canque_dead_ends);
/* retrieved by list_entry(canque_dead_ends.next,struct canque_ends_t,dead_peers) */
LIST_HEAD(canque_dead_edges);
static inline
struct canque_edge_t *canque_dead_edges_cut_first(void)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_edge_t *edge;
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
if(list_empty(&canque_dead_edges))
edge=NULL;
else{
edge=list_entry(canque_dead_edges.next,struct canque_edge_t,inpeers);
list_del(&edge->inpeers);
}
- spin_unlock_irqrestore(&canque_dead_func_lock, flags);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock, flags);
return edge;
}
void canque_dead_func(unsigned long data)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
struct canque_edge_t *qedge;
struct canque_ends_t *qends;
struct list_head *entry;
- int i;
while((qedge=canque_dead_edges_cut_first())){
DEBUGQUE("edge %d disposed\n",qedge->edge_num);
+ #ifdef CAN_WITH_RTL
+ if(canque_fifo_test_fl(&qedge->fifo,RTL_MEM)){
+ canque_dispose_edge_rtl(qedge);
+ continue;
+ }
+ #endif /*CAN_WITH_RTL*/
+ canque_fifo_done_kern(&qedge->fifo);
kfree(qedge);
}
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
entry=canque_dead_ends.next;
- spin_unlock_irqrestore(&canque_dead_func_lock,flags);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock,flags);
+ /* lock can be released there, because only one instance of canque_dead_tl
+ can run at once and all other functions add ends only to head */
while(entry!=&canque_dead_ends){
- qends=list_entry(canque_dead_ends.next,struct canque_ends_t,dead_peers);
+ qends=list_entry(entry,struct canque_ends_t,dead_peers);
entry=entry->next;
if(!list_empty(&qends->inlist))
continue;
- if(!list_empty(&qends->idle))
+ if(!list_empty(&qends->outlist))
continue;
- for(i=CANQUEUE_PRIO_NR;i--;)
- if(!list_empty(&qends->active[i]))
- continue;
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
list_del(&qends->dead_peers);
- spin_unlock_irqrestore(&canque_dead_func_lock,flags);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock,flags);
DEBUGQUE("ends structure disposed\n");
+ #ifdef CAN_WITH_RTL
+ if(qends->ends_flags&CAN_ENDSF_MEM_RTL){
+ canque_ends_free_rtl(qends);
+ continue;
+ }
+ #endif /*CAN_WITH_RTL*/
kfree(qends);
}
}
+static inline void canque_dead_tasklet_schedule(void)
+{
+ #ifdef CAN_WITH_RTL
+ if(!rtl_rt_system_is_idle()){
+ set_bit(CAN_RTL2LIN_PEND_DEAD_b,&canqueue_rtl2lin_pend);
+ rtl_global_pend_irq (canqueue_rtl_irq);
+ return;
+ }
+ #endif /*CAN_WITH_RTL*/
-void canque_edge_do_dead(struct canque_edge_t *edge, int dead_fl)
+ tasklet_schedule(&canque_dead_tl);
+}
+
+
+void canque_edge_do_dead(struct canque_edge_t *edge)
{
- unsigned long flags;
+ can_spin_irqflags_t flags;
- if(dead_fl) return;
+ canque_notify_bothends(edge,CANQUEUE_NOTIFY_NOUSR);
+ #ifdef CAN_WITH_RTL
+ /* The problem of the above call is, that in RT-Linux to Linux notify
+ case is edge scheduled for delayed notify delivery, this needs
+ to be reflected there */
+ if(atomic_read(&edge->edge_used)>0){
+ can_spin_lock_irqsave(&edge->inends->ends_lock, flags);
+ can_spin_lock(&edge->outends->ends_lock);
+ if(atomic_read(&edge->edge_used)>0){
+ /* left edge to live for a while, banshee comes again in a while */
+ canque_fifo_clear_fl(&edge->fifo,DEAD);
+ can_spin_unlock(&edge->outends->ends_lock);
+ can_spin_unlock_irqrestore(&edge->inends->ends_lock, flags);
+ can_printk(KERN_ERR "can_quertl (debug): canque_edge_do_dead postponed\n");
+ return;
+ }
+ can_spin_unlock(&edge->outends->ends_lock);
+ can_spin_unlock_irqrestore(&edge->inends->ends_lock, flags);
+ }
+ #endif /*CAN_WITH_RTL*/
if(canqueue_disconnect_edge(edge)<0){
ERRMSGQUE("canque_edge_do_dead: canqueue_disconnect_edge failed !!!\n");
return;
}
- spin_lock_irqsave(&canque_dead_func_lock, flags);
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
list_add(&edge->inpeers,&canque_dead_edges);
- spin_unlock_irqrestore(&canque_dead_func_lock, flags);
- tasklet_schedule(&canque_dead_tl);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock, flags);
+ canque_dead_tasklet_schedule();
}
/*if(qends->ends_flags & CAN_ENDSF_DEAD){
	can_spin_lock_irqsave(&canque_dead_func_lock, flags);
	list_del(&qends->dead_peers);
	list_add(&qends->dead_peers,&canque_dead_ends);
	can_spin_unlock_irqrestore(&canque_dead_func_lock, flags);
	tasklet_schedule(&canque_dead_tl);
}*/
* @qends: pointer to the callback side ends structure
* @qedge: edge which invoked notification
* @what: notification type
 *
 * The notification event is handled directly by a call of this function except
 * the case when it is invoked from the RT-Linux context in the mixed mode
 * Linux/RT-Linux compilation. It is not possible to call Linux kernel
 * synchronization primitives directly in such a case. The notification request
 * is then postponed and signaled through @pending_inops flags by a call to the
 * canqueue_rtl2lin_check_and_pend() function.
 * The edge reference count is increased until all pending notifications are processed.
 */
void canqueue_notify_kern(struct canque_ends_t *qends, struct canque_edge_t *qedge, int what)
{
DEBUGQUE("canqueue_notify_kern for edge %d, use %d and event %d\n",
qedge->edge_num,(int)atomic_read(&qedge->edge_used),what);
+
+ /* delay event delivery for RT-Linux -> kernel notifications */
+ if(canqueue_rtl2lin_check_and_pend(qends,qedge,what)){
+ DEBUGQUE("canqueue_notify_kern postponed\n");
+ return;
+ }
+
switch(what){
case CANQUEUE_NOTIFY_EMPTY:
wake_up(&qends->endinfo.fileinfo.emptyq);
break;
case CANQUEUE_NOTIFY_SPACE:
wake_up(&qends->endinfo.fileinfo.writeq);
+ #ifdef CAN_ENABLE_KERN_FASYNC
+ /* Asynchronous I/O processing */
+ kill_fasync(&qends->endinfo.fileinfo.fasync, SIGIO, POLL_OUT);
+ #endif /*CAN_ENABLE_KERN_FASYNC*/
break;
case CANQUEUE_NOTIFY_PROC:
wake_up(&qends->endinfo.fileinfo.readq);
+ #ifdef CAN_ENABLE_KERN_FASYNC
+ /* Asynchronous I/O processing */
+ kill_fasync(&qends->endinfo.fileinfo.fasync, SIGIO, POLL_IN);
+ #endif /*CAN_ENABLE_KERN_FASYNC*/
break;
case CANQUEUE_NOTIFY_NOUSR:
wake_up(&qends->endinfo.fileinfo.readq);
init_waitqueue_head(&qends->endinfo.fileinfo.readq);
init_waitqueue_head(&qends->endinfo.fileinfo.writeq);
init_waitqueue_head(&qends->endinfo.fileinfo.emptyq);
+ #ifdef CAN_ENABLE_KERN_FASYNC
+ qends->endinfo.fileinfo.fasync=NULL;
+ #endif /*CAN_ENABLE_KERN_FASYNC*/
+
qends->notify=canqueue_notify_kern;
DEBUGQUE("canqueue_ends_init_kern\n");
return 0;
}
+/**
+ * canque_fifo_init_kern - initialize one CAN FIFO
+ * @fifo: pointer to the FIFO structure
+ * @slotsnr: number of requested slots
+ *
+ * Return Value: The negative value indicates, that there is no memory
+ * to allocate space for the requested number of the slots.
+ */
+int canque_fifo_init_kern(struct canque_fifo_t *fifo, int slotsnr)
+{
+ int size;
+ if(!slotsnr) slotsnr=MAX_BUF_LENGTH;
+ size=sizeof(struct canque_slot_t)*slotsnr;
+ fifo->entry=kmalloc(size,GFP_KERNEL);
+ if(!fifo->entry) return -1;
+ fifo->slotsnr=slotsnr;
+ return canque_fifo_init_slots(fifo);
+}
+
+/**
+ * canque_fifo_done_kern - frees slots allocated for CAN FIFO
+ * @fifo: pointer to the FIFO structure
+ */
+int canque_fifo_done_kern(struct canque_fifo_t *fifo)
+{
+ if(fifo->entry)
+ kfree(fifo->entry);
+ fifo->entry=NULL;
+ return 1;
+}
+
+
/**
* canque_new_edge_kern - allocate new edge structure in the Linux kernel context
* @slotsnr: required number of slots in the newly allocated edge structure
if(qedge == NULL) return NULL;
memset(qedge,0,sizeof(struct canque_edge_t));
- spin_lock_init(&qedge->fifo.fifo_lock);
- if(canque_fifo_init_slots(&qedge->fifo, slotsnr)<0){
+ can_spin_lock_init(&qedge->fifo.fifo_lock);
+ if(canque_fifo_init_kern(&qedge->fifo, slotsnr)<0){
kfree(qedge);
DEBUGQUE("canque_new_edge_kern failed\n");
return NULL;
return qedge;
}
-/**
+#ifdef USE_SYNC_DISCONNECT_EDGE_KERN
+
+/*not included in doc
* canqueue_disconnect_edge_kern - disconnect edge from communicating entities with wait
* @qends: ends structure belonging to calling communication object
* @qedge: pointer to edge
int canqueue_disconnect_list_kern(struct canque_ends_t *qends, struct list_head *list)
{
struct canque_edge_t *edge;
- unsigned long flags;
+ can_spin_irqflags_t flags;
for(;;){
- spin_lock_irqsave(&qends->ends_lock,flags);
+ can_spin_lock_irqsave(&qends->ends_lock,flags);
if(list_empty(list)){
- spin_unlock_irqrestore(&qends->ends_lock,flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock,flags);
return 0;
}
if(list == &qends->inlist)
else
edge=list_entry(list->next,struct canque_edge_t,outpeers);
atomic_inc(&edge->edge_used);
- spin_unlock_irqrestore(&qends->ends_lock,flags);
+ can_spin_unlock_irqrestore(&qends->ends_lock,flags);
if(canqueue_disconnect_edge_kern(qends, edge)>=0) {
/* Free edge memory */
- canque_fifo_done(&edge->fifo);
+ canque_fifo_done_kern(&edge->fifo);
kfree(edge);
}else{
canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
}
}
-void canqueue_block_list(struct canque_ends_t *qends, struct list_head *list)
-{
- struct canque_edge_t *edge;
- struct list_head *entry;
-
- /* has to be called with qends->ends_lock already locked */
- list_for_each(entry,&qends->inlist){
- if(list == &qends->inlist)
- edge=list_entry(list->next,struct canque_edge_t,inpeers);
- else
- edge=list_entry(list->next,struct canque_edge_t,outpeers);
- canque_fifo_set_fl(&edge->fifo,BLOCK);
- }
-}
+#endif /*USE_SYNC_DISCONNECT_EDGE_KERN*/
+
/**
 * canqueue_ends_sync_all_kern - waits until all pending messages are sent
 * @qends: pointer to the ends structure
 *
 * NOTE(review): currently a stub that always reports success (0); no actual
 * synchronization with the output FIFOs is performed here - confirm intent.
 */
int canqueue_ends_sync_all_kern(struct canque_ends_t *qends)
{
	return 0;
}
-int canqueue_ends_done_inends(struct canque_ends_t *qends, int send_rest)
+
+void canqueue_ends_dispose_postpone(struct canque_ends_t *qends)
{
- struct canque_edge_t *edge;
-
- canque_for_each_inedge(qends, edge){
- canque_notify_bothends(edge, CANQUEUE_NOTIFY_DEAD_WANTED);
- if(send_rest){
- canque_edge_incref(edge);
- if(!canque_fifo_test_and_set_fl(&edge->fifo, FREEONEMPTY)){
- if(!canque_fifo_test_fl(&edge->fifo, EMPTY))
- continue;
- if(!canque_fifo_test_and_clear_fl(&edge->fifo, FREEONEMPTY))
- continue;
- }
- canque_edge_decref(edge);
- }
- }
- return list_empty(&qends->inlist)?0:1;
+ can_spin_irqflags_t flags;
+
+ can_spin_lock_irqsave(&canque_dead_func_lock, flags);
+ qends->ends_flags |= CAN_ENDSF_DEAD;
+ list_add(&qends->dead_peers,&canque_dead_ends);
+ can_spin_unlock_irqrestore(&canque_dead_func_lock, flags);
+ canque_dead_tasklet_schedule();
}
*/
int canqueue_ends_dispose_kern(struct canque_ends_t *qends, int sync)
{
- unsigned long flags;
- int i;
int delayed;
DEBUGQUE("canqueue_ends_dispose_kern\n");
- spin_lock_irqsave(&qends->ends_lock,flags);
- canqueue_block_list(qends, &qends->idle);
- for(i=CANQUEUE_PRIO_NR;--i>=0;){
- canqueue_block_list(qends, &qends->active[i]);
- }
- canqueue_block_list(qends, &qends->idle);
- canqueue_block_list(qends, &qends->inlist);
- spin_unlock_irqrestore(&qends->ends_lock,flags);
+ canqueue_block_inlist(qends);
+ canqueue_block_outlist(qends);
/*Wait for sending of all pending messages in the output FIFOs*/
if(sync)
canqueue_ends_sync_all_kern(qends);
- /* Finish all outgoing edges listed in inends */
- delayed=canqueue_ends_done_inends(qends, 1);
-
- delayed|=canqueue_disconnect_list_kern(qends, &qends->idle);
- for(i=CANQUEUE_PRIO_NR;--i>=0;){
- delayed|=canqueue_disconnect_list_kern(qends, &qends->active[i]);
- }
+ /* Finish or kill all outgoing edges listed in inends */
+ delayed=canqueue_ends_kill_inlist(qends, 1);
+ /* Kill all incoming edges listed in outends */
+ delayed|=canqueue_ends_kill_outlist(qends);
wake_up(&qends->endinfo.fileinfo.readq);
wake_up(&qends->endinfo.fileinfo.writeq);
wake_up(&qends->endinfo.fileinfo.emptyq);
if(delayed){
- spin_lock_irqsave(&canque_dead_func_lock, flags);
- qends->ends_flags |= CAN_ENDSF_DEAD;
- list_add(&qends->dead_peers,&canque_dead_ends);
- spin_unlock_irqrestore(&canque_dead_func_lock, flags);
- tasklet_schedule(&canque_dead_tl);
+ canqueue_ends_dispose_postpone(qends);
DEBUGQUE("canqueue_ends_dispose_kern delayed\n");
return 1;
/**
 * canqueue_kern_initialize - initialization of kernel-side queue support
 *
 * Initializes the spin-lock protecting the global dead edges/ends lists;
 * must be called before any edge or ends disposal can take place.
 */
void canqueue_kern_initialize(void)
{
	can_spin_lock_init(&canque_dead_func_lock);
}