+/**************************************************************************/
+/* File: can_queue.h - CAN queues and message passing infrastructure */
+/* */
+/* LinCAN - (Not only) Linux CAN bus driver */
+/* Copyright (C) 2002-2009 DCE FEE CTU Prague <http://dce.felk.cvut.cz> */
+/* Copyright (C) 2002-2009 Pavel Pisa <pisa@cmp.felk.cvut.cz> */
+/* Funded by OCERA and FRESCOR IST projects */
+/* */
+/* LinCAN is free software; you can redistribute it and/or modify it */
+/* under terms of the GNU General Public License as published by the */
+/* Free Software Foundation; either version 2, or (at your option) any */
+/* later version. LinCAN is distributed in the hope that it will be */
+/* useful, but WITHOUT ANY WARRANTY; without even the implied warranty */
+/* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU */
+/* General Public License for more details. You should have received a */
+/* copy of the GNU General Public License along with LinCAN; see file */
+/* COPYING. If not, write to the Free Software Foundation, 675 Mass Ave, */
+/* Cambridge, MA 02139, USA. */
+/* */
+/* To allow use of LinCAN in the compact embedded systems firmware */
+/* and RT-executives (RTEMS for example), main authors agree with next */
+/* special exception: */
+/* */
+/* Including LinCAN header files in a file, instantiating LinCAN generics */
+/* or templates, or linking other files with LinCAN objects to produce */
+/* an application image/executable, does not by itself cause the */
+/* resulting application image/executable to be covered by */
+/* the GNU General Public License. */
+/* This exception does not however invalidate any other reasons */
+/* why the executable file might be covered by the GNU Public License. */
+/* Publication of enhanced or derived LinCAN files is required although. */
+/**************************************************************************/
+
#ifndef _CAN_QUEUE_H
#define _CAN_QUEUE_H
#include "./can_sysdep.h"
/**
- * struct canque_slot_t - one CAN message slot in the CAN FIFO queue
+ * struct canque_slot_t - one CAN message slot in the CAN FIFO queue
* @next: pointer to the next/younger slot
* @slot_flags: space for flags and optional command describing action
* associated with slot data
* @fifo_flags: this field holds global flags describing state of the FIFO.
* %CAN_FIFOF_ERROR is set when some error condition occurs.
* %CAN_FIFOF_ERR2BLOCK defines, that error should lead to the FIFO block state.
- * %CAN_FIFOF_BLOCK state blocks insertion of the next messages.
- * %CAN_FIFOF_OVERRUN attempt to acquire new slot, when FIFO is full.
- * %CAN_FIFOF_FULL indicates FIFO full state.
+ * %CAN_FIFOF_BLOCK state blocks insertion of the next messages.
+ * %CAN_FIFOF_OVERRUN attempt to acquire new slot, when FIFO is full.
+ * %CAN_FIFOF_FULL indicates FIFO full state.
* %CAN_FIFOF_EMPTY indicates no allocated slot in the FIFO.
 * %CAN_FIFOF_DEAD condition indication. Used when FIFO is being destroyed.
 * @error_code: further description of error condition
* @fifo_lock: the lock to ensure atomicity of slot manipulation operations.
* @slotsnr: number of allocated slots
*
- * This structure represents CAN FIFO queue. It is implemented as
+ * This structure represents CAN FIFO queue. It is implemented as
* a single linked list of slots prepared for processing. The empty slots
* are stored in single linked list (@flist).
*/
/**
- * canque_fifo_get_inslot - allocate slot for the input of one CAN message
+ * canque_fifo_get_inslot - allocate slot for the input of one CAN message
* @fifo: pointer to the FIFO structure
* @slotp: pointer to location to store pointer to the allocated slot.
* @cmd: optional command associated with allocated slot.
/* Forward declarations for external types */
struct msgobj_t;
-struct chip_t;
+struct canchip_t;
/**
* struct canque_edge_t - CAN message delivery subsystem graph edge
* @pending_inops: bitmask of pending operations
* @pending_outops: bitmask of pending operations
*
- * This structure represents one direction connection from messages source
+ * This structure represents one direction connection from messages source
* (@inends) to message consumer (@outends) fifo ends hub. The edge contains
* &struct canque_fifo_t for message fifo implementation.
*/
* struct canque_ends_t - CAN message delivery subsystem graph vertex (FIFO ends)
* @ends_flags: this field holds flags describing state of the ENDS structure.
* @active: the array of the lists of active edges directed to the ends structure
- * with ready messages. The array is indexed by the edges priorities.
+ * with ready messages. The array is indexed by the edges priorities.
* @idle: the list of the edges directed to the ends structure with empty FIFOs.
* @inlist: the list of outgoing edges input sides.
 * @outlist: the list of all incoming edges output sides. Each of these edges
* @ends_lock: the lock synchronizing operations between threads accessing
* same ends structure.
* @notify: pointer to notify procedure. The next state changes are notified.
- * %CANQUEUE_NOTIFY_EMPTY (out->in call) - all slots are processed by FIFO out side.
+ * %CANQUEUE_NOTIFY_EMPTY (out->in call) - all slots are processed by FIFO out side.
* %CANQUEUE_NOTIFY_SPACE (out->in call) - full state negated => there is space for new message.
* %CANQUEUE_NOTIFY_PROC (in->out call) - empty state negated => out side is requested to process slots.
* %CANQUEUE_NOTIFY_NOUSR (both) - notify, that the last user has released the edge usage
#endif /*CAN_WITH_RTL*/
struct {
struct msgobj_t *msgobj;
- struct chip_t *chip;
+ struct canchip_t *chip;
#ifndef CAN_WITH_RTL
wait_queue_head_t daemonq;
#else /*CAN_WITH_RTL*/
* @inends: input side of the edge
*
* Function call moves output side of the edge from idle onto active edges
- * list.
+ * list. This function has to be called with the edge reference count held,
+ * the same requirement as for most other edge functions.
*/
static inline
void canque_activate_edge(struct canque_ends_t *inends, struct canque_edge_t *qedge)
struct canque_ends_t *outends;
if(qedge->edge_prio>=CANQUEUE_PRIO_NR)
qedge->edge_prio=CANQUEUE_PRIO_NR-1;
- can_spin_lock_irqsave(&inends->ends_lock, flags);
if((outends=qedge->outends)){
- can_spin_lock(&outends->ends_lock);
+ can_spin_lock_irqsave(&outends->ends_lock, flags);
can_spin_lock(&qedge->fifo.fifo_lock);
if(!canque_fifo_test_fl(&qedge->fifo,EMPTY)){
list_del(&qedge->activepeers);
list_add_tail(&qedge->activepeers,&outends->active[qedge->edge_prio]);
}
can_spin_unlock(&qedge->fifo.fifo_lock);
- can_spin_unlock(&outends->ends_lock);
-
+ can_spin_unlock_irqrestore(&outends->ends_lock, flags);
}
- can_spin_unlock_irqrestore(&inends->ends_lock, flags);
}
/**
int canque_get_inslot(struct canque_ends_t *qends,
struct canque_edge_t **qedgep, struct canque_slot_t **slotp, int cmd);
-
+
int canque_get_inslot4id(struct canque_ends_t *qends,
struct canque_edge_t **qedgep, struct canque_slot_t **slotp,
int cmd, unsigned long id, int prio);
-
+
int canque_put_inslot(struct canque_ends_t *qends,
struct canque_edge_t *qedge, struct canque_slot_t *slot);
int canque_set_filt(struct canque_edge_t *qedge,
unsigned long filtid, unsigned long filtmask, int flags);
-
+
int canque_flush(struct canque_edge_t *qedge);
int canqueue_disconnect_edge(struct canque_edge_t *qedge);
int canqueue_ends_kill_outlist(struct canque_ends_t *qends);
+int canqueue_ends_filt_conjuction(struct canque_ends_t *qends, struct canfilt_t *filt);
+
+int canqueue_ends_flush_inlist(struct canque_ends_t *qends);
+
+int canqueue_ends_flush_outlist(struct canque_ends_t *qends);
+
/* edge reference and traversal functions */
-void canque_edge_do_dead(struct canque_edge_t *edge, int dead_fl);
+void canque_edge_do_dead(struct canque_edge_t *edge);
+/**
+ * canque_edge_incref - increments edge reference count
+ * @edge: pointer to the edge structure
+ */
static inline
void canque_edge_incref(struct canque_edge_t *edge)
{
}
+/**
+ * canque_edge_lock_both_ends - acquires the ends_lock of both edge ends
+ * @inends: input side ends of the edge
+ * @outends: output side ends of the edge
+ *
+ * The two spin-locks are always taken in ascending address order so that
+ * concurrent callers locking the same pair of ends cannot deadlock
+ * (ABBA avoidance); when both ends are the same structure only one
+ * lock is taken. Interrupts are disabled by the first (irqsave) lock.
+ *
+ * Return Value: IRQ flags to be handed to canque_edge_unlock_both_ends().
+ */
static inline
-void canque_edge_decref(struct canque_edge_t *edge)
+can_spin_irqflags_t canque_edge_lock_both_ends(struct canque_ends_t *inends, struct canque_ends_t *outends)
+{
+ can_spin_irqflags_t flags;
+ if(inends<outends) {
+ can_spin_lock_irqsave(&inends->ends_lock, flags);
+ can_spin_lock(&outends->ends_lock);
+ }else{
+ can_spin_lock_irqsave(&outends->ends_lock, flags);
+ if(outends!=inends) can_spin_lock(&inends->ends_lock);
+ }
+ return flags;
+}
+
+/**
+ * canque_edge_unlock_both_ends - releases the ends_lock of both edge ends
+ * @inends: input side ends of the edge
+ * @outends: output side ends of the edge
+ * @flags: IRQ flags returned by the matching canque_edge_lock_both_ends()
+ *
+ * The outends lock is dropped first (skipped when both ends are the same
+ * structure); the final irqrestore on inends re-enables interrupts.
+ */
+static inline
+void canque_edge_unlock_both_ends(struct canque_ends_t *inends, struct canque_ends_t *outends, can_spin_irqflags_t flags)
+{
+ if(outends!=inends) can_spin_unlock(&outends->ends_lock);
+ can_spin_unlock_irqrestore(&inends->ends_lock, flags);
+}
+
+/* Non-inlined version of edge reference decrement */
+void __canque_edge_decref(struct canque_edge_t *edge);
+
+/**
+ * __canque_edge_decref_body - locked slow path of edge reference decrement
+ * @edge: pointer to the edge structure
+ *
+ * Drops one reference with both ends locked. When the count reaches zero
+ * and the DEAD flag was not already set, the edge is disposed of by
+ * canque_edge_do_dead() after both locks have been released.
+ */
+static inline
+void __canque_edge_decref_body(struct canque_edge_t *edge)
{
can_spin_irqflags_t flags;
+ int dead_fl=0;
struct canque_ends_t *inends=edge->inends;
struct canque_ends_t *outends=edge->outends;
- int dead_fl;
-
- can_spin_lock_irqsave(&inends->ends_lock, flags);
- can_spin_lock(&outends->ends_lock);
+
+ flags=canque_edge_lock_both_ends(inends, outends);
if(atomic_dec_and_test(&edge->edge_used)) {
- dead_fl=canque_fifo_test_and_set_fl(&edge->fifo,DEAD);
- /* Because of former evolution of edge references
+ /* only the caller that first sets DEAD performs the disposal */
+ dead_fl=!canque_fifo_test_and_set_fl(&edge->fifo,DEAD);
+ /* Because of former evolution of edge references
management notify of CANQUEUE_NOTIFY_NOUSR could
be moved to canque_edge_do_dead :-) */
- can_spin_unlock(&outends->ends_lock);
- can_spin_unlock_irqrestore(&inends->ends_lock, flags);
- canque_edge_do_dead(edge, dead_fl);
- } else {
- can_spin_unlock(&outends->ends_lock);
- can_spin_unlock_irqrestore(&inends->ends_lock, flags);
}
+ canque_edge_unlock_both_ends(inends, outends, flags);
+ if(dead_fl) canque_edge_do_dead(edge);
}
+#ifndef CAN_HAVE_ARCH_CMPXCHG
+/**
+ * canque_edge_decref - decrements edge reference count
+ * @edge: pointer to the edge structure
+ *
+ * This function has to be called without lock held for both ends of edge.
+ * If reference count drops to 0, function canque_edge_do_dead()
+ * is called.
+ * Generic fallback used when the architecture provides no cmpxchg;
+ * it always takes the locked path in __canque_edge_decref_body().
+ */
+static inline
+void canque_edge_decref(struct canque_edge_t *edge)
+{
+ __canque_edge_decref_body(edge);
+}
+#else
+/**
+ * canque_edge_decref - decrements edge reference count
+ * @edge: pointer to the edge structure
+ *
+ * Lock-free fast path used when the architecture provides cmpxchg.
+ * This function has to be called without lock held for both ends of edge.
+ * The counter is decremented by a cmpxchg retry loop while it stays
+ * above one; the possible transition to zero is delegated to the
+ * out-of-line slow path __canque_edge_decref(), which takes both
+ * ends locks and runs canque_edge_do_dead() handling.
+ */
+static inline
+void canque_edge_decref(struct canque_edge_t *edge)
+{
+ int x, y;
+
+ x = atomic_read(&edge->edge_used);
+ do{
+ if(x<=1)
+ return __canque_edge_decref(edge);
+ y=x;
+ #ifdef CAN_HAVE_ATOMIC_CMPXCHG
+ /* atomic_cmpxchg returns the previous value; it must be stored
+    back into x so a failed exchange (x!=y) retries the loop */
+ x=atomic_cmpxchg(&edge->edge_used, x, x-1);
+ #else /* workaround for case that atomic_cmpxchg is not defined */
+ /* This code strongly depends on the definition of atomic_t !!!! */
+ x=__cmpxchg(&edge->edge_used, x, x-1, sizeof(atomic_t));
+ /* If even this does not help, comment out CAN_HAVE_ARCH_CMPXCHG in can_sysdep.h */
+ #endif
+ } while(x!=y);
+}
+#endif
+
static inline
struct canque_edge_t *canque_first_inedge(struct canque_ends_t *qends)
{
can_spin_irqflags_t flags;
struct list_head *entry;
struct canque_edge_t *edge;
-
+
can_spin_lock_irqsave(&qends->ends_lock, flags);
entry=qends->inlist.next;
skip_dead:
can_spin_irqflags_t flags;
struct list_head *entry;
struct canque_edge_t *next;
-
+
can_spin_lock_irqsave(&qends->ends_lock, flags);
entry=edge->inpeers.next;
skip_dead:
can_spin_irqflags_t flags;
struct list_head *entry;
struct canque_edge_t *edge;
-
+
can_spin_lock_irqsave(&qends->ends_lock, flags);
entry=qends->outlist.next;
skip_dead:
can_spin_irqflags_t flags;
struct list_head *entry;
struct canque_edge_t *next;
-
+
can_spin_lock_irqsave(&qends->ends_lock, flags);
entry=edge->outpeers.next;
skip_dead: