1 /**************************************************************************/
/* File: can_sysless.h - provides can_sysdep.h definitions to hide        */
/* differences between the Linux kernel and embedded target builds        */
5 /* LinCAN - (Not only) Linux CAN bus driver */
6 /* Copyright (C) 2002-2009 DCE FEE CTU Prague <http://dce.felk.cvut.cz> */
7 /* Copyright (C) 2002-2009 Pavel Pisa <pisa@cmp.felk.cvut.cz> */
8 /* Funded by OCERA and FRESCOR IST projects */
10 /* LinCAN is free software; you can redistribute it and/or modify it */
11 /* under terms of the GNU General Public License as published by the */
12 /* Free Software Foundation; either version 2, or (at your option) any */
13 /* later version. LinCAN is distributed in the hope that it will be */
14 /* useful, but WITHOUT ANY WARRANTY; without even the implied warranty */
15 /* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU */
16 /* General Public License for more details. You should have received a */
17 /* copy of the GNU General Public License along with LinCAN; see file */
18 /* COPYING. If not, write to the Free Software Foundation, 675 Mass Ave, */
19 /* Cambridge, MA 02139, USA. */
21 /* To allow use of LinCAN in the compact embedded systems firmware */
22 /* and RT-executives (RTEMS for example), main authors agree with next */
23 /* special exception: */
25 /* Including LinCAN header files in a file, instantiating LinCAN generics */
26 /* or templates, or linking other files with LinCAN objects to produce */
27 /* an application image/executable, does not by itself cause the */
28 /* resulting application image/executable to be covered by */
29 /* the GNU General Public License. */
30 /* This exception does not however invalidate any other reasons */
31 /* why the executable file might be covered by the GNU Public License. */
/* Publication of enhanced or derived LinCAN files is however required.   */
33 /**************************************************************************/
/* Atomic counter type for the system-less build: a plain int wrapped in a
 * struct so it cannot be touched except through the atomic_* accessors
 * defined later in this file.
 * NOTE(review): "volatile" alone gives no SMP atomicity; this relies on the
 * single-CPU assumption of the embedded target stated below. */
typedef struct { volatile int counter; } atomic_t;

/* Full memory barrier, mapped onto the compiler/target intrinsic. */
#define mb() __memory_barrier()
/**
 * container_of - cast a member of a structure out to the containing structure
 * @ptr: the pointer to the member.
 * @type: the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 */
#define container_of(ptr, type, member) ({ \
	const typeof( ((type *)0)->member ) *__mptr = (ptr); \
	(type *)( (char *)__mptr - offsetof(type,member) );})
/* Minimal wait-queue compatibility layer: on the system-less target a wait
 * queue head degenerates to a pointer to a linked list of wait_queue
 * entries, and an empty queue is a NULL head.
 * NOTE(review): the layout of struct wait_queue (next/task member order) is
 * inferred from the initializers below -- confirm against the target's
 * declaration of struct wait_queue. */
#define wait_queue_head_t struct wait_queue *
#define wait_queue_t struct wait_queue
#define init_waitqueue_head(queue_head) (*queue_head=NULL)
/* Fill a queue entry: no successor yet, owned by qtask. */
#define init_waitqueue_entry(qentry,qtask) \
	(qentry->next=NULL,qentry->task=qtask)
#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue * name=NULL
#define DECLARE_WAITQUEUE(wait, current) \
	struct wait_queue wait = { current, NULL }
/* Interrupt-handler return type and values, mapped 1:1 onto the host ones. */
typedef irqreturn_t can_irqreturn_t;
#define CAN_IRQ_NONE IRQ_NONE
#define CAN_IRQ_HANDLED IRQ_HANDLED
#define CAN_IRQ_RETVAL IRQ_RETVAL

/* Canonical argument list for CAN interrupt handlers. */
#define CAN_IRQ_HANDLER_ARGS(irq_number, dev_id) \
	int irq_number, void *dev_id
/* Single-CPU target: no handler can run concurrently, so nothing to wait
 * for -- a no-op. */
#define can_synchronize_irq(irqnum) do {} while(0)
/* I/O-space address type and converters for the system-less build: port
 * addresses are carried around as plain unsigned longs. */
typedef unsigned long can_ioptr_t;
#define can_ioptr2ulong(ioaddr) ((unsigned long)(ioaddr))
#define can_ulong2ioptr(addr) ((unsigned long)(addr))

/* Port I/O accessors.
 * Fix: the 16- and 32-bit variants previously expanded to the byte-sized
 * inb()/outb(), silently truncating word and long accesses; route them to
 * the correctly sized primitives. */
#define can_inb(ioaddr) inb(ioaddr)
#define can_outb(data,ioaddr) outb(data,ioaddr)
#define can_inw(ioaddr) inw(ioaddr)
#define can_outw(data,ioaddr) outw(data,ioaddr)
#define can_inl(ioaddr) inl(ioaddr)
#define can_outl(data,ioaddr) outl(data,ioaddr)
/* Memory-mapped register accessors map directly onto the host primitives. */
#define can_readb readb
#define can_writeb writeb
#define can_readw readw
#define can_writew writew
#define can_readl readl
#define can_writel writel

/* An I/O-port number is used directly as an I/O pointer on this target. */
#define can_ioport2ioptr can_ulong2ioptr
/* Advertise an architecture-provided cmpxchg to the driver core.
 * Fix: the conditional was left unterminated in this chunk, which would put
 * the remainder of the header under the #ifdef. */
#ifdef __HAVE_ARCH_CMPXCHG
#define CAN_HAVE_ARCH_CMPXCHG
#endif /* __HAVE_ARCH_CMPXCHG */
/* Spinlock emulation for the system-less build. */
#define can_spinlock_t long
#define can_spin_irqflags_t unsigned long
/* Only one CPU is supposed on sys-less embedded target => no need for spinlocks */
#define can_spin_lock(lock) do { } while (0)
#define can_spin_unlock(lock) do { } while (0)
/* The IRQ-saving variants still mask interrupts, which keeps critical
 * sections atomic with respect to interrupt handlers. */
#define can_spin_lock_irqsave(lock,flags) save_and_cli(flags)
#define can_spin_unlock_irqrestore(lock,flags) restore_flags(flags)
#define can_spin_lock_init can_splck_init

/* Define a (dummy) spinlock, initially unlocked. */
#define CAN_DEFINE_SPINLOCK(x) can_spinlock_t x = 0
115 void can_splck_init(can_spinlock_t *x)
/* Single CPU, no preemptive scheduler: preemption control is a no-op. */
#define can_preempt_disable() do { } while (0)
#define can_preempt_enable() do { } while (0)

/* NOTE(review): can_enable_irq/can_disable_irq are defined twice below; in
 * the full file these are almost certainly the two branches of a
 * conditional that is not visible in this chunk -- confirm before relying
 * on either definition. */
#define can_enable_irq(var) do { } while (0)
#define can_disable_irq(var) do { } while (0)

#define can_enable_irq enable_irq
#define can_disable_irq disable_irq

/* Kernel-style message output maps to plain printf on this target. */
#define can_printk printf
/// LINUX src: include/asm-arm/bitops.h
/* Route the generic bit-operation names to the interrupt-masking
 * implementations defined below. */
#define set_bit ____atomic_set_bit
#define clear_bit ____atomic_clear_bit
#define change_bit ____atomic_change_bit
#define test_and_set_bit ____atomic_test_and_set_bit
#define test_and_clear_bit ____atomic_test_and_clear_bit
#define test_and_change_bit ____atomic_test_and_change_bit

/* Interrupt save/restore primitives.
 * Fix: the expansions must not carry a trailing ';' -- callers already
 * supply one, and the hidden extra statement breaks use in an unbraced
 * if/else body. */
#define raw_local_irq_save(flags) save_and_cli(flags)
#define raw_local_irq_restore(flags) restore_flags(flags)
/*
 * These functions are the basis of our bit ops.
 *
 * First, the atomic bitops. These use native endian.
 *
 * Atomically set bit 'bit' in the bitmap at 'p'; atomicity is obtained by
 * masking interrupts, which suffices on the single-CPU target.
 * Reconstructed per include/asm-arm/bitops.h: the visible chunk lost the
 * 'flags' declaration, the word-index advance, the store and the braces.
 */
static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	raw_local_irq_save(flags);
	*p |= mask;
	raw_local_irq_restore(flags);
}
/*
 * Atomically clear bit 'bit' in the bitmap at 'p' (interrupt masking).
 * Reconstructed per include/asm-arm/bitops.h (see ____atomic_set_bit note).
 */
static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	raw_local_irq_save(flags);
	*p &= ~mask;
	raw_local_irq_restore(flags);
}
/*
 * Atomically toggle bit 'bit' in the bitmap at 'p' (interrupt masking).
 * Reconstructed per include/asm-arm/bitops.h (see ____atomic_set_bit note).
 */
static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	raw_local_irq_save(flags);
	*p ^= mask;
	raw_local_irq_restore(flags);
}
/*
 * Atomically set bit 'bit' and return its previous value (non-zero if it
 * was already set). Reconstructed per include/asm-arm/bitops.h: the
 * visible chunk lost the return type, locals, stores and braces.
 */
static inline int
____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long res;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	raw_local_irq_save(flags);
	res = *p;
	*p = res | mask;
	raw_local_irq_restore(flags);

	return (res & mask) != 0;
}
/*
 * Atomically clear bit 'bit' and return its previous value (non-zero if it
 * was set). Reconstructed per include/asm-arm/bitops.h (see the
 * ____atomic_test_and_set_bit note).
 */
static inline int
____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long res;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	raw_local_irq_save(flags);
	res = *p;
	*p = res & ~mask;
	raw_local_irq_restore(flags);

	return (res & mask) != 0;
}
/*
 * Atomically toggle bit 'bit' and return its previous value (non-zero if
 * it was set). Reconstructed per include/asm-arm/bitops.h (see the
 * ____atomic_test_and_set_bit note).
 */
static inline int
____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
	unsigned long flags;
	unsigned long res;
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;

	raw_local_irq_save(flags);
	res = *p;
	*p = res ^ mask;
	raw_local_irq_restore(flags);

	return (res & mask) != 0;
}
/// LINUX src: include/asm-arm/atomic.h

/* Read the counter; a plain aligned load is atomic on this target. */
#define atomic_read(v) ((v)->counter)

/* Set the counter to i; plain store, same single-CPU reasoning. */
#define atomic_set(v,i) (((v)->counter) = (i))
244 static inline int atomic_add_return(int i, atomic_t *v)
249 raw_local_irq_save(flags);
251 v->counter = val += i;
252 raw_local_irq_restore(flags);
257 static inline int atomic_sub_return(int i, atomic_t *v)
262 raw_local_irq_save(flags);
264 v->counter = val -= i;
265 raw_local_irq_restore(flags);
270 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
275 raw_local_irq_save(flags);
279 raw_local_irq_restore(flags);
284 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
286 static inline int atomic_add_unless(atomic_t *v, int a, int u)
291 while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
/* Increment unless the counter is zero; non-zero return means incremented. */
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

/* Value-discarding convenience wrappers around the *_return primitives. */
#define atomic_add(i, v) (void) atomic_add_return(i, v)
#define atomic_inc(v) (void) atomic_add_return(1, v)
#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
#define atomic_dec(v) (void) atomic_sub_return(1, v)

/* Operate, then test the resulting value against zero / negative. */
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
/// LINUX src: include/linux/interrupt.h

/* Static initializer matching the one-member atomic_t struct layout. */
#define ATOMIC_INIT(i) { (i) }
320 /* Tasklets --- multithreaded analogue of BHs.
322 Main feature differing them of generic softirqs: tasklet
323 is running only on one CPU simultaneously.
325 Main feature differing them of BHs: different tasklets
326 may be run simultaneously on different CPUs.
329 * If tasklet_schedule() is called, then tasklet is guaranteed
330 to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
332 started, it will be executed only once.
333 * If this tasklet is already running on another CPU (or schedule is called
334 from tasklet itself), it is rescheduled for later.
335 * Tasklet is strictly serialized wrt itself, but not
336 wrt another tasklets. If client needs some intertask synchronization,
337 he makes it with spinlocks.
340 struct tasklet_struct
342 struct tasklet_struct *next;
345 void (*func)(unsigned long);
/* Define a tasklet that is initially enabled (count = 0). */
#define DECLARE_TASKLET(name, func, data) \
	struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

/* Define a tasklet that starts disabled (count = 1). */
#define DECLARE_TASKLET_DISABLED(name, func, data) \
	struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
355 /* CAN message timestamp source, it is called from interrupt context */
356 //#define can_gettimeofday do_gettimeofday
358 /// from linux/timer.h
360 struct tvec_t_base_s;
363 struct list_head entry;
364 unsigned long expires;
366 void (*function)(unsigned long);
369 struct tvec_t_base_s *base;
370 #ifdef CONFIG_TIMER_STATS
/* Busy-wait for approximately 'time' microseconds; the implementation is
 * supplied by the target support code. */
void udelay(long time);

#endif /*_CAN_SYSDEP_H*/