1 /* can_sysdep.h - hides differences between individual Linux kernel
2 * versions and RT extensions
3 * Linux CAN-bus device driver.
4 * Written by Pavel Pisa - OCERA team member
5 * email:pisa@cmp.felk.cvut.cz
6 * This software is released under the GPL-License.
7 * Version lincan-0.3 17 Jun 2004
17 #include <rtl_mutex.h>
18 #include <rtl_sched.h>
20 #endif /*CAN_WITH_RTL*/
// typedef unsigned long atomic_t;
/* Kernel-style atomic counter type; manipulated only through the
 * atomic_* helpers defined later in this file. */
typedef struct { volatile int counter; } atomic_t;
/* Full memory barrier, mapped to the compiler intrinsic. */
#define mb() __memory_barrier()
31 * container_of - cast a member of a structure out to the containing structure
32 * @ptr: the pointer to the member.
33 * @type: the type of the container struct this is embedded in.
34 * @member: the name of the member within the struct.
/* Kernel container_of(): given a pointer to a member, recover a pointer to
 * the enclosing structure (GNU statement expression; the typeof-checked
 * temporary catches pointer-type mismatches at compile time). */
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
42 /*#define __NO_VERSION__*/
43 /*#include <linux/module.h>*/
45 // #include <linux/version.h>
46 // #include <linux/wait.h>
47 // #include <linux/list.h>
48 // #include <linux/fs.h>
49 // #include <linux/ioport.h>
50 // #include <linux/delay.h>
51 // #include <linux/sched.h>
52 // #include <linux/interrupt.h>
53 // #include <asm/errno.h>
55 // #include <asm/io.h>
56 // #include <asm/atomic.h>
57 // #include <asm/irq.h>
58 // #include <asm/uaccess.h>
60 // #include "lincan_config.h"
63 // #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0))
64 // #define CAN_ENABLE_KERN_FASYNC
66 // #define CAN_ENABLE_PCI_SUPPORT
68 // #ifdef CONFIG_OC_LINCANVME
69 // #define CAN_ENABLE_VME_SUPPORT
73 // #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
74 // #include <linux/malloc.h>
76 // #include <linux/slab.h>
79 // #ifdef CAN_ENABLE_PCI_SUPPORT
80 // #include "linux/pci.h"
81 // #endif /*CAN_ENABLE_PCI_SUPPORT*/
/* The next condition is not strictly correct, because 2.3.0, 2.3.1 and 2.3.2
   kernels need the next definitions too */
85 // #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,2,19)) /* may need correction */
/* Pre-2.2.19 compatibility: emulate the modern wait-queue API on top of
 * the old "struct wait_queue *" list head. NOTE: these are plain textual
 * macro substitutions for what are real types/functions in later kernels. */
#define wait_queue_head_t struct wait_queue *
#define wait_queue_t struct wait_queue
/* A head is just a NULL-initialized list pointer. */
#define init_waitqueue_head(queue_head) (*queue_head=NULL)
/* Prepare an entry for the given task; comma expression keeps it usable
 * where a single statement/expression is expected. */
#define init_waitqueue_entry(qentry,qtask) \
(qentry->next=NULL,qentry->task=qtask)
#define DECLARE_WAIT_QUEUE_HEAD(name) \
struct wait_queue * name=NULL
#define DECLARE_WAITQUEUE(wait, current) \
struct wait_queue wait = { current, NULL }
95 // #define init_MUTEX(sem) (*sem=MUTEX)
96 // #define DECLARE_MUTEX(name) struct semaphore name=MUTEX
97 // #endif /* 2.2.19 */
99 // #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)) && !defined(DECLARE_TASKLET)
100 // #define tasklet_struct tq_struct
101 /* #define DECLARE_TASKLET(_name, _func, _data) \
102 struct tq_struct _name = { sync: 0, routine: _func, data: (void*)_data }*/
104 // /* void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data); */
105 /* #define tasklet_init(_tasklet, _func, _data) \
107 (_tasklet)->sync=0; \
108 (_tasklet)->routine=_func; \
109 (_tasklet)->data=(void*)_data; \
112 // /* void tasklet_schedule(struct tasklet_struct *t) */
113 /* #define tasklet_schedule(_tasklet) \
115 queue_task(_tasklet,&tq_immediate); \
116 mark_bh(IMMEDIATE_BH); \
119 // /* void tasklet_kill(struct tasklet_struct *t); */
120 /* #define tasklet_kill(_tasklet) \
123 // #endif /* 2.4.0 */
126 // #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,7)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
129 (MINOR(file->f_dentry->d_inode->i_rdev))
131 // #else /* Linux kernel < 2.5.7 or >= 2.6.0 */
134 (minor(file->f_dentry->d_inode->i_rdev))*/
136 // #endif /* Linux kernel < 2.5.7 or >= 2.6.0 */
138 // #ifndef CAN_WITH_RTL
139 // #if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,5,68)) && !defined(IRQ_RETVAL))
/* Old-kernel IRQ compatibility (pre-IRQ_RETVAL): handlers return void,
 * so the handled/retval markers expand to nothing. */
typedef void can_irqreturn_t;
#define CAN_IRQ_HANDLED
#define CAN_IRQ_RETVAL(x)
144 // #else /* <=2.5.67 */
145 // typedef irqreturn_t can_irqreturn_t;
146 // #define CAN_IRQ_NONE IRQ_NONE
147 // #define CAN_IRQ_HANDLED IRQ_HANDLED
148 // #define CAN_IRQ_RETVAL IRQ_RETVAL
149 // #endif /* <=2.5.67 */
150 // #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
151 /* #define CAN_IRQ_HANDLER_ARGS(irq_number, dev_id) \
152 int irq_number, void *dev_id, struct pt_regs *regs*/
153 // #else /* < 2.6.19 */
/* IRQ handler parameter list for kernels >= 2.6.19, which dropped the
 * trailing "struct pt_regs *regs" argument (see commented branch above). */
#define CAN_IRQ_HANDLER_ARGS(irq_number, dev_id) \
int irq_number, void *dev_id
156 // #endif /* < 2.6.19 */
157 // #else /*CAN_WITH_RTL*/
158 // typedef int can_irqreturn_t;
159 // #define CAN_IRQ_NONE 0
160 // #define CAN_IRQ_HANDLED 1
161 // #define CAN_IRQ_RETVAL(x) ((x) != 0)
162 /* #define CAN_IRQ_HANDLER_ARGS(irq_number, dev_id) \
163 int irq_number, void *dev_id, struct pt_regs *regs*/
164 // #endif /*CAN_WITH_RTL*/
166 // #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,33))
167 // #define can_synchronize_irq(irqnum) synchronize_irq()
168 // #else /* >=2.5.33 */
/* Kernels >= 2.5.33: synchronize_irq() takes the IRQ number directly. */
#define can_synchronize_irq synchronize_irq
172 // #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
173 // #define del_timer_sync del_timer
174 // #endif /* <2.4.0 */
176 // #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
/* Pre-2.6.9 kernels: I/O addresses are plain unsigned longs, so the
 * pointer<->integer conversions are identity casts. */
typedef unsigned long can_ioptr_t;
#define can_ioptr2ulong(ioaddr) ((unsigned long)(ioaddr))
#define can_ulong2ioptr(addr) ((unsigned long)(addr))
/* Port I/O accessors; the width suffix must select the matching in*/
/* out* primitive. */
#define can_inb(ioaddr) inb(ioaddr)
#define can_outb(data,ioaddr) outb(data,ioaddr)
/* FIX: the 16- and 32-bit accessors previously expanded to the 8-bit
 * inb()/outb(), silently truncating word/long transfers. Map them to
 * inw/outw and inl/outl as their names promise. */
#define can_inw(ioaddr) inw(ioaddr)
#define can_outw(data,ioaddr) outw(data,ioaddr)
#define can_inl(ioaddr) inl(ioaddr)
#define can_outl(data,ioaddr) outl(data,ioaddr)
186 // #else /* >=2.6.9 */
187 // typedef void __iomem * can_ioptr_t;
188 // #define can_ioptr2ulong(ioaddr) ((unsigned long __force)(ioaddr))
189 // #define can_ulong2ioptr(addr) ((can_ioptr_t)(addr))
190 // #define can_inb(ioaddr) inb(can_ioptr2ulong(ioaddr))
191 // #define can_outb(data,ioaddr) outb(data,can_ioptr2ulong(ioaddr))
// #define can_inw(ioaddr) inw(can_ioptr2ulong(ioaddr))
// #define can_outw(data,ioaddr) outw(data,can_ioptr2ulong(ioaddr))
// #define can_inl(ioaddr) inl(can_ioptr2ulong(ioaddr))
// #define can_outl(data,ioaddr) outl(data,can_ioptr2ulong(ioaddr))
/* Memory-mapped I/O accessors map 1:1 onto the kernel primitives. */
#define can_readb readb
#define can_writeb writeb
#define can_readw readw
#define can_writew writew
#define can_readl readl
#define can_writel writel
/* An I/O port number converts to a can_ioptr_t like any other integer. */
#define can_ioport2ioptr can_ulong2ioptr
/* Advertise compare-and-exchange support when the architecture provides
 * a native cmpxchg implementation. */
#ifdef __HAVE_ARCH_CMPXCHG
#define CAN_HAVE_ARCH_CMPXCHG
211 // #ifndef CAN_WITH_RTL
212 /* Standard LINUX kernel */
/* Spinlock emulation: there is no real lock state -- "locking" simply
 * disables interrupts globally (cli/sti), i.e. uniprocessor-only. */
#define can_spinlock_t long
#define can_spin_irqflags_t unsigned long
#define can_spin_lock(lock) cli()
#define can_spin_unlock(lock) sti()
#define can_spin_lock_irqsave(lock,flags) save_and_cli(flags)
#define can_spin_unlock_irqrestore(lock,flags) restore_flags(flags)
#define can_spin_lock_init can_splck_init
#define CAN_DEFINE_SPINLOCK(x) can_spinlock_t x = 0
/* Initialize a lock word (definition continues beyond this excerpt). */
void can_splck_init(can_spinlock_t *x)
230 // #if !defined(CONFIG_PREEMPT_RT) && ( defined(CONFIG_PREEMPT) || (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) )
231 // #define can_preempt_disable preempt_disable
232 // #define can_preempt_enable preempt_enable
233 // #else /*CONFIG_PREEMPT*/
/* Non-preemptive configuration: preemption control is a no-op. */
#define can_preempt_disable() do { } while (0)
#define can_preempt_enable() do { } while (0)
236 // #endif /*CONFIG_PREEMPT*/
238 // #define can_enable_irq sti()
239 // #define can_disable_irq cli()
/* IRQ enable/disable tracked in a software flag variable rather than by
 * touching the hardware (contrast with the commented sti()/cli() variant). */
#define can_enable_irq(var) (var=1)
#define can_disable_irq(var) (var=0)
/* Kernel logging maps to plain printf in this environment. */
#define can_printk printf
247 /// LINUX src: include/asm-arm/bitops.h
/* Map the generic bit-op names onto the IRQ-masking atomic implementations
 * below (modeled on LINUX include/asm-arm/bitops.h). */
#define set_bit ____atomic_set_bit
#define clear_bit ____atomic_clear_bit
#define change_bit ____atomic_change_bit
#define test_and_set_bit ____atomic_test_and_set_bit
#define test_and_clear_bit ____atomic_test_and_clear_bit
#define test_and_change_bit ____atomic_test_and_change_bit
/* FIX: removed the trailing ';' that was baked into the two macro bodies.
 * A semicolon inside the macro expands to an extra empty statement at the
 * call site and breaks use in un-braced if/else branches; callers already
 * supply their own ';'. */
#define raw_local_irq_save(flags) save_and_cli(flags)
#define raw_local_irq_restore(flags) restore_flags(flags)
258 * These functions are the basis of our bit ops.
260 * First, the atomic bitops. These use native endian.
/* Atomically set one bit: IRQs are masked around the read-modify-write.
 * mask selects the bit within a 32-bit word (bit & 31). */
static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
unsigned long mask = 1UL << (bit & 31);
raw_local_irq_save(flags);
raw_local_irq_restore(flags);
/* Atomically clear one bit; same IRQ-masked read-modify-write pattern. */
static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
unsigned long mask = 1UL << (bit & 31);
raw_local_irq_save(flags);
raw_local_irq_restore(flags);
/* Atomically toggle one bit; same IRQ-masked read-modify-write pattern. */
static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
unsigned long mask = 1UL << (bit & 31);
raw_local_irq_save(flags);
raw_local_irq_restore(flags);
/* Atomically set one bit and (per the kernel contract implied by the name)
 * report its previous state; IRQ-masked read-modify-write. */
____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
unsigned long mask = 1UL << (bit & 31);
raw_local_irq_save(flags);
raw_local_irq_restore(flags);
/* Atomically clear one bit and report its previous state (see above). */
____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
unsigned long mask = 1UL << (bit & 31);
raw_local_irq_save(flags);
raw_local_irq_restore(flags);
/* Atomically toggle one bit and report its previous state (see above). */
____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
unsigned long mask = 1UL << (bit & 31);
raw_local_irq_save(flags);
raw_local_irq_restore(flags);
349 /// LINUX src: include/asm-arm/atomic.h
/* Plain read of the counter; the volatile qualifier forbids caching. */
#define atomic_read(v) ((v)->counter)
/* Plain store; safe only where a single write is inherently atomic. */
#define atomic_set(v,i) (((v)->counter) = (i))
/* Add i to v and return the new counter value; the update is protected
 * by masking interrupts (uniprocessor atomicity). */
static inline int atomic_add_return(int i, atomic_t *v)
raw_local_irq_save(flags);
v->counter = val += i;
raw_local_irq_restore(flags);
/* Subtract i from v and return the new counter value; IRQ-masked update. */
static inline int atomic_sub_return(int i, atomic_t *v)
raw_local_irq_save(flags);
v->counter = val -= i;
raw_local_irq_restore(flags);
/* Compare-and-swap on the counter, serialized by masking interrupts. */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
raw_local_irq_save(flags);
raw_local_irq_restore(flags);
/* Unconditional exchange, delegated to the arch xchg() primitive. */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/* Add a to v unless the counter equals u; retries via cmpxchg until the
 * counter is seen unchanged or hits u. */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
/* Increment only if the counter is non-zero. */
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* Arithmetic convenience wrappers built on the *_return primitives;
 * the (void) casts discard the returned value. */
#define atomic_add(i, v) (void) atomic_add_return(i, v)
#define atomic_inc(v) (void) atomic_add_return(1, v)
#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
#define atomic_dec(v) (void) atomic_sub_return(1, v)
/* Predicates on the post-operation value. */
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
427 /// LINUX src: include/linux/interrupt.h
/* Static initializer for an atomic_t (braces match the struct layout). */
#define ATOMIC_INIT(i) { (i) }
431 /* Tasklets --- multithreaded analogue of BHs.
433 Main feature differing them of generic softirqs: tasklet
434 is running only on one CPU simultaneously.
436 Main feature differing them of BHs: different tasklets
437 may be run simultaneously on different CPUs.
440 * If tasklet_schedule() is called, then tasklet is guaranteed
441 to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
443 started, it will be executed only once.
444 * If this tasklet is already running on another CPU (or schedule is called
445 from tasklet itself), it is rescheduled for later.
446 * Tasklet is strictly serialized wrt itself, but not
447 wrt another tasklets. If client needs some intertask synchronization,
448 he makes it with spinlocks.
/* Minimal tasklet emulation (modeled on LINUX include/linux/interrupt.h). */
struct tasklet_struct
/* Singly-linked list of pending tasklets. */
struct tasklet_struct *next;
/* Handler invoked when the tasklet runs. */
void (*func)(unsigned long);
/* Statically define an enabled tasklet; the ATOMIC_INIT(0) initializer is
 * the enable count (0 = enabled), matching the kernel field order. */
#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
/* Same, but created disabled (count starts at 1). */
#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
466 /* CAN message timestamp source, it is called from interrupt context */
467 //#define can_gettimeofday do_gettimeofday
469 /// from linux/timer.h
/* Forward declaration of the timer base; opaque at this level. */
struct tvec_t_base_s;
/* struct timer_list fields (from linux/timer.h): */
struct list_head entry;
/* Expiry time (jiffies in the stock kernel -- confirm for this port). */
unsigned long expires;
/* Callback invoked on expiry. */
void (*function)(unsigned long);
struct tvec_t_base_s *base;
#ifdef CONFIG_TIMER_STATS
/* Busy-wait delay: converts 'time' (microseconds) into loop iterations
 * from the CPU clock CCLK. NOTE(review): the /2000000 divisor implies
 * ~2 clocks per loop iteration -- confirm against the target's timing. */
static inline void udelay(long time)
volatile long ticks=(time * CCLK) / 2000000;
497 // #else /*CAN_WITH_RTL*/
499 // #define can_spinlock_t long
500 // #define can_spin_irqflags_t unsigned long
501 // #define can_spin_lock save_and_cli
502 // #define can_spin_unlock restore_flags
503 // #define can_spin_lock_irqsave save_and_cli
504 // #define can_spin_unlock_irqrestore restore_flags
505 // #define can_spin_lock_init can_splck_init
507 // #define CAN_DEFINE_SPINLOCK(x) can_spinlock_t x = 0
509 // #define can_preempt_disable() do { } while (0)
510 // #define can_preempt_enable() do { } while (0)
512 // #define can_enable_irq sti
513 // #define can_disable_irq cli
515 // #define can_printk rtl_printf
518 // * terrible hack to test rtl_file private_data concept, ugh !!!
519 // * this would result in crash on architectures, where
520 // * sizeof(int) < sizeof(void *)
522 // #define can_set_rtl_file_private_data(fptr, p) do{ fptr->f_minor=(long)(p); } while(0)
523 // #define can_get_rtl_file_private_data(fptr) ((void*)((fptr)->f_minor))
525 // extern can_spinlock_t can_irq_manipulation_lock;
527 // /* CAN message timestamp source, it is called from interrupt context */
528 /*#define can_gettimeofday(ptr) do {\
529 struct timespec temp_timespec;\
530 clock_gettime(CLOCK_REALTIME,&temp_timespec);\
531 ptr->tv_usec=temp_timespec.tv_nsec/1000;\
532 ptr->tv_sec=temp_timespec.tv_sec;\
535 // #endif /*CAN_WITH_RTL*/
537 #endif /*_CAN_SYSDEP_H*/