1 /*******************************************************************
2 Components for embedded applications built for
3 laboratory and medical instruments firmware
5 cpu_def.h - low level CPU support for C programs
6 atomic bit operations, interrupts and exceptions
8 Copyright (C) 2001 by Pavel Pisa pisa@cmp.felk.cvut.cz
9 (C) 2002 by PiKRON Ltd. http://www.pikron.com
11 Functions names and concept inspired by Linux kernel
13 *******************************************************************/
15 #ifndef _H8S_CPU_DEF_H
16 #define _H8S_CPU_DEF_H
18 /* atomic access routines */
20 /* It should be possible to generate more optimized code
21 if %0 were changed to %U0 and "r" to "rn", but neither GCC nor the
22 assembler wants to switch to the aa:8 or aaaa:16 instruction forms */
/* Helper for __xop_bit: emit one H8S bit instruction (__aln is "bset" or
   "bclr") on the byte *__pb, with the bit number __nr encoded as an
   immediate ("i") operand.  Intended for compile-time-constant __nr only. */
/* NOTE(review): this excerpt is truncated -- the last visible line still
   ends in '\', so at least one more continuation line of the macro body is
   missing here; verify against the complete file before editing. */
#define __CONST_DATA_XOP_BIT(__nr,__pb,__aln) \
    __asm__ __volatile__ ( __aln " %1,%0 /*mybit 1*/\n" : \
        "+m" (*__pb) : "i" (__nr), "r" (__pb)); \
/* __xop_bit(nr,v,__aln): apply the bit instruction __aln ("bset"/"bclr")
   to bit 'nr' of the object *v.  H8S bit instructions perform a single
   read-modify-write bus access, so the update is atomic w.r.t. interrupts.
   Byte selection: the H8S is big-endian, so bit 0 lives in the LAST byte
   of the object -- hence the sizeof(*__pv)-1-(__nr)/8 adjustment.
   For a compile-time-constant bit number the eight __CONST_DATA_XOP_BIT
   expansions let GCC emit the immediate form; otherwise the register form
   ("%w1") at the bottom is used. */
/* NOTE(review): this excerpt is truncated -- the brackets/else between the
   constant and generic branches and the closing "})" of the statement
   expression are not visible here; verify against the complete file. */
#define __xop_bit(nr,v,__aln) \
  ({ volatile __typeof(*v) *__pv =(__typeof(*v) *)(v); \
     unsigned short __nr=(nr); \
     volatile char *__pb=(char*)__pv; \
     __pb+=sizeof(*__pv)-1-(__nr)/8; \
     if(__builtin_constant_p(__nr)) \
       __CONST_DATA_XOP_BIT(0,__pb,__aln); \
       __CONST_DATA_XOP_BIT(1,__pb,__aln); \
       __CONST_DATA_XOP_BIT(2,__pb,__aln); \
       __CONST_DATA_XOP_BIT(3,__pb,__aln); \
       __CONST_DATA_XOP_BIT(4,__pb,__aln); \
       __CONST_DATA_XOP_BIT(5,__pb,__aln); \
       __CONST_DATA_XOP_BIT(6,__pb,__aln); \
       __CONST_DATA_XOP_BIT(7,__pb,__aln); \
     __asm__ __volatile__ ( __aln " %w1,%0 /*mybit 2*/\n" : \
        "+m" (*__pb) : "r" (__nr), "r" (__pb)); \
/* Linux-kernel-style atomic bit operations on an arbitrary object *v:
   set_bit(nr,v)   -- atomically set bit nr,
   clear_bit(nr,v) -- atomically clear bit nr. */
#define set_bit(nr,v) (__xop_bit((nr),(v),"bset"))

#define clear_bit(nr,v) (__xop_bit((nr),(v),"bclr"))
/* Apply __aln ("bset"/"bclr") to every bit of the byte *v that is set in
   the (expected compile-time-constant) 8-bit mask.  Each emitted bit
   instruction is individually atomic.  __acondata is the asm constraint
   used for the bit-number operand; __aconaddr is accepted for symmetry
   with callers but is not referenced in the visible body. */
/* NOTE(review): this excerpt is truncated -- the closing of the statement
   expression ("})") is not visible here; verify against the full file. */
#define __xcase_xop_mask_b1(mask,v,__aln,__aconaddr,__acondata) \
  ({ volatile char *__pv=(char*)(v); \
     unsigned __mask=(mask); \
     if(__mask&0x0001) __asm__ __volatile__(__aln " /*mymask b1*/\n": "=U" (*__pv) : __acondata (0), "0" (*__pv)); \
     if(__mask&0x0002) __asm__ __volatile__(__aln " /*mymask b1*/\n": "=U" (*__pv) : __acondata (1), "0" (*__pv)); \
     if(__mask&0x0004) __asm__ __volatile__(__aln " /*mymask b1*/\n": "=U" (*__pv) : __acondata (2), "0" (*__pv)); \
     if(__mask&0x0008) __asm__ __volatile__(__aln " /*mymask b1*/\n": "=U" (*__pv) : __acondata (3), "0" (*__pv)); \
     if(__mask&0x0010) __asm__ __volatile__(__aln " /*mymask b1*/\n": "=U" (*__pv) : __acondata (4), "0" (*__pv)); \
     if(__mask&0x0020) __asm__ __volatile__(__aln " /*mymask b1*/\n": "=U" (*__pv) : __acondata (5), "0" (*__pv)); \
     if(__mask&0x0040) __asm__ __volatile__(__aln " /*mymask b1*/\n": "=U" (*__pv) : __acondata (6), "0" (*__pv)); \
     if(__mask&0x0080) __asm__ __volatile__(__aln " /*mymask b1*/\n": "=U" (*__pv) : __acondata (7), "0" (*__pv)); \
/* Byte-wide atomic mask clear/set.  The _constant_ variants pass the "i"
   address constraint, the _generic_ ones "r"; atomic_*_mask_b1 selects
   between them with __builtin_constant_p on the address v. */
#define __constant_atomic_clear_mask_b1(mask, v) \
   __xcase_xop_mask_b1(mask,v,"bclr %1,%0\n","i","n")

#define __generic_atomic_clear_mask_b1(mask, v) \
   __xcase_xop_mask_b1(mask,v,"bclr %1,%0\n","r","n")

/* Atomically clear in the byte *v every bit set in the 8-bit mask. */
#define atomic_clear_mask_b1(mask, v) \
  ( __builtin_constant_p(v) ? \
    __constant_atomic_clear_mask_b1(mask, v) : \
    __generic_atomic_clear_mask_b1(mask, v))

#define __constant_atomic_set_mask_b1(mask, v) \
   __xcase_xop_mask_b1(mask,v,"bset %1,%0\n","i","n")

#define __generic_atomic_set_mask_b1(mask, v) \
   __xcase_xop_mask_b1(mask,v,"bset %1,%0\n","r","n")

/* Atomically set in the byte *v every bit set in the 8-bit mask. */
#define atomic_set_mask_b1(mask, v) \
  ( __builtin_constant_p(v) ?\
    __constant_atomic_set_mask_b1(mask, v) : \
    __generic_atomic_set_mask_b1(mask, v))
/* 16-bit variant of __xcase_xop_mask_b1: apply __aln to every bit of the
   word at v that is set in the (expected compile-time-constant) 16-bit
   mask.  Big-endian addressing: bits 0..7 live in the byte at (char*)v+1,
   bits 8..15 in the byte at (char*)v.  Each bit instruction is
   individually atomic. */
/* NOTE(review): this excerpt is truncated -- the closing of the statement
   expression ("})") is not visible here; verify against the full file. */
#define __xcase_xop_mask_w1(mask,v,__aln,__aconaddr,__acondata) \
  ({ volatile char *__pv; \
     unsigned __mask=(mask); \
     if(__mask&0x0001) { __pv=(char*)(v)+1; __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (0), "0" (*__pv)); } \
     if(__mask&0x0002) { __pv=(char*)(v)+1; __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (1), "0" (*__pv)); } \
     if(__mask&0x0004) { __pv=(char*)(v)+1; __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (2), "0" (*__pv)); } \
     if(__mask&0x0008) { __pv=(char*)(v)+1; __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (3), "0" (*__pv)); } \
     if(__mask&0x0010) { __pv=(char*)(v)+1; __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (4), "0" (*__pv)); } \
     if(__mask&0x0020) { __pv=(char*)(v)+1; __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (5), "0" (*__pv)); } \
     if(__mask&0x0040) { __pv=(char*)(v)+1; __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (6), "0" (*__pv)); } \
     if(__mask&0x0080) { __pv=(char*)(v)+1; __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (7), "0" (*__pv)); } \
     if(__mask&0x0100) { __pv=(char*)(v); __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (0), "0" (*__pv)); } \
     if(__mask&0x0200) { __pv=(char*)(v); __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (1), "0" (*__pv)); } \
     if(__mask&0x0400) { __pv=(char*)(v); __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (2), "0" (*__pv)); } \
     if(__mask&0x0800) { __pv=(char*)(v); __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (3), "0" (*__pv)); } \
     if(__mask&0x1000) { __pv=(char*)(v); __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (4), "0" (*__pv)); } \
     if(__mask&0x2000) { __pv=(char*)(v); __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (5), "0" (*__pv)); } \
     if(__mask&0x4000) { __pv=(char*)(v); __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (6), "0" (*__pv)); } \
     if(__mask&0x8000) { __pv=(char*)(v); __asm__ __volatile__(__aln " /*mymask w1*/\n": "=U" (*__pv) : __acondata (7), "0" (*__pv)); } \
/* Word-wide (16-bit) atomic mask clear/set, same selection pattern as the
   _b1 family: constant-address variants use the "i" constraint, generic
   ones "r", chosen by __builtin_constant_p on the address v. */
#define __constant_atomic_clear_mask_w1(mask, v) \
   __xcase_xop_mask_w1(mask,v,"bclr %1,%0\n","i","n")

#define __generic_atomic_clear_mask_w1(mask, v) \
   __xcase_xop_mask_w1(mask,v,"bclr %1,%0\n","r","n")

/* Atomically clear in the word *v every bit set in the 16-bit mask. */
#define atomic_clear_mask_w1(mask, v) \
  ( __builtin_constant_p(v) ? \
    __constant_atomic_clear_mask_w1(mask, v) : \
    __generic_atomic_clear_mask_w1(mask, v))

#define __constant_atomic_set_mask_w1(mask, v) \
   __xcase_xop_mask_w1(mask,v,"bset %1,%0\n","i","n")

/* NOTE(review): this variant passes "nP" where every sibling passes "n"
   for the data constraint -- possibly a typo; confirm against the H8
   machine-constraint list before normalizing. */
#define __generic_atomic_set_mask_w1(mask, v) \
   __xcase_xop_mask_w1(mask,v,"bset %1,%0\n","r","nP")

/* Atomically set in the word *v every bit set in the 16-bit mask. */
#define atomic_set_mask_w1(mask, v) \
  ( __builtin_constant_p(v) ?\
    __constant_atomic_set_mask_w1(mask, v) : \
    __generic_atomic_set_mask_w1(mask, v))
/* Whole-object atomic mask operations on 32-bit (.l), 16-bit (.w) and
   8-bit (.b) objects: a single and/or instruction operating on memory is
   one read-modify-write access on the H8S and therefore atomic w.r.t.
   interrupts.  clear takes the complement of mask so callers pass the
   bits to remove, not the bits to keep. */
/* NOTE(review): the "=m" output with a matching "0" memory input is an
   unusual constraint pairing (modern GCC would use "+m"); confirm against
   the compiler version in use before touching. */
#define atomic_clear_mask(mask, v) \
   __asm__ __volatile__("and.l %1,%0" : "=m" (*(v)) : "id" (~(mask)),"0"(*(v)))

#define atomic_set_mask(mask, v) \
   __asm__ __volatile__("or.l %1,%0" : "=m" (*(v)) : "id" (mask),"0"(*(v)))

#define atomic_clear_mask_w(mask, v) \
   __asm__ __volatile__("and.w %1,%0" : "=m" (*(v)) : "id" (~(mask)),"0"(*(v)))

#define atomic_set_mask_w(mask, v) \
   __asm__ __volatile__("or.w %1,%0" : "=m" (*(v)) : "id" (mask),"0"(*(v)))

#define atomic_clear_mask_b(mask, v) \
   __asm__ __volatile__("and.b %1,%0" : "=m" (*(v)) : "id" (~(mask)),"0"(*(v)))

#define atomic_set_mask_b(mask, v) \
   __asm__ __volatile__("or.b %1,%0" : "=m" (*(v)) : "id" (mask),"0"(*(v)))
160 /* Port access routines */
/* Memory-mapped I/O accessors (byte / word / long).
   The volatile-qualified pointer forces each access to really happen and
   keeps the compiler from caching or coalescing it; the statement
   expression makes read*() usable as a plain rvalue. */
#define readb(addr) \
   ({ unsigned char __res = *(volatile unsigned char *)(addr); __res; })
#define readw(addr) \
   ({ unsigned short __res = *(volatile unsigned short *)(addr); __res; })
#define readl(addr) \
   ({ unsigned int __res = *(volatile unsigned int *)(addr); __res; })

/* Store value b to I/O address addr; the result is discarded (void). */
#define writeb(b,addr) (void)(*(volatile unsigned char *)(addr) = (b))
#define writew(b,addr) (void)(*(volatile unsigned short *)(addr) = (b))
#define writel(b,addr) (void)(*(volatile unsigned int *)(addr) = (b))
173 /* Arithmetic functions */
/* sat_add_slsl(x,y): saturating signed 32-bit add -- x+y clamped to
   0x7fffffff on positive overflow and 0x80000000 on negative overflow. */
/* NOTE(review): this excerpt is truncated -- the conditional branches and
   labels around the two clamp constants and the asm output-operand line
   are missing here; verify against the complete file before editing. */
#define sat_add_slsl(__x,__y) \
   __asm__ (" add.l %2,%0\n" \
       " mov.l #0x7fffffff:32,%0\n" \
       "1: mov.l #0x80000000:32,%0\n" \
       : "0" ((long)__x), "r" ((long)__y) : "cc"); \
/* sat_sub_slsl(x,y): saturating signed 32-bit subtract -- x-y clamped to
   0x7fffffff / 0x80000000 on overflow (mirror of sat_add_slsl). */
/* NOTE(review): this excerpt is truncated -- the conditional branches and
   the asm output-operand line are missing here; verify against the
   complete file before editing. */
#define sat_sub_slsl(__x,__y) \
   __asm__ (" sub.l %2,%0\n" \
       " mov.l #0x7fffffff:32,%0\n" \
       "1: mov.l #0x80000000:32,%0\n" \
       : "0" ((long)__x), "r" ((long)__y) : "cc"); \
/* div_us_ulus(x,y): unsigned 32/16 divide via the H8S divxu.w
   instruction; yields the 16-bit quotient of (unsigned long)x divided by
   (unsigned short)y. */
/* NOTE(review): this excerpt is truncated -- the opening "({" and closing
   "})" of the statement expression are not visible here. */
#define div_us_ulus(__x,__y) \
   unsigned long __z=(__x); \
   __asm__ ("divxu.w %2,%0": "=r"(__z) \
       : "0" (__z), "r" ((unsigned short)(__y)) : "cc"); \
   (unsigned short)__z; \
/* div_ss_slss(x,y): signed 32/16 divide via the H8S divxs.w instruction. */
/* NOTE(review): the operand and result use unsigned variables/casts even
   though divxs performs a signed divide -- correct for the two's
   complement bit patterns handed to the instruction, but the unsigned
   casts look copy-pasted from div_us_ulus; confirm intended signedness. */
/* NOTE(review): this excerpt is truncated -- the opening "({" and closing
   "})" of the statement expression are not visible here. */
#define div_ss_slss(__x,__y) \
   unsigned long __z=(__x); \
   __asm__ ("divxs.w %2,%0": "=r"(__z) \
       : "0" (__z), "r" ((unsigned short)(__y)) : "cc"); \
   (unsigned short)__z; \
/* muldiv_us(x,y,z): compute (x*y)/z in unsigned arithmetic -- the product
   is widened to unsigned long and divided with the unsigned 32/16 divide.
   muldiv_ss(x,y,z): signed counterpart using a signed long product and
   the signed 32/16 divide. */
/* FIX(review): the original bodies were swapped -- muldiv_us invoked the
   signed divide div_ss_slss on a (long) product while muldiv_ss invoked
   the unsigned div_us_ulus on an (unsigned long) product.  The divides
   are now matched to the signedness each macro's name promises. */
#define muldiv_us(__x,__y,__z) \
   div_us_ulus((unsigned long)(__x)*(__y),__z)

#define muldiv_ss(__x,__y,__z) \
   div_ss_slss((long)(__x)*(__y),__z)
/* Power down modes support */

/* Enter the CPU sleep (power-down) state until the next interrupt; the
   "memory" clobber keeps the compiler from deferring pending stores past
   the sleep. */
#define __cpu_sleep() __asm__ __volatile__ ("sleep": : : "memory")
/* IRQ handling code */

/* Interrupt enable/disable.  With _USE_EXR_LEVELS defined, the 3-bit
   interrupt mask level in the EXR register is used; otherwise the single
   I flag in CCR. */
//#define _USE_EXR_LEVELS 1

#ifdef _USE_EXR_LEVELS

/* Enable interrupts: clear the EXR interrupt-level bits (level 0). */
#define __sti() __asm__ __volatile__ ("andc #0xf8,exr": : : "memory")

/* Disable interrupts: raise the EXR level to 7 (mask everything). */
#define __cli() __asm__ __volatile__ ("orc #0x07,exr": : : "memory")

/* Save the current EXR value into x. */
/* NOTE(review): this excerpt is truncated -- the statement-expression
   brackets and the assignment of __exr into x are not visible here. */
#define __save_flags(x) \
   unsigned short __exr; \
   __asm__ __volatile__("stc exr,%0":"=m" (__exr) : :"memory"); \

/* Restore EXR from a value previously saved by __save_flags. */
/* NOTE(review): excerpt truncated -- closing bracket lines missing. */
#define __restore_flags(x) \
   unsigned short __exr=(x); \
   __asm__ __volatile__("ldc %0,exr": :"m" (__exr) :"memory"); \

#else /* _USE_EXR_LEVELS */

/* Enable interrupts: clear the CCR I flag. */
#define __sti() __asm__ __volatile__ ("andc #0x7f,ccr": : : "memory")

/* Disable interrupts: set the CCR I flag. */
#define __cli() __asm__ __volatile__ ("orc #0x80,ccr": : : "memory")

/* Save the current CCR value into x. */
/* NOTE(review): this excerpt is truncated -- the statement-expression
   brackets and the assignment of __ccr into x are not visible here. */
#define __save_flags(x) \
   unsigned short __ccr; \
   __asm__ __volatile__("stc ccr,%0":"=m" (__ccr) : :"memory"); \

/* Restore CCR from x.  The "cc" clobber is declared because CCR holds the
   condition codes; the EXR variant declares no "cc" -- an inconsistency
   worth confirming against the full file. */
#define __restore_flags(x) \
   unsigned short __ccr=(x); \
   __asm__ __volatile__("ldc %0,ccr": :"m" (__ccr) :"cc","memory"); \

#endif /* _USE_EXR_LEVELS */
/* No vector base register on this target -- __get_vbr always yields 0. */
#define __get_vbr(x) 0

/* Read the current stack pointer into x. */
#define __get_sp(x) __asm__ __volatile__("mov.l sp,%0":"=r" (x) : :"cc")

/* Compiler-only barrier: forbids the compiler from caching or reordering
   memory accesses across this point; emits no instruction. */
#define __memory_barrier() \
   __asm__ __volatile__("": : : "memory")

/* Linux-kernel-style aliases for the low-level IRQ primitives above. */
#define cli() __cli()
#define sti() __sti()

#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(flags) do { save_flags(flags); cli(); } while(0)
284 /* this struct defines the way the registers are stored on the
285 stack during a system call. */
304 unsigned vector : 12;
326 unsigned vector : 12;
330 typedef struct irq_handler {
331 void (*handler)(int, void *, struct pt_regs *);
335 struct irq_handler *next;
/* Handler chain heads and original vector contents, one slot per
   interrupt vector. */
/* NOTE(review): these are tentative definitions in a header -- every
   includer emits them.  They should probably be declared 'extern' here
   with a single definition in a .c file; left untouched pending a check
   of the implementation file. */
irq_handler_t *irq_array[NR_IRQS];
void *irq_vec[NR_IRQS];

/* Attach 'handler' to interrupt vector vectno; returns a status code
   (success convention not visible here -- confirm in the implementation). */
int add_irq_handler(int vectno,irq_handler_t *handler);

/* Read the current entry of exception vector vectnum. */
void *excptvec_get(int vectnum);

/* Install 'vect' into exception vector vectnum; returns a pointer --
   presumably the previous entry, confirm in the implementation. */
void *excptvec_set(int vectnum,void *vect);

/* Fill the exception vector table with fill_vect (all entries when
   force_all is nonzero); return value convention not visible here. */
int excptvec_initfill(void *fill_vect, int force_all);
/* Bit-field helpers for a contiguous mask:
     __val2mfld(mask,val) -- position val inside the field selected by mask,
     __mfld2val(mask,val) -- extract that field from val.
   (mask)&~((mask)<<1) isolates the lowest set bit of the mask, i.e. the
   field's scale factor, so the multiply/divide act as shift left/right. */
#define __val2mfld(mask,val) ((((val)*((mask)&~((mask)<<1)))&(mask)))
#define __mfld2val(mask,val) ((((val)&(mask))/((mask)&~((mask)<<1))))
353 #endif /* _H8S_CPU_DEF_H */