--- /dev/null
+/*******************************************************************
+ Components for embedded applications built for
+ laboratory and medical instruments firmware
+
+ cpu_def.h - low level CPU support for C programs
+ atomic bit operations, interrupts and exceptions
+
+ Copyright (C) 2001 by Pavel Pisa pisa@cmp.felk.cvut.cz
+ (C) 2002 by PiKRON Ltd. http://www.pikron.com
+
+ Function names and concepts inspired by the Linux kernel
+
+ *******************************************************************/
+
+#ifndef _MSP430_CPU_DEF_H
+#define _MSP430_CPU_DEF_H
+
+/* atomic access routines */
+
+/* Lookup table mapping bit number 0..15 to its mask (1 << nr); used by
+ the __generic_* bit operations when the bit number is not a
+ compile-time constant. Defined in a library source file. */
+extern unsigned int __generic_bit_op_table_0_15[16];
+
+/* Set bit 'nr' (must be 0..15) in the 16-bit word at *v.
+ BIS.W is a single read-modify-write instruction on MSP430, so the
+ update is atomic with respect to interrupts. The "0"(*(v)) matching
+ input tells GCC the old memory value is consumed (archaic form of the
+ modern "+m" constraint). */
+#define __constant_set_bit(nr,v) \
+ ({ __asm__ __volatile__ ("bis.w %1,%0" : "=m" (*(v)) : "i" (1<<(nr)),"0"(*(v))); })
+
+/* Run-time 'nr': fetch the mask from the lookup table instead of
+ encoding it as an immediate. */
+#define __generic_set_bit(nr,v) \
+ ({ __asm__ __volatile__ ("bis.w %1,%0" : "=m" (*(v)) : "g" (__generic_bit_op_table_0_15[(nr)]),"0"(*(v))); })
+
+/* set_bit(nr,v): dispatch to the immediate form when 'nr' is a
+ compile-time constant, otherwise to the table-driven form. */
+#define set_bit(nr,v) \
+ (__builtin_constant_p(nr) ? \
+ __constant_set_bit(nr, v) : \
+ __generic_set_bit(nr, v))
+
+/* Clear bit 'nr' (must be 0..15) in the 16-bit word at *v.
+ BIC.W is a single read-modify-write instruction on MSP430, hence
+ interrupt-safe; mirrors the set_bit family above. */
+#define __constant_clear_bit(nr,v) \
+ ({ __asm__ __volatile__ ("bic.w %1,%0" : "=m" (*(v)) : "i" (1<<(nr)),"0"(*(v))); })
+
+/* Run-time 'nr': mask comes from the lookup table. */
+#define __generic_clear_bit(nr,v) \
+ ({ __asm__ __volatile__ ("bic.w %1,%0" : "=m" (*(v)) : "g" (__generic_bit_op_table_0_15[(nr)]),"0"(*(v))); })
+
+/* clear_bit(nr,v): constant 'nr' uses the immediate form, otherwise
+ the table-driven form. */
+#define clear_bit(nr,v) \
+ (__builtin_constant_p(nr) ? \
+ __constant_clear_bit(nr, v) : \
+ __generic_clear_bit(nr, v))
+
+
+/* Atomic mask operations: OR a mask into (set) or AND a mask out of
+ (clear) the word (_w, 16-bit) or byte (_b, 8-bit) at address v.
+ Each expands to a single BIS/BIC read-modify-write instruction, so
+ the update cannot be torn by an interrupt. */
+#define atomic_clear_mask_w(mask, v) \
+ __asm__ __volatile__ ("bic.w %1,%0" : "=m" (*(v)) : "ig" (mask),"0"(*(v)))
+
+#define atomic_set_mask_w(mask, v) \
+ __asm__ __volatile__ ("bis.w %1,%0" : "=m" (*(v)) : "ig" (mask),"0"(*(v)))
+
+#define atomic_clear_mask_b(mask, v) \
+ __asm__ __volatile__ ("bic.b %1,%0" : "=m" (*(v)) : "ig" (mask),"0"(*(v)))
+
+#define atomic_set_mask_b(mask, v) \
+ __asm__ __volatile__ ("bis.b %1,%0" : "=m" (*(v)) : "ig" (mask),"0"(*(v)))
+
+/* Generic and size-suffixed aliases; the 16-bit word forms are the
+ default. */
+#define atomic_clear_mask atomic_clear_mask_w
+#define atomic_set_mask atomic_set_mask_w
+#define atomic_clear_mask_w1 atomic_clear_mask_w
+#define atomic_set_mask_w1 atomic_set_mask_w
+#define atomic_clear_mask_b1 atomic_clear_mask_b
+#define atomic_set_mask_b1 atomic_set_mask_b
+
+/* atomic_read_long(p): tear-free read of a 32-bit value on this 16-bit
+ CPU without disabling interrupts. Both variants use the same scheme:
+ read the high half, then the low half, and retry if the high half
+ changed in between (an interrupt updated *p mid-read). */
+#if 0
+/* Portable C reference implementation, disabled in favour of the asm
+ version below. Assumes little-endian word layout: s[0] = low word,
+ s[1] = high word. */
+long
+atomic_read_long(volatile long *p)
+{
+ unsigned short l, h;
+ volatile unsigned short *s=(volatile unsigned short *)p;
+
+ do{
+ h=s[1];
+ l=s[0];
+ }while(h!=s[1]);
+
+ return l|((long)h<<16);
+}
+#else
+/* MSP430 asm version. %B[l]/%A[l] address the high/low 16-bit halves
+ of the 32-bit register pair 'l'; the loop (jne 1b) repeats until the
+ high word at p+2 is unchanged across the read of the low word.
+ "=&r" (early-clobber) keeps 'l' out of the register holding p. */
+static inline long
+atomic_read_long(volatile long *p)
+{
+ long l;
+
+ __asm__ (
+"1: mov.w 2(%[p]),%B[l]\n"
+" mov.w @%[p],%A[l]\n"
+" cmp.w 2(%[p]),%B[l]\n"
+" jne 1b\n"
+ : [l] "=&r" (l) : [p] "r" (p) : "cc"
+ );
+
+ return l;
+}
+#endif
+
+/* Port access routines */
+
+/* Linux-style memory-mapped I/O accessors. The volatile-qualified
+ dereference forces a real load/store of the exact width at 'addr';
+ each readX statement expression yields the value read, each writeX
+ evaluates to void. */
+#define readb(addr) \
+ ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
+#define readw(addr) \
+ ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
+#define readl(addr) \
+ ({ unsigned long __v = (*(volatile unsigned long *) (addr)); __v; })
+
+#define writeb(b,addr) (void)((*(volatile unsigned char *) (addr)) = (b))
+#define writew(b,addr) (void)((*(volatile unsigned short *) (addr)) = (b))
+#define writel(b,addr) (void)((*(volatile unsigned long *) (addr)) = (b))
+
+
+/* Arithmetic functions */
+
+/* sat_add_slsl(__x,__y): saturating signed 32-bit add, __x += __y.
+ After ADD/ADDC the V flag marks signed overflow; JL tests N^V and,
+ combined with JN, separates negative overflow (clamp to 0x80000000),
+ positive overflow (clamp to 0x7fffffff) and the no-overflow case.
+ Fix: the negative-overflow clamp used "mov.l", which is not an MSP430
+ instruction (H8/300 leftover) and would not assemble; 32-bit constants
+ are loaded as two mov.w halves, as the positive clamp already does.
+ Also dropped the stray trailing backslash after the closing ';'. */
+#define sat_add_slsl(__x,__y) \
+ __asm__ (" add.w %A2,%A0 /*sat_add_slsl*/\n" \
+ " addc.w %B2,%B0\n" \
+ " jl 2f\n" \
+ " jn 1f\n" \
+ " jmp 3f\n" \
+"1: mov.w #llo(0x7fffffff),%A0\n" \
+ " mov.w #lhi(0x7fffffff),%B0\n" \
+ " jmp 3f\n" \
+"2: jn 3f\n" \
+ " mov.w #llo(0x80000000),%A0\n" \
+ " mov.w #lhi(0x80000000),%B0\n" \
+"3: \n" \
+ : "=g"(__x) \
+ : "0" ((long)(__x)), "g" ((long)(__y)) : "cc");
+
+/* sat_sub_slsl(__x,__y): saturating signed 32-bit subtract, __x -= __y.
+ Same flag scheme as sat_add_slsl: JL (N^V) plus JN pick between the
+ 0x80000000 / 0x7fffffff clamps and the no-overflow path.
+ Fix: "mov.l" -> "mov.w" pairs; mov.l does not exist on MSP430
+ (H8/300 leftover) and would fail to assemble. Stray trailing
+ backslash after the closing ';' removed as well. */
+#define sat_sub_slsl(__x,__y) \
+ __asm__ (" sub.w %A2,%A0 /*sat_sub_slsl*/\n" \
+ " subc.w %B2,%B0\n" \
+ " jl 2f\n" \
+ " jn 1f\n" \
+ " jmp 3f\n" \
+"1: mov.w #llo(0x7fffffff),%A0\n" \
+ " mov.w #lhi(0x7fffffff),%B0\n" \
+ " jmp 3f\n" \
+"2: jn 3f\n" \
+ " mov.w #llo(0x80000000),%A0\n" \
+ " mov.w #lhi(0x80000000),%B0\n" \
+"3: \n" \
+ : "=g"(__x) \
+ : "0" ((long)(__x)), "g" ((long)(__y)) : "cc");
+
+/* sat_add_ssss(__x,__y): saturating signed 16-bit add, __x += __y.
+ Same JL/JN flag scheme as the 32-bit variants; clamps to 0x7fff on
+ positive overflow and 0x8000 on negative overflow.
+ Fix: "mov.l" -> "mov.w"; mov.l is not an MSP430 instruction and the
+ operand is 16-bit anyway. Stray trailing backslash removed. */
+#define sat_add_ssss(__x,__y) \
+ __asm__ (" add.w %2,%0 /*sat_add_ssss*/\n" \
+ " jl 2f\n" \
+ " jn 1f\n" \
+ " jmp 3f\n" \
+"1: mov.w #llo(0x7fff),%0\n" \
+ " jmp 3f\n" \
+"2: jn 3f\n" \
+ " mov.w #llo(0x8000),%0\n" \
+"3: \n" \
+ : "=g"(__x) \
+ : "0" ((short)(__x)), "g" ((short)(__y)) : "cc");
+
+/* sat_sub_ssss(__x,__y): saturating signed 16-bit subtract, __x -= __y.
+ Same JL/JN flag scheme as above; clamps to 0x7fff / 0x8000.
+ Fix: "mov.l" -> "mov.w"; mov.l is not an MSP430 instruction and the
+ operand is 16-bit anyway. Stray trailing backslash removed. */
+#define sat_sub_ssss(__x,__y) \
+ __asm__ (" sub.w %2,%0 /*sat_sub_ssss*/\n" \
+ " jl 2f\n" \
+ " jn 1f\n" \
+ " jmp 3f\n" \
+"1: mov.w #llo(0x7fff),%0\n" \
+ " jmp 3f\n" \
+"2: jn 3f\n" \
+ " mov.w #llo(0x8000),%0\n" \
+"3: \n" \
+ : "=g"(__x) \
+ : "0" ((short)(__x)), "g" ((short)(__y)) : "cc");
+
+
+
+
+/* Compiler-level memory barrier: the "memory" clobber forbids GCC from
+ reordering or caching memory accesses across this point. Emits no
+ instructions and has no hardware ordering effect. */
+#define __memory_barrier() \
+__asm__ __volatile__("": : : "memory")
+
+
+
+
+
+/* NOTE(review): the entire '#if 0' section below is dead code inherited
+ from the H8/300 port this file was derived from (exr/ccr control
+ registers, divxu.w/divxs.w, stc/ldc, the "sleep" opcode); none of it
+ is valid MSP430 assembly. It is compiled out and kept for reference
+ until MSP430 equivalents are written. */
+#if 0
+
+#define div_us_ulus(__x,__y) \
+ ({ \
+ unsigned long __z=(__x); \
+ __asm__ ("divxu.w %2,%0": "=r"(__z) \
+ : "0" (__z), "r" ((unsigned short)(__y)) : "cc"); \
+ (unsigned short)__z; \
+ })
+
+#define div_ss_slss(__x,__y) \
+ ({ \
+ unsigned long __z=(__x); \
+ __asm__ ("divxs.w %2,%0": "=r"(__z) \
+ : "0" (__z), "r" ((unsigned short)(__y)) : "cc"); \
+ (unsigned short)__z; \
+ })
+
+#define muldiv_us(__x,__y,__z) \
+ div_ss_slss((long)(__x)*(__y),__z)
+
+#define muldiv_ss(__x,__y,__z) \
+ div_us_ulus((unsigned long)(__x)*(__y),__z)
+
+/* Power down modes support */
+
+#define __cpu_sleep() __asm__ __volatile__ ("sleep": : : "memory")
+
+/* IRQ handling code */
+
+//#define _USE_EXR_LEVELS 1
+
+#ifdef _USE_EXR_LEVELS
+
+#define __sti() __asm__ __volatile__ ("andc #0xf8,exr": : : "memory")
+
+#define __cli() __asm__ __volatile__ ("orc #0x07,exr": : : "memory")
+
+#define __save_flags(x) \
+ do{ \
+ unsigned short __exr; \
+ __asm__ __volatile__("stc exr,%0":"=m" (__exr) : :"memory"); \
+ (x)=__exr; \
+ }while(0)
+
+#define __restore_flags(x) \
+ do{ \
+ unsigned short __exr=(x); \
+ __asm__ __volatile__("ldc %0,exr": :"m" (__exr) :"memory"); \
+ }while(0)
+
+
+#else /* _USE_EXR_LEVELS */
+
+#define __sti() __asm__ __volatile__ ("andc #0x7f,ccr": : : "memory")
+
+#define __cli() __asm__ __volatile__ ("orc #0x80,ccr": : : "memory")
+
+#define __save_flags(x) \
+ do{ \
+ unsigned short __ccr; \
+ __asm__ __volatile__("stc ccr,%0":"=m" (__ccr) : :"memory"); \
+ (x)=__ccr; \
+ }while(0)
+
+#define __restore_flags(x) \
+ do{ \
+ unsigned short __ccr=(x); \
+ __asm__ __volatile__("ldc %0,ccr": :"m" (__ccr) :"cc","memory"); \
+ }while(0)
+
+#endif /* _USE_EXR_LEVELS */
+
+#define __get_vbr(x) 0
+
+#define __get_sp(x) __asm__ __volatile__("mov.l sp,%0":"=r" (x) : :"cc")
+
+#define __memory_barrier() \
+__asm__ __volatile__("": : : "memory")
+
+#define cli() __cli()
+#define sti() __sti()
+
+#define save_flags(x) __save_flags(x)
+#define restore_flags(x) __restore_flags(x)
+#define save_and_cli(flags) do { save_flags(flags); cli(); } while(0)
+
+#define NR_IRQS 256
+
+#endif
+
+/* this struct defines the way the registers are stored on the
+ stack during a system call. */
+
+/*
+
+#if 0
+struct pt_regs {
+ long d1;
+ long d2;
+ long d3;
+ long d4;
+ long d5;
+ long a0;
+ long a1;
+ long a2;
+ long d0;
+ long orig_d0;
+ unsigned short sr;
+ unsigned long pc;
+ unsigned format : 4;
+ unsigned vector : 12;
+};
+#else
+struct pt_regs {
+ long d0;
+ long d1;
+ long d2;
+ long d3;
+ long d4;
+ long d5;
+ long d6;
+ long d7;
+ long a0;
+ long a1;
+ long a2;
+ long a3;
+ long a4;
+ long a5;
+ long a6;
+ unsigned short sr;
+ unsigned long pc;
+ unsigned format : 4;
+ unsigned vector : 12;
+};
+#endif
+
+typedef struct irq_handler {
+ void (*handler)(int, void *, struct pt_regs *);
+ unsigned long flags;
+ void *dev_id;
+ const char *devname;
+ struct irq_handler *next;
+} irq_handler_t;
+
+irq_handler_t *irq_array[NR_IRQS];
+void *irq_vec[NR_IRQS];
+
+int add_irq_handler(int vectno,irq_handler_t *handler);
+*/
+
+/*
+void *excptvec_get(int vectnum);
+
+void *excptvec_set(int vectnum,void *vect);
+
+int excptvec_initfill(void *fill_vect, int force_all);
+*/
+
+/* Bit-field helpers: (mask)&~((mask)<<1) isolates the lowest set bit
+ of 'mask', i.e. the weight of the field's least significant bit.
+ __val2mfld(mask,val) shifts 'val' into the field position described
+ by 'mask'; __mfld2val(mask,val) extracts it back out. 'mask' must be
+ a contiguous run of set bits. */
+#define __val2mfld(mask,val) (((mask)&~((mask)<<1))*(val)&(mask))
+#define __mfld2val(mask,val) (((val)&(mask))/((mask)&~((mask)<<1)))
+
+/* Linux-style branch-prediction hints: tell GCC which way the
+ condition usually goes so it can lay out the expected path first.
+ The !! normalizes any scalar expression to 0/1. */
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+
+#endif /* _MSP430_CPU_DEF_H */