/*
 * Copyright (C) 1994 Hamish Macdonald
 *
 * Delay routines, using a pre-computed "loops_per_jiffy" value.
 */
/*
 * Spin for (approximately) 'loops' iterations of a tight
 * decrement-and-branch loop.  The caller converts a time interval to a
 * loop count via the calibrated loops_per_jiffy value.
 */
static inline void __delay(unsigned long loops)
{
#if defined(CONFIG_COLDFIRE)
	/* The ColdFire runs this loop at significantly different speeds
	 * depending upon long word alignment or not.  We'll pad it to
	 * long word alignment which is the faster version.
	 * The 0x4a8e is of course a 'tstl %fp' instruction.  This is better
	 * than using a NOP (0x4e71) instruction because it executes in one
	 * cycle not three and doesn't allow for an arbitrary delay waiting
	 * for bus cycles to finish.  Also fp/a6 isn't likely to cause a
	 * stall waiting for the register to become valid if such is added
	 * to the ColdFire at some stage.
	 */
	__asm__ __volatile__ (	".balignw 4, 0x4a8e\n\t"
				"1: subql #1,%0\n\t"
				"jcc 1b"
		: "=d" (loops) : "0" (loops));
#else
	__asm__ __volatile__ ("1: subql #1,%0; jcc 1b"
		: "=d" (loops) : "0" (loops));
#endif
}
35 extern void __bad_udelay(void);
38 * Use only for very small delays ( < 1 msec). Should probably use a
39 * lookup table, really, as the multiplications take much too long with
40 * short delays. This is a "reasonable" implementation, though (and the
41 * first constant multiplications gets optimized away if the delay is
44 static inline void __const_udelay(unsigned long xloops)
46 #if defined(CONFIG_COLDFIRE)
47 __delay(((((unsigned long long) xloops * loops_per_jiffy))>>32)*HZ);
51 __asm__ ("mulul %2,%0:%1"
52 : "=d" (xloops), "=d" (tmp)
53 : "d" (xloops), "1" (loops_per_jiffy));
/*
 * Delay for 'usecs' microseconds: pre-scale by 2^32/1000000 so that
 * __const_udelay's 64-bit multiply yields the loop count directly.
 */
static inline void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 4295);	/* 2**32 / 1000000 */
}
/* Compile-time-constant delays over 20 ms are rejected at link time via
 * the undefined __bad_udelay(); variable delays fall through to __udelay().
 */
#define udelay(n) (__builtin_constant_p(n) ? \
	((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 4295)) : \
	__udelay(n))
/*
 * Compute (a * b) / c without overflowing the intermediate product:
 * the multiply is done to 64 bits before the divide.
 *
 * NOTE(review): on the non-ColdFire path divul traps if the 64-bit
 * quotient does not fit in 32 bits or if c is zero — callers are
 * expected to guarantee a * b / c fits in an unsigned long.
 */
static inline unsigned long muldiv(unsigned long a, unsigned long b,
				   unsigned long c)
{
#if defined(CONFIG_COLDFIRE)
	return (long)(((unsigned long long)a * b)/c);
#else
	unsigned long tmp;

	/* mulul: 64-bit product in %0:%1 (high:low);
	 * divul: divides that 64-bit value by c, quotient in %1 (a).
	 */
	__asm__ ("mulul %2,%0:%1; divul %3,%0:%1"
		: "=d" (tmp), "=d" (a)
		: "d" (b), "d" (c), "1" (a));
	return a;
#endif
}
82 #endif /* defined(_M68K_DELAY_H) */