/*
 * Copyright 1992, Linus Torvalds.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 */
11 #ifndef _LINUX_BITOPS_H
12 #error only <linux/bitops.h> can be included directly
15 #include <linux/compiler.h>
/*
 * test_and_set_bit(nr, vaddr): set bit nr in the bitmap at vaddr and
 * return non-zero if it was already set.  Dispatches at compile time:
 * a constant nr takes the immediate-operand path, a variable nr the
 * generic path.
 */
17 #define test_and_set_bit(nr,vaddr) \
18 (__builtin_constant_p(nr) ? \
19 __constant_coldfire_test_and_set_bit(nr, vaddr) : \
20 __generic_coldfire_test_and_set_bit(nr, vaddr))

/*
 * Constant-nr variant.  Bit numbering is big-endian within each 32-bit
 * longword, so byte (nr^31)>>3 of the bitmap holds bit nr; "bset" sets
 * bit (nr&7) of that byte and "sne" latches the previous state into
 * retval (non-zero iff the bit was already set).
 * NOTE(review): this extract is truncated -- the second parameter,
 * braces, retval declaration and return statement are not visible here.
 */
23 static __inline__ int __constant_coldfire_test_and_set_bit(int nr,
27 volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
29 __asm__ __volatile__ ("bset %2,%1; sne %0"
30 : "=d" (retval), "+QUd" (*p)

/*
 * Variable-nr variant: same bset/sne sequence but with a plain "=m"
 * memory operand instead of the byte "+QUd" constraint.
 * NOTE(review): truncated extract; body not fully visible.
 */
35 static __inline__ int __generic_coldfire_test_and_set_bit(int nr,
40 __asm__ __volatile__ ("bset %2,%1; sne %0"
41 : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])

/* The non-atomic variant is simply the same operation on this platform. */
46 #define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
/*
 * set_bit(nr, vaddr): set bit nr in the bitmap at vaddr (no return
 * value).  Compile-time dispatch on whether nr is a constant.
 */
48 #define set_bit(nr,vaddr) \
49 (__builtin_constant_p(nr) ? \
50 __constant_coldfire_set_bit(nr, vaddr) : \
51 __generic_coldfire_set_bit(nr, vaddr))

/*
 * Constant-nr variant: big-endian bit numbering -- byte (nr^31)>>3,
 * bit (nr&7) -- set with a single "bset".
 * NOTE(review): truncated extract; second parameter and braces are not
 * visible here.
 */
53 static __inline__ void __constant_coldfire_set_bit(int nr,
56 volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
57 __asm__ __volatile__ ("bset %1,%0"
58 : "+QUd" (*p) : "di" (nr & 7));

/* Variable-nr variant of set_bit (truncated in this extract). */
61 static __inline__ void __generic_coldfire_set_bit(int nr, volatile void *vaddr)
63 __asm__ __volatile__ ("bset %1,%0"
64 : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])

/* The non-atomic variant aliases the atomic one. */
68 #define __set_bit(nr, vaddr) set_bit(nr, vaddr)
/*
 * test_and_clear_bit(nr, vaddr): clear bit nr and return non-zero if it
 * was previously set.  Same constant/generic dispatch as the set path,
 * using "bclr" in place of "bset".
 */
70 #define test_and_clear_bit(nr, vaddr) \
71 (__builtin_constant_p(nr) ? \
72 __constant_coldfire_test_and_clear_bit(nr, vaddr) : \
73 __generic_coldfire_test_and_clear_bit(nr, vaddr))

/*
 * Constant-nr variant: "bclr" clears bit (nr&7) of byte (nr^31)>>3 and
 * "sne" records the previous value in retval.
 * NOTE(review): truncated extract; parameters, braces and return are
 * not visible here.
 */
75 static __inline__ int __constant_coldfire_test_and_clear_bit(int nr,
79 volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
81 __asm__ __volatile__ ("bclr %2,%1; sne %0"
82 : "=d" (retval), "+QUd" (*p)

/* Variable-nr variant (truncated in this extract). */
88 static __inline__ int __generic_coldfire_test_and_clear_bit(int nr,
93 __asm__ __volatile__ ("bclr %2,%1; sne %0"
94 : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])

/* The non-atomic variant aliases the atomic one. */
100 #define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 * Callers needing ordering use the explicit barriers below.
 */
105 #define smp_mb__before_clear_bit() barrier()
106 #define smp_mb__after_clear_bit() barrier()

/* clear_bit(nr, vaddr): clear bit nr; constant/generic dispatch. */
108 #define clear_bit(nr,vaddr) \
109 (__builtin_constant_p(nr) ? \
110 __constant_coldfire_clear_bit(nr, vaddr) : \
111 __generic_coldfire_clear_bit(nr, vaddr))

/*
 * Constant-nr variant: single "bclr" on byte (nr^31)>>3, bit (nr&7).
 * NOTE(review): truncated extract; braces are not visible here.  Also
 * note the "id" constraint here vs "di" in __constant_coldfire_set_bit
 * -- same constraint set, different order.
 */
113 static __inline__ void __constant_coldfire_clear_bit(int nr,
114 volatile void *vaddr)
116 volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
117 __asm__ __volatile__ ("bclr %1,%0"
118 : "+QUd" (*p) : "id" (nr & 7));

/* Variable-nr variant (truncated in this extract). */
121 static __inline__ void __generic_coldfire_clear_bit(int nr,
122 volatile void *vaddr)
124 __asm__ __volatile__ ("bclr %1,%0"
125 : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])

/* The non-atomic variant aliases the atomic one. */
129 #define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
/*
 * test_and_change_bit(nr, vaddr): toggle bit nr and return non-zero if
 * it was previously set.  Constant/generic dispatch, using "bchg".
 */
131 #define test_and_change_bit(nr, vaddr) \
132 (__builtin_constant_p(nr) ? \
133 __constant_coldfire_test_and_change_bit(nr, vaddr) : \
134 __generic_coldfire_test_and_change_bit(nr, vaddr))

/*
 * Constant-nr variant: "bchg" toggles bit (nr&7) of byte (nr^31)>>3;
 * "sne" records the bit's previous value in retval.
 * NOTE(review): truncated extract; retval declaration, braces and the
 * return statement are not visible here.
 */
136 static __inline__ int __constant_coldfire_test_and_change_bit(int nr,
137 volatile void *vaddr)
140 volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
142 __asm__ __volatile__ ("bchg %2,%1; sne %0"
143 : "=d" (retval), "+QUd" (*p)

/* Variable-nr variant (truncated in this extract). */
149 static __inline__ int __generic_coldfire_test_and_change_bit(int nr,
150 volatile void *vaddr)
154 __asm__ __volatile__ ("bchg %2,%1; sne %0"
155 : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])

/* Non-atomic variants alias the atomic ones.  NOTE(review): __change_bit
 * is defined in terms of change_bit before change_bit's own definition
 * below -- legal for object-like expansion at use time, but worth a look. */
161 #define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
162 #define __change_bit(nr, vaddr) change_bit(nr, vaddr)
/* change_bit(nr, vaddr): toggle bit nr; constant/generic dispatch. */
164 #define change_bit(nr,vaddr) \
165 (__builtin_constant_p(nr) ? \
166 __constant_coldfire_change_bit(nr, vaddr) : \
167 __generic_coldfire_change_bit(nr, vaddr))

/*
 * Constant-nr variant: single "bchg" on byte (nr^31)>>3, bit (nr&7).
 * NOTE(review): truncated extract; braces are not visible here.
 */
169 static __inline__ void __constant_coldfire_change_bit(int nr,
170 volatile void *vaddr)
172 volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
173 __asm__ __volatile__ ("bchg %1,%0"
174 : "+QUd" (*p) : "id" (nr & 7));

/* Variable-nr variant (truncated in this extract). */
177 static __inline__ void __generic_coldfire_change_bit(int nr,
178 volatile void *vaddr)
180 __asm__ __volatile__ ("bchg %1,%0"
181 : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
/*
 * test_bit - non-atomically read bit @nr of the bitmap at @vaddr.
 *
 * The bitmap is indexed as an array of 32-bit longwords: longword
 * nr>>5, bit nr&31 within it.  Returns 1 if the bit is set, 0 if not.
 * (Restored: the opening/closing braces were lost in this extract;
 * the signature and body expression are taken verbatim from it.)
 */
static inline int test_bit(int nr, const unsigned long *vaddr)
{
	return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
/*
 * ffz(word): find the index of the first zero bit in @word.
 * NOTE(review): only the declaration and the accumulator initialisation
 * are visible in this extract; the scan loop and return statement are
 * truncated.  Presumably result counts low-order one bits until the
 * first zero -- confirm against the full source.
 */
191 static __inline__ unsigned long ffz(unsigned long word)
193 unsigned long result = 0;
/* find_next_zero_bit() finds the first zero bit in a bit string of length
 * 'size' bits, starting the search at bit 'offset'.  This is largely based
 * on Linus's ALPHA routines.
 */
/*
 * NOTE(review): this extract is heavily truncated -- braces, the tmp
 * declaration, the partial-word handling, the whole-word scan loop body
 * and the trailing-bits handling are not visible.  Only the skeleton
 * below survives: p indexes the starting longword, result is offset
 * rounded down to a longword boundary, and the final answer is
 * result + ffz(tmp).
 */
206 static __inline__ unsigned long find_next_zero_bit(void *addr,
207 unsigned long size, unsigned long offset)
209 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
210 unsigned long result = offset & ~31UL;
219 tmp |= ~0UL >> (32-offset);
227 while (size & ~31UL) {
241 return result + ffz(tmp);

/* find_first_zero_bit is find_next_zero_bit starting at offset 0.
 * NOTE(review): the macro's continuation line is missing from this
 * extract (the line below ends in a backslash). */
244 #define find_first_zero_bit(addr, size) find_next_zero_bit(((void *)addr), \
/* Ported from include/linux/bitops.h.
 * ffs(x): 1-based index of the least significant set bit; by the generic
 * kernel convention ffs(0) == 0.
 * NOTE(review): the function body is not visible in this extract. */
248 static __inline__ int ffs(int x)

/* __ffs(x): 0-based index of the least significant set bit (meaningless
 * for x == 0, where ffs returns 0 and this evaluates to -1). */
276 #define __ffs(x) (ffs(x) - 1)
/* find_next_bit - find the next set bit in a memory region
 * (from asm-ppc/bitops.h)
 *
 * NOTE(review): this extract is truncated -- braces, the tmp
 * declaration, the partial-word masking context and the whole-word scan
 * loop are not visible.  The surviving skeleton: p indexes the starting
 * 32-bit word, result is offset rounded down to a word boundary, the
 * first word is masked below offset, the last word is masked above
 * size, and the answer is result + __ffs(tmp) (or result + size when
 * nothing is set).
 */
281 static __inline__ unsigned long find_next_bit(const unsigned long *addr,
282 unsigned long size, unsigned long offset)
284 unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
285 unsigned int result = offset & ~31UL;
294 tmp &= ~0UL << offset;
314 tmp &= ~0UL >> (32 - size);
315 if (tmp == 0UL) /* Are any bits set? */
316 return result + size; /* Nope. */
318 return result + __ffs(tmp);

/* find_first_bit is find_next_bit starting at offset 0. */
321 #define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
/* Ported from include/linux/bitops.h.
 * fls(x): "find last set" -- 1-based index of the most significant set
 * bit (generic convention: fls(0) == 0).  Implemented as a binary
 * search narrowing the candidate mask from the top 16 bits down to the
 * top single bit.
 * NOTE(review): truncated extract -- the result accumulator, the shift
 * statements inside each if, the closing braces and the return are not
 * visible here.
 */
326 static __inline__ int fls(int x)
332 if (!(x & 0xffff0000u)) {
336 if (!(x & 0xff000000u)) {
340 if (!(x & 0xf0000000u)) {
344 if (!(x & 0xc0000000u)) {
348 if (!(x & 0x80000000u)) {
355 #include <asm-generic/bitops/fls64.h>
356 #include <asm-generic/bitops/sched.h>
357 #include <asm-generic/bitops/hweight.h>
358 #include <asm-generic/bitops/lock.h>
/*
 * Minix filesystem bitmap helpers: thin wrappers over the native bitops
 * with the address cast to unsigned long *.
 * NOTE(review): the continuation line of minix_find_first_zero_bit is
 * missing from this extract (the first line below ends in a backslash).
 */
360 #define minix_find_first_zero_bit(addr, size) find_next_zero_bit((addr), \
362 #define minix_test_and_set_bit(nr, addr) test_and_set_bit((nr), \
363 (unsigned long *)(addr))
364 #define minix_set_bit(nr, addr) set_bit((nr), \
365 (unsigned long *)(addr))
366 #define minix_test_and_clear_bit(nr, addr) test_and_clear_bit((nr), \
367 (unsigned long *)(addr))

/*
 * minix_test_bit: little-endian-longword test -- word nr>>5 (via the
 * int pointer), bit nr&0x1f.
 * NOTE(review): truncated extract -- braces, the mask declaration and
 * the statement advancing 'a' to word nr>>5 are not visible here.
 */
369 static inline int minix_test_bit(int nr, const volatile unsigned long *vaddr)
371 int *a = (int *)vaddr;
375 mask = 1 << (nr & 0x1f);
376 return ((mask & *a) != 0);
/*
 * ext2 bitmap helpers.  ext2 bitmaps are little-endian byte streams.
 * The native primitives address byte (nr^31)>>3, bit nr&7 (big-endian
 * order); xor-ing nr with 24 flips exactly the byte-select bits, since
 * ((nr^24)^31)>>3 == nr>>3 while (nr^24)&7 == nr&7.  Net effect: byte
 * nr>>3, bit nr&7 -- the little-endian order ext2_test_bit below reads.
 * The _atomic variants ignore their lock argument: the underlying ops
 * already serve as the atomic form here.
 */
379 #define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 24, \
380 (unsigned long *)(addr))
381 #define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, \
382 (unsigned long *)(addr))
383 #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 24, \
384 (unsigned long *)(addr))
385 #define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, \
386 (unsigned long *)(addr))
/*
 * ext2_test_bit - test bit @nr of a little-endian (byte-stream) ext2
 * bitmap at @vaddr: byte nr>>3, bit nr&7 within that byte.
 * Returns 1 if the bit is set, 0 otherwise.
 * (Restored: the opening/closing braces were lost in this extract; the
 * signature and both body statements are taken verbatim from it.)
 */
static inline int ext2_test_bit(int nr, const void *vaddr)
{
	const unsigned char *p = vaddr;
	return (p[nr >> 3] & (1U << (nr & 7))) != 0;
}
/*
 * ext2_find_first_zero_bit: find the first zero bit (little-endian
 * order) in an ext2 bitmap of @size bits.  Skips whole all-ones
 * longwords, then scans the first non-full longword bit-by-bit with
 * ext2_test_bit.
 * NOTE(review): truncated extract -- braces, the res declaration, the
 * empty-size early return, the loop-termination check against the
 * rounded-up word count, and the break out of the bit scan are not
 * visible here.
 */
394 static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size)
396 const unsigned long *p = vaddr, *addr = vaddr;
402 size = (size >> 5) + ((size & 31) > 0);
403 while (*p++ == ~0UL) {
405 return (p - addr) << 5;
409 for (res = 0; res < 32; res++)
410 if (!ext2_test_bit (res, p))
412 return (p - addr) * 32 + res;
/*
 * ext2_find_next_zero_bit: as ext2_find_first_zero_bit, but start the
 * search at bit @offset.  Scans the remainder of the starting longword
 * bit-by-bit, then delegates the remaining whole longwords to
 * ext2_find_first_zero_bit.
 * NOTE(review): truncated extract -- the offset parameter line, braces,
 * and the handling between the first-longword scan and the delegation
 * (advancing p, adjusting for bit == 0) are not visible here.
 */
415 static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size,
418 const unsigned long *addr = vaddr;
419 const unsigned long *p = addr + (offset >> 5);
420 int bit = offset & 31UL, res;
426 /* Look for zero in first longword */
427 for (res = bit; res < 32; res++)
428 if (!ext2_test_bit (res, p))
429 return (p - addr) * 32 + res;
432 /* No zero yet, search remaining full bytes for a zero */
433 res = ext2_find_first_zero_bit(p, size - 32 * (p - addr));
434 return (p - addr) * 32 + res;
439 #endif /* _M68K_BITOPS_H */