path: root/linux/src/include/asm-i386/bitops.h
Diffstat (limited to 'linux/src/include/asm-i386/bitops.h')
-rw-r--r--  linux/src/include/asm-i386/bitops.h  201
1 file changed, 201 insertions(+), 0 deletions(-)
diff --git a/linux/src/include/asm-i386/bitops.h b/linux/src/include/asm-i386/bitops.h
new file mode 100644
index 0000000..e2a4c14
--- /dev/null
+++ b/linux/src/include/asm-i386/bitops.h
@@ -0,0 +1,201 @@
+#ifndef _I386_BITOPS_H
+#define _I386_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ */
+
+/*
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
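+
+/*
+ * A usage sketch illustrating the return-value rule above:
+ *
+ *	unsigned long map = 0;
+ *	int old;
+ *
+ *	old = set_bit(1, &map);		bit was clear: old == 0, map == 2
+ *	old = set_bit(1, &map);		bit was set:   old != 0, map == 2
+ *	old = clear_bit(1, &map);	bit was set:   old != 0, map == 0
+ */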
+
+#ifdef __SMP__
+#define LOCK_PREFIX "lock ; "
+#define SMPVOL volatile
+#else
+#define LOCK_PREFIX ""
+#define SMPVOL
+#endif
+
+/*
+ * Some hacks to defeat gcc over-optimizations: casting the address
+ * to a large dummy struct makes the "=m" operand cover the whole
+ * bitmap, so gcc cannot cache any part of it in registers across
+ * the asm statements below.
+ */
+struct __dummy { unsigned long a[100]; };
+#define ADDR (*(struct __dummy *) addr)
+#define CONST_ADDR (*(const struct __dummy *) addr)
+
+static __inline__ int set_bit(int nr, SMPVOL void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(LOCK_PREFIX
+ "btsl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int clear_bit(int nr, SMPVOL void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(LOCK_PREFIX
+ "btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int change_bit(int nr, SMPVOL void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__(LOCK_PREFIX
+ "btcl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__( LOCK_PREFIX
+ "btsl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"Ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__( LOCK_PREFIX
+ "btrl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"Ir" (nr));
+ return oldbit;
+}
+
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+{
+ int oldbit;
+
+ __asm__ __volatile__( LOCK_PREFIX
+ "btcl %2,%1\n\tsbbl %0,%0"
+ :"=r" (oldbit),"=m" (ADDR)
+ :"Ir" (nr));
+ return oldbit;
+}
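+
+/*
+ * A usage sketch (illustrative only): because the read-modify-write
+ * is atomic, test_and_set_bit can serve as a crude busy-wait lock:
+ *
+ *	static unsigned long lock_word;		(hypothetical variable)
+ *
+ *	while (test_and_set_bit(0, &lock_word))
+ *		;				(spin until bit 0 was clear)
+ *	...critical section...
+ *	clear_bit(0, &lock_word);
+ */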
+
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static __inline__ int test_bit(int nr, const SMPVOL void * addr)
+{
+ return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
+}
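+
+/*
+ * For illustration: test_bit(35, addr) tests bit 3 of the second
+ * 32-bit word, since nr >> 5 selects the word and nr & 31 selects
+ * the bit within it.
+ */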
+
+/*
+ * Find-bit routines..
+ */
+static __inline__ int find_first_zero_bit(void * addr, unsigned size)
+{
+ int d0, d1, d2;
+ int res;
+
+ if (!size)
+ return 0;
+ __asm__("cld\n\t"
+ "movl $-1,%%eax\n\t"
+ "xorl %%edx,%%edx\n\t"
+ "repe; scasl\n\t"
+ "je 1f\n\t"
+ "xorl -4(%%edi),%%eax\n\t"
+ "subl $4,%%edi\n\t"
+ "bsfl %%eax,%%edx\n"
+ "1:\tsubl %%ebx,%%edi\n\t"
+ "shll $3,%%edi\n\t"
+ "addl %%edi,%%edx"
+ :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
+ :"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
+ return res;
+}
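+
+/*
+ * Sketch of the logic above: "repe; scasl" skips over all-ones
+ * words, then bsfl locates the zero bit inside the first word that
+ * differs from -1.  E.g. on the bitmap { 0xffffffff, 0xfffffffb }
+ * the result is 34.  If no zero bit exists, the result is rounded
+ * up to a multiple of 32 and may be >= size, so callers must check.
+ */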
+
+static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
+{
+ unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
+ int set = 0, bit = offset & 31, res;
+
+ if (bit) {
+ /*
+ * Look for a zero in the first word
+ */
+ __asm__("bsfl %1,%0\n\t"
+ "jne 1f\n\t"
+ "movl $32, %0\n"
+ "1:"
+ : "=r" (set)
+ : "r" (~(*p >> bit)));
+ if (set < (32 - bit))
+ return set + offset;
+ set = 32 - bit;
+ p++;
+ }
+ /*
+ * No zero yet, search the remaining full words for a zero
+ */
+ res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
+ return (offset + set + res);
+}
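+
+/*
+ * A usage sketch for iterating over the clear bits of a bitmap:
+ *
+ *	int bit = find_first_zero_bit(map, size);
+ *	while (bit < size) {
+ *		...use bit...
+ *		bit = find_next_zero_bit(map, size, bit + 1);
+ *	}
+ *
+ * (map and size are caller-supplied, shown only for illustration.)
+ */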
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+static __inline__ unsigned long ffz(unsigned long word)
+{
+ __asm__("bsfl %1,%0"
+ :"=r" (word)
+ :"r" (~word));
+ return word;
+}
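+
+/*
+ * For illustration: ffz(0x000000ff) == 8, the index of the lowest
+ * clear bit; bsfl finds the lowest set bit of the complemented word.
+ */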
+
+#ifdef __KERNEL__
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+static __inline__ int ffs(int x)
+{
+ int r;
+
+ __asm__("bsfl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "g" (x));
+ return r+1;
+}
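+
+/*
+ * For illustration: ffs(0) == 0, ffs(1) == 1, ffs(0x8000) == 16.
+ * Unlike ffz above, the result is 1-based, with 0 meaning "no bit
+ * set".
+ */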
+
+/*
+ * hweightN: returns the Hamming weight (i.e. the number
+ * of bits set) of an N-bit word
+ */
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
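+
+/*
+ * For illustration: hweight8(0xf0) == 4.  The generic_hweight*
+ * helpers are presumably provided by the generic bit-counting code
+ * in <linux/bitops.h>; this header only maps the names onto them.
+ */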
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_BITOPS_H */