
- Remove the test_and_set_bit() macro. It has been unused since r255037.

- Relax the atomic_read() and atomic_set() macros.  Linux does not require any
memory barrier here.  Also, these macros may even be reordered or optimized away
according to the API documentation:

https://www.kernel.org/doc/Documentation/atomic_ops.txt
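
For illustration only (not part of the commit), a minimal userland sketch of what the relaxed macros mean for callers; the frame_count counter and the program around it are made up for this example:

/* Assumes FreeBSD-style u_int from <sys/types.h>. */
#include <sys/types.h>
#include <stdio.h>

/* The relaxed definitions introduced by this change: plain volatile
 * accesses with no acquire/release ordering. */
#define	atomic_read(p)		(*(volatile u_int *)(p))
#define	atomic_set(p, v)	do { *(u_int *)(p) = (v); } while (0)

static u_int frame_count;

int
main(void)
{
	atomic_set(&frame_count, 42);
	/*
	 * A statistics-style read like this only needs the value, not
	 * ordering against surrounding loads and stores, so the relaxed
	 * macro suffices; per atomic_ops.txt, callers that do need
	 * ordering must add explicit barriers themselves.
	 */
	printf("frames: %u\n", atomic_read(&frame_count));
	return (0);
}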
Jung-uk Kim 2013-08-29 19:47:52 +00:00
parent 310915a45a
commit ea4447500d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=255039


@@ -37,8 +37,8 @@ typedef uint64_t atomic64_t;
 #define BITS_TO_LONGS(x) howmany(x, sizeof(long) * NBBY)
-#define atomic_set(p, v) atomic_store_rel_int(p, v)
-#define atomic_read(p) atomic_load_acq_int(p)
+#define atomic_read(p) (*(volatile u_int *)(p))
+#define atomic_set(p, v) do { *(u_int *)(p) = (v); } while (0)
 #define atomic_add(v, p) atomic_add_int(p, v)
 #define atomic_sub(v, p) atomic_subtract_int(p, v)
@@ -63,9 +63,7 @@ typedef uint64_t atomic64_t;
 #define set_bit(b, p) \
     atomic_set_int((volatile u_int *)(p) + (b) / 32, 1 << (b) % 32)
 #define test_bit(b, p) \
-    (atomic_load_acq_int((volatile u_int *)(p) + (b) / 32) & (1 << (b) % 32))
-#define test_and_set_bit(b, p) \
-    atomic_testandset_int((volatile u_int *)(p) + (b) / 32, b)
+    ((atomic_read((volatile u_int *)(p) + (b) / 32) & (1 << (b) % 32)) != 0)
 static __inline int
 find_first_zero_bit(volatile void *p, int max)
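
For illustration (not part of the commit), a small sketch of how the bit macros shown above index into an array of 32-bit words; the flags array and the bit numbers are made up:

#include <sys/types.h>
#include <stdio.h>

#define	atomic_read(p)		(*(volatile u_int *)(p))
#define	test_bit(b, p) \
	((atomic_read((volatile u_int *)(p) + (b) / 32) & (1 << (b) % 32)) != 0)

int
main(void)
{
	u_int flags[2] = { 0, 0 };

	flags[1] |= 1 << 2;	/* set bit 34 (word 1, bit 2) by hand */
	/* (b) / 32 picks the word, (b) % 32 picks the bit within it. */
	printf("bit 34: %d\n", test_bit(34, flags));
	printf("bit 3:  %d\n", test_bit(3, flags));
	return (0);
}

With test_bit() now built on the relaxed atomic_read(), testing a bit is a plain load with no implied ordering, matching the Linux semantics this header emulates.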