
- Place the gcc memory barrier hint in the right place in the 80386 version
  of atomic_store_rel().
- Use the 80386 versions of atomic_load_acq() and atomic_store_rel() that
  do not use serializing instructions on all UP kernels, since a UP machine
  does not need to synchronize with other CPUs.  This trims lots of cycles
  from spin locks on UP kernels, among other things.  (A sketch of both
  variants follows below.)
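
As a rough illustration of the barrier placement (a minimal sketch with
hypothetical names, not the committed macro): a release store must keep
earlier memory operations from being reordered past the store, so the
compiler barrier has to come before the assignment, and on a UP x86
machine the plain store itself is then sufficient.

	#include <stdio.h>

	/*
	 * Sketch of a UP i386-style release store.  The empty asm with a
	 * "memory" clobber is only a compiler barrier; it must precede
	 * the store so that earlier writes are not reordered past it by
	 * the compiler.  No serializing instruction is needed on UP.
	 */
	static __inline void
	example_store_rel_int(volatile unsigned int *p, unsigned int v)
	{
		__asm __volatile("" : : : "memory");	/* barrier first */
		*p = v;					/* then the store */
	}

	int
	main(void)
	{
		volatile unsigned int flag = 0;

		example_store_rel_int(&flag, 1);
		printf("flag = %u\n", flag);
		return (0);
	}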

Benchmarked by:	rwatson
John Baldwin 2004-11-11 22:42:25 +00:00
parent 20447d54c4
commit 57621b8b35
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=137591


@@ -172,13 +172,14 @@ atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src)
 #if defined(__GNUC__) || defined(__INTEL_COMPILER)
-#if defined(I386_CPU)
+#if !defined(SMP)
 /*
- * We assume that a = b will do atomic loads and stores.
- *
- * XXX: This is _NOT_ safe on a P6 or higher because it does not guarantee
- * memory ordering.  These should only be used on a 386.
+ * We assume that a = b will do atomic loads and stores.  However, on a
+ * PentiumPro or higher, reads may pass writes, so for that case we have
+ * to use a serializing instruction (i.e. with LOCK) to do the load in
+ * SMP kernels.  For UP kernels, however, the cache of the single processor
+ * is always consistent, so we don't need any memory barriers.
 */
 #define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
 static __inline u_##TYPE \
@@ -190,12 +191,12 @@ atomic_load_acq_##TYPE(volatile u_##TYPE *p) \
 static __inline void \
 atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
 { \
-	*p = v; \
 	__asm __volatile("" : : : "memory"); \
+	*p = v; \
 } \
 struct __hack

-#else /* !defined(I386_CPU) */
+#else /* defined(SMP) */

 #define ATOMIC_STORE_LOAD(TYPE, LOP, SOP) \
 static __inline u_##TYPE \
@@ -224,7 +225,7 @@ atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
 } \
 struct __hack

-#endif /* defined(I386_CPU) */
+#endif /* !defined(SMP) */

 #else /* !(defined(__GNUC__) || defined(__INTEL_COMPILER)) */
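
For contrast with the UP path above, here is a hedged sketch of what the
serializing load described in the new comment looks like on the SMP side
(an assumed shape with a hypothetical name, not the exact
ATOMIC_STORE_LOAD expansion): the load is done with a LOCKed cmpxchg,
which acts as a barrier as a side effect.

	/*
	 * Sketch of an SMP-style acquire load.  cmpxchgl compares %eax
	 * with *p: on a match it stores %eax (the same value) back,
	 * otherwise it loads *p into %eax.  Either way %eax ends up
	 * holding *p, and the LOCK prefix makes the access serializing.
	 */
	static __inline unsigned int
	example_load_acq_int(volatile unsigned int *p)
	{
		unsigned int res = 0;

		__asm __volatile("lock; cmpxchgl %0,%1"
		    : "+a" (res), "+m" (*p)
		    :
		    : "memory");
		return (res);
	}

The LOCKed instruction is exactly the cost this commit stops paying on
UP kernels, where the single processor's cache is always consistent.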