Use atomic_interrupt_fence() instead of bare __compiler_membar()
for the uses which definitely rely on a membar to synchronize with interrupt handlers. The libc and rtld uses of __compiler_membar() seem to want compiler barriers proper. The barrier in sched_unpin_lite() after the td_pinned decrement seems to be unneeded, so it is removed instead of converted.

Reviewed by:	markj
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D28956
commit b5449c92b4
parent 1d9ba697f9
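
For context, a minimal sketch of how the two primitives can relate; this is illustrative, not the verbatim FreeBSD definitions from <sys/cdefs.h> and <machine/atomic.h>:

	/*
	 * An interrupt handler executes on the interrupted CPU in program
	 * order, so synchronizing with it only requires defeating compiler
	 * reordering; the CPU already appears sequential to code running
	 * on it.
	 */
	#define	__compiler_membar()	__asm __volatile(" " : : : "memory")

	/*
	 * Typically the same compiler barrier, but the name records intent:
	 * callers order against interrupt handlers, and an architecture
	 * needing something stronger can provide it here.
	 */
	#define	atomic_interrupt_fence()	__compiler_membar()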
@@ -366,7 +366,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	 * Check to see if the IPI granted us the lock after all. The load of
 	 * rmp_flags must happen after the tracker is removed from the list.
 	 */
-	__compiler_membar();
+	atomic_interrupt_fence();
 	if (tracker->rmp_flags) {
 		/* Just add back tracker - we hold the lock. */
 		rm_tracker_add(pc, tracker);
@@ -448,7 +448,7 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 
 	td->td_critnest++;	/* critical_enter(); */
 
-	__compiler_membar();
+	atomic_interrupt_fence();
 
 	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
 
@@ -456,7 +456,7 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 
 	sched_pin();
 
-	__compiler_membar();
+	atomic_interrupt_fence();
 
 	td->td_critnest--;
 
@@ -873,17 +873,15 @@ db_show_rm(const struct lock_object *lock)
  * Concurrent writers take turns taking the lock while going off cpu. If this is
  * of concern for your usecase, this is not the right primitive.
  *
- * Neither rms_rlock nor rms_runlock use fences. Instead compiler barriers are
- * inserted to prevert reordering of generated code. Execution ordering is
- * provided with the use of an IPI handler.
+ * Neither rms_rlock nor rms_runlock use thread fences. Instead interrupt
+ * fences are inserted to ensure ordering with the code executed in the IPI
+ * handler.
  *
  * No attempt is made to track which CPUs read locked at least once,
  * consequently write locking sends IPIs to all of them. This will become a
  * problem at some point. The easiest way to lessen it is to provide a bitmap.
  */
 
-#define	rms_int_membar()	__compiler_membar()
-
 #define	RMS_NOOWNER	((void *)0x1)
 #define	RMS_TRANSIENT	((void *)0x2)
 #define	RMS_FLAGMASK	0xf
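
The rewritten comment is the heart of the change: ordering against the write side comes from its IPI handler running on each reader's CPU, so the read path only needs compiler-level ordering. A hedged sketch of that fast-path shape, with a hypothetical stand-in type for the per-CPU state (the real code is in the rms_rlock() hunk below):

	/* Hypothetical stand-in for the per-CPU state in kern_rmlock.c. */
	struct rms_pcpu_sketch {
		int	influx;		/* set while this CPU mutates its count */
		int	readers;	/* per-CPU reader count */
	};

	static void
	rms_rlock_sketch(struct rmslock *rms, struct rms_pcpu_sketch *pcpu)
	{
		critical_enter();		/* no preemption, no migration */
		pcpu->influx = 1;		/* visible to an IPI on this CPU */
		atomic_interrupt_fence();	/* influx store before writers load */
		if (__predict_false(rms->writers > 0)) {
			/* slow path: a writer is draining readers (omitted) */
		}
		atomic_interrupt_fence();	/* writers load before count update */
		pcpu->readers++;
		atomic_interrupt_fence();	/* count update before influx clear */
		pcpu->influx = 0;
		critical_exit();
	}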
@@ -1030,14 +1028,14 @@ rms_rlock(struct rmslock *rms)
 	critical_enter();
 	pcpu = rms_int_pcpu(rms);
 	rms_int_influx_enter(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	if (__predict_false(rms->writers > 0)) {
 		rms_rlock_fallback(rms);
 		return;
 	}
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_readers_inc(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_influx_exit(rms, pcpu);
 	critical_exit();
 }
@@ -1052,15 +1050,15 @@ rms_try_rlock(struct rmslock *rms)
 	critical_enter();
 	pcpu = rms_int_pcpu(rms);
 	rms_int_influx_enter(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	if (__predict_false(rms->writers > 0)) {
 		rms_int_influx_exit(rms, pcpu);
 		critical_exit();
 		return (0);
 	}
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_readers_inc(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_influx_exit(rms, pcpu);
 	critical_exit();
 	return (1);
@@ -1092,14 +1090,14 @@ rms_runlock(struct rmslock *rms)
 	critical_enter();
 	pcpu = rms_int_pcpu(rms);
 	rms_int_influx_enter(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	if (__predict_false(rms->writers > 0)) {
 		rms_runlock_fallback(rms);
 		return;
 	}
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_readers_dec(rms, pcpu);
-	rms_int_membar();
+	atomic_interrupt_fence();
 	rms_int_influx_exit(rms, pcpu);
 	critical_exit();
 }
@@ -38,7 +38,7 @@ sched_pin_lite(struct thread_lite *td)
 
 	KASSERT((struct thread *)td == curthread, ("sched_pin called on non curthread"));
 	td->td_pinned++;
-	__compiler_membar();
+	atomic_interrupt_fence();
 }
 
 static __inline void
@@ -47,9 +47,8 @@ sched_unpin_lite(struct thread_lite *td)
 
 	KASSERT((struct thread *)td == curthread, ("sched_unpin called on non curthread"));
 	KASSERT(td->td_pinned > 0, ("sched_unpin called on non pinned thread"));
-	__compiler_membar();
+	atomic_interrupt_fence();
 	td->td_pinned--;
-	__compiler_membar();
 }
 #endif
 #endif
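
Per the commit message, the barrier after the td_pinned decrement is dropped rather than converted. A sketch of the apparent reasoning (hypothetical wrapper, not the real header):

	static __inline void
	sched_unpin_lite_sketch(struct thread_lite *td)
	{
		/*
		 * The fence keeps the pinned region's accesses from sinking
		 * below the decrement, after which migration is possible again.
		 */
		atomic_interrupt_fence();
		td->td_pinned--;
		/*
		 * Nothing following the decrement has to be ordered against
		 * an interrupt handler, so the old trailing barrier ordered
		 * nothing observable and is simply removed.
		 */
	}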
@@ -1091,7 +1091,7 @@ void resume_all_fs(void);
 	_mpcpu = vfs_mount_pcpu(mp);				\
 	MPASS(mpcpu->mntp_thread_in_ops == 0);			\
 	_mpcpu->mntp_thread_in_ops = 1;				\
-	__compiler_membar();					\
+	atomic_interrupt_fence();				\
 	if (__predict_false(mp->mnt_vfs_ops > 0)) {		\
 		vfs_op_thread_exit_crit(mp, _mpcpu);		\
 		_retval_crit = false;				\
@@ -1111,7 +1111,7 @@ void resume_all_fs(void);
 #define	vfs_op_thread_exit_crit(mp, _mpcpu) do {		\
 	MPASS(_mpcpu == vfs_mount_pcpu(mp));			\
 	MPASS(_mpcpu->mntp_thread_in_ops == 1);			\
-	__compiler_membar();					\
+	atomic_interrupt_fence();				\
 	_mpcpu->mntp_thread_in_ops = 0;				\
 } while (0)
 
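The two mount.h macros above implement the same influx-style handshake as the rms lock. A simplified, non-verbatim sketch of the enter side, written as a function rather than a macro:

	/*
	 * Simplified sketch of vfs_op_thread_enter_crit(); not the real
	 * macro body.
	 */
	static bool
	vfs_op_thread_enter_sketch(struct mount *mp, struct mount_pcpu *mpcpu)
	{
		mpcpu->mntp_thread_in_ops = 1;	/* announce fast-path entry */
		atomic_interrupt_fence();	/* announce before checking writers */
		if (__predict_false(mp->mnt_vfs_ops > 0)) {
			/* a suspending writer is active: undo and fall back */
			atomic_interrupt_fence();
			mpcpu->mntp_thread_in_ops = 0;
			return (false);
		}
		return (true);
	}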
@@ -173,13 +173,13 @@ static __inline void
 sched_pin(void)
 {
 	curthread->td_pinned++;
-	__compiler_membar();
+	atomic_interrupt_fence();
 }
 
 static __inline void
 sched_unpin(void)
 {
-	__compiler_membar();
+	atomic_interrupt_fence();
 	curthread->td_pinned--;
 }
 
@@ -284,7 +284,7 @@ critical_enter(void)
 
 	td = (struct thread_lite *)curthread;
 	td->td_critnest++;
-	__compiler_membar();
+	atomic_interrupt_fence();
 }
 
 static __inline void
@@ -295,9 +295,9 @@ critical_exit(void)
 	td = (struct thread_lite *)curthread;
 	KASSERT(td->td_critnest != 0,
 	    ("critical_exit: td_critnest == 0"));
-	__compiler_membar();
+	atomic_interrupt_fence();
 	td->td_critnest--;
-	__compiler_membar();
+	atomic_interrupt_fence();
 	if (__predict_false(td->td_owepreempt))
 		critical_exit_preempt();
 }
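
Finally, critical_exit() shows why a fence is needed on both sides of the decrement: an interrupt arriving once td_critnest reaches zero may set td_owepreempt, so neither the critical section's accesses nor the td_owepreempt load may cross the decrement. A sketch (hypothetical wrapper, mirroring the hunk above):

	static __inline void
	critical_exit_sketch(struct thread_lite *td)
	{
		atomic_interrupt_fence();	/* critical-section work stays above */
		td->td_critnest--;		/* an interrupt may now preempt us */
		atomic_interrupt_fence();	/* td_owepreempt load stays below */
		if (__predict_false(td->td_owepreempt))
			critical_exit_preempt();	/* pay deferred preemption */
	}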