1
0
mirror of https://git.FreeBSD.org/src.git synced 2025-01-02 12:20:51 +00:00

Improve the ktrace locking somewhat to reduce overhead:

- Depessimize userret() in kernels where KTRACE is enabled by doing an
  unlocked check of the per-process queue of pending events before
  acquiring any locks.  Previously ktr_userret() unconditionally acquired
  the global ktrace_sx lock on every return to userland for every thread,
  even if ktrace wasn't enabled for the thread.
- Optimize the locking in exit() to first perform an unlocked read of
  p_traceflag to see if ktrace is enabled and only acquire locks and
  tear down ktrace if the test succeeds.  Also, explicitly disable tracing
  before draining any pending events so the pending events actually get
  written out.  The unlocked read is safe because proc lock is acquired
  earlier after single-threading so p_traceflag can't change between then
  and this check (well, it can currently due to a bug in ktrace I will fix
  next, but that race existed prior to this change as well).

Reviewed by:	rwatson
This commit is contained in:
John Baldwin 2007-06-13 20:01:42 +00:00
parent ce0be64687
commit 34a9edafbc
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=170685
2 changed files with 26 additions and 18 deletions

View File

@ -356,26 +356,32 @@ exit1(struct thread *td, int rv)
mtx_unlock(&Giant);
#ifdef KTRACE
/*
* Drain any pending records on the thread and release the trace
* file. It might be better if drain-and-clear were atomic.
* Disable tracing, then drain any pending records and release
* the trace file.
*/
ktrprocexit(td);
PROC_LOCK(p);
mtx_lock(&ktrace_mtx);
p->p_traceflag = 0; /* don't trace the vrele() */
tracevp = p->p_tracevp;
p->p_tracevp = NULL;
tracecred = p->p_tracecred;
p->p_tracecred = NULL;
mtx_unlock(&ktrace_mtx);
PROC_UNLOCK(p);
if (tracevp != NULL) {
locked = VFS_LOCK_GIANT(tracevp->v_mount);
vrele(tracevp);
VFS_UNLOCK_GIANT(locked);
if (p->p_traceflag != 0) {
PROC_LOCK(p);
mtx_lock(&ktrace_mtx);
p->p_traceflag = 0;
mtx_unlock(&ktrace_mtx);
PROC_UNLOCK(p);
ktrprocexit(td);
PROC_LOCK(p);
mtx_lock(&ktrace_mtx);
tracevp = p->p_tracevp;
p->p_tracevp = NULL;
tracecred = p->p_tracecred;
p->p_tracecred = NULL;
mtx_unlock(&ktrace_mtx);
PROC_UNLOCK(p);
if (tracevp != NULL) {
locked = VFS_LOCK_GIANT(tracevp->v_mount);
vrele(tracevp);
VFS_UNLOCK_GIANT(locked);
}
if (tracecred != NULL)
crfree(tracecred);
}
if (tracecred != NULL)
crfree(tracecred);
#endif
/*
* Release reference to text vnode

View File

@ -444,6 +444,8 @@ void
ktruserret(struct thread *td)
{
if (STAILQ_EMPTY(&td->td_proc->p_ktr))
return;
ktrace_enter(td);
sx_xlock(&ktrace_sx);
ktr_drain(td);