Mirror of https://git.FreeBSD.org/src.git

Temporarily undo parts of the struct mount locking commit by jeff.

It is unsafe to hold a mutex across vput/vrele calls.

This will be redone when a better locking strategy is agreed upon.

Discussed with: jeff
Alexander Kabaev 2003-11-01 05:51:54 +00:00
parent bb187251fc
commit 492c1e68fb
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=121847
6 changed files with 10 additions and 34 deletions
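
The unsafe ordering named in the log message is visible in the hunks below: the code being backed out acquired mntvnode_mtx and only then released the vnode, so vput()/vrele() could sleep or re-enter the filesystem while the mutex was held. The fragments below contrast the two orderings. They are an illustrative sketch assembled from lines that appear in this diff (the restart/nvp names follow the ffs_sync() hunks), not a verbatim copy of any single changed file.

	/* Ordering being backed out: the vnode is released with the mutex held. */
	mtx_lock(&mntvnode_mtx);
	if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp)
		restart = 1;
	vput(vp);		/* unsafe: vput() may sleep while mntvnode_mtx is held */
	if (restart)
		goto loop;

	/* Ordering being restored: release first, then relock and recheck. */
	vput(vp);		/* no mutex held across the release */
	mtx_lock(&mntvnode_mtx);
	if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp)
		goto loop;	/* our place in the list may be stale; rescan */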

@@ -814,8 +814,7 @@ msdosfs_sync(mp, waitfor, cred, td)
 		error = VOP_FSYNC(vp, cred, waitfor, td);
 		if (error)
 			allerror = error;
-		VOP_UNLOCK(vp, 0, td);
-		vrele(vp);
+		vput(vp);
 		mtx_lock(&mntvnode_mtx);
 	}
 	mtx_unlock(&mntvnode_mtx);

@@ -933,8 +933,7 @@ ext2_sync(mp, waitfor, cred, td)
 		}
 		if ((error = VOP_FSYNC(vp, cred, waitfor, td)) != 0)
 			allerror = error;
-		VOP_UNLOCK(vp, 0, td);
-		vrele(vp);
+		vput(vp);
 		mtx_lock(&mntvnode_mtx);
 	}
 	mtx_unlock(&mntvnode_mtx);

@@ -933,8 +933,7 @@ ext2_sync(mp, waitfor, cred, td)
 		}
 		if ((error = VOP_FSYNC(vp, cred, waitfor, td)) != 0)
 			allerror = error;
-		VOP_UNLOCK(vp, 0, td);
-		vrele(vp);
+		vput(vp);
 		mtx_lock(&mntvnode_mtx);
 	}
 	mtx_unlock(&mntvnode_mtx);

@@ -908,12 +908,8 @@ vfs_stdsync(mp, waitfor, cred, td)
 		if (error)
 			allerror = error;
-		mtx_lock(&mntvnode_mtx);
-		if (nvp != TAILQ_NEXT(vp, v_nmntvnodes)) {
-			vput(vp);
-			goto loop;
-		}
 		vput(vp);
+		mtx_lock(&mntvnode_mtx);
 	}
 	mtx_unlock(&mntvnode_mtx);

 	return (allerror);

@@ -1112,7 +1112,6 @@ ffs_sync(mp, waitfor, cred, td)
 	struct ufsmount *ump = VFSTOUFS(mp);
 	struct fs *fs;
 	int error, count, wait, lockreq, allerror = 0;
-	int restart;

 	fs = ump->um_fs;
 	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
@@ -1131,7 +1130,6 @@ ffs_sync(mp, waitfor, cred, td)
 		lockreq |= LK_INTERLOCK;
 	mtx_lock(&mntvnode_mtx);
 loop:
-	restart = 0;
 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
 		/*
 		 * If the vnode that we are about to sync is no longer
@@ -1168,12 +1166,9 @@ ffs_sync(mp, waitfor, cred, td)
 		}
 		if ((error = VOP_FSYNC(vp, cred, waitfor, td)) != 0)
 			allerror = error;
-		VOP_UNLOCK(vp, 0, td);
+		vput(vp);
 		mtx_lock(&mntvnode_mtx);
 		if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp)
-			restart = 1;
-		vrele(vp);
-		if (restart)
 			goto loop;
 	}
 	mtx_unlock(&mntvnode_mtx);

@@ -411,7 +411,6 @@ quotaon(td, mp, type, fname)
 	struct dquot *dq;
 	int error, flags;
 	struct nameidata nd;
-	int restart;

 	error = suser_cred(td->td_ucred, PRISON_ROOT);
 	if (error)
@@ -458,7 +457,6 @@ quotaon(td, mp, type, fname)
 	 */
 	mtx_lock(&mntvnode_mtx);
 again:
-	restart = 0;
 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
 		if (vp->v_mount != mp)
 			goto again;
@@ -475,13 +473,11 @@ quotaon(td, mp, type, fname)
 			continue;
 		}
 		error = getinoquota(VTOI(vp));
-		mtx_lock(&mntvnode_mtx);
-		if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
-			restart = 1;
 		vput(vp);
+		mtx_lock(&mntvnode_mtx);
 		if (error)
 			break;
-		if (restart)
+		if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
 			goto again;
 	}
 	mtx_unlock(&mntvnode_mtx);
@@ -505,7 +501,6 @@ quotaoff(td, mp, type)
 	struct ufsmount *ump = VFSTOUFS(mp);
 	struct dquot *dq;
 	struct inode *ip;
-	int restart;
 	int error;

 	error = suser_cred(td->td_ucred, PRISON_ROOT);
@@ -521,7 +516,6 @@ quotaoff(td, mp, type)
 	 */
 	mtx_lock(&mntvnode_mtx);
 again:
-	restart = 0;
 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
 		if (vp->v_mount != mp)
 			goto again;
@@ -542,11 +536,9 @@ quotaoff(td, mp, type)
 		dq = ip->i_dquot[type];
 		ip->i_dquot[type] = NODQUOT;
 		dqrele(vp, dq);
+		vput(vp);
 		mtx_lock(&mntvnode_mtx);
 		if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
-			restart = 1;
-		vput(vp);
-		if (restart)
 			goto again;
 	}
 	mtx_unlock(&mntvnode_mtx);
@@ -739,7 +731,6 @@ qsync(mp)
 	struct thread *td = curthread;		/* XXX */
 	struct vnode *vp, *nextvp;
 	struct dquot *dq;
-	int restart;
 	int i, error;

 	/*
@@ -757,7 +748,6 @@ qsync(mp)
 	 */
 	mtx_lock(&mntvnode_mtx);
 again:
-	restart = 0;
 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nextvp) {
 		if (vp->v_mount != mp)
 			goto again;
@@ -781,11 +771,9 @@ qsync(mp)
 			if (dq != NODQUOT && (dq->dq_flags & DQ_MOD))
 				dqsync(vp, dq);
 		}
-		if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
-			restart = 1;
-		mtx_lock(&mntvnode_mtx);
 		vput(vp);
-		if (restart)
+		mtx_lock(&mntvnode_mtx);
+		if (TAILQ_NEXT(vp, v_nmntvnodes) != nextvp)
 			goto again;
 	}
 	mtx_unlock(&mntvnode_mtx);
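
Taken together, the hunks above put each of these mnt_nvnodelist walks back into the same shape: remember the next element, drop mntvnode_mtx, lock and process the vnode, vput() it with no mutex held, then retake the mutex and restart the scan if the list changed underneath. A condensed outline of that restored shape follows; the names are borrowed from the ffs_sync() hunks, and the vget()/interlock handling and per-filesystem skip checks that the hunks do not show are elided, so treat it as a sketch rather than the exact code.

	mtx_lock(&mntvnode_mtx);
loop:
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
		mtx_unlock(&mntvnode_mtx);
		/* ... vget() the vnode here; on failure, relock and continue ... */
		if ((error = VOP_FSYNC(vp, cred, waitfor, td)) != 0)
			allerror = error;
		vput(vp);			/* released with no mutex held */
		mtx_lock(&mntvnode_mtx);
		if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp)
			goto loop;		/* list changed while unlocked; rescan */
	}
	mtx_unlock(&mntvnode_mtx);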