1
0
mirror of https://git.FreeBSD.org/src.git synced 2025-01-12 14:29:28 +00:00

The IO_NOWDRAIN and B_NOWDRAIN hacks are no longer needed to prevent

deadlocks with vnode backed md(4) devices because md now uses a
kthread to run the bio requests instead of doing it directly from
the bio down path.
This commit is contained in:
Poul-Henning Kamp 2003-05-31 16:42:45 +00:00
parent cf459fc35c
commit 17a1391990
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=115456
7 changed files with 7 additions and 27 deletions

View File

@ -497,8 +497,7 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
auio.uio_td = curthread; auio.uio_td = curthread;
/* /*
* When reading set IO_DIRECT to try to avoid double-caching * When reading set IO_DIRECT to try to avoid double-caching
* the data. When writing IO_DIRECT is not optimal, but we * the data. When writing IO_DIRECT is not optimal.
* must set IO_NOWDRAIN to avoid a wdrain deadlock.
*/ */
if (bp->bio_cmd == BIO_READ) { if (bp->bio_cmd == BIO_READ) {
vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread); vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
@ -506,7 +505,7 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
} else { } else {
(void) vn_start_write(sc->vnode, &mp, V_WAIT); (void) vn_start_write(sc->vnode, &mp, V_WAIT);
vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread); vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
error = VOP_WRITE(sc->vnode, &auio, IO_NOWDRAIN, sc->cred); error = VOP_WRITE(sc->vnode, &auio, 0, sc->cred);
vn_finished_write(mp); vn_finished_write(mp);
} }
VOP_UNLOCK(sc->vnode, 0, curthread); VOP_UNLOCK(sc->vnode, 0, curthread);

View File

@ -882,13 +882,10 @@ bwrite(struct buf * bp)
int rtval = bufwait(bp); int rtval = bufwait(bp);
brelse(bp); brelse(bp);
return (rtval); return (rtval);
} else if ((oldflags & B_NOWDRAIN) == 0) { } else {
/* /*
* don't allow the async write to saturate the I/O * don't allow the async write to saturate the I/O
* system. Deadlocks can occur only if a device strategy * system. We will not deadlock here because
* routine (like in MD) turns around and issues another
* high-level write, in which case B_NOWDRAIN is expected
* to be set. Otherwise we will not deadlock here because
* we are blocking waiting for I/O that is already in-progress * we are blocking waiting for I/O that is already in-progress
* to complete. * to complete.
*/ */
@ -1461,8 +1458,7 @@ brelse(struct buf * bp)
if (bp->b_bufsize || bp->b_kvasize) if (bp->b_bufsize || bp->b_kvasize)
bufspacewakeup(); bufspacewakeup();
bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
B_DIRECT | B_NOWDRAIN);
if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY)) if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
panic("brelse: not dirty"); panic("brelse: not dirty");
/* unlock */ /* unlock */

View File

@ -839,7 +839,7 @@ cluster_wbuild(vp, size, start_lbn, len)
bp->b_data = (char *)((vm_offset_t)bp->b_data | bp->b_data = (char *)((vm_offset_t)bp->b_data |
((vm_offset_t)tbp->b_data & PAGE_MASK)); ((vm_offset_t)tbp->b_data & PAGE_MASK));
bp->b_flags |= B_CLUSTER | bp->b_flags |= B_CLUSTER |
(tbp->b_flags & (B_VMIO | B_NEEDCOMMIT | B_NOWDRAIN)); (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
bp->b_iodone = cluster_callback; bp->b_iodone = cluster_callback;
pbgetvp(vp, bp); pbgetvp(vp, bp);
/* /*

View File

@ -973,12 +973,6 @@ nfs_write(struct vop_write_args *ap)
} }
vfs_bio_set_validclean(bp, on, n); vfs_bio_set_validclean(bp, on, n);
} }
/*
* If IO_NOWDRAIN then set B_NOWDRAIN (nfs-backed MD
* filesystem)
*/
if (ioflag & IO_NOWDRAIN)
bp->b_flags |= B_NOWDRAIN;
/* /*
* If IO_SYNC do bwrite(). * If IO_SYNC do bwrite().

View File

@ -206,10 +206,6 @@ struct buf {
* sticky until the buffer is released and typically * sticky until the buffer is released and typically
* only has an effect when B_RELBUF is also set. * only has an effect when B_RELBUF is also set.
* *
* B_NOWDRAIN This flag should be set when a device (like MD)
* does a turn-around VOP_WRITE from its strategy
* routine. This flag prevents bwrite() from blocking
* in wdrain, avoiding a deadlock situation.
*/ */
#define B_AGE 0x00000001 /* Move to age queue when I/O done. */ #define B_AGE 0x00000001 /* Move to age queue when I/O done. */
@ -223,7 +219,7 @@ struct buf {
#define B_00000100 0x00000100 /* Available flag. */ #define B_00000100 0x00000100 /* Available flag. */
#define B_DONE 0x00000200 /* I/O completed. */ #define B_DONE 0x00000200 /* I/O completed. */
#define B_EINTR 0x00000400 /* I/O was interrupted */ #define B_EINTR 0x00000400 /* I/O was interrupted */
#define B_NOWDRAIN 0x00000800 /* Avoid wdrain deadlock */ #define B_00000800 0x00000800 /* Available flag. */
#define B_00001000 0x00001000 /* Available flag. */ #define B_00001000 0x00001000 /* Available flag. */
#define B_INVAL 0x00002000 /* Does not contain valid info. */ #define B_INVAL 0x00002000 /* Does not contain valid info. */
#define B_LOCKED 0x00004000 /* Locked in core (not reusable). */ #define B_LOCKED 0x00004000 /* Locked in core (not reusable). */

View File

@ -279,7 +279,6 @@ struct vattr {
#define IO_INVAL 0x0040 /* invalidate after I/O */ #define IO_INVAL 0x0040 /* invalidate after I/O */
#define IO_ASYNC 0x0080 /* bawrite rather than bdwrite */ #define IO_ASYNC 0x0080 /* bawrite rather than bdwrite */
#define IO_DIRECT 0x0100 /* attempt to bypass buffer cache */ #define IO_DIRECT 0x0100 /* attempt to bypass buffer cache */
#define IO_NOWDRAIN 0x0200 /* do not block on wdrain */
#define IO_EXT 0x0400 /* operate on external attributes */ #define IO_EXT 0x0400 /* operate on external attributes */
#define IO_NORMAL 0x0800 /* operate on regular data */ #define IO_NORMAL 0x0800 /* operate on regular data */
#define IO_NOMACCHECK 0x1000 /* MAC checks unnecessary */ #define IO_NOMACCHECK 0x1000 /* MAC checks unnecessary */

View File

@ -702,8 +702,6 @@ ffs_write(ap)
vfs_bio_clrbuf(bp); vfs_bio_clrbuf(bp);
if (ioflag & IO_DIRECT) if (ioflag & IO_DIRECT)
bp->b_flags |= B_DIRECT; bp->b_flags |= B_DIRECT;
if (ioflag & IO_NOWDRAIN)
bp->b_flags |= B_NOWDRAIN;
if (uio->uio_offset + xfersize > ip->i_size) { if (uio->uio_offset + xfersize > ip->i_size) {
ip->i_size = uio->uio_offset + xfersize; ip->i_size = uio->uio_offset + xfersize;
@ -1163,8 +1161,6 @@ ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
vfs_bio_clrbuf(bp); vfs_bio_clrbuf(bp);
if (ioflag & IO_DIRECT) if (ioflag & IO_DIRECT)
bp->b_flags |= B_DIRECT; bp->b_flags |= B_DIRECT;
if (ioflag & IO_NOWDRAIN)
bp->b_flags |= B_NOWDRAIN;
if (uio->uio_offset + xfersize > dp->di_extsize) if (uio->uio_offset + xfersize > dp->di_extsize)
dp->di_extsize = uio->uio_offset + xfersize; dp->di_extsize = uio->uio_offset + xfersize;