1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-12-03 09:00:21 +00:00

vn_io_fault() handling of the LOR for i/o into the file-backed buffers

has observable overhead when the buffer pages are not resident or not
mapped.  The overhead comes at least from two factors, one is the
additional work needed to detect the situation, prepare and execute
the rollbacks.  Another is the consequence of splitting the i/o into
batches of held pages, causing filesystems to see a series of
smaller i/o requests instead of a single large request.

Note that the expected case of a resident i/o buffer does not expose
these issues.  Provide prefaulting for userspace i/o buffers,
disabled by default.  I am careful not to enable prefaulting by
default for now, since it would be detrimental for applications
that speculatively pass extra-large buffers of anonymous memory to
avoid dealing with buffer sizing (if such apps exist).

Found and tested by:	bde, emaste
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
This commit is contained in:
Konstantin Belousov 2015-07-31 04:12:51 +00:00
parent 215397449a
commit 8917728875
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=286106

View File

@ -116,6 +116,9 @@ static const int io_hold_cnt = 16;
/* Knob: enable the vn_io_fault() LOR-avoidance path (on by default). */
static int vn_io_fault_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RW,
&vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance");
/* Knob: prefault userspace i/o buffers before the i/o (off by default). */
static int vn_io_fault_prefault = 0;
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_prefault, CTLFLAG_RW,
&vn_io_fault_prefault, 0, "Enable vn_io_fault prefaulting");
/* Read-only counter: number of times the lock-avoidance path triggered. */
static u_long vn_io_faults_cnt;
SYSCTL_ULONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD,
&vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers");
@ -1020,6 +1023,59 @@ vn_io_fault_doio(struct vn_io_fault_args *args, struct uio *uio,
uio->uio_rw);
}
/*
 * Fault in the byte at 'base'.  Read it with fubyte(); for a read-style
 * i/o (data flowing into the buffer) also write it back with subyte()
 * so the page is made writable.  Returns 0 on success, EFAULT if the
 * address cannot be accessed.
 */
static int
vn_io_fault_touch(char *base, const struct uio *uio)
{
	int byte;

	byte = fubyte(base);
	if (byte == -1)
		return (EFAULT);
	if (uio->uio_rw == UIO_READ && subyte(base, byte) == -1)
		return (EFAULT);
	return (0);
}
/*
 * Prefault the pages backing a userspace i/o buffer by touching one
 * byte per page spanned by each iovec: the first byte of every page,
 * plus the last byte of a sub-page-sized iovec remainder.
 * Returns 0 on success, or EFAULT as soon as any byte cannot be
 * accessed; 'resid' tracks the untouched bytes remaining overall.
 */
static int
vn_io_fault_prefault_user(const struct uio *uio)
{
char *base;
const struct iovec *iov;
size_t len;
ssize_t resid;
int error, i;
/* Only userspace buffers can (and need to) be prefaulted this way. */
KASSERT(uio->uio_segflg == UIO_USERSPACE,
("vn_io_fault_prefault userspace"));
error = i = 0;
iov = uio->uio_iov;
resid = uio->uio_resid;
base = iov->iov_base;
len = iov->iov_len;
while (resid > 0) {
/* Touch the first byte of the current page of this iovec. */
/* NOTE(review): a zero-length iovec still gets its base byte
 * touched here — presumably harmless; confirm against callers. */
error = vn_io_fault_touch(base, uio);
if (error != 0)
break;
if (len < PAGE_SIZE) {
/*
 * Less than a page left in this iovec: touch its last
 * byte too (it may sit on a second page), account for
 * the remainder, then advance to the next iovec.
 */
if (len != 0) {
error = vn_io_fault_touch(base + len - 1, uio);
if (error != 0)
break;
resid -= len;
}
if (++i >= uio->uio_iovcnt)
break;
iov = uio->uio_iov + i;
base = iov->iov_base;
len = iov->iov_len;
} else {
/* Full page consumed; step to the next page in place. */
len -= PAGE_SIZE;
base += PAGE_SIZE;
resid -= PAGE_SIZE;
}
}
return (error);
}
/*
* Common code for vn_io_fault(), agnostic to the kind of i/o request.
* Uses vn_io_fault_doio() to make the call to an actual i/o function.
@ -1041,6 +1097,12 @@ vn_io_fault1(struct vnode *vp, struct uio *uio, struct vn_io_fault_args *args,
ssize_t adv;
int error, cnt, save, saveheld, prev_td_ma_cnt;
if (vn_io_fault_prefault) {
error = vn_io_fault_prefault_user(uio);
if (error != 0)
return (error); /* Or ignore ? */
}
prot = uio->uio_rw == UIO_READ ? VM_PROT_WRITE : VM_PROT_READ;
/*