freebsd/sys/kern/subr_uio.c
Attilio Rao 89f6b8632c Switch the vm_object mutex to be a rwlock. This will enable
further optimizations in the future, where the vm_object lock can be
held in read mode most of the time that the pool of pages resident in
the page cache is accessed only for reading.

The change is mostly mechanical, but a few notes are worth reporting:
* The KPI changes as follows (a usage sketch follows these notes):
  - VM_OBJECT_LOCK() -> VM_OBJECT_WLOCK()
  - VM_OBJECT_TRYLOCK() -> VM_OBJECT_TRYWLOCK()
  - VM_OBJECT_UNLOCK() -> VM_OBJECT_WUNLOCK()
  - VM_OBJECT_LOCK_ASSERT(MA_OWNED) -> VM_OBJECT_ASSERT_WLOCKED()
    (in order to avoid visibility of implementation details)
  - The read-mode operations are added:
    VM_OBJECT_RLOCK(), VM_OBJECT_TRYRLOCK(), VM_OBJECT_RUNLOCK(),
    VM_OBJECT_ASSERT_RLOCKED(), VM_OBJECT_ASSERT_LOCKED()
* The vm/vm_pager.h namespace-pollution avoidance (consumers are forced
  to include sys/mutex.h directly in order to serve its inline functions
  that use VM_OBJECT_LOCK()) means that all vm/vm_pager.h consumers must
  now also include sys/rwlock.h.
* ZFS requires a fairly convoluted fix to pull the FreeBSD rwlocks into
  the compat layer, because a name clash between the FreeBSD and Solaris
  versions must be avoided.
  For this purpose ZFS redefines the vm_object locking functions
  directly, isolating the FreeBSD components in dedicated compat stubs.

This commit heavily breaks the KPI.  Third-party ports must be updated
accordingly (off-hand I can think of VirtualBox, for example).
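
As a rough usage sketch (not part of the commit), a vm_object consumer
written against the old KPI would be converted roughly as below.  The
helper function and its use of resident_page_count are hypothetical;
only the VM_OBJECT_* macros and the extra sys/rwlock.h include reflect
the new KPI:

  #include <sys/param.h>
  #include <sys/lock.h>
  #include <sys/rwlock.h>               /* now required by vm/vm_pager.h consumers */

  #include <vm/vm.h>
  #include <vm/vm_object.h>

  /* Hypothetical helper: inspect an object while holding its lock. */
  static int
  count_resident_pages(vm_object_t obj)
  {
          int n;

          VM_OBJECT_WLOCK(obj);           /* was VM_OBJECT_LOCK(obj) */
          VM_OBJECT_ASSERT_WLOCKED(obj);  /* was VM_OBJECT_LOCK_ASSERT(obj, MA_OWNED) */
          n = obj->resident_page_count;
          VM_OBJECT_WUNLOCK(obj);         /* was VM_OBJECT_UNLOCK(obj) */
          return (n);
  }

Purely read-only consumers can instead use the new VM_OBJECT_RLOCK()/
VM_OBJECT_RUNLOCK() pair where no modification of the object is done.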

Sponsored by:	EMC / Isilon storage division
Reviewed by:	jeff
Reviewed by:	pjd (ZFS specific review)
Discussed with:	alc
Tested by:	pho
2013-03-09 02:32:23 +00:00


/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_zero.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#ifdef SOCKET_SEND_COW
#include <vm/vm_object.h>
#endif

SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
    "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");

static int uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault);

#ifdef SOCKET_SEND_COW
/* Declared in uipc_socket.c */
extern int so_zero_copy_receive;

/*
 * Identify the physical page mapped at the given kernel virtual
 * address.  Insert this physical page into the given address space at
 * the given virtual address, replacing the physical page, if any,
 * that already exists there.
 */
static int
vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
{
        vm_map_t map = mapa;
        vm_page_t kern_pg, user_pg;
        vm_object_t uobject;
        vm_map_entry_t entry;
        vm_pindex_t upindex;
        vm_prot_t prot;
        boolean_t wired;

        KASSERT((uaddr & PAGE_MASK) == 0,
            ("vm_pgmoveco: uaddr is not page aligned"));

        /*
         * Herein the physical page is validated and dirtied.  It is
         * unwired in sf_buf_mext().
         */
        kern_pg = PHYS_TO_VM_PAGE(vtophys(kaddr));
        kern_pg->valid = VM_PAGE_BITS_ALL;
        KASSERT(kern_pg->queue == PQ_NONE && kern_pg->wire_count == 1,
            ("vm_pgmoveco: kern_pg is not correctly wired"));

        if ((vm_map_lookup(&map, uaddr,
            VM_PROT_WRITE, &entry, &uobject,
            &upindex, &prot, &wired)) != KERN_SUCCESS) {
                return(EFAULT);
        }
        VM_OBJECT_WLOCK(uobject);
retry:
        if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
                if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
                        goto retry;
                vm_page_lock(user_pg);
                pmap_remove_all(user_pg);
                vm_page_free(user_pg);
                vm_page_unlock(user_pg);
        } else {
                /*
                 * Even if a physical page does not exist in the
                 * object chain's first object, a physical page from a
                 * backing object may be mapped read only.
                 */
                if (uobject->backing_object != NULL)
                        pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE);
        }
        vm_page_insert(kern_pg, uobject, upindex);
        vm_page_dirty(kern_pg);
        VM_OBJECT_WUNLOCK(uobject);
        vm_map_lookup_done(map, entry);
        return(KERN_SUCCESS);
}
#endif /* SOCKET_SEND_COW */

int
copyin_nofault(const void *udaddr, void *kaddr, size_t len)
{
        int error, save;

        save = vm_fault_disable_pagefaults();
        error = copyin(udaddr, kaddr, len);
        vm_fault_enable_pagefaults(save);
        return (error);
}

int
copyout_nofault(const void *kaddr, void *udaddr, size_t len)
{
        int error, save;

        save = vm_fault_disable_pagefaults();
        error = copyout(kaddr, udaddr, len);
        vm_fault_enable_pagefaults(save);
        return (error);
}

#define PHYS_PAGE_COUNT(len)    (howmany(len, PAGE_SIZE) + 1)

/* Copy 'len' bytes from the kernel buffer 'src' to the physical address 'dst'. */
int
physcopyin(void *src, vm_paddr_t dst, size_t len)
{
        vm_page_t m[PHYS_PAGE_COUNT(len)];
        struct iovec iov[1];
        struct uio uio;
        int i;

        iov[0].iov_base = src;
        iov[0].iov_len = len;
        uio.uio_iov = iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = 0;
        uio.uio_resid = len;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_WRITE;
        for (i = 0; i < PHYS_PAGE_COUNT(len); i++, dst += PAGE_SIZE)
                m[i] = PHYS_TO_VM_PAGE(dst);
        return (uiomove_fromphys(m, dst & PAGE_MASK, len, &uio));
}

/* Copy 'len' bytes from the physical address 'src' to the kernel buffer 'dst'. */
int
physcopyout(vm_paddr_t src, void *dst, size_t len)
{
        vm_page_t m[PHYS_PAGE_COUNT(len)];
        struct iovec iov[1];
        struct uio uio;
        int i;

        iov[0].iov_base = dst;
        iov[0].iov_len = len;
        uio.uio_iov = iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = 0;
        uio.uio_resid = len;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_READ;
        for (i = 0; i < PHYS_PAGE_COUNT(len); i++, src += PAGE_SIZE)
                m[i] = PHYS_TO_VM_PAGE(src);
        return (uiomove_fromphys(m, src & PAGE_MASK, len, &uio));
}

#undef PHYS_PAGE_COUNT

int
uiomove(void *cp, int n, struct uio *uio)
{

        return (uiomove_faultflag(cp, n, uio, 0));
}

int
uiomove_nofault(void *cp, int n, struct uio *uio)
{

        return (uiomove_faultflag(cp, n, uio, 1));
}

static int
uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault)
{
        struct thread *td;
        struct iovec *iov;
        size_t cnt;
        int error, newflags, save;

        td = curthread;
        error = 0;

        KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
            ("uiomove: mode"));
        KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == td,
            ("uiomove proc"));
        if (!nofault)
                WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
                    "Calling uiomove()");

        /* XXX does it make a sense to set TDP_DEADLKTREAT for UIO_SYSSPACE ? */
        newflags = TDP_DEADLKTREAT;
        if (uio->uio_segflg == UIO_USERSPACE && nofault) {
                /*
                 * Fail if a non-spurious page fault occurs.
                 */
                newflags |= TDP_NOFAULTING | TDP_RESETSPUR;
        }
        save = curthread_pflags_set(newflags);

        while (n > 0 && uio->uio_resid) {
                iov = uio->uio_iov;
                cnt = iov->iov_len;
                if (cnt == 0) {
                        uio->uio_iov++;
                        uio->uio_iovcnt--;
                        continue;
                }
                if (cnt > n)
                        cnt = n;

                switch (uio->uio_segflg) {

                case UIO_USERSPACE:
                        maybe_yield();
                        if (uio->uio_rw == UIO_READ)
                                error = copyout(cp, iov->iov_base, cnt);
                        else
                                error = copyin(iov->iov_base, cp, cnt);
                        if (error)
                                goto out;
                        break;

                case UIO_SYSSPACE:
                        if (uio->uio_rw == UIO_READ)
                                bcopy(cp, iov->iov_base, cnt);
                        else
                                bcopy(iov->iov_base, cp, cnt);
                        break;
                case UIO_NOCOPY:
                        break;
                }
                iov->iov_base = (char *)iov->iov_base + cnt;
                iov->iov_len -= cnt;
                uio->uio_resid -= cnt;
                uio->uio_offset += cnt;
                cp = (char *)cp + cnt;
                n -= cnt;
        }
out:
        curthread_pflags_restore(save);
        return (error);
}

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.  Currently, uiomove accepts a signed (n) argument, which
 * is almost definitely a bad thing, so we catch that here as well.  We
 * return a runtime failure, but it might be desirable to generate a runtime
 * assertion failure instead.
 */
int
uiomove_frombuf(void *buf, int buflen, struct uio *uio)
{
        size_t offset, n;

        if (uio->uio_offset < 0 || uio->uio_resid < 0 ||
            (offset = uio->uio_offset) != uio->uio_offset)
                return (EINVAL);
        if (buflen <= 0 || offset >= buflen)
                return (0);
        if ((n = buflen - offset) > IOSIZE_MAX)
                return (EINVAL);
        return (uiomove((char *)buf + offset, n, uio));
}

#ifdef SOCKET_RECV_PFLIP
/*
 * Experimental support for zero-copy I/O
 */
static int
userspaceco(void *cp, u_int cnt, struct uio *uio, int disposable)
{
        struct iovec *iov;
        int error;

        iov = uio->uio_iov;
        if (uio->uio_rw == UIO_READ) {
                if ((so_zero_copy_receive != 0)
                    && ((cnt & PAGE_MASK) == 0)
                    && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
                    && ((uio->uio_offset & PAGE_MASK) == 0)
                    && ((((intptr_t) cp) & PAGE_MASK) == 0)
                    && (disposable != 0)) {
                        /* SOCKET: use page-trading */
                        /*
                         * We only want to call vm_pgmoveco() on
                         * disposeable pages, since it gives the
                         * kernel page to the userland process.
                         */
                        error = vm_pgmoveco(&curproc->p_vmspace->vm_map,
                            (vm_offset_t)cp, (vm_offset_t)iov->iov_base);

                        /*
                         * If we get an error back, attempt
                         * to use copyout() instead.  The
                         * disposable page should be freed
                         * automatically if we weren't able to move
                         * it into userland.
                         */
                        if (error != 0)
                                error = copyout(cp, iov->iov_base, cnt);
                } else {
                        error = copyout(cp, iov->iov_base, cnt);
                }
        } else {
                error = copyin(iov->iov_base, cp, cnt);
        }
        return (error);
}

int
uiomoveco(void *cp, int n, struct uio *uio, int disposable)
{
        struct iovec *iov;
        u_int cnt;
        int error;

        KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
            ("uiomoveco: mode"));
        KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
            ("uiomoveco proc"));

        while (n > 0 && uio->uio_resid) {
                iov = uio->uio_iov;
                cnt = iov->iov_len;
                if (cnt == 0) {
                        uio->uio_iov++;
                        uio->uio_iovcnt--;
                        continue;
                }
                if (cnt > n)
                        cnt = n;

                switch (uio->uio_segflg) {

                case UIO_USERSPACE:
                        maybe_yield();
                        error = userspaceco(cp, cnt, uio, disposable);
                        if (error)
                                return (error);
                        break;

                case UIO_SYSSPACE:
                        if (uio->uio_rw == UIO_READ)
                                bcopy(cp, iov->iov_base, cnt);
                        else
                                bcopy(iov->iov_base, cp, cnt);
                        break;
                case UIO_NOCOPY:
                        break;
                }
                iov->iov_base = (char *)iov->iov_base + cnt;
                iov->iov_len -= cnt;
                uio->uio_resid -= cnt;
                uio->uio_offset += cnt;
                cp = (char *)cp + cnt;
                n -= cnt;
        }
        return (0);
}
#endif /* SOCKET_RECV_PFLIP */

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
        struct iovec *iov;
        char *iov_base;

        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
            "Calling ureadc()");

again:
        if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
                panic("ureadc");
        iov = uio->uio_iov;
        if (iov->iov_len == 0) {
                uio->uio_iovcnt--;
                uio->uio_iov++;
                goto again;
        }
        switch (uio->uio_segflg) {

        case UIO_USERSPACE:
                if (subyte(iov->iov_base, c) < 0)
                        return (EFAULT);
                break;

        case UIO_SYSSPACE:
                iov_base = iov->iov_base;
                *iov_base = c;
                break;

        case UIO_NOCOPY:
                break;
        }
        iov->iov_base = (char *)iov->iov_base + 1;
        iov->iov_len--;
        uio->uio_resid--;
        uio->uio_offset++;
        return (0);
}

/*
 * Copy 'len' bytes into the kernel buffer 'dst' from 'src', which lives in
 * the address space selected by 'seg' (UIO_USERSPACE or UIO_SYSSPACE).
 */
int
copyinfrom(const void * __restrict src, void * __restrict dst, size_t len,
    int seg)
{
        int error = 0;

        switch (seg) {
        case UIO_USERSPACE:
                error = copyin(src, dst, len);
                break;
        case UIO_SYSSPACE:
                bcopy(src, dst, len);
                break;
        default:
                panic("copyinfrom: bad seg %d\n", seg);
        }
        return (error);
}

/*
 * Like copyinfrom(), but copies a NUL-terminated string of at most 'len'
 * bytes and reports the number of bytes copied via 'copied'.
 */
int
copyinstrfrom(const void * __restrict src, void * __restrict dst, size_t len,
    size_t * __restrict copied, int seg)
{
        int error = 0;

        switch (seg) {
        case UIO_USERSPACE:
                error = copyinstr(src, dst, len, copied);
                break;
        case UIO_SYSSPACE:
                error = copystr(src, dst, len, copied);
                break;
        default:
                panic("copyinstrfrom: bad seg %d\n", seg);
        }
        return (error);
}

/*
 * Copy an array of 'iovcnt' iovec structures in from user space into a
 * malloc'ed copy; 'error' is returned if iovcnt exceeds UIO_MAXIOV.
 */
int
copyiniov(const struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
{
        u_int iovlen;

        *iov = NULL;
        if (iovcnt > UIO_MAXIOV)
                return (error);
        iovlen = iovcnt * sizeof (struct iovec);
        *iov = malloc(iovlen, M_IOV, M_WAITOK);
        error = copyin(iovp, *iov, iovlen);
        if (error) {
                free(*iov, M_IOV);
                *iov = NULL;
        }
        return (error);
}

/*
 * Copy an iovec array in from user space and build a struct uio describing
 * it, validating the total length against IOSIZE_MAX.
 */
int
copyinuio(const struct iovec *iovp, u_int iovcnt, struct uio **uiop)
{
        struct iovec *iov;
        struct uio *uio;
        u_int iovlen;
        int error, i;

        *uiop = NULL;
        if (iovcnt > UIO_MAXIOV)
                return (EINVAL);
        iovlen = iovcnt * sizeof (struct iovec);
        uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
        iov = (struct iovec *)(uio + 1);
        error = copyin(iovp, iov, iovlen);
        if (error) {
                free(uio, M_IOV);
                return (error);
        }
        uio->uio_iov = iov;
        uio->uio_iovcnt = iovcnt;
        uio->uio_segflg = UIO_USERSPACE;
        uio->uio_offset = -1;
        uio->uio_resid = 0;
        for (i = 0; i < iovcnt; i++) {
                if (iov->iov_len > IOSIZE_MAX - uio->uio_resid) {
                        free(uio, M_IOV);
                        return (EINVAL);
                }
                uio->uio_resid += iov->iov_len;
                iov++;
        }
        *uiop = uio;
        return (0);
}

/*
 * Duplicate a struct uio, including its iovec array, into a single
 * malloc'ed allocation.
 */
struct uio *
cloneuio(struct uio *uiop)
{
        struct uio *uio;
        int iovlen;

        iovlen = uiop->uio_iovcnt * sizeof (struct iovec);
        uio = malloc(iovlen + sizeof *uio, M_IOV, M_WAITOK);
        *uio = *uiop;
        uio->uio_iov = (struct iovec *)(uio + 1);
        bcopy(uiop->uio_iov, uio->uio_iov, iovlen);
        return (uio);
}

/*
 * Map some anonymous memory in user space of size sz, rounded up to the page
 * boundary.
 */
int
copyout_map(struct thread *td, vm_offset_t *addr, size_t sz)
{
        struct vmspace *vms;
        int error;
        vm_size_t size;

        vms = td->td_proc->p_vmspace;

        /*
         * Map somewhere after heap in process memory.
         */
        PROC_LOCK(td->td_proc);
        *addr = round_page((vm_offset_t)vms->vm_daddr +
            lim_max(td->td_proc, RLIMIT_DATA));
        PROC_UNLOCK(td->td_proc);

        /* round size up to page boundary */
        size = (vm_size_t)round_page(sz);

        error = vm_mmap(&vms->vm_map, addr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_PRIVATE | MAP_ANON, OBJT_DEFAULT, NULL, 0);

        return (error);
}

/*
 * Unmap memory in user space.
 */
int
copyout_unmap(struct thread *td, vm_offset_t addr, size_t sz)
{
        vm_map_t map;
        vm_size_t size;

        if (sz == 0)
                return (0);

        map = &td->td_proc->p_vmspace->vm_map;
        size = (vm_size_t)round_page(sz);

        if (vm_map_remove(map, addr, addr + size) != KERN_SUCCESS)
                return (EINVAL);

        return (0);
}