
Move what remains of vm/vm_contig.c into vm/vm_pageout.c, where similar
code resides.  Rename vm_contig_grow_cache() to vm_pageout_grow_cache().

Reviewed by:	kib
Alan Cox 2012-07-18 05:21:34 +00:00
parent edad9799e8
commit 85eeca35b9
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=238561
6 changed files with 142 additions and 241 deletions
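For out-of-tree callers the change is purely mechanical: the function keeps its signature and semantics, and only its name and home change. A minimal sketch of the rename as a caller sees it (tries, low, and high are whatever values the caller already passed):

/* Before this commit: declared in vm/vm_pageout.h, defined in vm/vm_contig.c. */
vm_contig_grow_cache(tries, low, high);

/* After: same signature, now defined in vm/vm_pageout.c. */
vm_pageout_grow_cache(tries, low, high);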

sys/conf/files

@@ -3551,7 +3551,6 @@ vm/sg_pager.c standard
 vm/swap_pager.c standard
 vm/uma_core.c standard
 vm/uma_dbg.c standard
-vm/vm_contig.c standard
 vm/memguard.c optional DEBUG_MEMGUARD
 vm/vm_fault.c standard
 vm/vm_glue.c standard

sys/mips/mips/pmap.c

@@ -1034,9 +1034,9 @@ pmap_grow_direct_page_cache()
 {
 #ifdef __mips_n64
-	vm_contig_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS);
+	vm_pageout_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS);
 #else
-	vm_contig_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
+	vm_pageout_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
 #endif
 }

sys/vm/vm_contig.c (deleted)

@@ -1,231 +0,0 @@
/*-
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* The Mach Operating System project at Carnegie-Mellon University.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
*/
/*-
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
* All rights reserved.
*
* Authors: Avadis Tevanian, Jr., Michael Wayne Young
*
* Permission to use, copy, modify and distribute this software and
* its documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
* FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
static int
vm_contig_launder_page(vm_page_t m, vm_page_t *next, int tries)
{
vm_object_t object;
vm_page_t m_tmp;
struct vnode *vp;
struct mount *mp;
int vfslocked;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (!vm_pageout_page_lock(m, next) || m->hold_count != 0) {
vm_page_unlock(m);
return (EAGAIN);
}
object = m->object;
if (!VM_OBJECT_TRYLOCK(object) &&
(!vm_pageout_fallback_object_lock(m, next) || m->hold_count != 0)) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
return (EAGAIN);
}
if ((m->oflags & VPO_BUSY) != 0 || m->busy != 0) {
if (tries == 0) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
return (EAGAIN);
}
vm_page_sleep(m, "vpctw0");
VM_OBJECT_UNLOCK(object);
vm_page_lock_queues();
return (EBUSY);
}
vm_page_test_dirty(m);
if (m->dirty == 0)
pmap_remove_all(m);
if (m->dirty != 0) {
vm_page_unlock(m);
if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
VM_OBJECT_UNLOCK(object);
return (EAGAIN);
}
if (object->type == OBJT_VNODE) {
vm_page_unlock_queues();
vp = object->handle;
vm_object_reference_locked(object);
VM_OBJECT_UNLOCK(object);
(void) vn_start_write(vp, &mp, V_WAIT);
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VM_OBJECT_LOCK(object);
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(vp, 0);
VFS_UNLOCK_GIANT(vfslocked);
vm_object_deallocate(object);
vn_finished_write(mp);
vm_page_lock_queues();
return (0);
} else if (object->type == OBJT_SWAP ||
object->type == OBJT_DEFAULT) {
vm_page_unlock_queues();
m_tmp = m;
vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC, 0,
NULL, NULL);
VM_OBJECT_UNLOCK(object);
vm_page_lock_queues();
return (0);
}
} else {
vm_page_cache(m);
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(object);
return (EAGAIN);
}
static int
vm_contig_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
{
vm_page_t m, next;
vm_paddr_t pa;
int error;
TAILQ_FOREACH_SAFE(m, &vm_page_queues[queue].pl, pageq, next) {
KASSERT(m->queue == queue,
("vm_contig_launder: page %p's queue is not %d", m, queue));
if ((m->flags & PG_MARKER) != 0)
continue;
pa = VM_PAGE_TO_PHYS(m);
if (pa < low || pa + PAGE_SIZE > high)
continue;
error = vm_contig_launder_page(m, &next, tries);
if (error == 0)
return (TRUE);
if (error == EBUSY)
return (FALSE);
}
return (FALSE);
}
/*
* Increase the number of cached pages. The specified value, "tries",
* determines which categories of pages are cached:
*
* 0: All clean, inactive pages within the specified physical address range
* are cached. Will not sleep.
* 1: The vm_lowmem handlers are called. All inactive pages within
* the specified physical address range are cached. May sleep.
* 2: The vm_lowmem handlers are called. All inactive and active pages
* within the specified physical address range are cached. May sleep.
*/
void
vm_contig_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
{
int actl, actmax, inactl, inactmax;
if (tries > 0) {
/*
* Decrease registered cache sizes. The vm_lowmem handlers
* may acquire locks and/or sleep, so they can only be invoked
* when "tries" is greater than zero.
*/
EVENTHANDLER_INVOKE(vm_lowmem, 0);
/*
* We do this explicitly after the caches have been drained
* above.
*/
uma_reclaim();
}
vm_page_lock_queues();
inactl = 0;
inactmax = cnt.v_inactive_count;
actl = 0;
actmax = tries < 2 ? 0 : cnt.v_active_count;
again:
if (inactl < inactmax && vm_contig_launder(PQ_INACTIVE, tries, low,
high)) {
inactl++;
goto again;
}
if (actl < actmax && vm_contig_launder(PQ_ACTIVE, tries, low, high)) {
actl++;
goto again;
}
vm_page_unlock_queues();
}

sys/vm/vm_kern.c

@@ -239,7 +239,7 @@ kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
 		VM_OBJECT_UNLOCK(object);
 		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
 			vm_map_unlock(map);
-			vm_contig_grow_cache(tries, low, high);
+			vm_pageout_grow_cache(tries, low, high);
 			vm_map_lock(map);
 			VM_OBJECT_LOCK(object);
 			tries++;
@@ -313,7 +313,7 @@ kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
 		VM_OBJECT_UNLOCK(object);
 		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
 			vm_map_unlock(map);
-			vm_contig_grow_cache(tries, low, high);
+			vm_pageout_grow_cache(tries, low, high);
 			vm_map_lock(map);
 			VM_OBJECT_LOCK(object);
 			tries++;

sys/vm/vm_pageout.c

@@ -209,11 +209,14 @@ int vm_page_max_wired;	/* XXX max # of wired pages system-wide */
 SYSCTL_INT(_vm, OID_AUTO, max_wired,
 	CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
+static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
+static boolean_t vm_pageout_launder(int, int, vm_paddr_t, vm_paddr_t);
 #if !defined(NO_SWAPPING)
 static void vm_pageout_map_deactivate_pages(vm_map_t, long);
 static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
 static void vm_req_vmdaemon(int req);
 #endif
+static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
 static void vm_pageout_page_stats(void);
 /*
@@ -247,7 +250,7 @@ vm_pageout_init_marker(vm_page_t marker, u_short queue)
  * This function depends on both the lock portion of struct vm_object
  * and normal struct vm_page being type stable.
  */
-boolean_t
+static boolean_t
 vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
 {
 	struct vm_page marker;
@@ -286,7 +289,7 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
  *
  * This function depends on normal struct vm_page being type stable.
  */
-boolean_t
+static boolean_t
 vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
 {
 	struct vm_page marker;
@@ -558,6 +561,138 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
return (numpagedout);
}
static boolean_t
vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
{
struct mount *mp;
struct vnode *vp;
vm_object_t object;
vm_paddr_t pa;
vm_page_t m, m_tmp, next;
int vfslocked;
vm_page_lock_queues();
TAILQ_FOREACH_SAFE(m, &vm_page_queues[queue].pl, pageq, next) {
KASSERT(m->queue == queue,
("vm_pageout_launder: page %p's queue is not %d", m,
queue));
if ((m->flags & PG_MARKER) != 0)
continue;
pa = VM_PAGE_TO_PHYS(m);
if (pa < low || pa + PAGE_SIZE > high)
continue;
if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
vm_page_unlock(m);
continue;
}
object = m->object;
if (!VM_OBJECT_TRYLOCK(object) &&
(!vm_pageout_fallback_object_lock(m, &next) ||
m->hold_count != 0)) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
continue;
}
if ((m->oflags & VPO_BUSY) != 0 || m->busy != 0) {
if (tries == 0) {
vm_page_unlock(m);
VM_OBJECT_UNLOCK(object);
continue;
}
vm_page_sleep(m, "vpctw0");
VM_OBJECT_UNLOCK(object);
return (FALSE);
}
vm_page_test_dirty(m);
if (m->dirty == 0)
pmap_remove_all(m);
if (m->dirty != 0) {
vm_page_unlock(m);
if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
VM_OBJECT_UNLOCK(object);
continue;
}
if (object->type == OBJT_VNODE) {
vm_page_unlock_queues();
vp = object->handle;
vm_object_reference_locked(object);
VM_OBJECT_UNLOCK(object);
(void)vn_start_write(vp, &mp, V_WAIT);
vfslocked = VFS_LOCK_GIANT(vp->v_mount);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VM_OBJECT_LOCK(object);
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(vp, 0);
VFS_UNLOCK_GIANT(vfslocked);
vm_object_deallocate(object);
vn_finished_write(mp);
return (TRUE);
} else if (object->type == OBJT_SWAP ||
object->type == OBJT_DEFAULT) {
vm_page_unlock_queues();
m_tmp = m;
vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC,
0, NULL, NULL);
VM_OBJECT_UNLOCK(object);
return (TRUE);
}
} else {
vm_page_cache(m);
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(object);
}
vm_page_unlock_queues();
return (FALSE);
}
/*
* Increase the number of cached pages. The specified value, "tries",
* determines which categories of pages are cached:
*
* 0: All clean, inactive pages within the specified physical address range
* are cached. Will not sleep.
* 1: The vm_lowmem handlers are called. All inactive pages within
* the specified physical address range are cached. May sleep.
* 2: The vm_lowmem handlers are called. All inactive and active pages
* within the specified physical address range are cached. May sleep.
*/
void
vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
{
int actl, actmax, inactl, inactmax;
if (tries > 0) {
/*
* Decrease registered cache sizes. The vm_lowmem handlers
* may acquire locks and/or sleep, so they can only be invoked
* when "tries" is greater than zero.
*/
EVENTHANDLER_INVOKE(vm_lowmem, 0);
/*
* We do this explicitly after the caches have been drained
* above.
*/
uma_reclaim();
}
inactl = 0;
inactmax = cnt.v_inactive_count;
actl = 0;
actmax = tries < 2 ? 0 : cnt.v_active_count;
again:
if (inactl < inactmax && vm_pageout_launder(PQ_INACTIVE, tries, low,
high)) {
inactl++;
goto again;
}
if (actl < actmax && vm_pageout_launder(PQ_ACTIVE, tries, low, high)) {
actl++;
goto again;
}
}
#if !defined(NO_SWAPPING)
/*
* vm_pageout_object_deactivate_pages
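The comment above vm_pageout_grow_cache() defines an escalation contract for "tries". To make it concrete, here is a hedged sketch of the retry pattern used by callers such as kmem_alloc_attr() and kmem_alloc_contig() in vm/vm_kern.c; try_alloc() is a hypothetical stand-in for the caller's real allocation attempt, not an actual kernel function.

/*
 * Hypothetical caller of vm_pageout_grow_cache(), modeled on the
 * kmem_alloc_attr() retry loop above.
 */
static void *
alloc_in_range(int flags, vm_paddr_t low, vm_paddr_t high)
{
	void *p;
	int tries;

	tries = 0;
retry:
	p = try_alloc(low, high);	/* hypothetical allocation attempt */
	if (p == NULL && tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
		/*
		 * tries == 0: cache clean, inactive pages; won't sleep.
		 * tries == 1: also invoke the vm_lowmem handlers; may sleep.
		 * tries == 2: additionally launder active pages.
		 */
		vm_pageout_grow_cache(tries, low, high);
		tries++;
		goto retry;
	}
	return (p);
}

With M_NOWAIT the loop makes at most one non-sleeping pass (tries == 0); otherwise it escalates through all three levels before giving up.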

sys/vm/vm_pageout.h

@@ -101,10 +101,8 @@ extern void vm_wait(void);
 extern void vm_waitpfault(void);
 #ifdef _KERNEL
-boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
 int vm_pageout_flush(vm_page_t *, int, int, int, int *, boolean_t *);
+void vm_pageout_grow_cache(int, vm_paddr_t, vm_paddr_t);
 void vm_pageout_oom(int shortage);
-boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
-void vm_contig_grow_cache(int, vm_paddr_t, vm_paddr_t);
 #endif
 #endif	/* _VM_VM_PAGEOUT_H_ */