
Add a helper function to the swapout daemon's deactivation code.

vm_swapout_object_deactivate_pages() is renamed to
vm_swapout_object_deactivate(), and the loop body is moved into the new
vm_swapout_object_deactivate_page().  This makes the code a bit easier
to follow and is in preparation for some functional changes.
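
The helper's "unmap" argument inverts the sense of the old
"remove_mode" variable: remove_mode = 1 suppressed unmapping when the
object had more than one shadow, whereas unmap = true permits it.  A
minimal sketch of the equivalence, paraphrased from the diff below
(illustrative only, not additional code):

	/* Old loop: remove_mode != 0 means "do not unmap". */
	remove_mode = 0;
	if (object->shadow_count > 1)
		remove_mode = 1;
	...
	if (!remove_mode && p->a.act_count == 0)
		(void)vm_page_try_remove_all(p);

	/* New loop: unmap == !remove_mode, passed to the helper. */
	unmap = true;
	if (object->shadow_count > 1)
		unmap = false;
	...
	vm_swapout_object_deactivate_page(pmap, m, unmap);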

Reviewed by:	jeff, kib
Sponsored by:	Netflix, Intel
Differential Revision:	https://reviews.freebsd.org/D22651
commit 9be9ea420e
parent 5cff1f4dc3
Mark Johnston	2019-12-10 18:15:20 +00:00

Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=355587

sys/vm/vm_swapout.c

@@ -165,13 +165,64 @@ static int last_swapin;
 static void swapclear(struct proc *);
 static int swapout(struct proc *);
 static void vm_swapout_map_deactivate_pages(vm_map_t, long);
-static void vm_swapout_object_deactivate_pages(pmap_t, vm_object_t, long);
+static void vm_swapout_object_deactivate(pmap_t, vm_object_t, long);
 static void swapout_procs(int action);
 static void vm_req_vmdaemon(int req);
 static void vm_thread_swapout(struct thread *td);
 
+static void
+vm_swapout_object_deactivate_page(pmap_t pmap, vm_page_t m, bool unmap)
+{
+	int act_delta;
+
+	if (vm_page_tryxbusy(m) == 0)
+		return;
+	VM_CNT_INC(v_pdpages);
+
+	/*
+	 * The page may acquire a wiring after this check.
+	 * The page daemon handles wired pages, so there is
+	 * no harm done if a wiring appears while we are
+	 * attempting to deactivate the page.
+	 */
+	if (vm_page_wired(m) || !pmap_page_exists_quick(pmap, m)) {
+		vm_page_xunbusy(m);
+		return;
+	}
+
+	act_delta = pmap_ts_referenced(m);
+	vm_page_lock(m);
+	if ((m->a.flags & PGA_REFERENCED) != 0) {
+		if (act_delta == 0)
+			act_delta = 1;
+		vm_page_aflag_clear(m, PGA_REFERENCED);
+	}
+	if (!vm_page_active(m) && act_delta != 0) {
+		vm_page_activate(m);
+		m->a.act_count += act_delta;
+	} else if (vm_page_active(m)) {
+		/*
+		 * The page daemon does not requeue pages
+		 * after modifying their activation count.
+		 */
+		if (act_delta == 0) {
+			m->a.act_count -= min(m->a.act_count, ACT_DECLINE);
+			if (unmap && m->a.act_count == 0) {
+				(void)vm_page_try_remove_all(m);
+				vm_page_deactivate(m);
+			}
+		} else {
+			vm_page_activate(m);
+			if (m->a.act_count < ACT_MAX - ACT_ADVANCE)
+				m->a.act_count += ACT_ADVANCE;
+		}
+	} else if (vm_page_inactive(m))
+		(void)vm_page_try_remove_all(m);
+	vm_page_unlock(m);
+	vm_page_xunbusy(m);
+}
+
 /*
- *	vm_swapout_object_deactivate_pages
+ *	vm_swapout_object_deactivate
  *
  *	Deactivate enough pages to satisfy the inactive target
  *	requirements.
@@ -179,12 +230,12 @@ static void vm_thread_swapout(struct thread *td);
  *	The object and map must be locked.
  */
 static void
-vm_swapout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
+vm_swapout_object_deactivate(pmap_t pmap, vm_object_t first_object,
     long desired)
 {
 	vm_object_t backing_object, object;
-	vm_page_t p;
-	int act_delta, remove_mode;
+	vm_page_t m;
+	bool unmap;
 
 	VM_OBJECT_ASSERT_LOCKED(first_object);
 	if ((first_object->flags & OBJ_FICTITIOUS) != 0)
@@ -197,63 +248,19 @@ vm_swapout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
 		    REFCOUNT_COUNT(object->paging_in_progress) > 0)
 			goto unlock_return;
 
-		remove_mode = 0;
+		unmap = true;
 		if (object->shadow_count > 1)
-			remove_mode = 1;
+			unmap = false;
 		/*
 		 * Scan the object's entire memory queue.
 		 */
-		TAILQ_FOREACH(p, &object->memq, listq) {
+		TAILQ_FOREACH(m, &object->memq, listq) {
 			if (pmap_resident_count(pmap) <= desired)
 				goto unlock_return;
 			if (should_yield())
 				goto unlock_return;
-			if (vm_page_tryxbusy(p) == 0)
-				continue;
-			VM_CNT_INC(v_pdpages);
-
-			/*
-			 * The page may acquire a wiring after this check.
-			 * The page daemon handles wired pages, so there is
-			 * no harm done if a wiring appears while we are
-			 * attempting to deactivate the page.
-			 */
-			if (vm_page_wired(p) || !pmap_page_exists_quick(pmap, p)) {
-				vm_page_xunbusy(p);
-				continue;
-			}
-			act_delta = pmap_ts_referenced(p);
-			vm_page_lock(p);
-			if ((p->a.flags & PGA_REFERENCED) != 0) {
-				if (act_delta == 0)
-					act_delta = 1;
-				vm_page_aflag_clear(p, PGA_REFERENCED);
-			}
-			if (!vm_page_active(p) && act_delta != 0) {
-				vm_page_activate(p);
-				p->a.act_count += act_delta;
-			} else if (vm_page_active(p)) {
-				/*
-				 * The page daemon does not requeue pages
-				 * after modifying their activation count.
-				 */
-				if (act_delta == 0) {
-					p->a.act_count -= min(p->a.act_count,
-					    ACT_DECLINE);
-					if (!remove_mode && p->a.act_count == 0) {
-						(void)vm_page_try_remove_all(p);
-						vm_page_deactivate(p);
-					}
-				} else {
-					vm_page_activate(p);
-					if (p->a.act_count < ACT_MAX -
-					    ACT_ADVANCE)
-						p->a.act_count += ACT_ADVANCE;
-				}
-			} else if (vm_page_inactive(p))
-				(void)vm_page_try_remove_all(p);
-			vm_page_unlock(p);
-			vm_page_xunbusy(p);
+
+			vm_swapout_object_deactivate_page(pmap, m, unmap);
 		}
 		if ((backing_object = object->backing_object) == NULL)
 			goto unlock_return;
@@ -307,7 +314,7 @@ vm_swapout_map_deactivate_pages(vm_map_t map, long desired)
 	}
 	if (bigobj != NULL) {
-		vm_swapout_object_deactivate_pages(map->pmap, bigobj, desired);
+		vm_swapout_object_deactivate(map->pmap, bigobj, desired);
 		VM_OBJECT_RUNLOCK(bigobj);
 	}
 
 	/*
@@ -321,8 +328,8 @@ vm_swapout_map_deactivate_pages(vm_map_t map, long desired)
 		obj = tmpe->object.vm_object;
 		if (obj != NULL) {
 			VM_OBJECT_RLOCK(obj);
-			vm_swapout_object_deactivate_pages(map->pmap,
-			    obj, desired);
+			vm_swapout_object_deactivate(map->pmap, obj,
+			    desired);
 			VM_OBJECT_RUNLOCK(obj);
 		}
 	}