
Revert "vm_pageout_scans: correct detection of active object"

This reverts commit 3de96d664a.

The problem is that it is possible to reach a state with ref_count ==
1 for a mapped non-anonymous object.  For instance, an anonymous POSIX
shmfd or Linux shmfs object could be mapped, and then the corresponding
file descriptor closed, dropping the object reference owned by the
shmfd/shmfs file.  The check in the inactive scan then assumes that the
object and page are not mapped and frees the page, while in fact the
page is still mapped.
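Purely as an illustration (not part of this change), a minimal userspace
sketch of the POSIX shmfd case described above; SHM_ANON is the FreeBSD
extension for anonymous shared memory objects, and the reference-count
behaviour is as described in this message:

#include <sys/mman.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd;
	char *p;

	/* Create an anonymous POSIX shared memory object. */
	fd = shm_open(SHM_ANON, O_RDWR, 0600);
	if (fd < 0)
		err(1, "shm_open");
	if (ftruncate(fd, getpagesize()) != 0)
		err(1, "ftruncate");

	/* Map it; the mapping takes its own reference on the VM object. */
	p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE, MAP_SHARED,
	    fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");

	/*
	 * Closing the descriptor drops the shmfd's reference to the
	 * object, so only the mapping's reference remains (ref_count ==
	 * 1) even though the pages are still mapped and in use.
	 */
	close(fd);
	p[0] = 1;

	return (0);
}

With ref_count == 1, the reverted check would conclude that such an
object is no longer mapped.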

PR:	261707
Discussed with:	markj
Sponsored by:	The FreeBSD Foundation
MFC after:	now
Konstantin Belousov 2022-02-10 16:50:42 +02:00
parent 79f5d19890
commit b51927b7b0


@@ -712,38 +712,6 @@ vm_pageout_clean(vm_page_t m, int *numpagedout)
 	return (error);
 }
 
-/*
- * Check if the object is active.  Non-anonymous swap objects are
- * always referenced by the owner, for them require ref_count > 1 in
- * order to ignore the ownership ref.
- *
- * Perform an unsynchronized object ref count check.  While
- * the page lock ensures that the page is not reallocated to
- * another object, in particular, one with unmanaged mappings
- * that cannot support pmap_ts_referenced(), two races are,
- * nonetheless, possible:
- * 1) The count was transitioning to zero, but we saw a non-
- *    zero value.  pmap_ts_referenced() will return zero
- *    because the page is not mapped.
- * 2) The count was transitioning to one, but we saw zero.
- *    This race delays the detection of a new reference.  At
- *    worst, we will deactivate and reactivate the page.
- */
-static bool
-vm_pageout_object_act(vm_object_t object)
-{
-	return (object->ref_count >
-	    ((object->flags & (OBJ_SWAP | OBJ_ANON)) == OBJ_SWAP ? 1 : 0));
-}
-
-static int
-vm_pageout_page_ts_referenced(vm_object_t object, vm_page_t m)
-{
-	if (!vm_pageout_object_act(object))
-		return (0);
-	return (pmap_ts_referenced(m));
-}
-
 /*
  * Attempt to launder the specified number of pages.
  *
@@ -838,7 +806,7 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
 		if (vm_page_none_valid(m))
 			goto free_page;
 
-		refs = vm_pageout_page_ts_referenced(object, m);
+		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
 
 		for (old = vm_page_astate_load(m);;) {
 			/*
@@ -858,7 +826,7 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
 			}
 			if (act_delta == 0) {
 				;
-			} else if (vm_pageout_object_act(object)) {
+			} else if (object->ref_count != 0) {
 				/*
 				 * Increase the activation count if the page was
 				 * referenced while in the laundry queue.  This
@@ -1295,8 +1263,20 @@ vm_pageout_scan_active(struct vm_domain *vmd, int page_shortage)
 		 * Test PGA_REFERENCED after calling pmap_ts_referenced() so
 		 * that a reference from a concurrently destroyed mapping is
 		 * observed here and now.
+		 *
+		 * Perform an unsynchronized object ref count check.  While
+		 * the page lock ensures that the page is not reallocated to
+		 * another object, in particular, one with unmanaged mappings
+		 * that cannot support pmap_ts_referenced(), two races are,
+		 * nonetheless, possible:
+		 * 1) The count was transitioning to zero, but we saw a non-
+		 *    zero value.  pmap_ts_referenced() will return zero
+		 *    because the page is not mapped.
+		 * 2) The count was transitioning to one, but we saw zero.
+		 *    This race delays the detection of a new reference.  At
+		 *    worst, we will deactivate and reactivate the page.
 		 */
-		refs = vm_pageout_page_ts_referenced(object, m);
+		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
 
 		old = vm_page_astate_load(m);
 		do {
@@ -1546,7 +1526,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
 		if (vm_page_none_valid(m))
 			goto free_page;
 
-		refs = vm_pageout_page_ts_referenced(object, m);
+		refs = object->ref_count != 0 ? pmap_ts_referenced(m) : 0;
 
 		for (old = vm_page_astate_load(m);;) {
 			/*
@@ -1566,7 +1546,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
 			}
 			if (act_delta == 0) {
 				;
-			} else if (vm_pageout_object_act(object)) {
+			} else if (object->ref_count != 0) {
 				/*
 				 * Increase the activation count if the
 				 * page was referenced while in the
@@ -1604,7 +1584,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int page_shortage)
 		 * mappings allow write access, then the page may still be
 		 * modified until the last of those mappings are removed.
 		 */
-		if (vm_pageout_object_act(object)) {
+		if (object->ref_count != 0) {
 			vm_page_test_dirty(m);
 			if (m->dirty == 0 && !vm_page_try_remove_all(m))
 				goto skip_page;