mirror of
https://git.FreeBSD.org/src.git
synced 2024-12-12 09:58:36 +00:00
Make vm_page's PG_ZERO flag immutable between the time of the page's
allocation and deallocation.  This flag's principal use is shortly after
allocation.  For such cases, clearing the flag is pointless.  The only
unusual use of PG_ZERO is in vfs_bio_clrbuf().  However, allocbuf() never
requests a prezeroed page.  So, vfs_bio_clrbuf() never sees a prezeroed
page.

Reviewed by:	tegge@
This commit is contained in:
parent
2fb3498126
commit
5a32489377
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=128992
@@ -464,8 +464,6 @@ nwfs_getpages(ap)
 		nextoff = toff + PAGE_SIZE;
 		m = pages[i];
 
-		m->flags &= ~PG_ZERO;
-
 		if (nextoff <= size) {
 			m->valid = VM_PAGE_BITS_ALL;
 			m->dirty = 0;
@@ -517,8 +517,6 @@ smbfs_getpages(ap)
 		nextoff = toff + PAGE_SIZE;
 		m = pages[i];
 
-		m->flags &= ~PG_ZERO;
-
 		if (nextoff <= size) {
 			/*
 			 * Read operation filled an entire page
@@ -780,8 +780,6 @@ spec_getpages(ap)
 		nextoff = toff + PAGE_SIZE;
 		m = ap->a_m[i];
 
-		m->flags &= ~PG_ZERO;
-
 		if (nextoff <= nread) {
 			m->valid = VM_PAGE_BITS_ALL;
 			vm_page_undirty(m);
@@ -1893,7 +1893,6 @@ do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
 			if (error)
 			VM_OBJECT_LOCK(obj);
 			vm_page_lock_queues();
-			vm_page_flag_clear(pg, PG_ZERO);
 			vm_page_io_finish(pg);
 			mbstat.sf_iocnt++;
 		}
@@ -1336,9 +1336,6 @@ brelse(struct buf * bp)
 			int had_bogus = 0;
 
 			m = bp->b_pages[i];
-			vm_page_lock_queues();
-			vm_page_flag_clear(m, PG_ZERO);
-			vm_page_unlock_queues();
 
 			/*
 			 * If we hit a bogus page, fixup *all* the bogus pages
@@ -1582,7 +1579,6 @@ vfs_vmio_release(bp)
 			continue;
 
 		if (m->wire_count == 0) {
-			vm_page_flag_clear(m, PG_ZERO);
 			/*
 			 * Might as well free the page if we can and it has
 			 * no valid data.  We also free the page if the
@@ -2326,10 +2322,8 @@ vfs_setdirty(struct buf *bp)
 	 * test the pages to see if they have been modified directly
 	 * by users through the VM system.
 	 */
-	for (i = 0; i < bp->b_npages; i++) {
-		vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
+	for (i = 0; i < bp->b_npages; i++)
 		vm_page_test_dirty(bp->b_pages[i]);
-	}
 
 	/*
 	 * Calculate the encompassing dirty range, boffset and eoffset,
@@ -2919,7 +2913,6 @@ allocbuf(struct buf *bp, int size)
 			    (cnt.v_free_min + cnt.v_cache_min))) {
 				pagedaemon_wakeup();
 			}
-			vm_page_flag_clear(m, PG_ZERO);
 			vm_page_wire(m);
 			vm_page_unlock_queues();
 			bp->b_pages[bp->b_npages] = m;
@@ -3233,7 +3226,6 @@ bufdone(struct buf *bp)
 			if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
 				vfs_page_set_valid(bp, foff, i, m);
 			}
-			vm_page_flag_clear(m, PG_ZERO);
 
 			/*
 			 * when debugging new filesystems or buffer I/O methods, this
@@ -3316,7 +3308,6 @@ vfs_unbusy_pages(struct buf * bp)
 			pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
 		}
 		vm_object_pip_subtract(obj, 1);
-		vm_page_flag_clear(m, PG_ZERO);
 		vm_page_io_finish(m);
 	}
 	vm_page_unlock_queues();
@@ -3402,7 +3393,6 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
 	for (i = 0; i < bp->b_npages; i++) {
 		vm_page_t m = bp->b_pages[i];
 
-		vm_page_flag_clear(m, PG_ZERO);
 		if ((bp->b_flags & B_CLUSTER) == 0) {
 			vm_object_pip_add(obj, 1);
 			vm_page_io_start(m);
@@ -3579,9 +3569,6 @@ vfs_bio_clrbuf(struct buf *bp)
 			}
 		}
 		bp->b_pages[i]->valid |= mask;
-		vm_page_lock_queues();
-		vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
-		vm_page_unlock_queues();
 	}
 unlock:
 	VM_OBJECT_UNLOCK(bp->b_object);
@@ -215,8 +215,6 @@ nfs_getpages(struct vop_getpages_args *ap)
 		nextoff = toff + PAGE_SIZE;
 		m = pages[i];
 
-		m->flags &= ~PG_ZERO;
-
 		if (nextoff <= size) {
 			/*
 			 * Read operation filled an entire page
@@ -1483,7 +1483,6 @@ swp_pager_async_iodone(struct buf *bp)
 				 * interrupt.
 				 */
 				m->valid = 0;
-				vm_page_flag_clear(m, PG_ZERO);
 				if (i != bp->b_pager.pg_reqpage)
 					vm_page_free(m);
 				else
@@ -1516,8 +1515,6 @@ swp_pager_async_iodone(struct buf *bp)
 			 * that existed in the old swapper for a time before
 			 * it got ripped out due to precisely this problem.
 			 *
-			 * clear PG_ZERO in page.
-			 *
 			 * If not the requested page then deactivate it.
 			 *
 			 * Note that the requested page, reqpage, is left
@@ -1529,7 +1526,6 @@ swp_pager_async_iodone(struct buf *bp)
 			pmap_clear_modify(m);
 			m->valid = VM_PAGE_BITS_ALL;
 			vm_page_undirty(m);
-			vm_page_flag_clear(m, PG_ZERO);
 
 			/*
 			 * We have to wake specifically requested pages
@@ -898,7 +898,6 @@ RetryFault:;
 	}
 	mtx_unlock(&Giant);
 	vm_page_lock_queues();
-	vm_page_flag_clear(fs.m, PG_ZERO);
 	vm_page_flag_set(fs.m, PG_REFERENCED);
 
 	/*
@@ -513,7 +513,6 @@ vnode_pager_input_smlfs(object, m)
 	sf_buf_free(sf);
 	vm_page_lock_queues();
 	pmap_clear_modify(m);
-	vm_page_flag_clear(m, PG_ZERO);
 	vm_page_unlock_queues();
 	if (error) {
 		return VM_PAGER_ERROR;
@@ -586,7 +585,6 @@ vnode_pager_input_old(object, m)
 	vm_page_lock_queues();
 	pmap_clear_modify(m);
 	vm_page_undirty(m);
-	vm_page_flag_clear(m, PG_ZERO);
 	vm_page_unlock_queues();
 	if (!error)
 		m->valid = VM_PAGE_BITS_ALL;
@@ -884,7 +882,6 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 				/* vm_page_zero_invalid(mt, FALSE); */
 			}
 
-			vm_page_flag_clear(mt, PG_ZERO);
 			if (i != reqpage) {
 
 				/*
Loading…
Reference in New Issue
Block a user