
Added ability to detect sequential faults and DTRT (do the right thing). (swap_pager.c)

Added hook for pmap_prefault() and used symbolic constant for the new third
argument to vm_page_alloc(). (vm_fault.c, various)
Changed the way that upages and page tables are held. (vm_glue.c)
Fixed architectural flaw in allocating pages at interrupt time that was
introduced with the merged cache changes. (vm_page.c, various)
Adjusted some algorithms to achieve better paging performance and to
accommodate the fix for the architectural flaw mentioned above. (vm_pageout.c)
Fixed pbuf handling problem; changed policy on handling read-behind pages.
(vnode_pager.c)

Submitted by:	John Dyson
Author:	David Greenman 1995-01-24 10:14:09 +00:00
parent	e5501f43a5
commit	6d40c3d394
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=5841
11 changed files with 105 additions and 78 deletions
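
The sequential-fault detection works by remembering, per VM object, the offset of the last page read in (the new last_read field added to vm_object.h below). A fault counts as sequential when it lands exactly one page past that mark. What follows is a minimal userland sketch of the heuristic, with toy stand-ins for the kernel's vm_object and PAGE_SIZE; it is an illustration, not the kernel code itself:

#include <stdio.h>

#define PAGE_SIZE 4096UL

typedef unsigned long vm_offset_t;

/* Toy stand-in for the kernel's vm_object; only the new field matters. */
struct vm_object {
	vm_offset_t last_read;	/* offset of the last page read in */
};

/*
 * A fault at `offset' is sequential when it lands exactly one page
 * past the last read recorded for the same object.
 */
static int
is_sequential(const struct vm_object *obj, vm_offset_t offset)
{
	return (offset == obj->last_read + PAGE_SIZE);
}

int
main(void)
{
	struct vm_object obj = { 8 * PAGE_SIZE };

	printf("%d\n", is_sequential(&obj, 9 * PAGE_SIZE));	/* 1: sequential */
	printf("%d\n", is_sequential(&obj, 32 * PAGE_SIZE));	/* 0: random */
	return (0);
}

When a read is detected as sequential, swap_pager_input() skips read-behind clustering, as the swap_pager.c hunks below show.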

sys/vm/swap_pager.c

@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.22 1995/01/09 16:05:33 davidg Exp $
+ * $Id: swap_pager.c,v 1.23 1995/01/10 07:32:43 davidg Exp $
*/
/*
@@ -894,6 +894,7 @@ swap_pager_input(swp, m, count, reqpage)
vm_offset_t paging_offset;
vm_object_t object;
int reqaddr[count];
+ int sequential;
int first, last;
int failed;
@@ -901,6 +902,7 @@ swap_pager_input(swp, m, count, reqpage)
object = m[reqpage]->object;
paging_offset = object->paging_offset;
+ sequential = (m[reqpage]->offset == (object->last_read + PAGE_SIZE));
/*
* First determine if the page exists in the pager if this is a sync
* read. This quickly handles cases where we are following shadow
@@ -947,7 +949,7 @@ swap_pager_input(swp, m, count, reqpage)
failed = 0;
first = 0;
for (i = reqpage - 1; i >= 0; --i) {
- if (failed || (reqaddr[i] == SWB_EMPTY) ||
+ if (sequential || failed || (reqaddr[i] == SWB_EMPTY) ||
(swb[i]->swb_valid & (1 << off[i])) == 0 ||
(reqaddr[i] != (reqaddr[reqpage] + (i - reqpage) * btodb(PAGE_SIZE))) ||
((reqaddr[i] / dmmax) != reqdskregion)) {
@@ -1105,6 +1107,7 @@ swap_pager_input(swp, m, count, reqpage)
pmap_qremove(kva, count);
if (spc) {
+ m[reqpage]->object->last_read = m[reqpage]->offset;
if (bp->b_flags & B_WANTED)
wakeup((caddr_t) bp);
/*
@@ -1141,9 +1144,11 @@ swap_pager_input(swp, m, count, reqpage)
* results, it is best to deactivate
* the readahead pages.
*/
- if ((i == reqpage - 1) || (i == reqpage + 1))
+ /*
+ if (sequential || (i == reqpage - 1) || (i == reqpage + 1))
vm_page_activate(m[i]);
else
+ */
vm_page_deactivate(m[i]);
/*
@@ -1155,6 +1160,9 @@ swap_pager_input(swp, m, count, reqpage)
PAGE_WAKEUP(m[i]);
}
}
+ m[reqpage]->object->last_read = m[count-1]->offset;
/*
* If we're out of swap space, then attempt to free
* some whenever pages are brought in. We must clear
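
A design note on the hunks above: readahead pages are now always deactivated after a clustered pagein completes (the old activate-the-neighbors case is commented out), and last_read is advanced to the last page of the cluster. Speculative pages therefore sit on the inactive queue, where they are cheap to reclaim if the guess was wrong. A toy model of that completion path, where deactivate() is a stub standing in for vm_page_deactivate():

#include <stdio.h>

typedef unsigned long vm_offset_t;

struct vm_object {
	vm_offset_t last_read;
};

/* Stub: the kernel would call vm_page_deactivate() here. */
static void
deactivate(vm_offset_t off)
{
	printf("deactivate page at offset %lu\n", off);
}

/*
 * Completion path for a clustered pagein: every page except the one
 * actually faulted on starts life on the inactive queue, and the
 * object remembers how far the cluster reached.
 */
static void
pagein_done(struct vm_object *obj, const vm_offset_t off[], int count, int reqpage)
{
	for (int i = 0; i < count; i++)
		if (i != reqpage)
			deactivate(off[i]);
	obj->last_read = off[count - 1];	/* m[count-1]->offset in the diff */
}

int
main(void)
{
	struct vm_object obj = { 0 };
	vm_offset_t off[] = { 0, 4096, 8192, 12288 };

	pagein_done(&obj, off, 4, 1);
	printf("last_read = %lu\n", obj.last_read);
	return (0);
}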

sys/vm/vm_fault.c

@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.14 1995/01/09 16:05:39 davidg Exp $
+ * $Id: vm_fault.c,v 1.15 1995/01/10 07:32:45 davidg Exp $
*/
/*
@@ -320,7 +320,7 @@ RetryFault:;
* Allocate a new page for this object/offset pair.
*/
- m = vm_page_alloc(object, offset, 0);
+ m = vm_page_alloc(object, offset, VM_ALLOC_NORMAL);
if (m == NULL) {
UNLOCK_AND_DEALLOCATE;
@@ -655,7 +655,7 @@ RetryFault:;
* that the copy_object's pager doesn't have
* the page...
*/
- copy_m = vm_page_alloc(copy_object, copy_offset, 0);
+ copy_m = vm_page_alloc(copy_object, copy_offset, VM_ALLOC_NORMAL);
if (copy_m == NULL) {
/*
* Wait for a page, then retry.
@@ -839,6 +839,8 @@ RetryFault:;
*/
pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
+ if( ((prot & VM_PROT_WRITE) == 0) && change_wiring == 0 && wired == 0)
+ pmap_prefault(map->pmap, vaddr, entry, first_object);
/*
* If the page is not wired down, then put it where the pageout daemon
@@ -905,6 +907,11 @@ vm_fault_wire(map, start, end)
*/
for (va = start; va < end; va += PAGE_SIZE) {
+ if( curproc != pageproc &&
+ (cnt.v_free_count <= cnt.v_pageout_free_min))
+ VM_WAIT;
rv = vm_fault(map, va, VM_PROT_NONE, TRUE);
if (rv) {
if (va != start)
@@ -1019,7 +1026,7 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
*/
vm_object_lock(dst_object);
do {
- dst_m = vm_page_alloc(dst_object, dst_offset, 0);
+ dst_m = vm_page_alloc(dst_object, dst_offset, VM_ALLOC_NORMAL);
if (dst_m == NULL) {
vm_object_unlock(dst_object);
VM_WAIT;
@@ -1215,7 +1222,7 @@ vm_fault_additional_pages(first_object, first_offset, m, rbehind, raheada, marra
*/
for (i = 0; i < size; i++) {
if (i != treqpage)
- rtm = vm_page_alloc(object, startoffset + i * NBPG, 0);
+ rtm = vm_page_alloc(object, startoffset + i * NBPG, VM_ALLOC_NORMAL);
else
rtm = m;
marray[i] = rtm;
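
The pmap_prefault() hook above is gated so that it fires only for plain read faults: write faults would complicate copy-on-write, and wiring operations need exact page accounting. A small sketch of just that gate, using the same flag names as the diff (the surrounding fault logic is omitted):

#include <stdio.h>

#define VM_PROT_READ	0x01
#define VM_PROT_WRITE	0x02

/*
 * Gate from the vm_fault.c hunk: prefault neighboring pages only for
 * a plain read fault -- not when writing (copy-on-write pending) and
 * not when wiring pages down.
 */
static int
should_prefault(int prot, int change_wiring, int wired)
{
	return ((prot & VM_PROT_WRITE) == 0 && change_wiring == 0 && wired == 0);
}

int
main(void)
{
	printf("%d\n", should_prefault(VM_PROT_READ, 0, 0));	/* 1 */
	printf("%d\n", should_prefault(VM_PROT_WRITE, 0, 0));	/* 0 */
	printf("%d\n", should_prefault(VM_PROT_READ, 1, 0));	/* 0 */
	return (0);
}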

sys/vm/vm_glue.c

@@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_glue.c,v 1.11 1995/01/09 16:05:40 davidg Exp $
+ * $Id: vm_glue.c,v 1.12 1995/01/10 07:32:45 davidg Exp $
*/
#include <sys/param.h>
@@ -236,12 +236,6 @@ vm_fork(p1, p2, isvfork)
pmap_extract(vp->pmap, addr + NBPG * i),
VM_PROT_READ | VM_PROT_WRITE, 1);
- /*
- * and allow the UPAGES page table entry to be paged (at the vm system
- * level)
- */
- vm_map_pageable(vp, ptaddr, ptaddr + NBPG, TRUE);
p2->p_addr = up;
/*
@@ -339,9 +333,6 @@ faultin(p)
pa, VM_PROT_READ | VM_PROT_WRITE, 1);
}
- /* and let the page table pages go (at least above pmap level) */
- vm_map_pageable(map, ptaddr, ptaddr + NBPG, TRUE);
s = splhigh();
if (p->p_stat == SRUN)
@@ -491,6 +482,7 @@ swapout(p)
register struct proc *p;
{
vm_map_t map = &p->p_vmspace->vm_map;
+ vm_offset_t ptaddr;
++p->p_stats->p_ru.ru_nswap;
/*
@@ -515,6 +507,9 @@ swapout(p)
vm_map_pageable(map, (vm_offset_t) kstack,
(vm_offset_t) kstack + UPAGES * NBPG, TRUE);
+ ptaddr = trunc_page((u_int) vtopte(kstack));
+ vm_map_pageable(map, ptaddr, ptaddr + NBPG, TRUE);
p->p_flag &= ~P_SWAPPING;
p->p_swtime = 0;
}
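
The vm_glue.c change moves the point at which the kernel-stack page-table page becomes pageable from fork/faultin time to swapout(): the page-table page now stays held while the process is resident and is released for paging only when the process is actually swapped out. The address arithmetic is just trunc_page() applied to the PTE address; a toy illustration follows, where fake_vtopte() is an invented stand-in for the machine-dependent vtopte():

#include <stdio.h>

#define NBPG		4096UL
#define trunc_page(x)	((x) & ~(NBPG - 1))

/*
 * Invented stand-in for the machine-dependent vtopte(): maps a
 * virtual address to the address of its 4-byte page-table entry in a
 * linearly mapped page-table area at an arbitrary made-up base.
 */
static unsigned long
fake_vtopte(unsigned long va)
{
	return (0x10000000UL + (va / NBPG) * 4);
}

int
main(void)
{
	unsigned long kstack = 0xefbfe000UL;	/* made-up stack address */
	unsigned long ptaddr = trunc_page(fake_vtopte(kstack));

	/* swapout() would now vm_map_pageable() this one page. */
	printf("pte at %#lx, page-table page at %#lx\n",
	    fake_vtopte(kstack), ptaddr);
	return (0);
}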

sys/vm/vm_kern.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_kern.c,v 1.7 1994/08/18 22:36:02 wollman Exp $
+ * $Id: vm_kern.c,v 1.8 1995/01/09 16:05:43 davidg Exp $
*/
/*
@@ -177,7 +177,7 @@ kmem_alloc(map, size)
for (i = 0; i < size; i += PAGE_SIZE) {
vm_page_t mem;
- while ((mem = vm_page_alloc(kernel_object, offset + i, 0)) == NULL) {
+ while ((mem = vm_page_alloc(kernel_object, offset + i, VM_ALLOC_NORMAL)) == NULL) {
vm_object_unlock(kernel_object);
VM_WAIT;
vm_object_lock(kernel_object);
@@ -331,7 +331,7 @@ kmem_malloc(map, size, canwait)
*/
vm_object_lock(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {
- m = vm_page_alloc(kmem_object, offset + i, 1);
+ m = vm_page_alloc(kmem_object, offset + i, VM_ALLOC_INTERRUPT);
/*
* Ran out of space, free everything up and return. Don't need

sys/vm/vm_map.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.10 1995/01/09 16:05:45 davidg Exp $
+ * $Id: vm_map.c,v 1.11 1995/01/10 07:32:46 davidg Exp $
*/
/*
@@ -313,7 +313,8 @@ vm_map_entry_create(map)
vm_page_t m;
m = vm_page_alloc(kmem_object,
- mapvm - vm_map_min(kmem_map), 0);
+ mapvm - vm_map_min(kmem_map),
+ (map == kmem_map) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL);
if (m) {
int newentries;

sys/vm/vm_object.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.17 1995/01/11 20:19:20 davidg Exp $
+ * $Id: vm_object.c,v 1.18 1995/01/13 13:30:24 davidg Exp $
*/
/*
@@ -140,6 +140,7 @@ _vm_object_allocate(size, object)
object->flags = OBJ_INTERNAL; /* vm_allocate_with_pager will reset */
object->paging_in_progress = 0;
object->copy = NULL;
+ object->last_read = 0;
/*
* Object starts out read-write, with no pager.

sys/vm/vm_object.h

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.h,v 1.3 1994/11/06 05:07:52 davidg Exp $
+ * $Id: vm_object.h,v 1.4 1995/01/09 16:05:50 davidg Exp $
*/
/*
@@ -98,6 +98,7 @@ struct vm_object {
vm_offset_t paging_offset; /* Offset into paging space */
struct vm_object *shadow; /* My shadow */
vm_offset_t shadow_offset; /* Offset in shadow */
+ vm_offset_t last_read; /* last read in object -- detect seq behavior */
TAILQ_ENTRY(vm_object) cached_list; /* for persistence */
TAILQ_ENTRY(vm_object) reverse_shadow_list; /* chain of objects that are shadowed */
TAILQ_HEAD(rslist, vm_object) reverse_shadow_head; /* objects that this is a shadow for */

sys/vm/vm_page.c

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.15 1995/01/10 09:19:46 davidg Exp $
+ * $Id: vm_page.c,v 1.16 1995/01/15 07:31:34 davidg Exp $
*/
/*
@@ -601,16 +601,22 @@ vm_page_requeue(vm_page_t mem, int flags)
* Allocate and return a memory cell associated
* with this VM object/offset pair.
*
+ * page_req -- 0 normal process request VM_ALLOC_NORMAL
+ * page_req -- 1 interrupt time request VM_ALLOC_INTERRUPT
+ * page_req -- 2 system *really* needs a page VM_ALLOC_SYSTEM
+ * but *cannot* be at interrupt time
*
* Object must be locked.
*/
vm_page_t
- vm_page_alloc(object, offset, inttime)
+ vm_page_alloc(object, offset, page_req)
vm_object_t object;
vm_offset_t offset;
- int inttime;
+ int page_req;
{
register vm_page_t mem;
int s;
int msgflg;
simple_lock(&vm_page_queue_free_lock);
@@ -625,22 +631,20 @@ vm_page_alloc(object, offset, inttime)
splx(s);
return (NULL);
}
- if (inttime) {
+ if (page_req == VM_ALLOC_INTERRUPT) {
if ((mem = vm_page_queue_free.tqh_first) == 0) {
- for (mem = vm_page_queue_cache.tqh_first; mem; mem = mem->pageq.tqe_next) {
- if ((mem->object->flags & OBJ_ILOCKED) == 0) {
- TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
- vm_page_remove(mem);
- cnt.v_cache_count--;
- goto gotpage;
- }
- }
simple_unlock(&vm_page_queue_free_lock);
splx(s);
+ /*
+ * need to wakeup at interrupt time -- it doesn't do VM_WAIT
+ */
+ wakeup((caddr_t) &vm_pages_needed);
return NULL;
}
+ if( cnt.v_free_count < cnt.v_pageout_free_min)
+ wakeup((caddr_t) &vm_pages_needed);
} else {
- if ((cnt.v_free_count < 3) ||
+ if ((cnt.v_free_count < cnt.v_pageout_free_min) ||
(mem = vm_page_queue_free.tqh_first) == 0) {
mem = vm_page_queue_cache.tqh_first;
if (mem) {
@@ -649,9 +653,15 @@ vm_page_alloc(object, offset, inttime)
cnt.v_cache_count--;
goto gotpage;
}
- simple_unlock(&vm_page_queue_free_lock);
- splx(s);
- return (NULL);
+ if( page_req == VM_ALLOC_SYSTEM) {
+ mem = vm_page_queue_free.tqh_first;
+ if( !mem) {
+ simple_unlock(&vm_page_queue_free_lock);
+ splx(s);
+ wakeup((caddr_t) &vm_pages_needed);
+ return (NULL);
+ }
+ }
}
}
@@ -661,7 +671,7 @@ vm_page_alloc(object, offset, inttime)
gotpage:
simple_unlock(&vm_page_queue_free_lock);
- mem->flags = PG_BUSY | PG_CLEAN;
+ mem->flags = PG_BUSY;
mem->wire_count = 0;
mem->hold_count = 0;
mem->act_count = 0;
@@ -680,7 +690,8 @@ vm_page_alloc(object, offset, inttime)
* we would be nearly out of memory.
*/
if (curproc != pageproc &&
- ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min))
+ ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) ||
+ (cnt.v_free_count < cnt.v_pageout_free_min))
wakeup((caddr_t) &vm_pages_needed);
return (mem);
@@ -1109,8 +1120,8 @@ void
vm_page_test_dirty(m)
vm_page_t m;
{
- if ((!m->dirty || (m->dirty != vm_page_bits(0, PAGE_SIZE))) &&
- pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
+ if ((m->dirty != VM_PAGE_BITS_ALL) &&
+ pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
m->dirty = VM_PAGE_BITS_ALL;
}
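
The vm_page.c rework above is the fix for the interrupt-time allocation flaw: an interrupt-time request may no longer raid the cache queue (removing a page from its object is unsafe at interrupt time) and instead takes only truly free pages, while VM_ALLOC_NORMAL stops short of the pageout daemon's reserve and VM_ALLOC_SYSTEM may dip into it. A deliberately simplified model of that admission policy follows; it is not the kernel function, which also handles queue manipulation, locking, and pageout-daemon wakeups:

#include <stdio.h>

/* Allocation classes exactly as added to vm_page.h. */
#define VM_ALLOC_NORMAL		0	/* ordinary request; may wait */
#define VM_ALLOC_INTERRUPT	1	/* interrupt time; free pages only */
#define VM_ALLOC_SYSTEM		2	/* urgent, but not at interrupt time */

/*
 * Simplified admission policy: NORMAL stops short of the pageout
 * reserve (falling back to the cache queue), SYSTEM may dip into the
 * reserve, and INTERRUPT takes any free page but may never touch the
 * cache queue or sleep.
 */
static int
can_allocate(int page_req, int free_count, int cache_count, int pageout_free_min)
{
	switch (page_req) {
	case VM_ALLOC_INTERRUPT:
		return (free_count > 0);
	case VM_ALLOC_SYSTEM:
		return (free_count > 0 || cache_count > 0);
	case VM_ALLOC_NORMAL:
	default:
		return (free_count >= pageout_free_min || cache_count > 0);
	}
}

int
main(void)
{
	/* 3 free pages, empty cache, reserve of 6: */
	printf("normal:    %d\n", can_allocate(VM_ALLOC_NORMAL, 3, 0, 6));	/* 0 */
	printf("system:    %d\n", can_allocate(VM_ALLOC_SYSTEM, 3, 0, 6));	/* 1 */
	printf("interrupt: %d\n", can_allocate(VM_ALLOC_INTERRUPT, 3, 0, 6));	/* 1 */
	return (0);
}

Callers then pick the class by context, as vm_map.c does above with (map == kmem_map) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL.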

sys/vm/vm_page.h

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_page.h,v 1.11 1995/01/09 16:05:52 davidg Exp $
+ * $Id: vm_page.h,v 1.12 1995/01/10 09:19:52 davidg Exp $
*/
/*
@@ -220,6 +220,9 @@ extern simple_lock_data_t vm_page_queue_free_lock; /* lock on free page queue */
#define VM_PAGE_BITS_ALL 0xffff
#endif
+ #define VM_ALLOC_NORMAL 0
+ #define VM_ALLOC_INTERRUPT 1
+ #define VM_ALLOC_SYSTEM 2
void vm_page_activate __P((vm_page_t));
vm_page_t vm_page_alloc __P((vm_object_t, vm_offset_t, int));

sys/vm/vm_pageout.c

@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.29 1995/01/09 16:05:53 davidg Exp $
+ * $Id: vm_pageout.c,v 1.30 1995/01/10 07:32:49 davidg Exp $
*/
/*
@@ -341,7 +341,7 @@ vm_pageout_object_deactivate_pages(map, object, count, map_remove_only)
if (object->shadow->ref_count == 1)
dcount += vm_pageout_object_deactivate_pages(map, object->shadow, count / 2 + 1, map_remove_only);
else
- dcount += vm_pageout_object_deactivate_pages(map, object->shadow, count / 2 + 1, 1);
+ vm_pageout_object_deactivate_pages(map, object->shadow, count, 1);
}
if (object->paging_in_progress || !vm_object_lock_try(object))
return dcount;
@@ -518,8 +518,8 @@ vm_pageout_inactive_stats(int maxiscan)
* heuristic alert -- if a page is being re-activated,
* it probably will be used one more time...
*/
- ++m->act_count;
- ++m->act_count;
+ if (m->act_count < ACT_MAX)
+ m->act_count += ACT_ADVANCE;
}
m = next;
}
@@ -574,6 +574,7 @@ vm_pageout_scan()
*/
rescan0:
+ vm_pageout_inactive_stats(MAXISCAN);
maxlaunder = (cnt.v_inactive_target > MAXLAUNDER) ?
MAXLAUNDER : cnt.v_inactive_target;
@@ -620,17 +621,21 @@ vm_pageout_scan()
m->flags &= ~PG_REFERENCED;
pmap_clear_reference(VM_PAGE_TO_PHYS(m));
vm_page_activate(m);
- ++m->act_count;
- ++m->act_count;
+ if (m->act_count < ACT_MAX)
+ m->act_count += ACT_ADVANCE;
m = next;
continue;
}
vm_page_test_dirty(m);
if ((m->dirty & m->valid) == 0) {
- if (((cnt.v_free_count + cnt.v_cache_count) < desired_free) ||
- (cnt.v_cache_count < cnt.v_cache_min))
+ if (m->valid == 0) {
+ pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
+ vm_page_free(m);
+ } else if (((cnt.v_free_count + cnt.v_cache_count) < desired_free) ||
+ (cnt.v_cache_count < cnt.v_cache_min)) {
vm_page_cache(m);
+ }
} else if (maxlaunder > 0) {
int written;
@@ -684,6 +689,8 @@ vm_pageout_scan()
desired_free - (cnt.v_free_count + cnt.v_cache_count);
}
}
+ if( (page_shortage <= 0) && (cnt.v_free_count < cnt.v_free_min))
+ page_shortage = 1;
}
maxscan = cnt.v_active_count;
minscan = cnt.v_active_count;
@@ -706,6 +713,8 @@ vm_pageout_scan()
(m->flags & PG_BUSY) ||
(m->hold_count != 0) ||
(m->bmapped != 0)) {
+ TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
+ TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m = next;
continue;
}
@@ -725,6 +734,8 @@ vm_pageout_scan()
TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
splx(s);
} else {
+ m->flags &= ~PG_REFERENCED;
+ pmap_clear_reference(VM_PAGE_TO_PHYS(m));
m->act_count -= min(m->act_count, ACT_DECLINE);
/*
@@ -733,10 +744,6 @@ vm_pageout_scan()
if (!m->act_count && (page_shortage > 0)) {
if (m->object->ref_count == 0) {
vm_page_test_dirty(m);
- m->flags &= ~PG_REFERENCED;
- pmap_clear_reference(VM_PAGE_TO_PHYS(m));
- --page_shortage;
if ((m->dirty & m->valid) == 0) {
m->act_count = 0;
@@ -745,14 +752,10 @@ vm_pageout_scan()
vm_page_deactivate(m);
}
} else {
- m->flags &= ~PG_REFERENCED;
- pmap_clear_reference(VM_PAGE_TO_PHYS(m));
vm_page_deactivate(m);
- --page_shortage;
}
- } else {
+ } else if (m->act_count) {
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
}
@@ -764,7 +767,7 @@ vm_pageout_scan()
* We try to maintain some *really* free pages, this allows interrupt
* code to be guaranteed space.
*/
- while (cnt.v_free_count < MINFREE) {
+ while (cnt.v_free_count < cnt.v_free_min) {
m = vm_page_queue_cache.tqh_first;
if (!m)
break;
@@ -840,7 +843,7 @@ vm_pageout()
* free_reserved needs to include enough for the largest swap pager
* structures plus enough for any pv_entry structs when paging.
*/
- cnt.v_pageout_free_min = 5 + cnt.v_page_count / 1024;
+ cnt.v_pageout_free_min = 6 + cnt.v_page_count / 1024;
cnt.v_free_reserved = cnt.v_pageout_free_min + 2;
cnt.v_free_target = 3 * cnt.v_free_min + cnt.v_free_reserved;
cnt.v_inactive_target = cnt.v_free_count / 4;
@@ -926,8 +929,6 @@ vm_daemon()
size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG;
if (limit >= 0 && size >= limit) {
overage = (size - limit) / NBPG;
- if (limit == 0)
- overage += 20;
vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
(vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
}
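
Several hunks above replace the unbounded ++m->act_count pair with a capped advance and pair it with a bounded decline. The effect is a saturating usage counter: bursts of references cannot push a page's priority past ACT_MAX, and aging can never drive the counter negative. A sketch with illustrative constants (ACT_ADVANCE, ACT_DECLINE, and ACT_MAX are kernel tunables; the values below are placeholders, not the 1995 values):

#include <stdio.h>

#define ACT_ADVANCE	3	/* illustrative values only -- the real */
#define ACT_DECLINE	1	/* constants live in the kernel headers */
#define ACT_MAX		100

static int
imin(int a, int b)
{
	return (a < b ? a : b);
}

/* Reward a referenced page, saturating at ACT_MAX. */
static int
advance(int act_count)
{
	if (act_count < ACT_MAX)
		act_count += ACT_ADVANCE;
	return (act_count);
}

/* Age an unreferenced page; the counter never goes negative. */
static int
decline(int act_count)
{
	return (act_count - imin(act_count, ACT_DECLINE));
}

int
main(void)
{
	int a = 0;

	for (int i = 0; i < 50; i++)
		a = advance(a);		/* saturates near ACT_MAX */
	printf("after 50 references: %d\n", a);
	printf("after one aging pass: %d\n", decline(a));
	return (0);
}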

sys/vm/vnode_pager.c

@@ -37,7 +37,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.19 1995/01/09 16:06:01 davidg Exp $
+ * $Id: vnode_pager.c,v 1.20 1995/01/11 20:00:10 davidg Exp $
*/
/*
@@ -881,13 +881,15 @@ vnode_pager_input(vnp, m, count, reqpage)
counta = (count - reqpage) - 1;
bpa = 0;
sizea = 0;
- if (counta) {
- bpa = getpbuf();
- count -= counta;
- sizea = size - count * PAGE_SIZE;
- size = count * PAGE_SIZE;
- }
bp = getpbuf();
+ if (counta) {
+ bpa = (struct buf *) trypbuf();
+ if (bpa) {
+ count -= counta;
+ sizea = size - count * PAGE_SIZE;
+ size = count * PAGE_SIZE;
+ }
+ }
kva = (vm_offset_t) bp->b_data;
/*
@@ -981,10 +983,7 @@ vnode_pager_input(vnp, m, count, reqpage)
* now tell them that it is ok to use
*/
if (!error) {
- if (i != reqpage - 1)
- vm_page_deactivate(m[i]);
- else
- vm_page_activate(m[i]);
+ vm_page_deactivate(m[i]);
PAGE_WAKEUP(m[i]);
} else {
vnode_pager_freepage(m[i]);
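
The vnode_pager.c fix is a pbuf accounting change: the main request buffer is still acquired with getpbuf(), which can sleep, but the auxiliary readahead buffer now comes from trypbuf(), which fails instead of sleeping; if no spare buffer is available, the readahead is simply skipped. A toy model of that fallback, where the pool counter and stub buffers are invented purely for illustration:

#include <stdio.h>
#include <stddef.h>

/* Invented stand-in for the kernel's fixed pool of pager buffers. */
static int pbufs_available = 1;

static void *
getpbuf(void)			/* the real one sleeps until a buffer frees up */
{
	pbufs_available--;
	return ((void *)1);
}

static void *
trypbuf(void)			/* non-blocking: returns NULL instead of sleeping */
{
	if (pbufs_available <= 0)
		return (NULL);
	pbufs_available--;
	return ((void *)1);
}

int
main(void)
{
	void *bp = getpbuf();	/* the requested pages: must be read */
	void *bpa = trypbuf();	/* readahead: purely best-effort */

	if (bpa == NULL)
		printf("no spare pbuf: do a single I/O, skip readahead\n");
	else
		printf("spare pbuf: split into request I/O + readahead I/O\n");
	(void)bp;
	return (0);
}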