
Get rid of machine-dependent NBPG and replace with PAGE_SIZE.

John Dyson 1995-10-23 05:35:48 +00:00
parent d559b36913
commit a91c5a7ecd
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=11709
4 changed files with 20 additions and 20 deletions
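
Background, as a minimal sketch (the numeric values below are assumptions for illustration, not taken from this commit): NBPG was the machine-dependent name for the hardware page size, while PAGE_SIZE and PAGE_SHIFT are the machine-independent spellings of the same quantity, so a division by the page size can be written as a right shift and a multiplication as a left shift.

/* Hypothetical illustration only; the real definitions live in the
 * machine-dependent and machine-independent VM headers. */
#define PAGE_SHIFT      12                      /* assumes 4 KB pages */
#define PAGE_SIZE       (1 << PAGE_SHIFT)       /* 4096 bytes */

/* bytes-to-pages and pages-to-bytes, the two conversions the diff
 * rewrites in terms of PAGE_SIZE and PAGE_SHIFT */
#define BYTES_TO_PAGES(x)       ((unsigned long)(x) >> PAGE_SHIFT)
#define PAGES_TO_BYTES(x)       ((unsigned long)(x) << PAGE_SHIFT)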

sys/vm/vm_glue.c

@@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: vm_glue.c,v 1.27 1995/09/24 19:51:50 dyson Exp $
+* $Id: vm_glue.c,v 1.28 1995/10/16 05:45:49 dyson Exp $
*/
#include <sys/param.h>
@@ -213,7 +213,7 @@ vm_fork(p1, p2, isvfork)
* non-inheritable
*/
(void) vm_map_inherit(&p1->p_vmspace->vm_map,
-UPT_MIN_ADDRESS - UPAGES * NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
+UPT_MIN_ADDRESS - UPAGES * PAGE_SIZE, VM_MAX_ADDRESS, VM_INHERIT_NONE);
p2->p_vmspace = vmspace_fork(p1->p_vmspace);
#ifdef SYSVSHM
@@ -235,25 +235,25 @@ vm_fork(p1, p2, isvfork)
/* force in the page table encompassing the UPAGES */
ptaddr = trunc_page((u_int) vtopte(addr));
-error = vm_map_pageable(vp, ptaddr, ptaddr + NBPG, FALSE);
+error = vm_map_pageable(vp, ptaddr, ptaddr + PAGE_SIZE, FALSE);
if (error)
panic("vm_fork: wire of PT failed. error=%d", error);
/* and force in (demand-zero) the UPAGES */
-error = vm_map_pageable(vp, addr, addr + UPAGES * NBPG, FALSE);
+error = vm_map_pageable(vp, addr, addr + UPAGES * PAGE_SIZE, FALSE);
if (error)
panic("vm_fork: wire of UPAGES failed. error=%d", error);
/* get a kernel virtual address for the UPAGES for this proc */
-up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * NBPG);
+up = (struct user *) kmem_alloc_pageable(u_map, UPAGES * PAGE_SIZE);
if (up == NULL)
panic("vm_fork: u_map allocation failed");
/* and force-map the upages into the kernel pmap */
for (i = 0; i < UPAGES; i++)
pmap_enter(vm_map_pmap(u_map),
-((vm_offset_t) up) + NBPG * i,
-pmap_extract(vp->pmap, addr + NBPG * i),
+((vm_offset_t) up) + PAGE_SIZE * i,
+pmap_extract(vp->pmap, addr + PAGE_SIZE * i),
VM_PROT_READ | VM_PROT_WRITE, 1);
p2->p_addr = up;
@@ -330,19 +330,19 @@ faultin(p)
map = &p->p_vmspace->vm_map;
/* force the page table encompassing the kernel stack (upages) */
ptaddr = trunc_page((u_int) vtopte(kstack));
-error = vm_map_pageable(map, ptaddr, ptaddr + NBPG, FALSE);
+error = vm_map_pageable(map, ptaddr, ptaddr + PAGE_SIZE, FALSE);
if (error)
panic("faultin: wire of PT failed. error=%d", error);
/* wire in the UPAGES */
error = vm_map_pageable(map, (vm_offset_t) kstack,
-(vm_offset_t) kstack + UPAGES * NBPG, FALSE);
+(vm_offset_t) kstack + UPAGES * PAGE_SIZE, FALSE);
if (error)
panic("faultin: wire of UPAGES failed. error=%d", error);
/* and map them nicely into the kernel pmap */
for (i = 0; i < UPAGES; i++) {
-vm_offset_t off = i * NBPG;
+vm_offset_t off = i * PAGE_SIZE;
vm_offset_t pa = (vm_offset_t)
pmap_extract(&p->p_vmspace->vm_pmap,
(vm_offset_t) kstack + off);
@@ -525,13 +525,13 @@ swapout(p)
* let the upages be paged
*/
pmap_remove(vm_map_pmap(u_map),
-(vm_offset_t) p->p_addr, ((vm_offset_t) p->p_addr) + UPAGES * NBPG);
+(vm_offset_t) p->p_addr, ((vm_offset_t) p->p_addr) + UPAGES * PAGE_SIZE);
vm_map_pageable(map, (vm_offset_t) kstack,
-(vm_offset_t) kstack + UPAGES * NBPG, TRUE);
+(vm_offset_t) kstack + UPAGES * PAGE_SIZE, TRUE);
ptaddr = trunc_page((u_int) vtopte(kstack));
-vm_map_pageable(map, ptaddr, ptaddr + NBPG, TRUE);
+vm_map_pageable(map, ptaddr, ptaddr + PAGE_SIZE, TRUE);
p->p_flag &= ~P_SWAPPING;
p->p_swtime = 0;
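
To make the repeated UPAGES arithmetic in the vm_glue.c hunks above easier to follow, here is a stand-alone sketch of the per-page mapping loop from vm_fork(), built entirely from stand-in types, stub pmap routines and assumed constants rather than the real kernel interfaces:

#include <stdio.h>

#define UPAGES          2                       /* assumed u-area size in pages */
#define PAGE_SHIFT      12                      /* assumed 4 KB pages */
#define PAGE_SIZE       (1 << PAGE_SHIFT)

typedef unsigned long vm_offset_t;

/* stub standing in for pmap_extract(): virtual -> physical lookup */
static vm_offset_t
stub_pmap_extract(vm_offset_t va)
{
        return (va | 0x80000000UL);             /* fake physical address */
}

/* stub standing in for pmap_enter(): install a single page mapping */
static void
stub_pmap_enter(vm_offset_t va, vm_offset_t pa)
{
        printf("map va %#lx -> pa %#lx\n", va, pa);
}

int
main(void)
{
        vm_offset_t up = 0xf0000000UL;          /* assumed kernel VA for the u-area */
        vm_offset_t addr = 0xefbfe000UL;        /* assumed process VA of the UPAGES */
        int i;

        /* Same loop shape as the vm_fork() hunk: step through the u-area
         * one PAGE_SIZE at a time and map each page into the kernel pmap. */
        for (i = 0; i < UPAGES; i++)
                stub_pmap_enter(up + (vm_offset_t)i * PAGE_SIZE,
                    stub_pmap_extract(addr + (vm_offset_t)i * PAGE_SIZE));
        return (0);
}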

sys/vm/vm_map.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: vm_map.c,v 1.23 1995/07/13 08:48:26 davidg Exp $
+* $Id: vm_map.c,v 1.24 1995/08/26 23:18:38 bde Exp $
*/
/*
@@ -325,7 +325,7 @@ vm_map_entry_create(map)
if (m) {
int newentries;
-newentries = (NBPG / sizeof(struct vm_map_entry));
+newentries = (PAGE_SIZE / sizeof(struct vm_map_entry));
vm_page_wire(m);
m->flags &= ~PG_BUSY;
m->valid = VM_PAGE_BITS_ALL;
@@ -334,7 +334,7 @@ vm_map_entry_create(map)
m->flags |= PG_WRITEABLE|PG_MAPPED;
entry = (vm_map_entry_t) mapvm;
-mapvm += NBPG;
+mapvm += PAGE_SIZE;
--mapvmpgcnt;
for (i = 0; i < newentries; i++) {
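
The vm_map.c change above carves a wired page into map entries, so the count per page is simply PAGE_SIZE divided by the entry size. A small hedged example of that arithmetic follows; the struct below is a made-up stand-in with a plausible layout, not the real vm_map_entry:

#include <stdio.h>

#define PAGE_SIZE 4096                          /* assumed page size */

/* toy stand-in with a plausible size; the real struct vm_map_entry
 * is larger and differs across machines */
struct toy_map_entry {
        void            *prev, *next;
        unsigned long   start, end, offset;
};

int
main(void)
{
        int newentries = PAGE_SIZE / sizeof(struct toy_map_entry);

        printf("%d entries of %zu bytes fit in one %d-byte page\n",
            newentries, sizeof(struct toy_map_entry), PAGE_SIZE);
        return (0);
}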

sys/vm/vm_page.c

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
-* $Id: vm_page.c,v 1.35 1995/09/03 19:57:25 dyson Exp $
+* $Id: vm_page.c,v 1.36 1995/09/03 20:40:43 dyson Exp $
*/
/*
@@ -344,7 +344,7 @@ vm_page_hash(object, offset)
vm_object_t object;
vm_offset_t offset;
{
-return ((unsigned) object + offset / NBPG) & vm_page_hash_mask;
+return ((unsigned) object + (offset >> PAGE_SHIFT)) & vm_page_hash_mask;
}
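
The vm_page_hash() change replaces a division by the page size with a right shift by PAGE_SHIFT. For non-negative offsets the two are equivalent whenever the page size is a power of two, which the quick check below demonstrates (all values assumed for illustration):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12                           /* assumed */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)          /* 4096 */

int
main(void)
{
        unsigned long offsets[] = { 0, 4095, 4096, 123456789UL };
        size_t i;

        for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
                /* division by a power-of-two page size equals a shift */
                assert(offsets[i] / PAGE_SIZE == offsets[i] >> PAGE_SHIFT);
                printf("offset %lu -> page %lu\n",
                    offsets[i], offsets[i] >> PAGE_SHIFT);
        }
        return (0);
}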
/*

sys/vm/vm_pageout.c

@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
-* $Id: vm_pageout.c,v 1.56 1995/10/06 09:42:11 phk Exp $
+* $Id: vm_pageout.c,v 1.57 1995/10/07 19:02:55 davidg Exp $
*/
/*
@@ -935,7 +935,7 @@ vm_daemon()
if ((p->p_flag & P_INMEM) == 0)
limit = 0; /* XXX */
-size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG;
+size = p->p_vmspace->vm_pmap.pm_stats.resident_count * PAGE_SIZE;
if (limit >= 0 && size >= limit) {
overage = (size - limit) >> PAGE_SHIFT;
vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
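
The vm_daemon() hunk computes a process's resident set in bytes from its resident page count and then converts any overage back to pages with PAGE_SHIFT. A small worked example of that arithmetic, with every number assumed for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12                           /* assumed 4 KB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int
main(void)
{
        unsigned long resident_count = 700;             /* assumed resident pages */
        unsigned long limit = 2UL * 1024 * 1024;        /* assumed 2 MB RSS limit */
        unsigned long size, overage;

        size = resident_count * PAGE_SIZE;      /* 700 pages -> 2867200 bytes */
        if (size >= limit) {
                overage = (size - limit) >> PAGE_SHIFT; /* excess, in pages */
                printf("over the limit by %lu pages\n", overage);
        }
        return (0);
}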