mirror of https://git.FreeBSD.org/src.git synced 2024-12-05 09:14:03 +00:00

Finalize GETPAGES layering scheme. Move the device GETPAGES
interface into specfs code. No need at this point to modify the
PUTPAGES stuff except in the layered-type (NULL/UNION) filesystems.
John Dyson 1995-10-23 02:23:29 +00:00
parent 9d45c3af90
commit 2c4488fce3
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=11701
8 changed files with 337 additions and 145 deletions
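The layering this commit finalizes can be sketched outside the kernel: a filesystem-level getpages routine only translates a file offset into a device address (VOP_BMAP in the real code) and forwards the request to the underlying device vnode, and the specfs implementation performs the actual transfer. Below is a minimal userland C model of that dispatch, not the kernel API; the struct layout, function names, and the byte-count/offset parameters are illustrative assumptions.

/*
 * Minimal userland model of the getpages layering introduced here
 * (illustrative only; types and names are not the kernel's).
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define DEV_BSIZE 512

struct vnode;

/* getpages takes a byte count and a device-relative byte offset */
typedef int (*getpages_t)(struct vnode *vp, char *pages, int count, long offset);

struct vnode {
	getpages_t	 v_getpages;	/* per-layer implementation        */
	struct vnode	*v_device;	/* underlying device vnode, if any */
	const char	*v_backing;	/* fake device contents            */
};

/* specfs layer: does the "real" I/O against the device */
static int
spec_getpages_model(struct vnode *vp, char *pages, int count, long offset)
{
	/* round the transfer up to a device block, as spec_getpages does */
	int size = (count + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);

	memcpy(pages, vp->v_backing + offset, count);
	printf("spec layer: read %d bytes (%d rounded) at dev offset %ld\n",
	    count, size, offset);
	return (0);
}

/* filesystem layer: translate file offset to device offset, then forward */
static int
ffs_getpages_model(struct vnode *vp, char *pages, int count, long foff)
{
	long devoff = foff;	/* a real FS would use VOP_BMAP here */

	return (vp->v_device->v_getpages(vp->v_device, pages, count, devoff));
}

int
main(void)
{
	static char disk[2 * PAGE_SIZE] = "hello from the device layer";
	struct vnode devvp = { spec_getpages_model, NULL, disk };
	struct vnode filevp = { ffs_getpages_model, &devvp, NULL };
	char page[PAGE_SIZE];

	filevp.v_getpages(&filevp, page, 64, 0);
	printf("page begins with: %.27s\n", page);
	return (0);
}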

View File: cd9660_vnops.c

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)cd9660_vnops.c 8.3 (Berkeley) 1/23/94
* $Id: cd9660_vnops.c,v 1.15 1995/08/02 13:00:40 dfr Exp $
* $Id: cd9660_vnops.c,v 1.16 1995/09/04 00:20:05 dyson Exp $
*/
#include <sys/param.h>
@ -1007,6 +1007,7 @@ struct vnodeopv_entry_desc cd9660_specop_entries[] = {
{ &vop_vfree_desc, spec_vfree }, /* vfree */
{ &vop_truncate_desc, spec_truncate }, /* truncate */
{ &vop_update_desc, cd9660_update }, /* update */
{ &vop_getpages_desc, spec_getpages},
{ &vop_bwrite_desc, vn_bwrite },
{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};

View File: spec_vnops.c

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)spec_vnops.c 8.6 (Berkeley) 4/9/94
* $Id: spec_vnops.c,v 1.14 1995/09/04 00:20:37 dyson Exp $
* $Id: spec_vnops.c,v 1.15 1995/10/06 09:47:58 phk Exp $
*/
#include <sys/param.h>
@ -94,6 +94,7 @@ struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
{ &vop_truncate_desc, spec_truncate }, /* truncate */
{ &vop_update_desc, spec_update }, /* update */
{ &vop_bwrite_desc, vn_bwrite }, /* bwrite */
{ &vop_getpages_desc, spec_getpages}, /* getpages */
{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
@ -101,6 +102,10 @@ struct vnodeopv_desc spec_vnodeop_opv_desc =
VNODEOP_SET(spec_vnodeop_opv_desc);
#include <vm/vm.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
* Trivial lookup routine that always fails.
*/
@ -706,3 +711,122 @@ spec_badop()
panic("spec_badop called");
/* NOTREACHED */
}
static void spec_getpages_iodone(struct buf *bp) {
bp->b_flags |= B_DONE;
wakeup(bp);
}
/*
* get page routine
*/
int
spec_getpages(ap)
struct vop_getpages_args *ap;
{
vm_offset_t kva;
int i, size;
daddr_t blkno;
struct buf *bp;
int s;
int error = 0;
int pcount;
pcount = round_page(ap->a_count) / PAGE_SIZE;
/*
* calculate the size of the transfer
*/
blkno = (ap->a_m[0]->offset + ap->a_offset) / DEV_BSIZE;
/*
* round up physical size for real devices
*/
size = (ap->a_count + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
bp = getpbuf();
kva = (vm_offset_t) bp->b_data;
/*
* and map the pages to be read into the kva
*/
pmap_qenter(kva, ap->a_m, pcount);
/* build a minimal buffer header */
bp->b_flags = B_BUSY | B_READ | B_CALL;
bp->b_iodone = spec_getpages_iodone;
/* B_PHYS is not set, but it is nice to fill this in */
bp->b_proc = curproc;
bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
if (bp->b_rcred != NOCRED)
crhold(bp->b_rcred);
if (bp->b_wcred != NOCRED)
crhold(bp->b_wcred);
bp->b_blkno = blkno;
bp->b_lblkno = blkno;
pbgetvp(ap->a_vp, bp);
bp->b_bcount = size;
bp->b_bufsize = size;
cnt.v_vnodein++;
cnt.v_vnodepgsin += pcount;
/* do the input */
VOP_STRATEGY(bp);
if (bp->b_flags & B_ASYNC) {
return VM_PAGER_PEND;
}
s = splbio();
/* we definitely need to be at splbio here */
while ((bp->b_flags & B_DONE) == 0) {
tsleep(bp, PVM, "vnread", 0);
}
splx(s);
if ((bp->b_flags & B_ERROR) != 0)
error = EIO;
if (!error) {
if (ap->a_count != pcount * PAGE_SIZE) {
bzero((caddr_t) kva + ap->a_count,
PAGE_SIZE * pcount - ap->a_count);
}
}
pmap_qremove(kva, pcount);
/*
* free the buffer header back to the swap buffer pool
*/
relpbuf(bp);
for (i = 0; i < pcount; i++) {
pmap_clear_modify(VM_PAGE_TO_PHYS(ap->a_m[i]));
ap->a_m[i]->dirty = 0;
ap->a_m[i]->valid = VM_PAGE_BITS_ALL;
if (i != ap->a_reqpage) {
/*
* whether or not to leave the page activated is up in
* the air, but we should put the page on a page queue
* somewhere. (it already is in the object). Result:
* It appears that empirical results show that
* deactivating pages is best.
*/
/*
* just in case someone was asking for this page we
* now tell them that it is ok to use
*/
if (!error) {
vm_page_deactivate(ap->a_m[i]);
PAGE_WAKEUP(ap->a_m[i]);
} else {
vnode_pager_freepage(ap->a_m[i]);
}
}
}
if (error) {
printf("spec_getpages: I/O read error\n");
}
return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}
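The size arithmetic in spec_getpages() above is easy to check in isolation: the request is mapped in whole pages, the device transfer is rounded up to DEV_BSIZE, and any mapped bytes past a_count are zeroed after the read. A self-contained sketch with assumed PAGE_SIZE/DEV_BSIZE values (round_page_model stands in for the kernel's round_page macro):

/*
 * Standalone check of the spec_getpages() size arithmetic:
 * pcount pages are mapped, the device transfer is rounded up to
 * DEV_BSIZE, and any bytes past a_count in the mapped pages are
 * zeroed after the read.
 */
#include <stdio.h>

#define PAGE_SIZE 4096
#define DEV_BSIZE 512

static unsigned long
round_page_model(unsigned long x)
{
	return ((x + PAGE_SIZE - 1) & ~(unsigned long)(PAGE_SIZE - 1));
}

int
main(void)
{
	int a_count = 5000;				/* request: 5000 bytes */
	int pcount = round_page_model(a_count) / PAGE_SIZE;
	int size = (a_count + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);

	printf("pages mapped:      %d\n", pcount);	/* 2 */
	printf("device transfer:   %d\n", size);	/* 5120 */
	printf("tail bytes zeroed: %d\n",
	    pcount * PAGE_SIZE - a_count);		/* 3192 */
	return (0);
}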

View File: cd9660_vnops.c

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)cd9660_vnops.c 8.3 (Berkeley) 1/23/94
* $Id: cd9660_vnops.c,v 1.15 1995/08/02 13:00:40 dfr Exp $
* $Id: cd9660_vnops.c,v 1.16 1995/09/04 00:20:05 dyson Exp $
*/
#include <sys/param.h>
@ -1007,6 +1007,7 @@ struct vnodeopv_entry_desc cd9660_specop_entries[] = {
{ &vop_vfree_desc, spec_vfree }, /* vfree */
{ &vop_truncate_desc, spec_truncate }, /* truncate */
{ &vop_update_desc, cd9660_update }, /* update */
{ &vop_getpages_desc, spec_getpages},
{ &vop_bwrite_desc, vn_bwrite },
{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};

View File: spec_vnops.c

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)spec_vnops.c 8.6 (Berkeley) 4/9/94
* $Id: spec_vnops.c,v 1.14 1995/09/04 00:20:37 dyson Exp $
* $Id: spec_vnops.c,v 1.15 1995/10/06 09:47:58 phk Exp $
*/
#include <sys/param.h>
@ -94,6 +94,7 @@ struct vnodeopv_entry_desc spec_vnodeop_entries[] = {
{ &vop_truncate_desc, spec_truncate }, /* truncate */
{ &vop_update_desc, spec_update }, /* update */
{ &vop_bwrite_desc, vn_bwrite }, /* bwrite */
{ &vop_getpages_desc, spec_getpages}, /* getpages */
{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};
struct vnodeopv_desc spec_vnodeop_opv_desc =
@ -101,6 +102,10 @@ struct vnodeopv_desc spec_vnodeop_opv_desc =
VNODEOP_SET(spec_vnodeop_opv_desc);
#include <vm/vm.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
* Trivial lookup routine that always fails.
*/
@ -706,3 +711,122 @@ spec_badop()
panic("spec_badop called");
/* NOTREACHED */
}
static void spec_getpages_iodone(struct buf *bp) {
bp->b_flags |= B_DONE;
wakeup(bp);
}
/*
* get page routine
*/
int
spec_getpages(ap)
struct vop_getpages_args *ap;
{
vm_offset_t kva;
int i, size;
daddr_t blkno;
struct buf *bp;
int s;
int error = 0;
int pcount;
pcount = round_page(ap->a_count) / PAGE_SIZE;
/*
* calculate the size of the transfer
*/
blkno = (ap->a_m[0]->offset + ap->a_offset) / DEV_BSIZE;
/*
* round up physical size for real devices
*/
size = (ap->a_count + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
bp = getpbuf();
kva = (vm_offset_t) bp->b_data;
/*
* and map the pages to be read into the kva
*/
pmap_qenter(kva, ap->a_m, pcount);
/* build a minimal buffer header */
bp->b_flags = B_BUSY | B_READ | B_CALL;
bp->b_iodone = spec_getpages_iodone;
/* B_PHYS is not set, but it is nice to fill this in */
bp->b_proc = curproc;
bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
if (bp->b_rcred != NOCRED)
crhold(bp->b_rcred);
if (bp->b_wcred != NOCRED)
crhold(bp->b_wcred);
bp->b_blkno = blkno;
bp->b_lblkno = blkno;
pbgetvp(ap->a_vp, bp);
bp->b_bcount = size;
bp->b_bufsize = size;
cnt.v_vnodein++;
cnt.v_vnodepgsin += pcount;
/* do the input */
VOP_STRATEGY(bp);
if (bp->b_flags & B_ASYNC) {
return VM_PAGER_PEND;
}
s = splbio();
/* we definitely need to be at splbio here */
while ((bp->b_flags & B_DONE) == 0) {
tsleep(bp, PVM, "vnread", 0);
}
splx(s);
if ((bp->b_flags & B_ERROR) != 0)
error = EIO;
if (!error) {
if (ap->a_count != pcount * PAGE_SIZE) {
bzero((caddr_t) kva + ap->a_count,
PAGE_SIZE * pcount - ap->a_count);
}
}
pmap_qremove(kva, pcount);
/*
* free the buffer header back to the swap buffer pool
*/
relpbuf(bp);
for (i = 0; i < pcount; i++) {
pmap_clear_modify(VM_PAGE_TO_PHYS(ap->a_m[i]));
ap->a_m[i]->dirty = 0;
ap->a_m[i]->valid = VM_PAGE_BITS_ALL;
if (i != ap->a_reqpage) {
/*
* whether or not to leave the page activated is up in
* the air, but we should put the page on a page queue
* somewhere. (it already is in the object). Result:
* It appears that empirical results show that
* deactivating pages is best.
*/
/*
* just in case someone was asking for this page we
* now tell them that it is ok to use
*/
if (!error) {
vm_page_deactivate(ap->a_m[i]);
PAGE_WAKEUP(ap->a_m[i]);
} else {
vnode_pager_freepage(ap->a_m[i]);
}
}
}
if (error) {
printf("spec_getpages: I/O read error\n");
}
return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

View File: specdev.h

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)specdev.h 8.2 (Berkeley) 2/2/94
* $Id: specdev.h,v 1.2 1994/08/02 07:45:30 davidg Exp $
* $Id: specdev.h,v 1.3 1995/07/29 11:40:32 bde Exp $
*/
/*
@ -118,6 +118,7 @@ int spec_print __P((struct vop_print_args *));
#define spec_islocked ((int (*) __P((struct vop_islocked_args *)))nullop)
int spec_pathconf __P((struct vop_pathconf_args *));
int spec_advlock __P((struct vop_advlock_args *));
int spec_getpages __P((struct vop_getpages_args *));
#define spec_blkatoff ((int (*) __P((struct vop_blkatoff_args *)))spec_badop)
#define spec_valloc ((int (*) __P((struct vop_valloc_args *)))spec_badop)
#define spec_reallocblks \

View File: ffs_vnops.c

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ffs_vnops.c 8.7 (Berkeley) 2/3/94
* $Id: ffs_vnops.c,v 1.14 1995/09/22 06:02:40 davidg Exp $
* $Id: ffs_vnops.c,v 1.15 1995/09/25 06:00:59 dyson Exp $
*/
#include <sys/param.h>
@ -158,6 +158,7 @@ struct vnodeopv_entry_desc ffs_specop_entries[] = {
{ &vop_vfree_desc, ffs_vfree }, /* vfree */
{ &vop_truncate_desc, spec_truncate }, /* truncate */
{ &vop_update_desc, ffs_update }, /* update */
{ &vop_getpages_desc, spec_getpages},
{ &vop_bwrite_desc, vn_bwrite },
{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};

View File: ufs_readwrite.c

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_readwrite.c 8.7 (Berkeley) 1/21/94
* $Id: ufs_readwrite.c,v 1.12 1995/09/07 04:39:09 dyson Exp $
* $Id: ufs_readwrite.c,v 1.13 1995/09/11 15:56:46 bde Exp $
*/
#ifdef LFS_READWRITE
@ -319,11 +319,6 @@ WRITE(ap)
#ifndef LFS_READWRITE
static void ffs_getpages_iodone(struct buf *bp) {
bp->b_flags |= B_DONE;
wakeup(bp);
}
/*
* get page routine
*/
@ -331,25 +326,28 @@ int
ffs_getpages(ap)
struct vop_getpages_args *ap;
{
vm_offset_t kva, foff;
vm_offset_t foff, physoffset;
int i, size, bsize;
struct vnode *dp;
struct buf *bp;
int s;
int error = 0;
int contigbackwards, contigforwards;
int pcontigbackwards, pcontigforwards;
int firstcontigpage;
daddr_t reqlblkno, reqblkno;
int bbackwards, bforwards;
int pbackwards, pforwards;
int firstpage;
int reqlblkno;
daddr_t reqblkno;
int poff;
int pcount;
int rtval;
int pagesperblock;
pcount = round_page(ap->a_count) / PAGE_SIZE;
/*
* if ANY DEV_BSIZE blocks are valid on a large filesystem block
* then, the entire page is valid --
*/
if (ap->a_m[ap->a_reqpage]->valid) {
ap->a_m[ap->a_reqpage]->valid = VM_PAGE_BITS_ALL;
for (i = 0; i < ap->a_count; i++) {
for (i = 0; i < pcount; i++) {
if (i != ap->a_reqpage)
vnode_pager_freepage(ap->a_m[i]);
}
@ -357,13 +355,19 @@ ffs_getpages(ap)
}
bsize = ap->a_vp->v_mount->mnt_stat.f_iosize;
foff = ap->a_m[ap->a_reqpage]->offset;
reqlblkno = foff / bsize;
poff = (foff - reqlblkno * bsize) / PAGE_SIZE;
if ( VOP_BMAP( ap->a_vp, reqlblkno, &dp, &reqblkno, &contigforwards,
&contigbackwards) || (reqblkno == -1)) {
for(i = 0; i < ap->a_count; i++) {
/*
* foff is the file offset of the required page
* reqlblkno is the logical block that contains the page
* poff is the index of the page into the logical block
*/
foff = ap->a_m[ap->a_reqpage]->offset + ap->a_offset;
reqlblkno = foff / bsize;
poff = (foff % bsize) / PAGE_SIZE;
if ( VOP_BMAP( ap->a_vp, reqlblkno, &dp, &reqblkno,
&bforwards, &bbackwards) || (reqblkno == -1)) {
for(i = 0; i < pcount; i++) {
if (i != ap->a_reqpage)
vnode_pager_freepage(ap->a_m[i]);
}
@ -378,130 +382,54 @@ ffs_getpages(ap)
}
}
reqblkno += (poff * PAGE_SIZE) / DEV_BSIZE;
firstcontigpage = 0;
pcontigbackwards = 0;
if (ap->a_reqpage > 0) {
pcontigbackwards = poff + ((contigbackwards * bsize) / PAGE_SIZE);
if (pcontigbackwards < ap->a_reqpage) {
firstcontigpage = ap->a_reqpage - pcontigbackwards;
for(i = 0; i < firstcontigpage; i++)
physoffset = reqblkno * DEV_BSIZE + poff * PAGE_SIZE;
pagesperblock = bsize / PAGE_SIZE;
/*
* find the first page that is contiguous...
* note that pbackwards is the number of pages that are contiguous
* backwards.
*/
firstpage = 0;
if (ap->a_count) {
pbackwards = poff + bbackwards * pagesperblock;
if (ap->a_reqpage > pbackwards) {
firstpage = ap->a_reqpage - pbackwards;
for(i=0;i<firstpage;i++)
vnode_pager_freepage(ap->a_m[i]);
}
}
pcontigforwards = ((bsize / PAGE_SIZE) - (poff + 1)) +
(contigforwards * bsize) / PAGE_SIZE;
if (pcontigforwards < (ap->a_count - (ap->a_reqpage + 1))) {
for( i = ap->a_reqpage + pcontigforwards + 1; i < ap->a_count; i++)
vnode_pager_freepage(ap->a_m[i]);
ap->a_count = ap->a_reqpage + pcontigforwards + 1;
}
if (firstcontigpage != 0) {
for (i = firstcontigpage; i < ap->a_count; i++) {
ap->a_m[i - firstcontigpage] = ap->a_m[i];
/*
* pforwards is the number of pages that are contiguous
* after the current page.
*/
pforwards = (pagesperblock - (poff + 1)) +
bforwards * pagesperblock;
if (pforwards < (pcount - (ap->a_reqpage + 1))) {
for( i = ap->a_reqpage + pforwards + 1; i < pcount; i++)
vnode_pager_freepage(ap->a_m[i]);
pcount = ap->a_reqpage + pforwards + 1;
}
ap->a_count -= firstcontigpage;
ap->a_reqpage -= firstcontigpage;
/*
* number of pages for I/O corrected for the non-contig pages at
* the beginning of the array.
*/
pcount -= firstpage;
}
/*
* calculate the size of the transfer
*/
foff = ap->a_m[0]->offset;
reqblkno -= (ap->a_m[ap->a_reqpage]->offset - foff) / DEV_BSIZE;
size = ap->a_count * PAGE_SIZE;
if ((foff + size) >
size = pcount * PAGE_SIZE;
if ((ap->a_m[firstpage]->offset + size) >
((vm_object_t) ap->a_vp->v_object)->un_pager.vnp.vnp_size)
size = ((vm_object_t) ap->a_vp->v_object)->un_pager.vnp.vnp_size - foff;
size = ((vm_object_t) ap->a_vp->v_object)->un_pager.vnp.vnp_size - ap->a_m[firstpage]->offset;
/*
* round up physical size for real devices
*/
if (dp->v_type == VBLK || dp->v_type == VCHR)
size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
physoffset -= ap->a_m[ap->a_reqpage]->offset;
rtval = VOP_GETPAGES(dp, &ap->a_m[firstpage], size,
(ap->a_reqpage - firstpage), physoffset);
bp = getpbuf();
kva = (vm_offset_t) bp->b_data;
/*
* and map the pages to be read into the kva
*/
pmap_qenter(kva, ap->a_m, ap->a_count);
/* build a minimal buffer header */
bp->b_flags = B_BUSY | B_READ | B_CALL;
bp->b_iodone = ffs_getpages_iodone;
/* B_PHYS is not set, but it is nice to fill this in */
bp->b_proc = curproc;
bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
if (bp->b_rcred != NOCRED)
crhold(bp->b_rcred);
if (bp->b_wcred != NOCRED)
crhold(bp->b_wcred);
bp->b_blkno = reqblkno;
pbgetvp(dp, bp);
bp->b_bcount = size;
bp->b_bufsize = size;
cnt.v_vnodein++;
cnt.v_vnodepgsin += ap->a_count;
/* do the input */
VOP_STRATEGY(bp);
s = splbio();
/* we definitely need to be at splbio here */
while ((bp->b_flags & B_DONE) == 0) {
tsleep(bp, PVM, "vnread", 0);
}
splx(s);
if ((bp->b_flags & B_ERROR) != 0)
error = EIO;
if (!error) {
if (size != ap->a_count * PAGE_SIZE)
bzero((caddr_t) kva + size, PAGE_SIZE * ap->a_count - size);
}
pmap_qremove(kva, ap->a_count);
/*
* free the buffer header back to the swap buffer pool
*/
relpbuf(bp);
for (i = 0; i < ap->a_count; i++) {
pmap_clear_modify(VM_PAGE_TO_PHYS(ap->a_m[i]));
ap->a_m[i]->dirty = 0;
ap->a_m[i]->valid = VM_PAGE_BITS_ALL;
if (i != ap->a_reqpage) {
/*
* whether or not to leave the page activated is up in
* the air, but we should put the page on a page queue
* somewhere. (it already is in the object). Result:
* It appears that empirical results show that
* deactivating pages is best.
*/
/*
* just in case someone was asking for this page we
* now tell them that it is ok to use
*/
if (!error) {
vm_page_deactivate(ap->a_m[i]);
PAGE_WAKEUP(ap->a_m[i]);
} else {
vnode_pager_freepage(ap->a_m[i]);
}
}
}
if (error) {
printf("ffs_getpages: I/O read error\n");
}
return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
return (rtval);
}
#endif
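A worked example of the trimming logic in the new ffs_getpages() above: VOP_BMAP reports how many filesystem blocks are contiguous before and after the requested block, the routine converts that into page counts, frees the pages it cannot cover in one transfer, and shrinks pcount accordingly. The numbers below (bsize, foff, and the BMAP results) are made up for illustration.

/*
 * Worked example of the page-trimming arithmetic in ffs_getpages():
 * given the contiguity that VOP_BMAP reports in filesystem blocks
 * (bbackwards/bforwards), compute how many of the passed-in pages
 * can be read in one contiguous device transfer.
 */
#include <stdio.h>

#define PAGE_SIZE 4096

int
main(void)
{
	int bsize = 8192;			/* filesystem block size  */
	int pcount = 8, reqpage = 4;		/* pages handed in        */
	long foff = 45056;			/* file offset of reqpage */
	int bbackwards = 1, bforwards = 0;	/* from VOP_BMAP          */

	int pagesperblock = bsize / PAGE_SIZE;
	int poff = (foff % bsize) / PAGE_SIZE;	/* page index in block    */

	/* pages contiguous before and after the requested page */
	int pbackwards = poff + bbackwards * pagesperblock;
	int pforwards = (pagesperblock - (poff + 1)) + bforwards * pagesperblock;

	int firstpage = 0;
	if (reqpage > pbackwards)
		firstpage = reqpage - pbackwards;	/* drop leading pages  */
	if (pforwards < pcount - (reqpage + 1))
		pcount = reqpage + pforwards + 1;	/* drop trailing pages */
	pcount -= firstpage;

	printf("poff=%d pagesperblock=%d\n", poff, pagesperblock);
	printf("pbackwards=%d pforwards=%d\n", pbackwards, pforwards);
	printf("firstpage=%d, pages actually read=%d\n", firstpage, pcount);
	return (0);
}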

View File: vnode_pager.c

@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
* $Id: vnode_pager.c,v 1.49 1995/09/12 14:42:43 dyson Exp $
* $Id: vnode_pager.c,v 1.50 1995/10/19 21:35:03 davidg Exp $
*/
/*
@ -188,8 +188,11 @@ vnode_pager_haspage(object, offset, before, after)
{
struct vnode *vp = object->handle;
daddr_t bn;
int err, run;
daddr_t reqblock;
int err, run, poff, bsize, pagesperblock;
int poff;
int bsize;
int pagesperblock;
/*
* If filesystem no longer mounted or offset beyond end of file we do
@ -364,6 +367,9 @@ vnode_pager_addr(vp, address, run)
if ((int) address < 0)
return -1;
if (vp->v_mount == NULL)
return -1;
bsize = vp->v_mount->mnt_stat.f_iosize;
vblock = address / bsize;
voffset = address % bsize;
@ -413,6 +419,9 @@ vnode_pager_input_smlfs(object, m)
int error = 0;
vp = object->handle;
if (vp->v_mount == NULL)
return VM_PAGER_BAD;
bsize = vp->v_mount->mnt_stat.f_iosize;
@ -554,9 +563,9 @@ vnode_pager_getpages(object, m, count, reqpage)
int rtval;
struct vnode *vp;
vp = object->handle;
rtval = VOP_GETPAGES(vp, m, count, reqpage);
rtval = VOP_GETPAGES(vp, m, count*PAGE_SIZE, reqpage, 0);
if (rtval == EOPNOTSUPP)
return vnode_pager_leaf_getpages(object, m, count, reqpage);
return vnode_pager_leaf_getpages(object, m, count, reqpage, 0);
else
return rtval;
}
@ -578,6 +587,9 @@ vnode_pager_leaf_getpages(object, m, count, reqpage)
int error = 0;
vp = object->handle;
if (vp->v_mount == NULL)
return VM_PAGER_BAD;
bsize = vp->v_mount->mnt_stat.f_iosize;
/* get the UNDERLYING device for the file with VOP_BMAP() */
@ -792,9 +804,9 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
int rtval;
struct vnode *vp;
vp = object->handle;
rtval = VOP_PUTPAGES(vp, m, count, sync, rtvals);
rtval = VOP_PUTPAGES(vp, m, count*PAGE_SIZE, sync, rtvals, 0);
if (rtval == EOPNOTSUPP)
return vnode_pager_leaf_putpages(object, m, count, sync, rtvals);
return vnode_pager_leaf_putpages(object, m, count, sync, rtvals, 0);
else
return rtval;
}
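The vnode_pager changes above switch VOP_GETPAGES/VOP_PUTPAGES to a byte count (count*PAGE_SIZE) with an extra trailing offset argument, and keep the existing fallback: if the filesystem returns EOPNOTSUPP, the pager's own leaf routine handles the request. A minimal userland model of that fall-through (the stub functions and the VM_PAGER_OK value here are assumptions for illustration):

/*
 * Model of the fall-through in vnode_pager_getpages(): the pager now
 * hands VOP_GETPAGES a byte count plus a trailing offset, and only if
 * the filesystem answers EOPNOTSUPP does it fall back to the generic
 * leaf routine.
 */
#include <stdio.h>
#include <errno.h>

#define PAGE_SIZE   4096
#define VM_PAGER_OK 0

/* stand-in for a filesystem that does not implement VOP_GETPAGES */
static int
vop_getpages_stub(int bytecount, int reqpage, long offset)
{
	(void)bytecount; (void)reqpage; (void)offset;
	return (EOPNOTSUPP);
}

/* stand-in for vnode_pager_leaf_getpages() */
static int
leaf_getpages_stub(int pagecount, int reqpage)
{
	printf("leaf path handles %d pages (reqpage %d)\n", pagecount, reqpage);
	return (VM_PAGER_OK);
}

static int
vnode_pager_getpages_model(int count, int reqpage)
{
	/* note the unit change: pages -> bytes, plus a new offset arg */
	int rtval = vop_getpages_stub(count * PAGE_SIZE, reqpage, 0);

	if (rtval == EOPNOTSUPP)
		return (leaf_getpages_stub(count, reqpage));
	return (rtval);
}

int
main(void)
{
	return (vnode_pager_getpages_model(4, 1) == VM_PAGER_OK ? 0 : 1);
}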