1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-12-16 10:20:30 +00:00

Fix a bug in the calculation of the maximum I/O request size.

The previous code did not limit the I/O request size based on
the maximum number of segments supported by the back-end.  In
current practice, since the only back-end supporting chained
requests is the FreeBSD implementation, this limit was never
exceeded.

sys/dev/xen/blkfront/block.h:
	Add two macros, XBF_SEGS_TO_SIZE() and XBF_SIZE_TO_SEGS(),
	to centralize the logic of reserving a segment to deal with
	non-page-aligned I/Os.

sys/dev/xen/blkfront/blkfront.c:
	o When negotiating transfer parameters, limit the
	  max_request_size we use and publish, if it is greater
	  than the maximum, unaligned, I/O we can support with
	  the number of segments advertised by the backend.
	o Don't unilaterally reduce the I/O size published to
	  the disk layer by a single page.  max_request_size
	  is already properly limited in the transfer parameter
	  negotiation code.
	o Fix typos in printf strings:
		"max_requests_segments" -> "max_request_segments"
		"specificed" -> "specified"

MFC after:	1 day
This commit is contained in:
Justin T. Gibbs 2012-02-16 21:58:47 +00:00
parent f7e784d50b
commit 443cc4d407
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=231839
2 changed files with 44 additions and 11 deletions

View File

@ -228,7 +228,7 @@ xlvbd_add(struct xb_softc *sc, blkif_sector_t sectors,
sc->xb_disk->d_sectorsize = sector_size;
sc->xb_disk->d_mediasize = sectors * sector_size;
sc->xb_disk->d_maxsize = sc->max_request_size - PAGE_SIZE;
sc->xb_disk->d_maxsize = sc->max_request_size;
sc->xb_disk->d_flags = 0;
disk_create(sc->xb_disk, DISK_VERSION_00);
@ -555,7 +555,7 @@ blkfront_initialize(struct xb_softc *sc)
max_ring_page_order = 0;
sc->ring_pages = 1;
sc->max_request_segments = BLKIF_MAX_SEGMENTS_PER_HEADER_BLOCK;
sc->max_request_size = (sc->max_request_segments - 1) * PAGE_SIZE;
sc->max_request_size = XBF_SEGS_TO_SIZE(sc->max_request_segments);
sc->max_request_blocks = BLKIF_SEGS_TO_BLOCKS(sc->max_request_segments);
/*
@ -621,8 +621,8 @@ blkfront_initialize(struct xb_softc *sc)
}
if (sc->max_request_segments > XBF_MAX_SEGMENTS_PER_REQUEST) {
device_printf(sc->xb_dev, "Back-end specificed "
"max_requests_segments of %u limited to "
device_printf(sc->xb_dev, "Back-end specified "
"max_request_segments of %u limited to "
"front-end limit of %u.\n",
sc->max_request_segments,
XBF_MAX_SEGMENTS_PER_REQUEST);
@ -630,12 +630,23 @@ blkfront_initialize(struct xb_softc *sc)
}
if (sc->max_request_size > XBF_MAX_REQUEST_SIZE) {
device_printf(sc->xb_dev, "Back-end specificed "
device_printf(sc->xb_dev, "Back-end specified "
"max_request_size of %u limited to front-end "
"limit of %u.\n", sc->max_request_size,
XBF_MAX_REQUEST_SIZE);
sc->max_request_size = XBF_MAX_REQUEST_SIZE;
}
if (sc->max_request_size > XBF_SEGS_TO_SIZE(sc->max_request_segments)) {
device_printf(sc->xb_dev, "Back-end specified "
"max_request_size of %u limited to front-end "
"limit of %u. (Too few segments.)\n",
sc->max_request_size,
XBF_SEGS_TO_SIZE(sc->max_request_segments));
sc->max_request_size =
XBF_SEGS_TO_SIZE(sc->max_request_segments);
}
sc->max_request_blocks = BLKIF_SEGS_TO_BLOCKS(sc->max_request_segments);
/* Allocate datastructures based on negotiated values. */

View File

@ -34,6 +34,32 @@
#define __XEN_DRIVERS_BLOCK_H__
#include <xen/blkif.h>
/**
* Given a number of blkif segments, compute the maximum I/O size supported.
*
* \note This calculation assumes that all but the first and last segments
* of the I/O are fully utilized.
*
* \note We reserve a segment from the maximum supported by the transport to
* guarantee we can handle an unaligned transfer without the need to
* use a bounce buffer.
*/
#define XBF_SEGS_TO_SIZE(segs) \
(((segs) - 1) * PAGE_SIZE)
/**
* Compute the maximum number of blkif segments required to represent
* an I/O of the given size.
*
* \note This calculation assumes that all but the first and last segments
* of the I/O are fully utilized.
*
* \note We reserve a segment to guarantee we can handle an unaligned
* transfer without the need to use a bounce buffer.
*/
#define XBF_SIZE_TO_SEGS(size) \
((size / PAGE_SIZE) + 1)
/**
* The maximum number of outstanding requests blocks (request headers plus
* additional segment blocks) we will allow in a negotiated block-front/back
@ -44,13 +70,9 @@
/**
* The maximum mapped region size per request we will allow in a negotiated
* block-front/back communication channel.
*
* \note We reserve a segment from the maximum supported by the transport to
* guarantee we can handle an unaligned transfer without the need to
use a bounce buffer.
*/
#define XBF_MAX_REQUEST_SIZE \
MIN(MAXPHYS, (BLKIF_MAX_SEGMENTS_PER_REQUEST - 1) * PAGE_SIZE)
MIN(MAXPHYS, XBF_SEGS_TO_SIZE(BLKIF_MAX_SEGMENTS_PER_REQUEST))
/**
* The maximum number of segments (within a request header and accompanying
@ -59,7 +81,7 @@
*/
#define XBF_MAX_SEGMENTS_PER_REQUEST \
(MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST, \
(XBF_MAX_REQUEST_SIZE / PAGE_SIZE) + 1))
XBF_SIZE_TO_SEGS(XBF_MAX_REQUEST_SIZE)))
/**
* The maximum number of shared memory ring pages we will allow in a