1
0
mirror of https://git.FreeBSD.org/src.git synced 2025-01-30 16:51:41 +00:00

Do the small amount of tweaking to support PAE for at least initiator mode.

I was unable to test this because the PAE kernel crashed with a "cannot copy
LDT" panic before coming up. Once this gets a bit more testing, I'll fix the
PAE conf file to allow isp devices.

PR:		59728
This commit is contained in:
Matt Jacob 2004-09-07 08:04:09 +00:00
parent affa470653
commit 6de9bf776e
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=134895

View File

@ -45,6 +45,12 @@ __FBSDID("$FreeBSD$");
#include <sys/rman.h>
#include <sys/malloc.h>
#ifdef ISP_TARGET_MODE
#ifdef PAE
#error "PAE and ISP_TARGET_MODE not supported yet"
#endif
#endif
#include <dev/isp/isp_freebsd.h>
static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
@ -1621,6 +1627,131 @@ tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
static void dma2(void *, bus_dma_segment_t *, int, int);
#ifdef PAE
/*
 * dma2 (PAE / 64-bit DMA variant): bus_dmamap_load callback that fills in
 * the data-transfer portion of an initiator-mode request queue entry.
 *
 * arg     - mush_t carrying the softc, CCB, partially built request entry,
 *           next-in pointer, and an error slot (mp->error) for the caller.
 * dm_segs - array of nseg DMA segments covering the CCB's data buffer.
 * error   - non-zero if the DMA load itself failed.
 *
 * On success the request entry (and any RQSTYPE_A64_CONT continuation
 * entries) are populated with 64-bit segment descriptors and *mp->nxtip is
 * advanced past the continuation entries consumed.  On failure mp->error is
 * set and the queue indices are left untouched.
 */
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq64_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	/* Sync the map for the direction the HBA will move the data. */
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */
	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T3;
		((ispreqt3_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		/*
		 * A CDB longer than 12 bytes leaves no room for data
		 * segments in the base entry; force everything into
		 * continuation entries.
		 */
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG_A64;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	/* Fill as many segments as fit in the base request entry. */
	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
			rq3->req_dataseg[rq3->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq3->req_dataseg[rq3->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	/* Spill remaining segments into 64-bit continuation entries. */
	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq64_t local, *crq = &local, *cqe;

		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			seglim++;
			/*
			 * Fix: subtract this segment's length *before*
			 * advancing dm_segs.  The original decremented
			 * datalen after the increment, charging the next
			 * segment's length (and dereferencing one past
			 * the end of the array on the final segment),
			 * which corrupts the residual count.  This now
			 * matches the ordering in the base-entry loop.
			 */
			datalen -= dm_segs->ds_len;
			dm_segs++;
		}
		isp_put_cont64_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
#else
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
@ -1744,6 +1875,7 @@ dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
}
*mp->nxtip = nxti;
}
#endif
static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
@ -1821,7 +1953,7 @@ isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
} else {
/* Pointer to physical buffer */
struct bus_dma_segment seg;
seg.ds_addr = (bus_addr_t)csio->data_ptr;
seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
(*eptr)(mp, &seg, 1, 0);
}
@ -1867,6 +1999,10 @@ isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
case RQSTYPE_T2RQS:
isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
break;
case RQSTYPE_A64:
case RQSTYPE_T3RQS:
isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
break;
}
return (CMD_QUEUED);
}