freebsd/sys/dev/advansys/advansys.c
Kenneth D. Merry 9deea8574e Add a number of interrelated CAM feature enhancements and bug fixes.
NOTE:  These changes will require recompilation of any userland
applications, like cdrecord, xmcd, etc., that use the CAM passthrough
interface.  A make world is recommended.
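
For concreteness, a minimal sketch of how a userland program drives
the passthrough interface (device path, CCB setup, and error handling
are elided or assumed; this is not camcontrol's actual code):

#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/scsi/scsi_pass.h>

/*
 * Send a filled-in CCB through pass(4).  Because CAMIOCOMMAND's group
 * code now encodes CAM_VERSION, a binary built against old headers
 * fails this ioctl instead of handing the kernel a stale CCB layout.
 */
static int
send_ccb(const char *dev, union ccb *ccb)
{
	int fd, error;

	if ((fd = open(dev, O_RDWR)) < 0)
		return (-1);
	error = ioctl(fd, CAMIOCOMMAND, ccb);
	close(fd);
	return (error);
}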

camcontrol.[c8]:
 - We now support two new commands, "tags" and "negotiate".

	- The tags command allows users to view the number of tagged
	  openings for a device as well as a number of other related
	  parameters, and it allows users to set tagged openings for
	  a device.

	- The negotiate command allows users to enable and disable
	  disconnection and tagged queueing, set sync rates, offsets
	  and bus width.  Note that not all of those features are
	  available for all controllers.  Only the adv, ahc, and ncr
	  drivers fully support all of the features at this point.
	  Some cards do not allow the setting of sync rates, offsets and
	  the like, and some of the drivers don't have any facilities to
	  do so.  Some drivers, like the adw driver, only support enabling
	  or disabling sync negotiation, but do not support setting sync
	  rates.

 - new description in the camcontrol man page of how to format a disk
 - cleanup of the camcontrol inquiry command
 - add support in the 'devlist' command for skipping unconfigured devices if
   -v was not specified on the command line.
 - make use of the new base_transfer_speed in the path inquiry CCB.
 - fix CCB bzero cases

cam_xpt.c, cam_sim.[ch], cam_ccb.h:

 - new flags on many CCB function codes to designate whether they're
   non-immediate, use a user-supplied CCB, and can only be passed from
   userland programs via the xpt device.  Use these flags in the transport
   layer and pass driver to categorize CCBs (see the sketch after this list).

 - new flag in the transport layer device matching code for device nodes
   that indicates whether a device is unconfigured

 - bump the CAM version from 0x10 to 0x11

 - Change the CAM ioctls to use the version as their group code, so we can
   force users to recompile code even when the CCB size doesn't change.

 - add + fill in a new value in the path inquiry CCB, base_transfer_speed.
   Remove a corresponding field from the cam_sim structure, and add code to
   every SIM to set this field to the proper value.

 - Fix the set transfer settings code in the transport layer.
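
For illustration, the function-code classification mentioned above
works roughly like the sketch below; the flag names and values are
assumptions, not quoted from cam_ccb.h:

#include <cam/cam.h>
#include <cam/cam_ccb.h>

/*
 * Assumed layout: flag bits sit above the ordinal function codes, so
 * the transport layer and pass driver can classify a CCB with a mask
 * instead of a switch over every func_code.
 */
#define XPT_FC_QUEUED	0x100	/* non-immediate; completed via a callback */
#define XPT_FC_USER_CCB	0x200	/* CCB storage supplied by the caller */
#define XPT_FC_XPT_ONLY	0x400	/* may only be passed in via the xpt device */

static int
ccb_is_queued(union ccb *ccb)
{
	return ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0);
}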

scsi_cd.c:

 - make some variables volatile instead of just casting them in various
   places
 - fix a race condition in the changer code
 - attach unless we get a "logical unit not supported" error.  This should
   fix all of the cases where people have devices that return weird errors
   when they don't have media in the drive.

scsi_da.c:

 - attach unless we get a "logical unit not supported" error

scsi_pass.c:

 - for immediate CCBs, just malloc a CCB to send the user request in.  This
   gets rid of the 'held' count problem in camcontrol tags.

scsi_pass.h:

 - change the CAM ioctls to use the CAM version as their group code.
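
In outline, the change amounts to the following (a sketch; the
request numbers are illustrative, not quoted from the header):

#include <sys/ioccom.h>
#include <cam/cam.h>		/* CAM_VERSION, now 0x11 */
#include <cam/cam_ccb.h>

/*
 * With CAM_VERSION as the ioctl group code, bumping the version
 * renumbers the ioctls themselves, so a stale binary gets ENOTTY
 * even when sizeof(union ccb) did not change.
 */
#define CAMIOCOMMAND	_IOWR(CAM_VERSION, 2, union ccb)
#define CAMGETPASSTHRU	_IOWR(CAM_VERSION, 3, union ccb)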

adv driver:

 - Allow changing the sync rate and offset separately.

adw driver:

 - Allow changing the sync rate and offset separately.

aha driver:

 - Don't return CAM_REQ_CMP for SET_TRAN_SETTINGS CCBs.

ahc driver:

 - Allow setting offset and sync rate separately

bt driver:

 - Don't return CAM_REQ_CMP for SET_TRAN_SETTINGS CCBs.

NCR driver:

 - Fix the ultra/ultra 2 negotiation bug
 - allow setting both the sync rate and offset separately

Other HBA drivers:
 - Add code to set the base_transfer_speed field for
   XPT_GET_TRAN_SETTINGS CCBs.

Reviewed by:	gibbs, mjacob (isp), imp (aha)
1999-05-06 20:16:39 +00:00


/*
* Generic driver for the Advanced Systems Inc. SCSI controllers
* Product specific probe and attach routines can be found in:
*
* i386/isa/adv_isa.c ABP5140, ABP542, ABP5150, ABP842, ABP852
* i386/eisa/adv_eisa.c ABP742, ABP752
* pci/adv_pci.c ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U,
* ABP940UA, ABP950, ABP960, ABP960U, ABP960UA,
* ABP970, ABP970U
*
* Copyright (c) 1996-1998 Justin Gibbs.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification, immediately at the beginning of the file.
* 2. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: advansys.c,v 1.9 1999/04/19 21:27:35 gibbs Exp $
*/
/*
* Ported from:
* advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
*
* Copyright (c) 1995-1997 Advanced System Products, Inc.
* All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that redistributions of source
* code retain the above copyright notice and this comment without
* modification.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <dev/advansys/advansys.h>
u_long adv_unit;
static void adv_action(struct cam_sim *sim, union ccb *ccb);
static void adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
int nsegments, int error);
static void adv_poll(struct cam_sim *sim);
static void adv_run_doneq(struct adv_softc *adv);
static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv);
static void adv_destroy_ccb_info(struct adv_softc *adv,
struct adv_ccb_info *cinfo);
static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv);
static __inline void adv_free_ccb_info(struct adv_softc *adv,
struct adv_ccb_info *cinfo);
struct adv_softc *advsoftcs[NADV]; /* XXX Config should handle this */
static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
{
struct adv_ccb_info *cinfo;
int opri;
opri = splcam();
if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
} else {
cinfo = adv_alloc_ccb_info(adv);
}
splx(opri);
return (cinfo);
}
static __inline void
adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
int opri;
opri = splcam();
cinfo->state = ACCB_FREE;
SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);
splx(opri);
}
void
adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
bus_addr_t* physaddr;
physaddr = (bus_addr_t*)arg;
*physaddr = segs->ds_addr;
}
char *
adv_name(struct adv_softc *adv)
{
static char name[10];
snprintf(name, sizeof(name), "adv%d", adv->unit);
return (name);
}
static void
adv_action(struct cam_sim *sim, union ccb *ccb)
{
struct adv_softc *adv;
CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));
adv = (struct adv_softc *)cam_sim_softc(sim);
switch (ccb->ccb_h.func_code) {
/* Common cases first */
case XPT_SCSI_IO: /* Execute the requested I/O operation */
{
struct ccb_hdr *ccb_h;
struct ccb_scsiio *csio;
struct adv_ccb_info *cinfo;
ccb_h = &ccb->ccb_h;
csio = &ccb->csio;
cinfo = adv_get_ccb_info(adv);
if (cinfo == NULL)
panic("XXX Handle CCB info error!!!");
ccb_h->ccb_cinfo_ptr = cinfo;
/* Only use S/G if there is a transfer */
if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
/*
* We've been given a pointer
* to a single buffer
*/
if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
int s;
int error;
s = splsoftvm();
error =
bus_dmamap_load(adv->buffer_dmat,
cinfo->dmamap,
csio->data_ptr,
csio->dxfer_len,
adv_execute_ccb,
csio, /*flags*/0);
if (error == EINPROGRESS) {
/*
* So as to maintain ordering,
* freeze the controller queue
* until our mapping is
* returned.
*/
xpt_freeze_simq(adv->sim,
/*count*/1);
cinfo->state |=
ACCB_RELEASE_SIMQ;
}
splx(s);
} else {
struct bus_dma_segment seg;
/* Pointer to physical buffer */
seg.ds_addr =
(bus_addr_t)csio->data_ptr;
seg.ds_len = csio->dxfer_len;
adv_execute_ccb(csio, &seg, 1, 0);
}
} else {
struct bus_dma_segment *segs;
if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
panic("adv_setup_data - Physical "
"segment pointers unsupported");
if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
panic("adv_setup_data - Virtual "
"segment addresses unsupported");
/* Just use the segments provided */
segs = (struct bus_dma_segment *)csio->data_ptr;
adv_execute_ccb(ccb, segs, csio->sglist_cnt, 0);
}
} else {
adv_execute_ccb(ccb, NULL, 0, 0);
}
break;
}
case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
case XPT_TARGET_IO: /* Execute target I/O request */
case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */
case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/
case XPT_EN_LUN: /* Enable LUN as a target */
case XPT_ABORT: /* Abort the specified CCB */
/* XXX Implement */
ccb->ccb_h.status = CAM_REQ_INVALID;
xpt_done(ccb);
break;
case XPT_SET_TRAN_SETTINGS:
{
struct ccb_trans_settings *cts;
target_bit_vector targ_mask;
struct adv_transinfo *tconf;
u_int update_type;
int s;
cts = &ccb->cts;
targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
update_type = 0;
/*
* The user must specify which type of settings he wishes
* to change.
*/
if (((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
&& ((cts->flags & CCB_TRANS_USER_SETTINGS) == 0)) {
tconf = &adv->tinfo[cts->ccb_h.target_id].current;
update_type |= ADV_TRANS_GOAL;
} else if (((cts->flags & CCB_TRANS_USER_SETTINGS) != 0)
&& ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0)) {
tconf = &adv->tinfo[cts->ccb_h.target_id].user;
update_type |= ADV_TRANS_USER;
} else {
ccb->ccb_h.status = CAM_REQ_INVALID;
xpt_done(ccb);
break;
}
s = splcam();
if ((update_type & ADV_TRANS_GOAL) != 0) {
if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
adv->disc_enable |= targ_mask;
else
adv->disc_enable &= ~targ_mask;
adv_write_lram_8(adv, ADVV_DISC_ENABLE_B,
adv->disc_enable);
}
if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
adv->cmd_qng_enabled |= targ_mask;
else
adv->cmd_qng_enabled &= ~targ_mask;
}
}
if ((update_type & ADV_TRANS_USER) != 0) {
if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
adv->user_disc_enable |= targ_mask;
else
adv->user_disc_enable &= ~targ_mask;
}
if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
adv->user_cmd_qng_enabled |= targ_mask;
else
adv->user_cmd_qng_enabled &= ~targ_mask;
}
}
/*
* If the user specifies either the sync rate, or offset,
* but not both, the unspecified parameter defaults to its
* current value in transfer negotiations.
*/
if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
|| ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
/*
* If the user provided a sync rate but no offset,
* use the current offset.
*/
if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
cts->sync_offset = tconf->offset;
/*
* If the user provided an offset but no sync rate,
* use the current sync rate.
*/
if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
cts->sync_period = tconf->period;
adv_period_offset_to_sdtr(adv, &cts->sync_period,
&cts->sync_offset,
cts->ccb_h.target_id);
adv_set_syncrate(adv, /*struct cam_path */NULL,
cts->ccb_h.target_id, cts->sync_period,
cts->sync_offset, update_type);
}
splx(s);
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
break;
}
case XPT_GET_TRAN_SETTINGS:
/* Get default/user set transfer settings for the target */
{
struct ccb_trans_settings *cts;
struct adv_transinfo *tconf;
target_bit_vector target_mask;
int s;
cts = &ccb->cts;
target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
s = splcam();
if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
tconf = &adv->tinfo[cts->ccb_h.target_id].current;
if ((adv->disc_enable & target_mask) != 0)
cts->flags |= CCB_TRANS_DISC_ENB;
if ((adv->cmd_qng_enabled & target_mask) != 0)
cts->flags |= CCB_TRANS_TAG_ENB;
} else {
tconf = &adv->tinfo[cts->ccb_h.target_id].user;
if ((adv->user_disc_enable & target_mask) != 0)
cts->flags |= CCB_TRANS_DISC_ENB;
if ((adv->user_cmd_qng_enabled & target_mask) != 0)
cts->flags |= CCB_TRANS_TAG_ENB;
}
cts->sync_period = tconf->period;
cts->sync_offset = tconf->offset;
splx(s);
cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
cts->valid = CCB_TRANS_SYNC_RATE_VALID
| CCB_TRANS_SYNC_OFFSET_VALID
| CCB_TRANS_BUS_WIDTH_VALID
| CCB_TRANS_DISC_VALID
| CCB_TRANS_TQ_VALID;
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
break;
}
case XPT_CALC_GEOMETRY:
{
struct ccb_calc_geometry *ccg;
u_int32_t size_mb;
u_int32_t secs_per_cylinder;
int extended;
ccg = &ccb->ccg;
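/*
 * BIOS-style geometry: with extended translation enabled, disks over
 * 1GB get the conventional 255 head, 63 sector fake geometry;
 * otherwise fall back to 64 heads and 32 sectors per track.
 */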
size_mb = ccg->volume_size
/ ((1024L * 1024L) / ccg->block_size);
extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;
if (size_mb > 1024 && extended) {
ccg->heads = 255;
ccg->secs_per_track = 63;
} else {
ccg->heads = 64;
ccg->secs_per_track = 32;
}
secs_per_cylinder = ccg->heads * ccg->secs_per_track;
ccg->cylinders = ccg->volume_size / secs_per_cylinder;
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
break;
}
case XPT_RESET_BUS: /* Reset the specified SCSI bus */
{
int s;
s = splcam();
adv_stop_execution(adv);
adv_reset_bus(adv);
adv_start_execution(adv);
splx(s);
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
break;
}
case XPT_TERM_IO: /* Terminate the I/O process */
/* XXX Implement */
ccb->ccb_h.status = CAM_REQ_INVALID;
xpt_done(ccb);
break;
case XPT_PATH_INQ: /* Path routing inquiry */
{
struct ccb_pathinq *cpi = &ccb->cpi;
cpi->version_num = 1; /* XXX??? */
cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
cpi->target_sprt = 0;
cpi->hba_misc = 0;
cpi->hba_eng_cnt = 0;
cpi->max_target = 7;
cpi->max_lun = 7;
cpi->initiator_id = adv->scsi_id;
cpi->bus_id = cam_sim_bus(sim);
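/* Base (async, narrow SCSI) transfer speed, in KB/s: 3.3MB/s. */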
cpi->base_transfer_speed = 3300;
strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN);
strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(sim);
cpi->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
break;
}
default:
ccb->ccb_h.status = CAM_REQ_INVALID;
xpt_done(ccb);
break;
}
}
/*
* Currently, the output of bus_dmamap_load suits our needs just
* fine, but should it change, we'd need to do something here.
*/
#define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs)
static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
int nsegments, int error)
{
struct ccb_scsiio *csio;
struct ccb_hdr *ccb_h;
struct cam_sim *sim;
struct adv_softc *adv;
struct adv_ccb_info *cinfo;
struct adv_scsi_q scsiq;
struct adv_sg_head sghead;
int s;
csio = (struct ccb_scsiio *)arg;
ccb_h = &csio->ccb_h;
sim = xpt_path_sim(ccb_h->path);
adv = (struct adv_softc *)cam_sim_softc(sim);
cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;
if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
/* XXX Need phystovirt!!!! */
/* How about pmap_kenter??? */
scsiq.cdbptr = csio->cdb_io.cdb_ptr;
} else {
scsiq.cdbptr = csio->cdb_io.cdb_ptr;
}
} else {
scsiq.cdbptr = csio->cdb_io.cdb_bytes;
}
/*
* Build up the request
*/
scsiq.q1.status = 0;
scsiq.q1.q_no = 0;
scsiq.q1.cntl = 0;
scsiq.q1.sg_queue_cnt = 0;
scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
scsiq.q1.target_lun = ccb_h->target_lun;
scsiq.q1.sense_len = csio->sense_len;
scsiq.q1.extra_bytes = 0;
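/* Stash the CCB's virtual address so adv_run_doneq() can recover it. */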
scsiq.q2.ccb_ptr = (u_int32_t)csio;
scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
ccb_h->target_lun);
scsiq.q2.flag = 0;
scsiq.q2.cdb_len = csio->cdb_len;
if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
scsiq.q2.tag_code = csio->tag_action;
else
scsiq.q2.tag_code = 0;
scsiq.q2.vm_id = 0;
if (nsegments != 0) {
bus_dmasync_op_t op;
scsiq.q1.data_addr = dm_segs->ds_addr;
scsiq.q1.data_cnt = dm_segs->ds_len;
if (nsegments > 1) {
scsiq.q1.cntl |= QC_SG_HEAD;
sghead.entry_cnt
= sghead.entry_to_copy
= nsegments;
sghead.res = 0;
sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
scsiq.sg_head = &sghead;
} else {
scsiq.sg_head = NULL;
}
if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
op = BUS_DMASYNC_PREREAD;
else
op = BUS_DMASYNC_PREWRITE;
bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
} else {
scsiq.q1.data_addr = 0;
scsiq.q1.data_cnt = 0;
scsiq.sg_head = NULL;
}
s = splcam();
/*
* Last chance to check whether this CCB needs
* to be aborted.
*/
if (ccb_h->status != CAM_REQ_INPROG) {
if (nsegments != 0) {
bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
}
if ((cinfo->state & ACCB_RELEASE_SIMQ) != 0) {
ccb_h->status |= CAM_RELEASE_SIMQ;
}
adv_free_ccb_info(adv, cinfo);
xpt_done((union ccb *)csio);
splx(s);
return;
}
if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
/* Temporary resource shortage */
if (nsegments != 0) {
bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
}
ccb_h->status = CAM_REQUEUE_REQ;
if ((cinfo->state & ACCB_RELEASE_SIMQ) != 0)
ccb_h->status |= CAM_RELEASE_SIMQ;
/* Unfreeze when resources are available */
xpt_freeze_simq(adv->sim, /*count*/1);
adv_free_ccb_info(adv, cinfo);
xpt_done((union ccb *)csio);
splx(s);
return;
}
cinfo->state |= ACCB_ACTIVE;
ccb_h->status |= CAM_SIM_QUEUED;
LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
/* Schedule our timeout */
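/* ccb_h->timeout is in milliseconds; convert it to clock ticks. */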
ccb_h->timeout_ch =
timeout(adv_timeout, csio, (ccb_h->timeout * hz)/1000);
splx(s);
}
static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv)
{
int error;
struct adv_ccb_info *cinfo;
cinfo = malloc(sizeof(*cinfo), M_DEVBUF, M_NOWAIT);
if (cinfo == NULL) {
printf("%s: Can't malloc CCB info\n", adv_name(adv));
return (NULL);
}
cinfo->state = ACCB_FREE;
error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
&cinfo->dmamap);
if (error != 0) {
printf("%s: Unable to allocate CCB info "
"dmamap - error %d\n", adv_name(adv), error);
free(cinfo, M_DEVBUF);
cinfo = NULL;
}
return (cinfo);
}
static void
adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap);
free(cinfo, M_DEVBUF);
}
void
adv_timeout(void *arg)
{
int s;
union ccb *ccb;
struct adv_softc *adv;
struct adv_ccb_info *cinfo;
ccb = (union ccb *)arg;
adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
xpt_print_path(ccb->ccb_h.path);
printf("Timed out\n");
s = splcam();
/* Have we been taken care of already?? */
if (cinfo == NULL || cinfo->state == ACCB_FREE) {
splx(s);
return;
}
adv_stop_execution(adv);
if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
struct ccb_hdr *ccb_h;
/*
* In order to simplify the recovery process, we ask the XPT
* layer to halt the queue of new transactions and we traverse
* the list of pending CCBs and remove their timeouts. This
* means that the driver attempts to clear only one error
* condition at a time. In general, timeouts that occur
* close together are related anyway, so there is no benefit
* in attempting to handle errors in parallel. Timeouts will
* be reinstated when the recovery process ends.
*/
if ((cinfo->state & ACCB_RELEASE_SIMQ) == 0) {
xpt_freeze_simq(adv->sim, /*count*/1);
cinfo->state |= ACCB_RELEASE_SIMQ;
}
/* This CCB is the CCB representing our recovery actions */
cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;
ccb_h = LIST_FIRST(&adv->pending_ccbs);
while (ccb_h != NULL) {
untimeout(adv_timeout, ccb_h, ccb_h->timeout_ch);
ccb_h = LIST_NEXT(ccb_h, sim_links.le);
}
/* XXX Should send a BDR */
/* Attempt an abort as our first tack */
xpt_print_path(ccb->ccb_h.path);
printf("Attempting abort\n");
adv_abort_ccb(adv, ccb->ccb_h.target_id,
ccb->ccb_h.target_lun, ccb,
CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
ccb->ccb_h.timeout_ch =
timeout(adv_timeout, ccb, 2 * hz);
} else {
/* Our attempt to perform an abort failed, go for a reset */
xpt_print_path(ccb->ccb_h.path);
printf("Resetting bus\n");
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
adv_reset_bus(adv);
}
adv_start_execution(adv);
splx(s);
}
struct adv_softc *
adv_alloc(int unit, bus_space_tag_t tag, bus_space_handle_t bsh)
{
struct adv_softc *adv;
if (unit >= NADV) {
printf("adv: unit number (%d) too high\n", unit);
return NULL;
}
/*
* Allocate a storage area for us
*/
if (advsoftcs[unit]) {
printf("adv%d: memory already allocated\n", unit);
return NULL;
}
adv = malloc(sizeof(struct adv_softc), M_DEVBUF, M_NOWAIT);
if (!adv) {
printf("adv%d: cannot malloc!\n", unit);
return NULL;
}
bzero(adv, sizeof(struct adv_softc));
LIST_INIT(&adv->pending_ccbs);
SLIST_INIT(&adv->free_ccb_infos);
advsoftcs[unit] = adv;
adv->unit = unit;
adv->tag = tag;
adv->bsh = bsh;
return(adv);
}
void
adv_free(struct adv_softc *adv)
{
switch (adv->init_level) {
case 5:
{
struct adv_ccb_info *cinfo;
while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
adv_destroy_ccb_info(adv, cinfo);
}
bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap);
}
case 4:
bus_dmamem_free(adv->sense_dmat, adv->sense_buffers,
adv->sense_dmamap);
case 3:
bus_dma_tag_destroy(adv->sense_dmat);
case 2:
bus_dma_tag_destroy(adv->buffer_dmat);
case 1:
bus_dma_tag_destroy(adv->parent_dmat);
case 0:
break;
}
free(adv, M_DEVBUF);
}
int
adv_init(struct adv_softc *adv)
{
struct adv_eeprom_config eeprom_config;
int checksum, i;
u_int16_t config_lsw;
u_int16_t config_msw;
adv_reset_chip_and_scsi_bus(adv);
adv_lib_init(adv);
/*
* Stop script execution.
*/
adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE);
adv_stop_execution(adv);
if (adv_is_chip_halted(adv) == 0) {
printf("adv%d: Unable to halt adapter. Initialization"
"failed\n", adv->unit);
return (1);
}
ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
printf("adv%d: Unable to set program counter. Initialization"
"failed\n", adv->unit);
return (1);
}
config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
config_lsw = ADV_INW(adv, ADV_CONFIG_LSW);
if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) {
config_msw &= (~(ADV_CFG_MSW_CLR_MASK));
/*
* XXX The Linux code flags this as an error,
* but what should we report to the user???
* It seems that clearing the config register
* makes this error recoverable.
*/
ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
}
/* Suck in the configuration from the EEProm */
checksum = adv_get_eeprom_config(adv, &eeprom_config);
eeprom_config.cfg_msw &= (~(ADV_CFG_MSW_CLR_MASK));
if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) {
/*
* XXX The Linux code sets a warning level for this
* condition, yet nothing of meaning is printed to
* the user. What does this mean???
*/
if (adv->chip_version == 3) {
if (eeprom_config.cfg_lsw != config_lsw) {
eeprom_config.cfg_lsw =
ADV_INW(adv, ADV_CONFIG_LSW);
}
if (eeprom_config.cfg_msw != config_msw) {
eeprom_config.cfg_msw =
ADV_INW(adv, ADV_CONFIG_MSW);
}
}
}
eeprom_config.cfg_lsw |= ADV_CFG_LSW_HOST_INT_ON;
if (adv_test_external_lram(adv) == 0) {
/*
* XXX What about non PCI cards with no
* external LRAM????
*/
if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) {
eeprom_config.max_total_qng =
ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
eeprom_config.max_tag_qng =
ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG;
} else {
eeprom_config.cfg_msw |= 0x0800;
config_msw |= 0x0800;
ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
eeprom_config.max_total_qng =
ADV_MAX_PCI_INRAM_TOTAL_QNG;
eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG;
}
adv->max_openings = eeprom_config.max_total_qng;
}
if (checksum == eeprom_config.chksum) {
/* Range/Sanity checking */
if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) {
eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG;
}
if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) {
eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG;
}
if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) {
eeprom_config.max_tag_qng = eeprom_config.max_total_qng;
}
if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) {
eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC;
}
adv->max_openings = eeprom_config.max_total_qng;
adv->user_disc_enable = eeprom_config.disc_enable;
adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng;
adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config);
adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID;
EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id);
adv->control = eeprom_config.cntl;
for (i = 0; i <= ADV_MAX_TID; i++)
adv_sdtr_to_period_offset(adv,
eeprom_config.sdtr_data[i],
&adv->tinfo[i].user.period,
&adv->tinfo[i].user.offset,
i);
} else {
u_int8_t sync_data;
printf("adv%d: Warning EEPROM Checksum mismatch. "
"Using default device parameters\n", adv->unit);
/* Set reasonable defaults since we can't read the EEPROM */
adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1;
adv->max_openings = ADV_DEF_MAX_TOTAL_QNG;
adv->disc_enable = TARGET_BIT_VECTOR_SET;
adv->user_disc_enable = TARGET_BIT_VECTOR_SET;
adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
adv->scsi_id = 7;
sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4);
for (i = 0; i <= ADV_MAX_TID; i++)
adv_sdtr_to_period_offset(adv, sync_data,
&adv->tinfo[i].user.period,
&adv->tinfo[i].user.offset,
i);
}
if (adv_set_eeprom_config(adv, &eeprom_config) != 0)
printf("%s: WARNING! Failure writing to EEPROM.\n",
adv_name(adv));
adv_set_chip_scsiid(adv, adv->scsi_id);
if (adv_init_lram_and_mcode(adv))
return (1);
adv->disc_enable = adv->user_disc_enable;
adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
for (i = 0; i <= ADV_MAX_TID; i++) {
/*
* Start off in async mode.
*/
adv_set_syncrate(adv, /*struct cam_path */NULL,
i, /*period*/0, /*offset*/0,
ADV_TRANS_CUR);
/*
* Enable the use of tagged commands on all targets.
* This allows the kernel driver to make up its own mind
* as it sees fit to tag queue instead of having the
* firmware try to second-guess the tag_code settings.
*/
adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i,
adv->max_openings);
}
adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
printf("adv%d: AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n",
adv->unit, (adv->type & ADV_ULTRA) ? "Ultra SCSI" : "SCSI",
adv->scsi_id, adv->max_openings);
return (0);
}
void
adv_intr(void *arg)
{
struct adv_softc *adv;
u_int16_t chipstat;
u_int16_t saved_ram_addr;
u_int8_t ctrl_reg;
u_int8_t saved_ctrl_reg;
u_int8_t host_flag;
adv = (struct adv_softc *)arg;
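/*
 * Snapshot the control register with the reset, single-step, and
 * diagnostic bits cleared; it is restored on the way out of the ISR.
 */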
ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL);
saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET |
ADV_CC_SINGLE_STEP | ADV_CC_DIAG |
ADV_CC_TEST));
if ((chipstat = ADV_INW(adv, ADV_CHIP_STATUS)) & ADV_CSW_INT_PENDING) {
saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR);
host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
host_flag | ADV_HOST_FLAG_IN_ISR);
adv_ack_interrupt(adv);
if ((chipstat & ADV_CSW_HALTED)
&& (ctrl_reg & ADV_CC_SINGLE_STEP)) {
adv_isr_chip_halted(adv);
saved_ctrl_reg &= ~ADV_CC_HALT;
} else {
adv_run_doneq(adv);
}
ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr);
#ifdef DIAGNOSTIC
if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr)
panic("adv_intr: Unable to set LRAM addr");
#endif
adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
}
ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg);
}
void
adv_run_doneq(struct adv_softc *adv)
{
struct adv_q_done_info scsiq;
u_int doneq_head;
u_int done_qno;
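/*
 * The microcode keeps completed requests on a singly linked list in
 * LRAM: start at the queue named by the done queue tail pointer and
 * follow the forward links until ADV_QLINK_END.
 */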
doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF;
done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head)
+ ADV_SCSIQ_B_FWD);
while (done_qno != ADV_QLINK_END) {
union ccb* ccb;
u_int done_qaddr;
u_int sg_queue_cnt;
int aborted;
done_qaddr = ADV_QNO_TO_QADDR(done_qno);
/* Pull status from this request */
sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq,
adv->max_dma_count);
/* Mark it as free */
adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS,
scsiq.q_status & ~(QS_READY|QS_ABORTED));
/* Process request based on retrieved info */
if ((scsiq.cntl & QC_SG_HEAD) != 0) {
u_int i;
/*
* S/G based request. Free all of the queue
* structures that contained S/G information.
*/
for (i = 0; i < sg_queue_cnt; i++) {
done_qno = adv_read_lram_8(adv, done_qaddr
+ ADV_SCSIQ_B_FWD);
#ifdef DIAGNOSTIC
if (done_qno == ADV_QLINK_END) {
panic("adv_qdone: Corrupted SG "
"list encountered");
}
#endif
done_qaddr = ADV_QNO_TO_QADDR(done_qno);
/* Mark SG queue as free */
adv_write_lram_8(adv, done_qaddr
+ ADV_SCSIQ_B_STATUS, QS_FREE);
}
} else
sg_queue_cnt = 0;
#ifdef DIAGNOSTIC
if (adv->cur_active < (sg_queue_cnt + 1))
panic("adv_qdone: Attempting to free more "
"queues than are active");
#endif
adv->cur_active -= sg_queue_cnt + 1;
aborted = (scsiq.q_status & QS_ABORTED) != 0;
if ((scsiq.q_status != QS_DONE)
&& (scsiq.q_status & QS_ABORTED) == 0)
panic("adv_qdone: completed scsiq with unknown status");
scsiq.remain_bytes += scsiq.extra_bytes;
if ((scsiq.d3.done_stat == QD_WITH_ERROR) &&
(scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) {
if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) {
scsiq.d3.done_stat = QD_NO_ERROR;
scsiq.d3.host_stat = QHSTA_NO_ERROR;
}
}
ccb = (union ccb *)scsiq.d2.ccb_ptr;
ccb->csio.resid = scsiq.remain_bytes;
adv_done(adv, (union ccb *)scsiq.d2.ccb_ptr,
scsiq.d3.done_stat, scsiq.d3.host_stat,
scsiq.d3.scsi_stat, scsiq.q_no);
doneq_head = done_qno;
done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD);
}
adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head);
}
void
adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
u_int host_stat, u_int scsi_status, u_int q_no)
{
struct adv_ccb_info *cinfo;
cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
/*
* Null this out so that we catch driver bugs that cause a
* ccb to be completed twice.
*/
ccb->ccb_h.ccb_cinfo_ptr = NULL;
LIST_REMOVE(&ccb->ccb_h, sim_links.le);
untimeout(adv_timeout, ccb, ccb->ccb_h.timeout_ch);
if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
bus_dmasync_op_t op;
if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
op = BUS_DMASYNC_POSTREAD;
else
op = BUS_DMASYNC_POSTWRITE;
bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
}
switch (done_stat) {
case QD_NO_ERROR:
if (host_stat == QHSTA_NO_ERROR) {
ccb->ccb_h.status = CAM_REQ_CMP;
break;
}
xpt_print_path(ccb->ccb_h.path);
printf("adv_done - queue done without error, "
"but host status non-zero(%x)\n", host_stat);
/*FALLTHROUGH*/
case QD_WITH_ERROR:
switch (host_stat) {
case QHSTA_M_TARGET_STATUS_BUSY:
case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY:
/*
* Assume that if we were a tagged transaction
* the target reported queue full. Otherwise,
* report busy. The firmware really should just
* pass the original status back up to us even
* if it thinks the target was in error for
* returning this status as no other transactions
* from this initiator are in effect, but this
* ignores multi-initiator setups and there is
* evidence that the firmware gets its per-device
* transaction counts screwed up occasionally.
*/
ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
&& host_stat != QHSTA_M_TARGET_STATUS_BUSY)
scsi_status = SCSI_STATUS_QUEUE_FULL;
else
scsi_status = SCSI_STATUS_BUSY;
adv_abort_ccb(adv, ccb->ccb_h.target_id,
ccb->ccb_h.target_lun,
/*ccb*/NULL, CAM_REQUEUE_REQ,
/*queued_only*/TRUE);
/*FALLTHROUGH*/
case QHSTA_M_NO_AUTO_REQ_SENSE:
case QHSTA_NO_ERROR:
ccb->csio.scsi_status = scsi_status;
switch (scsi_status) {
case SCSI_STATUS_CHECK_COND:
case SCSI_STATUS_CMD_TERMINATED:
ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
/* Structure copy */
ccb->csio.sense_data =
adv->sense_buffers[q_no - 1];
/* FALLTHROUGH */
case SCSI_STATUS_BUSY:
case SCSI_STATUS_RESERV_CONFLICT:
case SCSI_STATUS_QUEUE_FULL:
case SCSI_STATUS_COND_MET:
case SCSI_STATUS_INTERMED:
case SCSI_STATUS_INTERMED_COND_MET:
ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
break;
case SCSI_STATUS_OK:
ccb->ccb_h.status |= CAM_REQ_CMP;
break;
}
break;
case QHSTA_M_SEL_TIMEOUT:
ccb->ccb_h.status = CAM_SEL_TIMEOUT;
break;
case QHSTA_M_DATA_OVER_RUN:
ccb->ccb_h.status = CAM_DATA_RUN_ERR;
break;
case QHSTA_M_UNEXPECTED_BUS_FREE:
ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
break;
case QHSTA_M_BAD_BUS_PHASE_SEQ:
ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
break;
case QHSTA_M_BAD_CMPL_STATUS_IN:
/* No command complete after a status message */
ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
break;
case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT:
case QHSTA_M_WTM_TIMEOUT:
case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET:
/* The SCSI bus hung in a phase */
ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
adv_reset_bus(adv);
break;
case QHSTA_M_AUTO_REQ_SENSE_FAIL:
ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
break;
case QHSTA_D_QDONE_SG_LIST_CORRUPTED:
case QHSTA_D_ASC_DVC_ERROR_CODE_SET:
case QHSTA_D_HOST_ABORT_FAILED:
case QHSTA_D_EXE_SCSI_Q_FAILED:
case QHSTA_D_ASPI_NO_BUF_POOL:
case QHSTA_M_BAD_TAG_CODE:
case QHSTA_D_LRAM_CMP_ERROR:
case QHSTA_M_MICRO_CODE_ERROR_HALT:
default:
panic("%s: Unhandled Host status error %x",
adv_name(adv), host_stat);
/* NOTREACHED */
}
break;
case QD_ABORTED_BY_HOST:
/* Don't clobber any more-explicit error codes we've set */
if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
ccb->ccb_h.status = CAM_REQ_ABORTED;
break;
default:
xpt_print_path(ccb->ccb_h.path);
printf("adv_done - queue done with unknown status %x:%x\n",
done_stat, host_stat);
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
break;
}
if ((cinfo->state & ACCB_RELEASE_SIMQ) != 0)
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
else if (adv->openings_needed > 0) {
int openings;
openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
if (openings >= adv->openings_needed) {
ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
adv->openings_needed = 0;
}
}
if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
/*
* We now traverse our list of pending CCBs and reinstate
* their timeouts.
*/
struct ccb_hdr *ccb_h;
ccb_h = LIST_FIRST(&adv->pending_ccbs);
while (ccb_h != NULL) {
ccb_h->timeout_ch =
timeout(adv_timeout, (caddr_t)ccb_h,
(ccb_h->timeout * hz) / 1000);
ccb_h = LIST_NEXT(ccb_h, sim_links.le);
}
printf("%s: No longer in timeout\n", adv_name(adv));
}
if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP
&& (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
ccb->ccb_h.status |= CAM_DEV_QFRZN;
}
adv_free_ccb_info(adv, cinfo);
ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
xpt_done(ccb);
}
/*
* Function to poll for command completion when
* interrupts are disabled (crash dumps)
*/
static void
adv_poll(struct cam_sim *sim)
{
adv_intr(cam_sim_softc(sim));
}
/*
* Attach all the sub-devices we can find
*/
int
adv_attach(adv)
struct adv_softc *adv;
{
struct ccb_setasync csa;
struct cam_devq *devq;
/*
* Create our DMA tags. These tags define the kinds of device
* accessible memory allocations and memory mappings we will
* need to perform during normal operation.
*
* Unless we need to further restrict the allocation, we rely
* on the restrictions of the parent dmat, hence the common
* use of MAXADDR and MAXSIZE.
*/
/* DMA tag for mapping buffers into device visible space. */
if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/0, /*boundary*/0,
/*lowaddr*/BUS_SPACE_MAXADDR,
/*highaddr*/BUS_SPACE_MAXADDR,
/*filter*/NULL, /*filterarg*/NULL,
/*maxsize*/MAXBSIZE,
/*nsegments*/ADV_MAX_SG_LIST,
/*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
/*flags*/BUS_DMA_ALLOCNOW,
&adv->buffer_dmat) != 0) {
goto error_exit;
}
adv->init_level++;
/* DMA tag for our sense buffers */
if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/0, /*boundary*/0,
/*lowaddr*/BUS_SPACE_MAXADDR,
/*highaddr*/BUS_SPACE_MAXADDR,
/*filter*/NULL, /*filterarg*/NULL,
sizeof(struct scsi_sense_data)*adv->max_openings,
/*nsegments*/1,
/*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
/*flags*/0, &adv->sense_dmat) != 0) {
goto error_exit;
}
adv->init_level++;
/* Allocation for our sense buffers */
if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers,
BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) {
goto error_exit;
}
adv->init_level++;
/* And permanently map them */
bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap,
adv->sense_buffers,
sizeof(struct scsi_sense_data)*adv->max_openings,
adv_map, &adv->sense_physbase, /*flags*/0);
adv->init_level++;
/*
* Fire up the chip
*/
if (adv_start_chip(adv) != 1) {
printf("adv%d: Unable to start on board processor. Aborting.\n",
adv->unit);
return (0);
}
/*
* Create the device queue for our SIM.
*/
devq = cam_simq_alloc(adv->max_openings);
if (devq == NULL)
return (0);
/*
* Construct our SIM entry.
*/
adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, adv->unit,
1, adv->max_openings, devq);
if (adv->sim == NULL)
return (0);
/*
* Register the bus.
*
* XXX Twin Channel EISA Cards???
*/
if (xpt_bus_register(adv->sim, 0) != CAM_SUCCESS) {
cam_sim_free(adv->sim, /*free devq*/TRUE);
return (0);
}
if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim),
CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
== CAM_REQ_CMP) {
xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5);
csa.ccb_h.func_code = XPT_SASYNC_CB;
csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
csa.callback = advasync;
csa.callback_arg = adv;
xpt_action((union ccb *)&csa);
}
return (1);
error_exit:
return (0);
}