
Add support to CTL for logical block provisioning threshold notifications.

For ZVOL-backed LUNs this allows initiators to be notified when the storage's
used or available space goes above or below the configured thresholds.

MFC after:	2 weeks
Sponsored by:	iXsystems, Inc.
Alexander Motin 2014-11-06 00:48:36 +00:00
parent d6112c71c6
commit c3e7ba3e6d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=274154
11 changed files with 484 additions and 28 deletions
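
The four new LUN options (avail-threshold, used-threshold, pool-avail-threshold,
pool-used-threshold) take byte sizes with optional k/m/g/t/p/e suffixes;
ctl_init_page_index() converts them to logical blocks and stores them in the
mode page descriptors in units of 2^CTL_LBP_EXPONENT (2048) blocks. The
standalone sketch below only illustrates that conversion; it is not CTL code,
strtoull() stands in for the kernel's strtoq(), and the 4 KB block size is a
hypothetical example.

/*
 * Illustrative model of how a threshold option such as
 * "avail-threshold=100G" becomes a mode page descriptor count.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>

#define CTL_LBP_EXPONENT 11	/* descriptor counts are in 2048-block units */

/* Parse a number with an optional unit suffix, like ctl_expand_number(). */
static int
expand_number(const char *buf, uint64_t *num)
{
	char *endptr;
	uint64_t number;
	unsigned shift;

	number = strtoull(buf, &endptr, 0);
	switch (tolower((unsigned char)*endptr)) {
	case 'e': shift = 60; break;
	case 'p': shift = 50; break;
	case 't': shift = 40; break;
	case 'g': shift = 30; break;
	case 'm': shift = 20; break;
	case 'k': shift = 10; break;
	case 'b':
	case '\0':		/* No unit. */
		*num = number;
		return (0);
	default:		/* Unrecognized unit. */
		return (-1);
	}
	if ((number << shift) >> shift != number)
		return (-1);	/* Overflow. */
	*num = number << shift;
	return (0);
}

int
main(void)
{
	uint64_t bytes, blocksize = 4096;	/* hypothetical 4K-block LUN */

	if (expand_number("100G", &bytes) == 0) {
		uint64_t blocks = bytes / blocksize;
		/* 100 GiB / 4096 = 26214400 blocks; >> 11 = 12800 in count[] */
		printf("descriptor count = %ju\n",
		    (uintmax_t)(blocks >> CTL_LBP_EXPONENT));
	}
	return (0);
}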


@ -137,7 +137,7 @@ static struct scsi_da_rw_recovery_page rw_er_page_default = {
/*correction_span*/0,
/*head_offset_count*/0,
/*data_strobe_offset_cnt*/0,
/*byte8*/0,
/*byte8*/SMS_RWER_LBPERE,
/*write_retry_count*/0,
/*reserved2*/0,
/*recovery_time_limit*/{0, 0},
@ -297,22 +297,58 @@ static struct scsi_info_exceptions_page ie_page_changeable = {
/*report_count*/{0, 0, 0, 0}
};
static struct scsi_logical_block_provisioning_page lbp_page_default = {
#define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4)
static struct ctl_logical_block_provisioning_page lbp_page_default = {{
/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
/*subpage_code*/0x02,
/*page_length*/{0, sizeof(struct scsi_logical_block_provisioning_page) - 4},
/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
/*flags*/0,
/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
/*descr*/{}
/*descr*/{}},
{{/*flags*/0,
/*resource*/0x01,
/*reserved*/{0, 0},
/*count*/{0, 0, 0, 0}},
{/*flags*/0,
/*resource*/0x02,
/*reserved*/{0, 0},
/*count*/{0, 0, 0, 0}},
{/*flags*/0,
/*resource*/0xf1,
/*reserved*/{0, 0},
/*count*/{0, 0, 0, 0}},
{/*flags*/0,
/*resource*/0xf2,
/*reserved*/{0, 0},
/*count*/{0, 0, 0, 0}}
}
};
static struct scsi_logical_block_provisioning_page lbp_page_changeable = {
static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{
/*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF,
/*subpage_code*/0x02,
/*page_length*/{0, sizeof(struct scsi_logical_block_provisioning_page) - 4},
/*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN},
/*flags*/0,
/*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
/*descr*/{}
/*descr*/{}},
{{/*flags*/0,
/*resource*/0,
/*reserved*/{0, 0},
/*count*/{0, 0, 0, 0}},
{/*flags*/0,
/*resource*/0,
/*reserved*/{0, 0},
/*count*/{0, 0, 0, 0}},
{/*flags*/0,
/*resource*/0,
/*reserved*/{0, 0},
/*count*/{0, 0, 0, 0}},
{/*flags*/0,
/*resource*/0,
/*reserved*/{0, 0},
/*count*/{0, 0, 0, 0}}
}
};
/*
@ -447,6 +483,7 @@ static void ctl_datamove_remote_read(union ctl_io *io);
static void ctl_datamove_remote(union ctl_io *io);
static int ctl_process_done(union ctl_io *io);
static void ctl_lun_thread(void *arg);
static void ctl_thresh_thread(void *arg);
static void ctl_work_thread(void *arg);
static void ctl_enqueue_incoming(union ctl_io *io);
static void ctl_enqueue_rtr(union ctl_io *io);
@ -1085,6 +1122,15 @@ ctl_init(void)
ctl_pool_free(other_pool);
return (error);
}
error = kproc_kthread_add(ctl_thresh_thread, softc,
&softc->ctl_proc, NULL, 0, 0, "ctl", "thresh");
if (error != 0) {
printf("error creating CTL threshold thread!\n");
ctl_pool_free(internal_pool);
ctl_pool_free(emergency_pool);
ctl_pool_free(other_pool);
return (error);
}
if (bootverbose)
printf("ctl: CAM Target Layer loaded\n");
@ -3991,6 +4037,52 @@ ctl_copy_io(union ctl_io *src, union ctl_io *dest)
dest->io_hdr.flags |= CTL_FLAG_INT_COPY;
}
static int
ctl_expand_number(const char *buf, uint64_t *num)
{
char *endptr;
uint64_t number;
unsigned shift;
number = strtoq(buf, &endptr, 0);
switch (tolower((unsigned char)*endptr)) {
case 'e':
shift = 60;
break;
case 'p':
shift = 50;
break;
case 't':
shift = 40;
break;
case 'g':
shift = 30;
break;
case 'm':
shift = 20;
break;
case 'k':
shift = 10;
break;
case 'b':
case '\0': /* No unit. */
*num = number;
return (0);
default:
/* Unrecognized unit. */
return (-1);
}
if ((number << shift) >> shift != number) {
/* Overflow */
return (-1);
}
*num = number << shift;
return (0);
}
/*
* This routine could be used in the future to load default and/or saved
* mode page parameters for a particular lun.
@ -4001,6 +4093,7 @@ ctl_init_page_index(struct ctl_lun *lun)
int i;
struct ctl_page_index *page_index;
const char *value;
uint64_t ival;
memcpy(&lun->mode_pages.index, page_index_template,
sizeof(page_index_template));
@ -4245,22 +4338,77 @@ ctl_init_page_index(struct ctl_lun *lun)
page_index->page_data =
(uint8_t *)lun->mode_pages.ie_page;
break;
case 0x02:
memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT],
case 0x02: {
struct ctl_logical_block_provisioning_page *page;
memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT],
&lbp_page_default,
sizeof(lbp_page_default));
memcpy(&lun->mode_pages.lbp_page[
CTL_PAGE_CHANGEABLE], &lbp_page_changeable,
sizeof(lbp_page_changeable));
memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT],
&lbp_page_default,
sizeof(lbp_page_default));
memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
&lbp_page_default,
sizeof(lbp_page_default));
page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED];
value = ctl_get_opt(&lun->be_lun->options,
"avail-threshold");
if (value != NULL &&
ctl_expand_number(value, &ival) == 0) {
page->descr[0].flags |= SLBPPD_ENABLED |
SLBPPD_ARMING_DEC;
if (lun->be_lun->blocksize)
ival /= lun->be_lun->blocksize;
else
ival /= 512;
scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
page->descr[0].count);
}
value = ctl_get_opt(&lun->be_lun->options,
"used-threshold");
if (value != NULL &&
ctl_expand_number(value, &ival) == 0) {
page->descr[1].flags |= SLBPPD_ENABLED |
SLBPPD_ARMING_INC;
if (lun->be_lun->blocksize)
ival /= lun->be_lun->blocksize;
else
ival /= 512;
scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
page->descr[1].count);
}
value = ctl_get_opt(&lun->be_lun->options,
"pool-avail-threshold");
if (value != NULL &&
ctl_expand_number(value, &ival) == 0) {
page->descr[2].flags |= SLBPPD_ENABLED |
SLBPPD_ARMING_DEC;
if (lun->be_lun->blocksize)
ival /= lun->be_lun->blocksize;
else
ival /= 512;
scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
page->descr[2].count);
}
value = ctl_get_opt(&lun->be_lun->options,
"pool-used-threshold");
if (value != NULL &&
ctl_expand_number(value, &ival) == 0) {
page->descr[3].flags |= SLBPPD_ENABLED |
SLBPPD_ARMING_INC;
if (lun->be_lun->blocksize)
ival /= lun->be_lun->blocksize;
else
ival /= 512;
scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
page->descr[3].count);
}
memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT],
&lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
sizeof(lbp_page_default));
page_index->page_data =
(uint8_t *)lun->mode_pages.lbp_page;
}
}}
break;
}
case SMS_VENDOR_SPECIFIC_PAGE:{
@ -4319,13 +4467,13 @@ static int
ctl_init_log_page_index(struct ctl_lun *lun)
{
struct ctl_page_index *page_index;
int i, j, prev;
int i, j, k, prev;
memcpy(&lun->log_pages.index, log_page_index_template,
sizeof(log_page_index_template));
prev = -1;
for (i = 0, j = 0; i < CTL_NUM_LOG_PAGES; i++) {
for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) {
page_index = &lun->log_pages.index[i];
/*
@ -4338,18 +4486,26 @@ ctl_init_log_page_index(struct ctl_lun *lun)
&& (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
continue;
if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING &&
((lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) == 0 ||
lun->backend->lun_attr == NULL))
continue;
if (page_index->page_code != prev) {
lun->log_pages.pages_page[j] = page_index->page_code;
prev = page_index->page_code;
j++;
}
lun->log_pages.subpages_page[i*2] = page_index->page_code;
lun->log_pages.subpages_page[i*2+1] = page_index->subpage;
lun->log_pages.subpages_page[k*2] = page_index->page_code;
lun->log_pages.subpages_page[k*2+1] = page_index->subpage;
k++;
}
lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0];
lun->log_pages.index[0].page_len = j;
lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0];
lun->log_pages.index[1].page_len = i * 2;
lun->log_pages.index[1].page_len = k * 2;
lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0];
lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS;
return (CTL_RETVAL_COMPLETE);
}
@ -6937,6 +7093,75 @@ ctl_mode_sense(struct ctl_scsiio *ctsio)
return (CTL_RETVAL_COMPLETE);
}
int
ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
int pc)
{
struct ctl_lun *lun;
struct scsi_log_param_header *phdr;
uint8_t *data;
uint64_t val;
lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
data = page_index->page_data;
if (lun->backend->lun_attr != NULL &&
(val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail"))
!= UINT64_MAX) {
phdr = (struct scsi_log_param_header *)data;
scsi_ulto2b(0x0001, phdr->param_code);
phdr->param_control = SLP_LBIN | SLP_LP;
phdr->param_len = 8;
data = (uint8_t *)(phdr + 1);
scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
data[4] = 0x01; /* per-LUN */
data += phdr->param_len;
}
if (lun->backend->lun_attr != NULL &&
(val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused"))
!= UINT64_MAX) {
phdr = (struct scsi_log_param_header *)data;
scsi_ulto2b(0x0002, phdr->param_code);
phdr->param_control = SLP_LBIN | SLP_LP;
phdr->param_len = 8;
data = (uint8_t *)(phdr + 1);
scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
data[4] = 0x02; /* per-pool */
data += phdr->param_len;
}
if (lun->backend->lun_attr != NULL &&
(val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail"))
!= UINT64_MAX) {
phdr = (struct scsi_log_param_header *)data;
scsi_ulto2b(0x00f1, phdr->param_code);
phdr->param_control = SLP_LBIN | SLP_LP;
phdr->param_len = 8;
data = (uint8_t *)(phdr + 1);
scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
data[4] = 0x02; /* per-pool */
data += phdr->param_len;
}
if (lun->backend->lun_attr != NULL &&
(val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused"))
!= UINT64_MAX) {
phdr = (struct scsi_log_param_header *)data;
scsi_ulto2b(0x00f2, phdr->param_code);
phdr->param_control = SLP_LBIN | SLP_LP;
phdr->param_len = 8;
data = (uint8_t *)(phdr + 1);
scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
data[4] = 0x02; /* per-pool */
data += phdr->param_len;
}
page_index->page_len = data - page_index->page_data;
return (0);
}
int
ctl_log_sense(struct ctl_scsiio *ctsio)
{
@ -10245,9 +10470,10 @@ ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
lbp_ptr->page_code = SVPD_LBP;
scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length);
if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT;
lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 |
SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP;
lbp_ptr->prov_type = SVPD_LBP_RESOURCE;
lbp_ptr->prov_type = SVPD_LBP_THIN;
}
ctsio->scsi_status = SCSI_STATUS_OK;
@ -13993,6 +14219,88 @@ ctl_lun_thread(void *arg)
}
}
static void
ctl_thresh_thread(void *arg)
{
struct ctl_softc *softc = (struct ctl_softc *)arg;
struct ctl_lun *lun;
struct ctl_be_lun *be_lun;
struct scsi_da_rw_recovery_page *rwpage;
struct ctl_logical_block_provisioning_page *page;
const char *attr;
uint64_t thres, val;
int i, e;
CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));
for (;;) {
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(lun, &softc->lun_list, links) {
be_lun = lun->be_lun;
if ((lun->flags & CTL_LUN_DISABLED) ||
(lun->flags & CTL_LUN_OFFLINE) ||
(be_lun->flags & CTL_LUN_FLAG_UNMAP) == 0 ||
lun->backend->lun_attr == NULL)
continue;
rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT];
if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0)
continue;
e = 0;
page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT];
for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
continue;
thres = scsi_4btoul(page->descr[i].count);
thres <<= CTL_LBP_EXPONENT;
switch (page->descr[i].resource) {
case 0x01:
attr = "blocksavail";
break;
case 0x02:
attr = "blocksused";
break;
case 0xf1:
attr = "poolblocksavail";
break;
case 0xf2:
attr = "poolblocksused";
break;
default:
continue;
}
mtx_unlock(&softc->ctl_lock); // XXX
val = lun->backend->lun_attr(
lun->be_lun->be_lun, attr);
mtx_lock(&softc->ctl_lock);
if (val == UINT64_MAX)
continue;
if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
== SLBPPD_ARMING_INC)
e |= (val >= thres);
else
e |= (val <= thres);
}
mtx_lock(&lun->lun_lock);
if (e) {
if (lun->lasttpt == 0 ||
time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
lun->lasttpt = time_uptime;
for (i = 0; i < CTL_MAX_INITIATORS; i++)
lun->pending_ua[i] |=
CTL_UA_THIN_PROV_THRES;
}
} else {
lun->lasttpt = 0;
for (i = 0; i < CTL_MAX_INITIATORS; i++)
lun->pending_ua[i] &= ~CTL_UA_THIN_PROV_THRES;
}
mtx_unlock(&lun->lun_lock);
}
mtx_unlock(&softc->ctl_lock);
pause("-", CTL_LBP_PERIOD * hz);
}
}
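
Restated compactly: descriptors armed for increase (used and pool-used) trip
once the attribute value reaches the threshold, descriptors armed for decrease
(avail and pool-avail) trip once it falls to it, and while any descriptor is
tripped the unit attention is re-established at most once per
CTL_LBP_UA_PERIOD seconds. The helper below is an illustration only, using the
SLBPPD_* values added to scsi_all.h further down in this diff.

/*
 * Illustration only -- a compact restatement of the arming check done in
 * ctl_thresh_thread() above; not part of the patch.
 */
#include <stdint.h>

#define SLBPPD_ARMING_MASK	0x07
#define SLBPPD_ARMING_DEC	0x02	/* avail / pool-avail descriptors */
#define SLBPPD_ARMING_INC	0x01	/* used / pool-used descriptors */

static int
lbp_thresh_tripped(uint8_t flags, uint64_t val, uint64_t thres)
{
	if ((flags & SLBPPD_ARMING_MASK) == SLBPPD_ARMING_INC)
		return (val >= thres);	/* used space climbed to the threshold */
	return (val <= thres);		/* available space dropped to it */
}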
static void
ctl_enqueue_incoming(union ctl_io *io)
{


@ -127,7 +127,8 @@ typedef enum {
CTL_UA_RES_RELEASE = 0x0800,
CTL_UA_REG_PREEMPT = 0x1000,
CTL_UA_ASYM_ACC_CHANGE = 0x2000,
CTL_UA_CAPACITY_CHANGED = 0x4000
CTL_UA_CAPACITY_CHANGED = 0x4000,
CTL_UA_THIN_PROV_THRES = 0x8000
} ctl_ua_type;
#ifdef _KERNEL
@ -178,6 +179,9 @@ int ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
int ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
uint8_t *page_ptr);
int ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
struct ctl_page_index *page_index,
int pc);
int ctl_config_move_done(union ctl_io *io);
void ctl_datamove(union ctl_io *io);
void ctl_done(union ctl_io *io);


@ -218,6 +218,7 @@ typedef void (*be_vfunc_t)(union ctl_io *io);
typedef int (*be_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct thread *td);
typedef int (*be_luninfo_t)(void *be_lun, struct sbuf *sb);
typedef uint64_t (*be_lunattr_t)(void *be_lun, const char *attrname);
struct ctl_backend_driver {
char name[CTL_BE_NAME_LEN]; /* passed to CTL */
@ -229,6 +230,7 @@ struct ctl_backend_driver {
be_func_t config_write; /* passed to CTL */
be_ioctl_t ioctl; /* passed to CTL */
be_luninfo_t lun_info; /* passed to CTL */
be_lunattr_t lun_attr; /* passed to CTL */
#ifdef CS_BE_CONFIG_MOVE_DONE_IS_NOT_USED
be_func_t config_move_done; /* passed to backend */
#endif


@ -145,6 +145,8 @@ struct ctl_be_block_lun;
typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
struct ctl_be_block_io *beio);
typedef uint64_t (*cbb_getattr_t)(struct ctl_be_block_lun *be_lun,
const char *attrname);
/*
* Backend LUN structure. There is a 1:1 mapping between a block device
@ -161,6 +163,7 @@ struct ctl_be_block_lun {
cbb_dispatch_t dispatch;
cbb_dispatch_t lun_flush;
cbb_dispatch_t unmap;
cbb_getattr_t getattr;
uma_zone_t lun_zone;
uint64_t size_blocks;
uint64_t size_bytes;
@ -240,6 +243,8 @@ static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
struct ctl_be_block_io *beio);
static uint64_t ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun,
const char *attrname);
static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
union ctl_io *io);
static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
@ -272,6 +277,7 @@ static void ctl_be_block_lun_config_status(void *be_lun,
static int ctl_be_block_config_write(union ctl_io *io);
static int ctl_be_block_config_read(union ctl_io *io);
static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
static uint64_t ctl_be_block_lun_attr(void *be_lun, const char *attrname);
int ctl_be_block_init(void);
static struct ctl_backend_driver ctl_be_block_driver =
@ -284,7 +290,8 @@ static struct ctl_backend_driver ctl_be_block_driver =
.config_read = ctl_be_block_config_read,
.config_write = ctl_be_block_config_write,
.ioctl = ctl_be_block_ioctl,
.lun_info = ctl_be_block_lun_info
.lun_info = ctl_be_block_lun_info,
.lun_attr = ctl_be_block_lun_attr
};
MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
@ -1012,6 +1019,24 @@ ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
}
}
static uint64_t
ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, const char *attrname)
{
struct ctl_be_block_devdata *dev_data = &be_lun->backend.dev;
struct diocgattr_arg arg;
int error;
if (dev_data->csw == NULL || dev_data->csw->d_ioctl == NULL)
return (UINT64_MAX);
strlcpy(arg.name, attrname, sizeof(arg.name));
arg.len = sizeof(arg.value.off);
error = dev_data->csw->d_ioctl(dev_data->cdev,
DIOCGATTR, (caddr_t)&arg, FREAD, curthread);
if (error != 0)
return (UINT64_MAX);
return (arg.value.off);
}
static void
ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio)
{
@ -1647,6 +1672,7 @@ ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
be_lun->dispatch = ctl_be_block_dispatch_dev;
be_lun->lun_flush = ctl_be_block_flush_dev;
be_lun->unmap = ctl_be_block_unmap_dev;
be_lun->getattr = ctl_be_block_getattr_dev;
error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
if (error) {
@ -1993,10 +2019,10 @@ ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
}
num_threads = tmp_num_threads;
}
unmap = 0;
unmap = (be_lun->dispatch == ctl_be_block_dispatch_zvol);
value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap");
if (value != NULL && strcmp(value, "on") == 0)
unmap = 1;
if (value != NULL)
unmap = (strcmp(value, "on") == 0);
be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
@ -2582,6 +2608,16 @@ ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
return (retval);
}
static uint64_t
ctl_be_block_lun_attr(void *be_lun, const char *attrname)
{
struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)be_lun;
if (lun->getattr == NULL)
return (UINT64_MAX);
return (lun->getattr(lun, attrname));
}
int
ctl_be_block_init(void)
{


@ -463,6 +463,11 @@ ctl_build_ua(ctl_ua_type *ua_type, struct scsi_sense_data *sense,
asc = 0x2A;
ascq = 0x09;
break;
case CTL_UA_THIN_PROV_THRES:
/* 38h/07h THIN PROVISIONING SOFT THRESHOLD REACHED */
asc = 0x38;
ascq = 0x07;
break;
default:
panic("ctl_build_ua: Unknown UA %x", ua_to_build);
}


@ -302,6 +302,17 @@ struct ctl_page_index {
#define CTL_PAGE_DEFAULT 0x02
#define CTL_PAGE_SAVED 0x03
#define CTL_NUM_LBP_PARAMS 4
#define CTL_NUM_LBP_THRESH 4
#define CTL_LBP_EXPONENT 11 /* 2048 sectors */
#define CTL_LBP_PERIOD 10 /* 10 seconds */
#define CTL_LBP_UA_PERIOD 300 /* 5 minutes */
struct ctl_logical_block_provisioning_page {
struct scsi_logical_block_provisioning_page main;
struct scsi_logical_block_provisioning_page_descr descr[CTL_NUM_LBP_THRESH];
};
static const struct ctl_page_index page_index_template[] = {
{SMS_RW_ERROR_RECOVERY_PAGE, 0, sizeof(struct scsi_da_rw_recovery_page), NULL,
CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL},
@ -316,7 +327,7 @@ static const struct ctl_page_index page_index_template[] = {
{SMS_INFO_EXCEPTIONS_PAGE, 0, sizeof(struct scsi_info_exceptions_page), NULL,
CTL_PAGE_FLAG_NONE, NULL, NULL},
{SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 0x02,
sizeof(struct scsi_logical_block_provisioning_page), NULL,
sizeof(struct ctl_logical_block_provisioning_page), NULL,
CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL},
{SMS_VENDOR_SPECIFIC_PAGE | SMPH_SPF, DBGCNF_SUBPAGE_CODE,
sizeof(struct copan_debugconf_subpage), NULL, CTL_PAGE_FLAG_NONE,
@ -333,7 +344,7 @@ struct ctl_mode_pages {
struct scsi_caching_page caching_page[4];
struct scsi_control_page control_page[4];
struct scsi_info_exceptions_page ie_page[4];
struct scsi_logical_block_provisioning_page lbp_page[4];
struct ctl_logical_block_provisioning_page lbp_page[4];
struct copan_debugconf_subpage debugconf_subpage[4];
struct ctl_page_index index[CTL_NUM_MODE_PAGES];
};
@ -343,6 +354,8 @@ static const struct ctl_page_index log_page_index_template[] = {
CTL_PAGE_FLAG_NONE, NULL, NULL},
{SLS_SUPPORTED_PAGES_PAGE, SLS_SUPPORTED_SUBPAGES_SUBPAGE, 0, NULL,
CTL_PAGE_FLAG_NONE, NULL, NULL},
{SLS_LOGICAL_BLOCK_PROVISIONING, 0, 0, NULL,
CTL_PAGE_FLAG_NONE, ctl_lbp_log_sense_handler, NULL},
};
#define CTL_NUM_LOG_PAGES sizeof(log_page_index_template)/ \
@ -351,6 +364,7 @@ static const struct ctl_page_index log_page_index_template[] = {
struct ctl_log_pages {
uint8_t pages_page[CTL_NUM_LOG_PAGES];
uint8_t subpages_page[CTL_NUM_LOG_PAGES * 2];
uint8_t lbp_page[12*CTL_NUM_LBP_PARAMS];
struct ctl_page_index index[CTL_NUM_LOG_PAGES];
};
@ -411,6 +425,7 @@ struct ctl_lun {
struct scsi_sense_data pending_sense[CTL_MAX_INITIATORS];
#endif
ctl_ua_type pending_ua[CTL_MAX_INITIATORS];
time_t lasttpt;
struct ctl_mode_pages mode_pages;
struct ctl_log_pages log_pages;
struct ctl_lun_io_stats stats;


@ -559,6 +559,7 @@ struct scsi_log_sense
#define SLS_ERROR_VERIFY_PAGE 0x05
#define SLS_ERROR_NONMEDIUM_PAGE 0x06
#define SLS_ERROR_LASTN_PAGE 0x07
#define SLS_LOGICAL_BLOCK_PROVISIONING 0x0c
#define SLS_SELF_TEST_PAGE 0x10
#define SLS_IE_PAGE 0x2f
#define SLS_PAGE_CTRL_MASK 0xC0
@ -740,6 +741,11 @@ struct scsi_info_exceptions_page {
struct scsi_logical_block_provisioning_page_descr {
uint8_t flags;
#define SLBPPD_ENABLED 0x80
#define SLBPPD_TYPE_MASK 0x38
#define SLBPPD_ARMING_MASK 0x07
#define SLBPPD_ARMING_DEC 0x02
#define SLBPPD_ARMING_INC 0x01
uint8_t resource;
uint8_t reserved[2];
uint8_t count[4];


@ -2459,10 +2459,38 @@ zvol_geom_start(struct bio *bp)
goto enqueue;
zvol_strategy(bp);
break;
case BIO_GETATTR:
case BIO_GETATTR: {
spa_t *spa = dmu_objset_spa(zv->zv_objset);
uint64_t refd, avail, usedobjs, availobjs, val;
if (g_handleattr_int(bp, "GEOM::candelete", 1))
return;
if (strcmp(bp->bio_attribute, "blocksavail") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
if (g_handleattr_off_t(bp, "blocksavail",
avail / DEV_BSIZE))
return;
} else if (strcmp(bp->bio_attribute, "blocksused") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
if (g_handleattr_off_t(bp, "blocksused",
refd / DEV_BSIZE))
return;
} else if (strcmp(bp->bio_attribute, "poolblocksavail") == 0) {
avail = metaslab_class_get_space(spa_normal_class(spa));
avail -= metaslab_class_get_alloc(spa_normal_class(spa));
if (g_handleattr_off_t(bp, "poolblocksavail",
avail / DEV_BSIZE))
return;
} else if (strcmp(bp->bio_attribute, "poolblocksused") == 0) {
refd = metaslab_class_get_alloc(spa_normal_class(spa));
if (g_handleattr_off_t(bp, "poolblocksused",
refd / DEV_BSIZE))
return;
}
/* FALLTHROUGH */
}
default:
g_io_deliver(bp, EOPNOTSUPP);
break;
@ -2861,6 +2889,30 @@ zvol_d_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct threa
case DIOCGSTRIPEOFFSET:
*(off_t *)data = 0;
break;
case DIOCGATTR: {
spa_t *spa = dmu_objset_spa(zv->zv_objset);
struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
uint64_t refd, avail, usedobjs, availobjs;
if (strcmp(arg->name, "blocksavail") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
arg->value.off = avail / DEV_BSIZE;
} else if (strcmp(arg->name, "blocksused") == 0) {
dmu_objset_space(zv->zv_objset, &refd, &avail,
&usedobjs, &availobjs);
arg->value.off = refd / DEV_BSIZE;
} else if (strcmp(arg->name, "poolblocksavail") == 0) {
avail = metaslab_class_get_space(spa_normal_class(spa));
avail -= metaslab_class_get_alloc(spa_normal_class(spa));
arg->value.off = avail / DEV_BSIZE;
} else if (strcmp(arg->name, "poolblocksused") == 0) {
refd = metaslab_class_get_alloc(spa_normal_class(spa));
arg->value.off = refd / DEV_BSIZE;
} else
error = ENOIOCTL;
break;
}
default:
error = ENOIOCTL;
}


@ -510,6 +510,16 @@ g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread
if (error == 0 && *(char *)data == '\0')
error = ENOENT;
break;
case DIOCGATTR: {
struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
if (arg->len > sizeof(arg->value)) {
error = EINVAL;
break;
}
error = g_io_getattr(arg->name, cp, &arg->len, &arg->value);
break;
}
default:
if (cp->provider->geom->ioctl != NULL) {
error = cp->provider->geom->ioctl(cp->provider, cmd, data, fflag, td);


@ -124,4 +124,14 @@ void disk_err(struct bio *bp, const char *what, int blkdone, int nl);
* occupant of that location.
*/
struct diocgattr_arg {
char name[64];
int len;
union {
char str[DISK_IDENT_SIZE];
off_t off;
} value;
};
#define DIOCGATTR _IOWR('d', 142, struct diocgattr_arg)
#endif /* _SYS_DISK_H_ */
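
Since geom_dev.c above forwards DIOCGATTR to g_io_getattr() and zvol.c answers
the matching attribute requests, the same counters CTL polls can also be read
from userland. A minimal sketch follows; the ZVOL device path is hypothetical,
and values come back in DEV_BSIZE (512-byte) units as exported above.

/*
 * Minimal userland sketch: query the thin provisioning counters that
 * zvol.c and geom_dev.c export above.
 */
#include <sys/param.h>
#include <sys/disk.h>
#include <sys/ioctl.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct diocgattr_arg arg;
	const char *attrs[] = {
		"blocksavail", "blocksused",
		"poolblocksavail", "poolblocksused"
	};
	int fd, i;

	/* Hypothetical ZVOL-backed GEOM provider. */
	fd = open("/dev/zvol/tank/vol0", O_RDONLY);
	if (fd < 0)
		err(1, "open");

	for (i = 0; i < 4; i++) {
		memset(&arg, 0, sizeof(arg));
		strlcpy(arg.name, attrs[i], sizeof(arg.name));
		arg.len = sizeof(arg.value.off);	/* request an off_t */
		if (ioctl(fd, DIOCGATTR, &arg) == 0)
			printf("%s: %jd blocks (%jd bytes)\n", attrs[i],
			    (intmax_t)arg.value.off,
			    (intmax_t)arg.value.off * DEV_BSIZE);
		else
			warn("%s", attrs[i]);
	}
	return (0);
}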


@ -34,7 +34,7 @@
.\" $Id: //depot/users/kenm/FreeBSD-test2/usr.sbin/ctladm/ctladm.8#3 $
.\" $FreeBSD$
.\"
.Dd October 26, 2014
.Dd November 5, 2014
.Dt CTLADM 8
.Os
.Sh NAME
@ -1003,6 +1003,14 @@ Specifies nominal form factor of the device: 0 -- not reported, 1 -- 5.25",
2 -- 3.5", 3 -- 2.5", 4 -- 1.8", 5 -- less then 1.8".
.It Va unmap
Set to "on", enables UNMAP support for the LUN, if supported by the backend.
.It Va avail-threshold
.It Va used-threshold
.It Va pool-avail-threshold
.It Va pool-used-threshold
Set per-LUN and per-pool thin provisioning soft thresholds for ZVOL-backed LUNs.
The LUN will establish a UNIT ATTENTION condition if its own or the pool's
available space falls below the configured avail values, or if its own or the
pool's used space rises above the configured used values.
.It Va writecache
Set to "off", disables write caching for the LUN, if supported by the backend.
.El