
Merge ^/head r287680 through r287877.

Dimitry Andric 2015-09-16 22:35:59 +00:00
commit a1cb6af119
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang370-import/; revision=287878
197 changed files with 4923 additions and 3689 deletions

View File

@ -1258,7 +1258,7 @@ _elftoolchain_libs= lib/libelf lib/libdwarf
legacy:
.if ${BOOTSTRAPPING} < 800107 && ${BOOTSTRAPPING} != 0
@echo "ERROR: Source upgrades from versions prior to 8.0 not supported."; \
@echo "ERROR: Source upgrades from versions prior to 8.0 are not supported."; \
false
.endif
.for _tool in tools/build ${_elftoolchain_libs}
@ -1355,11 +1355,8 @@ ${_bt}-usr.bin/clang/clang-tblgen: ${_bt}-lib/clang/libllvmtablegen ${_bt}-lib/c
${_bt}-usr.bin/clang/tblgen: ${_bt}-lib/clang/libllvmtablegen ${_bt}-lib/clang/libllvmsupport
.endif
# ELF Tool Chain libraries are needed for ELF tools and dtrace tools.
# dtrace tools are required for older bootstrap env and cross-build
# pre libdwarf
.if ${BOOTSTRAPPING} < 1100006 || (${MACHINE} != ${TARGET} || \
${MACHINE_ARCH} != ${TARGET_ARCH})
# Rebuild ctfconvert and ctfmerge to avoid difficult-to-diagnose failures
# resulting from missing bug fixes or ELF Toolchain updates.
.if ${MK_CDDL} != "no"
_dtrace_tools= cddl/usr.bin/sgsmsg cddl/lib/libctf cddl/usr.bin/ctfconvert \
cddl/usr.bin/ctfmerge
@ -1367,7 +1364,6 @@ _dtrace_tools= cddl/usr.bin/sgsmsg cddl/lib/libctf cddl/usr.bin/ctfconvert \
${_bt}-cddl/usr.bin/ctfconvert: ${_bt}-cddl/lib/libctf
${_bt}-cddl/usr.bin/ctfmerge: ${_bt}-cddl/lib/libctf
.endif
.endif
# Default to building the GPL DTC, but build the BSDL one if users explicitly
# request it.

View File

@ -132,6 +132,8 @@ typedef struct msg_string {
static msg_string *msg_head;
static msg_string *msg_tail;
int aok;
/*
* message_append() is responsible for both inserting strings into
* the master Str_tbl as well as maintaining a list of the

View File

@ -1205,7 +1205,9 @@ snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp)
if (BP_IS_HOLE(bp)) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), "B=%llu",
buflen - strlen(blkbuf),
"%llxL B=%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)bp->blk_birth);
} else {
(void) snprintf(blkbuf + strlen(blkbuf),

View File

@ -31,7 +31,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd July 30, 2015
.Dd September 14, 2015
.Dt ZFS 8
.Os
.Sh NAME
@ -2144,7 +2144,8 @@ Property name
.It value
Property value
.It source
Property source. Can either be local, default, temporary, inherited, or none
Property source. Can either be local, default, temporary, inherited, received,
or none
(\&-).
.El
.Pp
@ -2210,8 +2211,11 @@ The default value is all sources.
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Ns ...
.Xc
.Pp
Clears the specified property, causing it to be inherited from an ancestor. If
no ancestor has the property set, then the default value is used. See the
Clears the specified property, causing it to be inherited from an ancestor,
or restored to the default value if no ancestor has the property set.
With the
.Fl S
option, the property is instead reverted to the received value, if one exists.
See the
.Qq Sx Properties
section for a listing of default values, and details on which properties can be
inherited.
@ -2219,8 +2223,10 @@ inherited.
.It Fl r
Recursively inherit the given property for all children.
.It Fl S
For properties with a received value, revert to this value. This flag has no
effect on properties that do not have a received value.
Revert the property to the received value if one exists; otherwise operate as
if the
.Fl S
option had not been specified.
.El
.It Xo
.Nm

View File

@ -1928,9 +1928,13 @@ zfs_do_inherit(int argc, char **argv)
if (prop == ZFS_PROP_QUOTA ||
prop == ZFS_PROP_RESERVATION ||
prop == ZFS_PROP_REFQUOTA ||
prop == ZFS_PROP_REFRESERVATION)
prop == ZFS_PROP_REFRESERVATION) {
(void) fprintf(stderr, gettext("use 'zfs set "
"%s=none' to clear\n"), propname);
(void) fprintf(stderr, gettext("use 'zfs "
"inherit -S %s' to revert to received "
"value\n"), propname);
}
return (1);
}
if (received && (prop == ZFS_PROP_VOLSIZE ||

View File

@ -110,7 +110,6 @@ typedef struct ipv4info {
* These values are NULL if the packet is not IPv6.
*/
typedef struct in6_addr in6_addr_t;
typedef struct ip6_hdr ip6_t;
typedef struct ipv6info {
uint8_t ipv6_ver; /* IP version (6) */
uint8_t ipv6_tclass; /* traffic class */
@ -123,7 +122,7 @@ typedef struct ipv6info {
in6_addr_t *ipv6_dst; /* destination address */
string ipv6_saddr; /* source address, string */
string ipv6_daddr; /* destination address, string */
ip6_t *ipv6_hdr; /* pointer to raw header */
struct ip6_hdr *ipv6_hdr; /* pointer to raw header */
} ipv6info_t;
#pragma D binding "1.5" IPPROTO_IP
@ -282,5 +281,5 @@ translator ipv6info_t < struct ip6_hdr *p > {
ipv6_dst = p == NULL ? 0 : (in6_addr_t *)&p->ip6_dst;
ipv6_saddr = p == NULL ? 0 : inet_ntoa6(&p->ip6_src);
ipv6_daddr = p == NULL ? 0 : inet_ntoa6(&p->ip6_dst);
ipv6_hdr = (ip6_t *)p;
ipv6_hdr = p;
};

View File

@ -103,11 +103,15 @@ typedef struct tcpsinfo {
int32_t tcps_state; /* TCP state */
uint32_t tcps_iss; /* Initial sequence # sent */
uint32_t tcps_suna; /* sequence # sent but unacked */
uint32_t tcps_smax; /* highest sequence number sent */
uint32_t tcps_snxt; /* next sequence # to send */
uint32_t tcps_rack; /* sequence # we have acked */
uint32_t tcps_rnxt; /* next sequence # expected */
uint32_t tcps_swnd; /* send window size */
int32_t tcps_snd_ws; /* send window scaling */
uint32_t tcps_swl1; /* window update seg seq number */
uint32_t tcps_swl2; /* window update seg ack number */
uint32_t tcps_rup; /* receive urgent pointer */
uint32_t tcps_rwnd; /* receive window size */
int32_t tcps_rcv_ws; /* receive window scaling */
uint32_t tcps_cwnd; /* congestion window */
@ -117,7 +121,8 @@ typedef struct tcpsinfo {
uint32_t tcps_rto; /* round-trip timeout, msec */
uint32_t tcps_mss; /* max segment size */
int tcps_retransmit; /* retransmit send event, boolean */
int tcps_srtt; /* smoothed RTT in units of (TCP_RTT_SCALE*hz) */
int tcps_srtt; /* smoothed RTT in units of (TCP_RTT_SCALE*hz) */
int tcps_debug; /* socket has SO_DEBUG set */
} tcpsinfo_t;
/*
@ -188,12 +193,16 @@ translator tcpsinfo_t < struct tcpcb *p > {
tcps_state = p == NULL ? -1 : p->t_state;
tcps_iss = p == NULL ? 0 : p->iss;
tcps_suna = p == NULL ? 0 : p->snd_una;
tcps_smax = p == NULL ? 0 : p->snd_max;
tcps_snxt = p == NULL ? 0 : p->snd_nxt;
tcps_rack = p == NULL ? 0 : p->last_ack_sent;
tcps_rnxt = p == NULL ? 0 : p->rcv_nxt;
tcps_swnd = p == NULL ? -1 : p->snd_wnd;
tcps_snd_ws = p == NULL ? -1 : p->snd_scale;
tcps_swl1 = p == NULL ? -1 : p->snd_wl1;
tcps_swl2 = p == NULL ? -1 : p->snd_wl2;
tcps_rwnd = p == NULL ? -1 : p->rcv_wnd;
tcps_rup = p == NULL ? -1 : p->rcv_up;
tcps_rcv_ws = p == NULL ? -1 : p->rcv_scale;
tcps_cwnd = p == NULL ? -1 : p->snd_cwnd;
tcps_cwnd_ssthresh = p == NULL ? -1 : p->snd_ssthresh;
@ -203,6 +212,8 @@ translator tcpsinfo_t < struct tcpcb *p > {
tcps_mss = p == NULL ? -1 : p->t_maxseg;
tcps_retransmit = p == NULL ? -1 : p->t_rxtshift > 0 ? 1 : 0;
tcps_srtt = p == NULL ? -1 : p->t_srtt; /* smoothed RTT in units of (TCP_RTT_SCALE*hz) */
tcps_debug = p == NULL ? 0 :
p->t_inpcb->inp_socket->so_options & 1;
};
#pragma D binding "1.6.3" translator
@ -242,3 +253,123 @@ translator tcpinfoh_t < struct tcphdr *p > {
translator tcplsinfo_t < int s > {
tcps_state = s;
};
/* Support for TCP debug */
#pragma D binding "1.12.1" TA_INPUT
inline int TA_INPUT = 0;
#pragma D binding "1.12.1" TA_OUTPUT
inline int TA_OUTPUT = 1;
#pragma D binding "1.12.1" TA_USER
inline int TA_USER = 2;
#pragma D binding "1.12.1" TA_RESPOND
inline int TA_RESPOND = 3;
#pragma D binding "1.12.1" TA_DROP
inline int TA_DROP = 4;
/* direction strings. */
#pragma D binding "1.12.1" tcpdebug_dir_string
inline string tcpdebug_dir_string[uint8_t direction] =
direction == TA_INPUT ? "input" :
direction == TA_OUTPUT ? "output" :
direction == TA_USER ? "user" :
direction == TA_RESPOND ? "respond" :
direction == TA_OUTPUT ? "drop" :
"unknown" ;
#pragma D binding "1.12.1" tcpflag_string
inline string tcpflag_string[uint8_t flags] =
flags & TH_FIN ? "FIN" :
flags & TH_SYN ? "SYN" :
flags & TH_RST ? "RST" :
flags & TH_PUSH ? "PUSH" :
flags & TH_ACK ? "ACK" :
flags & TH_URG ? "URG" :
flags & TH_ECE ? "ECE" :
flags & TH_CWR ? "CWR" :
"unknown" ;
#pragma D binding "1.12.1" PRU_ATTACH
inline int PRU_ATTACH = 0;
#pragma D binding "1.12.1" PRU_DETACH
inline int PRU_DETACH = 1;
#pragma D binding "1.12.1" PRU_BIND
inline int PRU_BIND = 2;
#pragma D binding "1.12.1" PRU_LISTEN
inline int PRU_LISTEN = 3;
#pragma D binding "1.12.1" PRU_CONNECT
inline int PRU_CONNECT = 4;
#pragma D binding "1.12.1" PRU_ACCEPT
inline int PRU_ACCEPT = 5 ;
#pragma D binding "1.12.1" PRU_DISCONNECT
inline int PRU_DISCONNECT= 6;
#pragma D binding "1.12.1" PRU_SHUTDOWN
inline int PRU_SHUTDOWN = 7;
#pragma D binding "1.12.1" PRU_RCVD
inline int PRU_RCVD = 8;
#pragma D binding "1.12.1" PRU_SEND
inline int PRU_SEND = 9;
#pragma D binding "1.12.1" PRU_ABORT
inline int PRU_ABORT = 10;
#pragma D binding "1.12.1" PRU_CONTROL
inline int PRU_CONTROL = 11;
#pragma D binding "1.12.1" PRU_SENSE
inline int PRU_SENSE = 12;
#pragma D binding "1.12.1" PRU_RCVOOB
inline int PRU_RCVOOB = 13;
#pragma D binding "1.12.1" PRU_SENDOOB
inline int PRU_SENDOOB = 14;
#pragma D binding "1.12.1" PRU_SOCKADDR
inline int PRU_SOCKADDR = 15;
#pragma D binding "1.12.1" PRU_PEERADDR
inline int PRU_PEERADDR = 16;
#pragma D binding "1.12.1" PRU_CONNECT2
inline int PRU_CONNECT2 = 17;
#pragma D binding "1.12.1" PRU_FASTTIMO
inline int PRU_FASTTIMO = 18;
#pragma D binding "1.12.1" PRU_SLOWTIMO
inline int PRU_SLOWTIMO = 19;
#pragma D binding "1.12.1" PRU_PROTORCV
inline int PRU_PROTORCV = 20;
#pragma D binding "1.12.1" PRU_PROTOSEND
inline int PRU_PROTOSEND = 21;
#pragma D binding "1.12.1" PRU_SEND_EOF
inline int PRU_SEND_EOF = 22;
#pragma D binding "1.12.1" PRU_SOSETLABEL
inline int PRU_SOSETLABEL = 23;
#pragma D binding "1.12.1" PRU_CLOSE
inline int PRU_CLOSE = 24;
#pragma D binding "1.12.1" PRU_FLUSH
inline int PRU_FLUSH = 25;
#pragma D binding "1.12.1" prureq_string
inline string prureq_string[uint8_t req] =
req == PRU_ATTACH ? "ATTACH" :
req == PRU_DETACH ? "DETACH" :
req == PRU_BIND ? "BIND" :
req == PRU_LISTEN ? "LISTEN" :
req == PRU_CONNECT ? "CONNECT" :
req == PRU_ACCEPT ? "ACCEPT" :
req == PRU_DISCONNECT ? "DISCONNECT" :
req == PRU_SHUTDOWN ? "SHUTDOWN" :
req == PRU_RCVD ? "RCVD" :
req == PRU_SEND ? "SEND" :
req == PRU_ABORT ? "ABORT" :
req == PRU_CONTROL ? "CONTROL" :
req == PRU_SENSE ? "SENSE" :
req == PRU_RCVOOB ? "RCVOOB" :
req == PRU_SENDOOB ? "SENDOOB" :
req == PRU_SOCKADDR ? "SOCKADDR" :
req == PRU_PEERADDR ? "PEERADDR" :
req == PRU_CONNECT2 ? "CONNECT2" :
req == PRU_FASTTIMO ? "FASTTIMO" :
req == PRU_SLOWTIMO ? "SLOWTIMO" :
req == PRU_PROTORCV ? "PROTORCV" :
req == PRU_PROTOSEND ? "PROTOSEND" :
req == PRU_SEND ? "SEND_EOF" :
req == PRU_SOSETLABEL ? "SOSETLABEL" :
req == PRU_CLOSE ? "CLOSE" :
req == PRU_FLUSH ? "FLUSE" :
"unknown" ;

View File

@ -255,6 +255,7 @@ copies:
${INSTALL} -C -o ${BINOWN} -g ${BINGRP} -m 444 teken.h \
${DESTDIR}${INCLUDEDIR}/teken
.if ${MK_META_MODE} == "yes"
cd ${.OBJDIR}
touch ${.TARGET}
.endif
@ -372,6 +373,7 @@ symlinks:
${DESTDIR}${INCLUDEDIR}/rpc; \
done
.if ${MK_META_MODE} == "yes"
cd ${.OBJDIR}
touch ${.TARGET}
.endif

View File

@ -64,7 +64,7 @@ __rec_open(const char *fname, int flags, int mode, const RECNOINFO *openinfo,
int rfd, sverrno;
/* Open the user's file -- if this fails, we're done. */
if (fname != NULL && (rfd = _open(fname, flags, mode)) < 0)
if (fname != NULL && (rfd = _open(fname, flags | O_CLOEXEC, mode)) < 0)
return (NULL);
/* Create a btree in memory (backed by disk). */

View File

@ -635,9 +635,7 @@ __fts_set_clientptr_44bsd(FTS *sp, void *clientptr)
* been found, cutting the stat calls by about 2/3.
*/
static FTSENT *
fts_build(sp, type)
FTS *sp;
int type;
fts_build(FTS *sp, int type)
{
struct dirent *dp;
FTSENT *p, *head;
@ -901,10 +899,7 @@ mem1: saved_errno = errno;
}
static u_short
fts_stat(sp, p, follow)
FTS *sp;
FTSENT *p;
int follow;
fts_stat(FTS *sp, FTSENT *p, int follow)
{
FTSENT *t;
dev_t dev;
@ -999,10 +994,7 @@ fts_compar(const void *a, const void *b)
}
static FTSENT *
fts_sort(sp, head, nitems)
FTS *sp;
FTSENT *head;
int nitems;
fts_sort(FTS *sp, FTSENT *head, int nitems)
{
FTSENT **ap, *p;
@ -1031,10 +1023,7 @@ fts_sort(sp, head, nitems)
}
static FTSENT *
fts_alloc(sp, name, namelen)
FTS *sp;
char *name;
int namelen;
fts_alloc(FTS *sp, char *name, int namelen)
{
FTSENT *p;
size_t len;
@ -1081,8 +1070,7 @@ fts_alloc(sp, name, namelen)
}
static void
fts_lfree(head)
FTSENT *head;
fts_lfree(FTSENT *head)
{
FTSENT *p;
@ -1100,9 +1088,7 @@ fts_lfree(head)
* plus 256 bytes so don't realloc the path 2 bytes at a time.
*/
static int
fts_palloc(sp, more)
FTS *sp;
size_t more;
fts_palloc(FTS *sp, size_t more)
{
sp->fts_pathlen += more + 256;
@ -1127,9 +1113,7 @@ fts_palloc(sp, more)
* already returned.
*/
static void
fts_padjust(sp, head)
FTS *sp;
FTSENT *head;
fts_padjust(FTS *sp, FTSENT *head)
{
FTSENT *p;
char *addr = sp->fts_path;
@ -1153,8 +1137,7 @@ fts_padjust(sp, head)
}
static size_t
fts_maxarglen(argv)
char * const *argv;
fts_maxarglen(char * const *argv)
{
size_t len, max;
@ -1170,11 +1153,7 @@ fts_maxarglen(argv)
* Assumes p->fts_dev and p->fts_ino are filled in.
*/
static int
fts_safe_changedir(sp, p, fd, path)
FTS *sp;
FTSENT *p;
int fd;
char *path;
fts_safe_changedir(FTS *sp, FTSENT *p, int fd, char *path)
{
int ret, oerrno, newfd;
struct stat sb;
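
The fts.c hunks above (and the getloadavg.c, getmntinfo.c, nlist.c, strtofflags.c, gmon.c, heapsort.c, mergesort.c, and radixsort.c hunks that follow) convert old-style K&R function definitions to ANSI C prototype definitions. A minimal standalone sketch of the transformation, using a hypothetical function:

#include <stdio.h>

/*
 * Old K&R style, as removed by these hunks:
 *
 *	static int
 *	add(a, b)
 *		int a, b;
 *	{ ... }
 *
 * ANSI prototype style, as added; the compiler can now type-check
 * arguments at every call site.
 */
static int
add(int a, int b)
{
	return (a + b);
}

int
main(void)
{
	printf("%d\n", add(2, 3));
	return (0);
}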

View File

@ -1238,7 +1238,7 @@ compat_setgrent(void *retval, void *mdata, va_list ap)
int rv, stayopen;
#define set_setent(x, y) do { \
int i; \
unsigned int i; \
\
for (i = 0; i < (sizeof(x)/sizeof(x[0])) - 1; i++) \
x[i].mdata = (void *)y; \
@ -1308,7 +1308,7 @@ compat_group(void *retval, void *mdata, va_list ap)
int rv, stayopen, *errnop;
#define set_lookup_type(x, y) do { \
int i; \
unsigned int i; \
\
for (i = 0; i < (sizeof(x)/sizeof(x[0])) - 1; i++) \
x[i].mdata = (void *)y; \
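
The int-to-unsigned index changes in this file (and the similar ones in getpwent.c and nlist.c below) silence signed/unsigned comparison warnings: sizeof() yields the unsigned type size_t, so comparing it against a signed int index draws -Wsign-compare. A minimal standalone sketch of the pattern (the table contents are hypothetical):

#include <stdio.h>

static const char *sources[] = { "files", "dns", "nis", NULL };

int
main(void)
{
	unsigned int i;

	/*
	 * sizeof(x)/sizeof(x[0]) has type size_t (unsigned); an
	 * unsigned loop index keeps the comparison warning-free.
	 */
	for (i = 0; i < (sizeof(sources) / sizeof(sources[0])) - 1; i++)
		printf("%u: %s\n", i, sources[i]);
	return (0);
}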

View File

@ -48,9 +48,7 @@ __FBSDID("$FreeBSD$");
* Return number of samples retrieved, or -1 on error.
*/
int
getloadavg(loadavg, nelem)
double loadavg[];
int nelem;
getloadavg(double loadavg[], int nelem)
{
struct loadavg loadinfo;
int i, mib[2];

View File

@ -42,9 +42,7 @@ __FBSDID("$FreeBSD$");
* Return information about mounted filesystems.
*/
int
getmntinfo(mntbufp, flags)
struct statfs **mntbufp;
int flags;
getmntinfo(struct statfs **mntbufp, int flags)
{
static struct statfs *mntbuf;
static int mntsize;

View File

@ -1607,7 +1607,8 @@ compat_redispatch(struct compat_state *st, enum nss_lookup_type how,
{ NULL, NULL, NULL }
};
void *discard;
int rv, e, i;
int rv, e;
unsigned int i;
for (i = 0; i < sizeof(dtab)/sizeof(dtab[0]) - 1; i++)
dtab[i].mdata = (void *)lookup_how;
@ -1702,7 +1703,7 @@ compat_setpwent(void *retval, void *mdata, va_list ap)
int rv, stayopen;
#define set_setent(x, y) do { \
int i; \
unsigned int i; \
\
for (i = 0; i < (sizeof(x)/sizeof(x[0])) - 1; i++) \
x[i].mdata = (void *)y; \

View File

@ -63,9 +63,7 @@ int __aout_fdnlist(int, struct nlist *);
int __elf_fdnlist(int, struct nlist *);
int
nlist(name, list)
const char *name;
struct nlist *list;
nlist(const char *name, struct nlist *list)
{
int fd, n;
@ -89,11 +87,10 @@ static struct nlist_handlers {
};
int
__fdnlist(fd, list)
int fd;
struct nlist *list;
__fdnlist(int fd, struct nlist *list)
{
int n = -1, i;
int n = -1;
unsigned int i;
for (i = 0; i < sizeof(nlist_fn) / sizeof(nlist_fn[0]); i++) {
n = (nlist_fn[i].fn)(fd, list);
@ -107,9 +104,7 @@ __fdnlist(fd, list)
#ifdef _NLIST_DO_AOUT
int
__aout_fdnlist(fd, list)
int fd;
struct nlist *list;
__aout_fdnlist(int fd, struct nlist *list)
{
struct nlist *p, *symtab;
caddr_t strtab, a_out_mmap;
@ -235,9 +230,7 @@ __elf_is_okay__(Elf_Ehdr *ehdr)
}
int
__elf_fdnlist(fd, list)
int fd;
struct nlist *list;
__elf_fdnlist(int fd, struct nlist *list)
{
struct nlist *p;
Elf_Off symoff = 0, symstroff = 0;
@ -377,11 +370,7 @@ __elf_fdnlist(fd, list)
* n_value and n_type members.
*/
static void
elf_sym_to_nlist(nl, s, shdr, shnum)
struct nlist *nl;
Elf_Sym *s;
Elf_Shdr *shdr;
int shnum;
elf_sym_to_nlist(struct nlist *nl, Elf_Sym *s, Elf_Shdr *shdr, int shnum)
{
nl->n_value = s->st_value;

View File

@ -94,14 +94,13 @@ static struct {
* are set, return the empty string.
*/
char *
fflagstostr(flags)
u_long flags;
fflagstostr(u_long flags)
{
char *string;
const char *sp;
char *dp;
u_long setflags;
int i;
u_int i;
if ((string = (char *)malloc(nmappings * (longestflaglen + 1))) == NULL)
return (NULL);
@ -128,9 +127,7 @@ fflagstostr(flags)
* to the offending token.
*/
int
strtofflags(stringp, setp, clrp)
char **stringp;
u_long *setp, *clrp;
strtofflags(char **stringp, u_long *setp, u_long *clrp)
{
char *string, *p;
int i;

View File

@ -70,9 +70,7 @@ void moncontrol(int);
static int hertz(void);
void
monstartup(lowpc, highpc)
u_long lowpc;
u_long highpc;
monstartup(u_long lowpc, u_long highpc)
{
int o;
char *cp;
@ -218,8 +216,7 @@ _mcleanup(void)
* all the data structures are ready.
*/
void
moncontrol(mode)
int mode;
moncontrol(int mode)
{
struct gmonparam *p = &_gmonparam;
@ -239,7 +236,7 @@ moncontrol(mode)
* if something goes wrong, we return 0, an impossible hertz.
*/
static int
hertz()
hertz(void)
{
struct itimerval tim;

View File

@ -147,16 +147,11 @@ typedef DECLARE_BLOCK(int, heapsort_block, const void *, const void *);
*/
#ifdef I_AM_HEAPSORT_B
int
heapsort_b(vbase, nmemb, size, compar)
void *vbase;
size_t nmemb, size;
heapsort_block compar;
heapsort_b(void *vbase, size_t nmemb, size_t size, heapsort_block compar)
#else
int
heapsort(vbase, nmemb, size, compar)
void *vbase;
size_t nmemb, size;
int (*compar)(const void *, const void *);
heapsort(void *vbase, size_t nmemb, size_t size,
int (*compar)(const void *, const void *))
#endif
{
size_t cnt, i, j, l;

View File

@ -104,14 +104,10 @@ static void insertionsort(u_char *, size_t, size_t, cmp_t);
*/
int
#ifdef I_AM_MERGESORT_B
mergesort_b(base, nmemb, size, cmp)
mergesort_b(void *base, size_t nmemb, size_t size, cmp_t cmp)
#else
mergesort(base, nmemb, size, cmp)
mergesort(void *base, size_t nmemb, size_t size, cmp_t cmp)
#endif
void *base;
size_t nmemb;
size_t size;
cmp_t cmp;
{
size_t i;
int sense;
@ -271,10 +267,7 @@ COPY: b = t;
* is defined. Otherwise simple pairwise merging is used.)
*/
void
setup(list1, list2, n, size, cmp)
size_t n, size;
u_char *list1, *list2;
cmp_t cmp;
setup(u_char *list1, u_char *list2, size_t n, size_t size, cmp_t cmp)
{
int i, length, size2, tmp, sense;
u_char *f1, *f2, *s, *l2, *last, *p2;
@ -345,10 +338,7 @@ setup(list1, list2, n, size, cmp)
* last 4 elements.
*/
static void
insertionsort(a, n, size, cmp)
u_char *a;
size_t n, size;
cmp_t cmp;
insertionsort(u_char *a, size_t n, size_t size, cmp_t cmp)
{
u_char *ai, *s, *t, *u, tmp;
int i;

View File

@ -88,10 +88,7 @@ static void r_sort_b(const u_char **, const u_char **, int, int,
}
int
radixsort(a, n, tab, endch)
const u_char **a, *tab;
int n;
u_int endch;
radixsort(const u_char **a, int n, const u_char *tab, u_int endch)
{
const u_char *tr;
int c;
@ -103,10 +100,7 @@ radixsort(a, n, tab, endch)
}
int
sradixsort(a, n, tab, endch)
const u_char **a, *tab;
int n;
u_int endch;
sradixsort(const u_char **a, int n, const u_char *tab, u_int endch)
{
const u_char *tr, **ta;
int c;
@ -131,11 +125,7 @@ sradixsort(a, n, tab, endch)
/* Unstable, in-place sort. */
static void
r_sort_a(a, n, i, tr, endch)
const u_char **a;
int n, i;
const u_char *tr;
u_int endch;
r_sort_a(const u_char **a, int n, int i, const u_char *tr, u_int endch)
{
static int count[256], nc, bmin;
int c;
@ -233,11 +223,8 @@ r_sort_a(a, n, i, tr, endch)
/* Stable sort, requiring additional memory. */
static void
r_sort_b(a, ta, n, i, tr, endch)
const u_char **a, **ta;
int n, i;
const u_char *tr;
u_int endch;
r_sort_b(const u_char **a, const u_char **ta, int n, int i, const u_char *tr,
u_int endch)
{
static int count[256], nc, bmin;
int c;
@ -304,12 +291,9 @@ r_sort_b(a, ta, n, i, tr, endch)
}
}
/* insertion sort */
static inline void
simplesort(a, n, b, tr, endch) /* insertion sort */
const u_char **a;
int n, b;
const u_char *tr;
u_int endch;
simplesort(const u_char **a, int n, int b, const u_char *tr, u_int endch)
{
u_char ch;
const u_char **ak, **ai, *s, *t;

View File

@ -28,7 +28,7 @@
.\" @(#)setuid.2 8.1 (Berkeley) 6/4/93
.\" $FreeBSD$
.\"
.Dd June 4, 1993
.Dd September 13, 2015
.Dt SETUID 2
.Os
.Sh NAME
@ -178,15 +178,10 @@ pseudocode(void)
int fd;
/* ... */
fd = open("/path/to/sensitive/data", O_RDWR);
fd = open("/path/to/sensitive/data", O_RDWR | O_CLOEXEC);
if (fd == -1)
err(1, "open");
/*
* Set close-on-exec flag; see fcntl(2) for more information.
*/
if (fcntl(fd, F_SETFD, FD_CLOEXEC) == -1)
err(1, "fcntl(F_SETFD)");
/* ... */
execve(path, argv, environ);
}
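
The change folds the separate fcntl(2) step into the open(2) call: passing O_CLOEXEC at open time sets the close-on-exec flag atomically, so there is no window in which another thread's fork()/execve() pair can leak the descriptor, as is possible between a plain open() and a later fcntl(F_SETFD, FD_CLOEXEC). A compilable sketch of the new idiom (the path is hypothetical):

#include <err.h>
#include <fcntl.h>

int
open_sensitive(void)
{
	int fd;

	/* Atomically open and mark close-on-exec; no fcntl() window. */
	fd = open("/path/to/sensitive/data", O_RDWR | O_CLOEXEC);
	if (fd == -1)
		err(1, "open");
	return (fd);
}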

View File

@ -21,6 +21,7 @@
# (by default, the directory above this one)
# PORTSDIR: location of ports tree to distribute (default: /usr/ports)
# DOCDIR: location of doc tree (default: /usr/doc)
# XTRADIR: xtra-bits-dir argument for <arch>/mkisoimages.sh
# NOPKG: if set, do not distribute third-party packages
# NOPORTS: if set, do not distribute ports tree
# NOSRC: if set, do not distribute source tree
@ -242,13 +243,13 @@ dvd: packagesystem
release.iso: disc1.iso
disc1.iso: disc1
sh ${.CURDIR}/${TARGET}/mkisoimages.sh -b ${VOLUME_LABEL}_CD ${.TARGET} disc1
sh ${.CURDIR}/${TARGET}/mkisoimages.sh -b ${VOLUME_LABEL}_CD ${.TARGET} disc1 ${XTRADIR}
dvd1.iso: dvd pkg-stage
sh ${.CURDIR}/${TARGET}/mkisoimages.sh -b ${VOLUME_LABEL}_DVD ${.TARGET} dvd
sh ${.CURDIR}/${TARGET}/mkisoimages.sh -b ${VOLUME_LABEL}_DVD ${.TARGET} dvd ${XTRADIR}
bootonly.iso: bootonly
sh ${.CURDIR}/${TARGET}/mkisoimages.sh -b ${VOLUME_LABEL}_BO ${.TARGET} bootonly
sh ${.CURDIR}/${TARGET}/mkisoimages.sh -b ${VOLUME_LABEL}_BO ${.TARGET} bootonly ${XTRADIR}
memstick: memstick.img
memstick.img: disc1

View File

@ -43,14 +43,16 @@ struct g_command class_commands[] = {
{
{ 'e', "error", "-1", G_TYPE_NUMBER },
{ 'o', "offset", "0", G_TYPE_NUMBER },
{ 'p', "stripesize", "0", G_TYPE_NUMBER },
{ 'P', "stripeoffset", "0", G_TYPE_NUMBER },
{ 'r', "rfailprob", "-1", G_TYPE_NUMBER },
{ 's', "size", "0", G_TYPE_NUMBER },
{ 'S', "secsize", "0", G_TYPE_NUMBER },
{ 'w', "wfailprob", "-1", G_TYPE_NUMBER },
G_OPT_SENTINEL
},
"[-v] [-e error] [-o offset] [-r rfailprob] [-s size] "
"[-S secsize] [-w wfailprob] dev ..."
"[-v] [-e error] [-o offset] [-p stripesize] [-P stripeoffset] "
"[-r rfailprob] [-s size] [-S secsize] [-w wfailprob] dev ..."
},
{ "configure", G_FLAG_VERBOSE, NULL,
{

View File

@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd April 14, 2013
.Dd September 15, 2015
.Dt GNOP 8
.Os
.Sh NAME
@ -36,6 +36,8 @@
.Op Fl v
.Op Fl e Ar error
.Op Fl o Ar offset
.Op Fl p Ar stripesize
.Op Fl P Ar stripeoffset
.Op Fl r Ar rfailprob
.Op Fl s Ar size
.Op Fl S Ar secsize
@ -115,6 +117,10 @@ Specifies the error number to return on failure.
Force the removal of the specified provider.
.It Fl o Ar offset
Where to begin on the original provider.
.It Fl p Ar stripesize
Value of the stripesize property of the transparent provider.
.It Fl P Ar stripeoffset
Value of the stripeoffset property of the transparent provider.
.It Fl r Ar rfailprob
Specifies read failure probability in percent.
.It Fl s Ar size

View File

@ -995,7 +995,7 @@ setifmetric(const char *val, int dummy __unused, int s,
strncpy(ifr.ifr_name, name, sizeof (ifr.ifr_name));
ifr.ifr_metric = atoi(val);
if (ioctl(s, SIOCSIFMETRIC, (caddr_t)&ifr) < 0)
warn("ioctl (set metric)");
err(1, "ioctl SIOCSIFMETRIC (set metric)");
}
static void
@ -1005,7 +1005,7 @@ setifmtu(const char *val, int dummy __unused, int s,
strncpy(ifr.ifr_name, name, sizeof (ifr.ifr_name));
ifr.ifr_mtu = atoi(val);
if (ioctl(s, SIOCSIFMTU, (caddr_t)&ifr) < 0)
warn("ioctl (set mtu)");
err(1, "ioctl SIOCSIFMTU (set mtu)");
}
static void
@ -1015,15 +1015,12 @@ setifname(const char *val, int dummy __unused, int s,
char *newname;
newname = strdup(val);
if (newname == NULL) {
warn("no memory to set ifname");
return;
}
if (newname == NULL)
err(1, "no memory to set ifname");
ifr.ifr_data = newname;
if (ioctl(s, SIOCSIFNAME, (caddr_t)&ifr) < 0) {
warn("ioctl (set name)");
free(newname);
return;
err(1, "ioctl SIOCSIFNAME (set name)");
}
strlcpy(name, newname, sizeof(name));
free(newname);
@ -1050,7 +1047,7 @@ setifdescr(const char *val, int dummy __unused, int s,
}
if (ioctl(s, SIOCSIFDESCR, (caddr_t)&ifr) < 0)
warn("ioctl (set descr)");
err(1, "ioctl SIOCSIFDESCR (set descr)");
free(newdescr);
}

share/dtrace/tcpdebug (new executable file, 165 lines)
View File

@ -0,0 +1,165 @@
#!/usr/sbin/dtrace -s
/*
* Copyright (c) 2015 George V. Neville-Neil
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
* The tcpdebug D script uses the tcp:kernel::debug tracepoints
* to replicate the action of turning on TCPDEBUG in a kernel configuration.
*
* A TCP debug statement shows a connection's
*
* direction: input, output, user, drop
* state: CLOSED, LISTEN, SYN_SENT, SYN_RCVD, ESTABLISHED,
* CLOSE_WAIT, FIN_WAIT_1, CLOSING, LAST_ACK, FIN_WAIT_2, TIME_WAIT
* sequence: sequence space
*
* congestion: rcv_nxt, rcv_wnd, rcv_up, snd_una, snd_nxt, snd_max,
* snd_wl1, snd_wl2, snd_wnd
*
* NOTE: Only sockets with SO_DEBUG set will be shown.
*
* Usage: tcpdebug
*/
#pragma D option quiet
tcp:kernel::debug-input
/args[0]->tcps_debug/
{
seq = args[1]->tcp_seq;
ack = args[1]->tcp_ack;
len = args[2]->ip_plength - sizeof(struct tcphdr);
flags = args[1]->tcp_flags;
printf("%p %s: input [%xu..%xu]", arg0,
tcp_state_string[args[0]->tcps_state], seq, seq + len);
printf("@%x, urp=%x", ack, args[1]->tcp_urgent);
printf("%s", flags != 0 ? "<" : "");
printf("%s", flags & TH_SYN ? "SYN," :"");
printf("%s", flags & TH_ACK ? "ACK," :"");
printf("%s", flags & TH_FIN ? "FIN," :"");
printf("%s", flags & TH_RST ? "RST," :"");
printf("%s", flags & TH_PUSH ? "PUSH," :"");
printf("%s", flags & TH_URG ? "URG," :"");
printf("%s", flags & TH_ECE ? "ECE," :"");
printf("%s", flags & TH_CWR ? "CWR" :"");
printf("%s", flags != 0 ? ">" : "");
printf("\n");
printf("\trcv_(nxt,wnd,up) (%x,%x,%x) snd_(una,nxt,max) (%x,%x,%x)\n",
args[0]->tcps_rnxt, args[0]->tcps_rwnd, args[0]->tcps_rup,
args[0]->tcps_suna, args[0]->tcps_snxt, args[0]->tcps_smax);
printf("\tsnd_(wl1,wl2,wnd) (%x,%x,%x)\n",
args[0]->tcps_swl1, args[0]->tcps_swl2, args[0]->tcps_swnd);
}
tcp:kernel::debug-output
/args[0]->tcps_debug/
{
seq = args[1]->tcp_seq;
ack = args[1]->tcp_ack;
len = args[2]->ip_plength - sizeof(struct tcphdr);
flags = args[1]->tcp_flags;
printf("%p %s: output [%x..%x]", arg0,
tcp_state_string[args[0]->tcps_state], seq, seq + len);
printf("@%x, urp=%x", ack, args[1]->tcp_urgent);
printf("%s", flags != 0 ? "<" : "");
printf("%s", flags & TH_SYN ? "SYN," :"");
printf("%s", flags & TH_ACK ? "ACK," :"");
printf("%s", flags & TH_FIN ? "FIN," :"");
printf("%s", flags & TH_RST ? "RST," :"");
printf("%s", flags & TH_PUSH ? "PUSH," :"");
printf("%s", flags & TH_URG ? "URG," :"");
printf("%s", flags & TH_ECE ? "ECE," :"");
printf("%s", flags & TH_CWR ? "CWR" :"");
printf("%s", flags != 0 ? ">" : "");
printf("\n");
printf("\trcv_(nxt,wnd,up) (%u,%x,%x) snd_(una,nxt,max) (%x,%x,%x)\n",
args[0]->tcps_rnxt, args[0]->tcps_rwnd, args[0]->tcps_rup,
args[0]->tcps_suna, args[0]->tcps_snxt, args[0]->tcps_smax);
printf("\tsnd_(wl1,wl2,wnd) (%x,%x,%x)\n",
args[0]->tcps_swl1, args[0]->tcps_swl2, args[0]->tcps_swnd);
}
tcp:kernel::debug-drop
/args[0]->tcps_debug/
{
printf("%p %s: output [x..x] @%x, urp=%x\n", arg0,
tcp_state_string[args[0]->tcps_state],
args[1]->tcp_ack,
args[1]->tcp_urgent);
seq = args[1]->tcp_seq;
ack = args[1]->tcp_ack;
len = args[2]->ip_plength;
printf("%p %s: drop [%x..%x]", arg0,
tcp_state_string[args[0]->tcps_state], seq, seq + len);
printf("@%x, urp=%x", ack, args[1]->tcp_urgent);
printf("%s", flags != 0 ? "<" : "");
printf("%s", flags & TH_SYN ? "SYN," :"");
printf("%s", flags & TH_ACK ? "ACK," :"");
printf("%s", flags & TH_FIN ? "FIN," :"");
printf("%s", flags & TH_RST ? "RST," :"");
printf("%s", flags & TH_PUSH ? "PUSH," :"");
printf("%s", flags & TH_URG ? "URG," :"");
printf("%s", flags & TH_ECE ? "ECE," :"");
printf("%s", flags & TH_CWR ? "CWR" :"");
printf("%s", flags != 0 ? ">" : "");
printf("\n");
printf("\trcv_(nxt,wnd,up) (%x,%x,%x) snd_(una,nxt,max) (%x,%x,%x)\n",
args[0]->tcps_rnxt, args[0]->tcps_rwnd, args[0]->tcps_rup,
args[0]->tcps_suna, args[0]->tcps_snxt, args[0]->tcps_smax);
printf("\tsnd_(wl1,wl2,wnd) (%x,%x,%x)\n",
args[0]->tcps_swl1, args[0]->tcps_swl2, args[0]->tcps_swnd);
}
tcp:kernel::debug-user
/args[0]->tcps_debug/
{
printf("%p %s: user ", arg0,
tcp_state_string[args[0]->tcps_state]);
printf("%s", prureq_string[arg1]);
printf("\n");
printf("\trcv_(nxt,wnd,up) (%x,%x,%x) snd_(una,nxt,max) (%x,%x,%x)\n",
args[0]->tcps_rnxt, args[0]->tcps_rwnd, args[0]->tcps_rup,
args[0]->tcps_suna, args[0]->tcps_snxt, args[0]->tcps_smax);
printf("\tsnd_(wl1,wl2,wnd) (%x,%x,%x)\n",
args[0]->tcps_swl1, args[0]->tcps_swl2, args[0]->tcps_swnd);
}

View File

@ -1,4 +1,5 @@
.\" Copyright (c) 2013 Edward Tomasz Napierala
.\" Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
@ -23,7 +24,7 @@
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.Dd August 9, 2015
.Dd September 12, 2015
.Dt CTL 4
.Os
.Sh NAME
@ -80,6 +81,8 @@ Mode sense/select support
.It
Error injection support
.It
High Availability clustering support with ALUA
.It
All I/O handled in-kernel, no userland context switch overhead
.El
.Pp
@ -99,9 +102,57 @@ log commands with errors;
.It 2
log all commands;
.It 4
log received data for commands except READ/WRITE.
log data for commands other than READ/WRITE.
.El
Defaults to 0.
.It Va kern.cam.ctl.ha_id
Specifies the unique position of this node within a High Availability cluster.
The default is 0, which disables HA; values 1 and 2 enable HA at the specified
position.
.It Va kern.cam.ctl.ha_mode
Specifies the High Availability cluster operation mode:
.Bl -tag -offset indent -compact
.It 0
Active/Standby -- the primary node has backend access and processes requests,
while the secondary can only do basic LUN discovery and reservation;
.It 1
Active/Active -- both nodes have backend access and process requests,
while the secondary node synchronizes its processing with the primary one;
.It 2
Active/Active -- the primary node has backend access and processes requests,
while the secondary node forwards all requests and data to the primary one.
.El
All of the above modes require an established connection between the HA
cluster nodes.
If the connection is not configured, the secondary node will report the
Unavailable state; if configured but not established, the Transitioning state.
Defaults to 0.
.It Va kern.cam.ctl.ha_peer
A string value specifying the method used to establish a connection to the
peer HA node.
Can be "listen IP:port", "connect IP:port", or empty.
.It Va kern.cam.ctl.ha_link
Reports the present state of the connection between the HA cluster nodes:
.Bl -tag -offset indent -compact
.It 0
not configured;
.It 1
configured but not established;
.It 2
established.
.El
.It Va kern.cam.ctl.ha_role
Specifies the default role of this node:
.Bl -tag -offset indent -compact
.It 0
primary;
.It 1
secondary.
.El
This role can be overridden on a per-LUN basis using the "ha_role" LUN option,
so that one node is primary for one LUN while the other node is primary for
another.
A role change from primary to secondary for HA modes 0 and 2 closes the
backends; the opposite change opens them.
If there is no primary node (both nodes are secondary, or the secondary node
has no connection to the primary one), the secondary node(s) report the
Transitioning state.
A state with two primary nodes is illegal (a split-brain condition).
.It Va kern.cam.ctl.iscsi.debug
Verbosity level for log messages from the kernel part of iSCSI target.
Set to 0 to disable logging or 1 to warn about potential problems.
@ -132,5 +183,7 @@ subsystem first appeared in
.Sh AUTHORS
The
.Nm
subsystem was written by
subsystem was originally written by
.An Kenneth Merry Aq Mt ken@FreeBSD.org .
Later work was done by
.An Alexander Motin Aq Mt mav@FreeBSD.org .

View File

@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd April 18, 2015
.Dd September 14, 2015
.Dt DTRACE_IP 4
.Os
.Sh NAME
@ -212,7 +212,7 @@ IPv6 destination address.
A string representation of the source address.
.It Vt string ipv6_daddr
A string representation of the destination address.
.It Vt ip6_t *ipv6_hdr
.It Vt struct ip6_hdr *ipv6_hdr
A pointer to the raw IPv6 header.
.El
.Sh FILES

View File

@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd January 2, 2005
.Dd September 12, 2015
.Dt GEOM_FOX 4
.Os
.Sh NAME
@ -46,6 +46,13 @@ module at boot time, place the following line in
geom_fox_load="YES"
.Ed
.Sh DESCRIPTION
.Bf -symbolic
This driver is obsolete.
Users are advised to use
.Xr gmultipath 8
instead.
.Ef
.Pp
The intent of the
.Nm
framework is to provide basic multipathing support to access direct

View File

@ -35,7 +35,7 @@
.\" $FreeBSD$
.\" $Whistle: ng_pppoe.8,v 1.1 1999/01/25 23:46:27 archie Exp $
.\"
.Dd November 13, 2012
.Dd September 15, 2015
.Dt NG_PPPOE 4
.Os
.Sh NAME
@ -187,7 +187,7 @@ above messages, and reports the Access Concentrator Name.
The four commands above use a common data structure:
.Bd -literal -offset 4n
struct ngpppoe_sts {
char hook[NG_HOOKSIZ]; /* hook associated with event session */
char hook[NG_HOOKSIZ];
};
.Ed
.Bl -tag -width 3n
@ -244,6 +244,20 @@ hook, or when user wants to override this address with another one.
.Tn ASCII
form of this message is
.Qq Li setenaddr .
.It Dv NGM_PPPOE_SETMAXP Pq Ic setmaxp
Set the node PPP-Max-Payload value as described in RFC 4638.
This message applies only to a client configuration (a userland sketch of
sending it follows the list below).
.Tn ASCII
form of this message is
.Qq Li setmaxp .
.Pp
The data structure returned to the client is:
.Bd -literal -offset 4n
struct ngpppoe_maxp {
char hook[NG_HOOKSIZ];
uint16_t data;
};
.Ed
.El
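
A hedged sketch of sending the NGM_PPPOE_SETMAXP message described above from userland with libnetgraph (link with -lnetgraph). The node path and the assumption that the message argument is a 16-bit payload value are ours, not taken from the manual page:

#include <err.h>
#include <stdint.h>
#include <netgraph.h>
#include <netgraph/ng_pppoe.h>

int
set_max_payload(const char *path, uint16_t maxp)
{
	int cs, ds;

	/* Create a temporary ng_socket node for control messages. */
	if (NgMkSockNode(NULL, &cs, &ds) < 0)
		err(1, "NgMkSockNode");
	/* Hypothetical path, e.g. "fxp0:orphans"; maxp per RFC 4638. */
	if (NgSendMsg(cs, path, NGM_PPPOE_COOKIE,
	    NGM_PPPOE_SETMAXP, &maxp, sizeof(maxp)) < 0)
		err(1, "NgSendMsg(setmaxp)");
	return (0);
}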
.Sh SHUTDOWN
This node shuts down upon receipt of a

View File

@ -1641,6 +1641,7 @@ MLINKS+=timeout.9 callout.9 \
timeout.9 callout_active.9 \
timeout.9 callout_deactivate.9 \
timeout.9 callout_drain.9 \
timeout.9 callout_drain_async.9 \
timeout.9 callout_handle_init.9 \
timeout.9 callout_init.9 \
timeout.9 callout_init_mtx.9 \

View File

@ -29,13 +29,14 @@
.\"
.\" $FreeBSD$
.\"
.Dd October 8, 2014
.Dd September 14, 2015
.Dt TIMEOUT 9
.Os
.Sh NAME
.Nm callout_active ,
.Nm callout_deactivate ,
.Nm callout_drain ,
.Nm callout_drain_async ,
.Nm callout_handle_init ,
.Nm callout_init ,
.Nm callout_init_mtx ,
@ -70,6 +71,8 @@ typedef void timeout_t (void *);
.Fn callout_deactivate "struct callout *c"
.Ft int
.Fn callout_drain "struct callout *c"
.Ft int
.Fn callout_drain_async "struct callout *c" "callout_func_t *fn" "void *arg"
.Ft void
.Fn callout_handle_init "struct callout_handle *handle"
.Bd -literal
@ -264,6 +267,24 @@ fully stopped before
.Fn callout_drain
returns.
.Pp
The function
.Fn callout_drain_async
is non-blocking and works the same as the
.Fn callout_stop
function.
When this function returns non-zero, do not call it again until the callback function given by
.Fa fn
has been called with argument
.Fa arg .
Only one of
.Fn callout_drain
or
.Fn callout_drain_async
should be called at a time to drain a callout.
If this function returns zero, it is safe to free the callout structure pointed to by the
.Fa c
argument immediately.
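
A hedged kernel-side sketch of the pattern this text describes: tear down a callout without sleeping, freeing the containing structure either immediately (return value zero) or from the supplied callback. The struct mydev, the M_MYDEV malloc type, and the function names are hypothetical:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/malloc.h>

static MALLOC_DEFINE(M_MYDEV, "mydev", "example softc");

struct mydev {
	struct callout	timer;
	/* ... device state ... */
};

static void
mydev_drained(void *arg)
{
	/* Callback fired: the callout is fully stopped; freeing is safe. */
	free(arg, M_MYDEV);
}

static void
mydev_destroy(struct mydev *sc)
{
	if (callout_drain_async(&sc->timer, mydev_drained, sc) == 0)
		free(sc, M_MYDEV);	/* Already idle; free immediately. */
	/* Otherwise mydev_drained() frees sc once the callout stops. */
}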
.Pp
The
.Fn callout_reset
and

View File

@ -7,6 +7,8 @@
# we need this until there is an alternative
MK_INSTALL_AS_USER= yes
_default_makeobjdir=$${.CURDIR:S,$${SRCTOP},$${OBJTOP},}
.if empty(OBJROOT) || ${.MAKE.LEVEL} == 0
.if !make(showconfig)
.if defined(MAKEOBJDIRPREFIX) && exists(${MAKEOBJDIRPREFIX})
@ -16,9 +18,9 @@ OBJROOT:=${MAKEOBJDIRPREFIX}${SRCTOP:S,/src,,}/
MAKEOBJDIRPREFIX=
.export MAKEOBJDIRPREFIX
.endif
.if empty(MAKEOBJDIR) || ${MAKEOBJDIR:M*/*} == ""
.if empty(MAKEOBJDIR)
# OBJTOP set below
MAKEOBJDIR=$${.CURDIR:S,$${SRCTOP},$${OBJTOP},}
MAKEOBJDIR=${_default_makeobjdir}
# export but do not track
.export-env MAKEOBJDIR
# now for our own use
@ -32,7 +34,7 @@ OBJROOT ?= ${SB_OBJROOT}
.endif
OBJROOT ?= ${SRCTOP:H}/obj/
.if ${OBJROOT:M*/} != ""
OBJROOT:= ${OBJROOT:tA}/
OBJROOT:= ${OBJROOT:H:tA}/
.else
OBJROOT:= ${OBJROOT:H:tA}/${OBJROOT:T}
.endif
@ -106,6 +108,12 @@ TARGET_SPEC = ${TARGET_SPEC_VARS:@v@${$v:U}@:ts,}
TARGET_OBJ_SPEC:= ${TARGET_SPEC:S;,;.;g}
OBJTOP:= ${OBJROOT}${TARGET_OBJ_SPEC}
.if defined(MAKEOBJDIR)
.if ${MAKEOBJDIR:M*/*} == ""
.error Cannot use MAKEOBJDIR=${MAKEOBJDIR}${.newline}Unset MAKEOBJDIR to get default: MAKEOBJDIR='${_default_makeobjdir}'
.endif
.endif
.if ${.CURDIR} == ${SRCTOP}
RELDIR = .
.elif ${.CURDIR:M${SRCTOP}/*}
@ -186,6 +194,10 @@ UPDATE_DEPENDFILE= NO
# define the list of places that contain files we are responsible for
.MAKE.META.BAILIWICK = ${SB} ${OBJROOT} ${STAGE_ROOT}
.if defined(CCACHE_DIR)
.MAKE.META.IGNORE_PATHS += ${CCACHE_DIR}
.endif
CSU_DIR.${MACHINE_ARCH} ?= csu/${MACHINE_ARCH}
CSU_DIR := ${CSU_DIR.${MACHINE_ARCH}}

View File

@ -119,7 +119,10 @@ device pl011
# USB support
options USB_DEBUG # enable debug msgs
device dwcotg # DWC OTG controller
device xhci # XHCI PCI->USB interface (USB 3.0)
device usb # USB Bus (required)
device ukbd # Keyboard
device umass # Disks/Mass storage - Requires scbus and da
# Pseudo devices.
device loop # Network loopback

View File

@ -143,6 +143,7 @@ KSRCS+= usb_template_kbd.c
KSRCS+= usb_template_audio.c
KSRCS+= usb_template_phone.c
KSRCS+= usb_template_serialnet.c
KSRCS+= usb_template_midi.c
#
# USB mass storage support

View File

@ -40,25 +40,24 @@ Features:
- Support for multiple ports
- Support for multiple simultaneous initiators
- Support for multiple simultaneous backing stores
- Support for VMWare VAAI: COMPARE AND WRITE, XCOPY, WRITE SAME and
UNMAP commands
- Support for Microsoft ODX: POPULATE TOKEN/WRITE USING TOKEN, WRITE SAME
and UNMAP commands
- Persistent reservation support
- Mode sense/select support
- Error injection support
- High Availability support
- High Availability clustering support with ALUA
- All I/O handled in-kernel, no userland context switch overhead.
Configuring and Running CTL:
===========================
- After applying the CTL patchset to your tree, build world and install it
on your target system.
- Add 'device ctl' to your kernel configuration file.
- Add 'device ctl' to your kernel configuration file or load the module.
- If you're running with a 8Gb or 4Gb Qlogic FC board, add
'options ISP_TARGET_MODE' to your kernel config file. Keep in mind that
the isp(4) driver can run in target or initiator mode, but not both on
the same machine. 'device ispfw' or loading the ispfw module is also
recommended.
'options ISP_TARGET_MODE' to your kernel config file. 'device ispfw' or
loading the ispfw module is also recommended.
- Rebuild and install a new kernel.

View File

@ -405,12 +405,6 @@ static int ctl_scsiio_lun_check(struct ctl_lun *lun,
const struct ctl_cmd_entry *entry,
struct ctl_scsiio *ctsio);
static void ctl_failover_lun(struct ctl_lun *lun);
static void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua);
static void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua);
static void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua);
static void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua);
static void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
ctl_ua_type ua_type);
static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc,
struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);
@ -418,11 +412,14 @@ static int ctl_scsiio(struct ctl_scsiio *ctsio);
static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io,
ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io,
static int ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io,
ctl_ua_type ua_type);
static int ctl_lun_reset(struct ctl_softc *ctl_softc, union ctl_io *io);
static int ctl_abort_task(union ctl_io *io);
static int ctl_abort_task_set(union ctl_io *io);
static int ctl_query_task(union ctl_io *io, int task_set);
static int ctl_i_t_nexus_reset(union ctl_io *io);
static int ctl_query_async_event(union ctl_io *io);
static void ctl_run_task(union ctl_io *io);
#ifdef CTL_IO_DELAY
static void ctl_datamove_timer_wakeup(void *arg);
@ -519,8 +516,6 @@ ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc,
ctsio->residual = msg_info->scsi.residual;
memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data,
msg_info->scsi.sense_len);
memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
&msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen));
ctl_enqueue_isc((union ctl_io *)ctsio);
}
@ -673,7 +668,10 @@ ctl_isc_ha_link_down(struct ctl_softc *softc)
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(lun, &softc->lun_list, links) {
mtx_lock(&lun->lun_lock);
lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) {
lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
}
mtx_unlock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
@ -700,8 +698,11 @@ ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
struct ctl_lun *lun;
uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);
mtx_lock(&softc->ctl_lock);
if (msg->hdr.nexus.targ_lun < CTL_MAX_LUNS &&
(lun = softc->ctl_luns[msg->hdr.nexus.targ_lun]) != NULL) {
(lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) != NULL) {
mtx_lock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
if (msg->ua.ua_all) {
if (msg->ua.ua_set)
ctl_est_ua_all(lun, iid, msg->ua.ua_type);
@ -713,7 +714,9 @@ ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
else
ctl_clr_ua(lun, iid, msg->ua.ua_type);
}
}
mtx_unlock(&lun->lun_lock);
} else
mtx_unlock(&softc->ctl_lock);
}
static void
@ -722,58 +725,69 @@ ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
struct ctl_lun *lun;
struct ctl_ha_msg_lun_pr_key pr_key;
int i, k;
ctl_lun_flags oflags;
uint32_t targ_lun;
lun = softc->ctl_luns[msg->hdr.nexus.targ_lun];
if (lun == NULL) {
CTL_DEBUG_PRINT(("%s: Unknown LUN %d\n", __func__,
msg->hdr.nexus.targ_lun));
targ_lun = msg->hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if ((targ_lun >= CTL_MAX_LUNS) ||
((lun = softc->ctl_luns[targ_lun]) == NULL)) {
mtx_unlock(&softc->ctl_lock);
return;
}
mtx_lock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
if (lun->flags & CTL_LUN_DISABLED) {
mtx_unlock(&lun->lun_lock);
return;
}
i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
if (msg->lun.lun_devid_len != i || (i > 0 &&
memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
mtx_unlock(&lun->lun_lock);
printf("%s: Received conflicting HA LUN %d\n",
__func__, msg->hdr.nexus.targ_lun);
return;
} else {
mtx_lock(&lun->lun_lock);
i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0;
if (msg->lun.lun_devid_len != i || (i > 0 &&
memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) {
mtx_unlock(&lun->lun_lock);
printf("%s: Received conflicting HA LUN %d\n",
__func__, msg->hdr.nexus.targ_lun);
return;
} else {
/* Record whether peer is primary. */
if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
(msg->lun.flags & CTL_LUN_DISABLED) == 0)
lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
else
lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
/* Record whether peer is primary. */
oflags = lun->flags;
if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) &&
(msg->lun.flags & CTL_LUN_DISABLED) == 0)
lun->flags |= CTL_LUN_PEER_SC_PRIMARY;
else
lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY;
if (oflags != lun->flags)
ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
/* If peer is primary and we are not -- use data */
if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
(lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
lun->PRGeneration = msg->lun.pr_generation;
lun->pr_res_idx = msg->lun.pr_res_idx;
lun->res_type = msg->lun.pr_res_type;
lun->pr_key_count = msg->lun.pr_key_count;
for (k = 0; k < CTL_MAX_INITIATORS; k++)
ctl_clr_prkey(lun, k);
for (k = 0; k < msg->lun.pr_key_count; k++) {
memcpy(&pr_key, &msg->lun.data[i],
sizeof(pr_key));
ctl_alloc_prkey(lun, pr_key.pr_iid);
ctl_set_prkey(lun, pr_key.pr_iid,
pr_key.pr_key);
i += sizeof(pr_key);
}
/* If peer is primary and we are not -- use data */
if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 &&
(lun->flags & CTL_LUN_PEER_SC_PRIMARY)) {
lun->PRGeneration = msg->lun.pr_generation;
lun->pr_res_idx = msg->lun.pr_res_idx;
lun->res_type = msg->lun.pr_res_type;
lun->pr_key_count = msg->lun.pr_key_count;
for (k = 0; k < CTL_MAX_INITIATORS; k++)
ctl_clr_prkey(lun, k);
for (k = 0; k < msg->lun.pr_key_count; k++) {
memcpy(&pr_key, &msg->lun.data[i],
sizeof(pr_key));
ctl_alloc_prkey(lun, pr_key.pr_iid);
ctl_set_prkey(lun, pr_key.pr_iid,
pr_key.pr_key);
i += sizeof(pr_key);
}
mtx_unlock(&lun->lun_lock);
CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
__func__, msg->hdr.nexus.targ_lun,
(msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
"primary" : "secondary"));
/* If we are primary but peer doesn't know -- notify */
if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
(msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
ctl_isc_announce_lun(lun);
}
mtx_unlock(&lun->lun_lock);
CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n",
__func__, msg->hdr.nexus.targ_lun,
(msg->lun.flags & CTL_LUN_PRIMARY_SC) ?
"primary" : "secondary"));
/* If we are primary but peer doesn't know -- notify */
if ((lun->flags & CTL_LUN_PRIMARY_SC) &&
(msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0)
ctl_isc_announce_lun(lun);
}
}
@ -781,6 +795,7 @@ static void
ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
struct ctl_port *port;
struct ctl_lun *lun;
int i, new;
port = softc->ctl_ports[msg->hdr.nexus.targ_port];
@ -856,6 +871,15 @@ ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
__func__);
}
}
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(lun, &softc->lun_list, links) {
if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
continue;
mtx_lock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
mtx_unlock(&lun->lun_lock);
}
mtx_unlock(&softc->ctl_lock);
}
/*
@ -954,6 +978,8 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
* when the datamove is complete.
*/
io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
if (msg->hdr.status == CTL_SUCCESS)
io->io_hdr.status = msg->hdr.status;
if (msg->dt.sg_sequence == 0) {
i = msg->dt.kern_sg_entries +
@ -1036,6 +1062,8 @@ ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
memcpy(&io->scsiio.sense_data,
&msg->scsi.sense_data,
msg->scsi.sense_len);
if (msg->hdr.status == CTL_SUCCESS)
io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
}
ctl_enqueue_isc(io);
break;
@ -1178,7 +1206,7 @@ ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
dest->hdr.status = src->io_hdr.status;
}
static void
void
ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
struct ctl_softc *softc = lun->ctl_softc;
@ -1193,25 +1221,33 @@ ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
}
static void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
void
ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua)
{
struct ctl_softc *softc = lun->ctl_softc;
int i, j;
int i;
mtx_assert(&lun->lun_lock, MA_OWNED);
for (i = softc->port_min; i < softc->port_max; i++) {
if (lun->pending_ua[i] == NULL)
if (lun->pending_ua[port] == NULL)
return;
for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
if (port * CTL_MAX_INIT_PER_PORT + i == except)
continue;
for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) {
if (i * CTL_MAX_INIT_PER_PORT + j == except)
continue;
lun->pending_ua[i][j] |= ua;
}
lun->pending_ua[port][i] |= ua;
}
}
static void
void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
struct ctl_softc *softc = lun->ctl_softc;
int i;
mtx_assert(&lun->lun_lock, MA_OWNED);
for (i = softc->port_min; i < softc->port_max; i++)
ctl_est_ua_port(lun, i, except, ua);
}
void
ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
struct ctl_softc *softc = lun->ctl_softc;
@ -1226,7 +1262,7 @@ ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua;
}
static void
void
ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
struct ctl_softc *softc = lun->ctl_softc;
@ -1244,7 +1280,7 @@ ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
}
}
static void
void
ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
ctl_ua_type ua_type)
{
@ -1730,20 +1766,24 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
softc = control_softc;
targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if ((targ_lun < CTL_MAX_LUNS) &&
((lun = softc->ctl_luns[targ_lun]) != NULL)) {
mtx_lock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
/*
* If the LUN is invalid, pretend that it doesn't exist.
* It will go away as soon as all pending I/O has been
* completed.
*/
mtx_lock(&lun->lun_lock);
if (lun->flags & CTL_LUN_DISABLED) {
mtx_unlock(&lun->lun_lock);
lun = NULL;
}
} else
} else {
mtx_unlock(&softc->ctl_lock);
lun = NULL;
}
if (lun == NULL) {
/*
* The other node would not send this request to us unless
@ -2092,6 +2132,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct thread *td)
{
struct ctl_softc *softc;
struct ctl_lun *lun;
int retval;
softc = control_softc;
@ -2250,7 +2291,6 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
break;
}
case CTL_DUMP_OOA: {
struct ctl_lun *lun;
union ctl_io *io;
char printbuf[128];
struct sbuf sb;
@ -2287,7 +2327,6 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
break;
}
case CTL_GET_OOA: {
struct ctl_lun *lun;
struct ctl_ooa *ooa_hdr;
struct ctl_ooa_entry *entries;
uint32_t cur_fill_num;
@ -2379,7 +2418,6 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
case CTL_CHECK_OOA: {
union ctl_io *io;
struct ctl_lun *lun;
struct ctl_ooa_info *ooa_info;
@ -2412,9 +2450,6 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
case CTL_DELAY_IO: {
struct ctl_io_delay_info *delay_info;
#ifdef CTL_IO_DELAY
struct ctl_lun *lun;
#endif /* CTL_IO_DELAY */
delay_info = (struct ctl_io_delay_info *)addr;
@ -2505,7 +2540,6 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
case CTL_SETSYNC:
case CTL_GETSYNC: {
struct ctl_sync_info *sync_info;
struct ctl_lun *lun;
sync_info = (struct ctl_sync_info *)addr;
@ -2514,6 +2548,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (lun == NULL) {
mtx_unlock(&softc->ctl_lock);
sync_info->status = CTL_GS_SYNC_NO_LUN;
break;
}
/*
* Get or set the sync interval. We're not bounds checking
@ -2534,7 +2569,6 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
case CTL_GETSTATS: {
struct ctl_stats *stats;
struct ctl_lun *lun;
int i;
stats = (struct ctl_stats *)addr;
@ -2570,7 +2604,6 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
case CTL_ERROR_INJECT: {
struct ctl_error_desc *err_desc, *new_err_desc;
struct ctl_lun *lun;
err_desc = (struct ctl_error_desc *)addr;
@ -2617,7 +2650,6 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
case CTL_ERROR_INJECT_DELETE: {
struct ctl_error_desc *delete_desc, *desc, *desc2;
struct ctl_lun *lun;
int delete_done;
delete_desc = (struct ctl_error_desc *)addr;
@ -2661,8 +2693,6 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
mtx_lock(&softc->ctl_lock);
printf("CTL Persistent Reservation information start:\n");
for (i = 0; i < CTL_MAX_LUNS; i++) {
struct ctl_lun *lun;
lun = softc->ctl_luns[i];
if ((lun == NULL)
@ -2756,7 +2786,6 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
case CTL_LUN_LIST: {
struct sbuf *sb;
struct ctl_lun *lun;
struct ctl_lun_list *list;
struct ctl_option *opt;
@ -3129,6 +3158,17 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
mtx_unlock(&softc->ctl_lock);
return (ENXIO);
}
if (port->status & CTL_PORT_STATUS_ONLINE) {
STAILQ_FOREACH(lun, &softc->lun_list, links) {
if (ctl_lun_map_to_port(port, lun->lun) >=
CTL_MAX_LUNS)
continue;
mtx_lock(&lun->lun_lock);
ctl_est_ua_port(lun, lm->port, -1,
CTL_UA_LUN_CHANGE);
mtx_unlock(&lun->lun_lock);
}
}
mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps
if (lm->plun < CTL_MAX_LUNS) {
if (lm->lun == UINT32_MAX)
@ -3145,6 +3185,8 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
retval = ctl_lun_map_init(port);
} else
return (ENXIO);
if (port->status & CTL_PORT_STATUS_ONLINE)
ctl_isc_announce_port(port);
break;
}
default: {
@ -4531,8 +4573,8 @@ ctl_lun_primary(struct ctl_be_lun *be_lun)
mtx_lock(&lun->lun_lock);
lun->flags |= CTL_LUN_PRIMARY_SC;
mtx_unlock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
mtx_unlock(&lun->lun_lock);
ctl_isc_announce_lun(lun);
return (0);
}
@ -4544,8 +4586,8 @@ ctl_lun_secondary(struct ctl_be_lun *be_lun)
mtx_lock(&lun->lun_lock);
lun->flags &= ~CTL_LUN_PRIMARY_SC;
mtx_unlock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
mtx_unlock(&lun->lun_lock);
ctl_isc_announce_lun(lun);
return (0);
}
@ -7325,8 +7367,9 @@ ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
ctsio->kern_rel_offset = 0;
data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr;
data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS;
data->byte2 |= RST_ITNRS;
data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS |
RST_TRS;
data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS;
ctl_set_success(ctsio);
ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
@ -8380,13 +8423,23 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
struct ctl_lun *lun;
struct ctl_softc *softc;
int i;
uint32_t targ_lun;
uint32_t residx, targ_lun;
softc = control_softc;
targ_lun = msg->hdr.nexus.targ_mapped_lun;
lun = softc->ctl_luns[targ_lun];
mtx_lock(&softc->ctl_lock);
if ((targ_lun >= CTL_MAX_LUNS) ||
((lun = softc->ctl_luns[targ_lun]) == NULL)) {
mtx_unlock(&softc->ctl_lock);
return;
}
mtx_lock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
if (lun->flags & CTL_LUN_DISABLED) {
mtx_unlock(&lun->lun_lock);
return;
}
residx = ctl_get_initindex(&msg->hdr.nexus);
switch(msg->pr.pr_info.action) {
case CTL_PR_REG_KEY:
ctl_alloc_prkey(lun, msg->pr.pr_info.residx);
@ -8451,8 +8504,9 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
if (lun->res_type != SPR_TYPE_EX_AC
&& lun->res_type != SPR_TYPE_WR_EX) {
for (i = softc->init_min; i < softc->init_max; i++)
if (ctl_get_prkey(lun, i) != 0)
ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
if (i == residx || ctl_get_prkey(lun, i) == 0)
continue;
ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
}
lun->flags &= ~CTL_LUN_PR_RESERVED;
@ -10798,9 +10852,7 @@ ctl_scsiio_lun_check(struct ctl_lun *lun,
if (entry->pattern & CTL_LUN_PAT_WRITE) {
if (lun->be_lun &&
lun->be_lun->flags & CTL_LUN_FLAG_READONLY) {
ctl_set_sense(ctsio, /*current_error*/ 1,
/*sense_key*/ SSD_KEY_DATA_PROTECT,
/*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE);
ctl_set_hw_write_protected(ctsio);
retval = 1;
goto bailout;
}
@ -10898,6 +10950,7 @@ ctl_failover_lun(struct ctl_lun *lun)
if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
if (io->flags & CTL_FLAG_IO_ACTIVE) {
io->flags |= CTL_FLAG_ABORT;
io->flags |= CTL_FLAG_FAILOVER;
} else { /* This can be only due to DATAMOVE */
io->msg_type = CTL_MSG_DATAMOVE_DONE;
io->flags |= CTL_FLAG_IO_ACTIVE;
@ -11301,6 +11354,7 @@ static int
ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
ctl_ua_type ua_type)
{
struct ctl_port *port;
struct ctl_lun *lun;
int retval;
@ -11321,10 +11375,15 @@ ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
retval = 0;
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(lun, &softc->lun_list, links)
retval += ctl_lun_reset(lun, io, ua_type);
port = softc->ctl_ports[io->io_hdr.nexus.targ_port];
STAILQ_FOREACH(lun, &softc->lun_list, links) {
if (port != NULL &&
ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
continue;
retval += ctl_do_lun_reset(lun, io, ua_type);
}
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
return (retval);
}
@ -11350,7 +11409,7 @@ ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
* XXX KDM for now, we're setting unit attention for all initiators.
*/
static int
ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
{
union ctl_io *xio;
#if 0
@ -11398,6 +11457,39 @@ ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
return (0);
}
static int
ctl_lun_reset(struct ctl_softc *softc, union ctl_io *io)
{
struct ctl_lun *lun;
uint32_t targ_lun;
int retval;
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if ((targ_lun >= CTL_MAX_LUNS) ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
return (1);
}
retval = ctl_do_lun_reset(lun, io, CTL_UA_LUN_RESET);
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) {
union ctl_ha_msg msg_info;
msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
msg_info.hdr.nexus = io->io_hdr.nexus;
msg_info.task.task_action = CTL_TASK_LUN_RESET;
msg_info.hdr.original_sc = NULL;
msg_info.hdr.serializing_sc = NULL;
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
sizeof(msg_info.task), M_WAITOK);
}
return (retval);
}
static void
ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
int other_sc)
@ -11453,10 +11545,10 @@ ctl_abort_task_set(union ctl_io *io)
*/
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL))
lun = softc->ctl_luns[targ_lun];
else {
if ((targ_lun >= CTL_MAX_LUNS) ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
return (1);
}
@ -11471,6 +11563,7 @@ ctl_abort_task_set(union ctl_io *io)
(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
}
mtx_unlock(&lun->lun_lock);
io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
return (0);
}
@ -11481,13 +11574,24 @@ ctl_i_t_nexus_reset(union ctl_io *io)
struct ctl_lun *lun;
uint32_t initidx;
if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
union ctl_ha_msg msg_info;
msg_info.hdr.nexus = io->io_hdr.nexus;
msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET;
msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
msg_info.hdr.original_sc = NULL;
msg_info.hdr.serializing_sc = NULL;
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
sizeof(msg_info.task), M_WAITOK);
}
initidx = ctl_get_initindex(&io->io_hdr.nexus);
mtx_lock(&softc->ctl_lock);
STAILQ_FOREACH(lun, &softc->lun_list, links) {
mtx_lock(&lun->lun_lock);
ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port,
io->io_hdr.nexus.initid,
(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0);
io->io_hdr.nexus.initid, 1);
#ifdef CTL_WITH_CA
ctl_clear_mask(lun->have_ca, initidx);
#endif
@ -11497,6 +11601,7 @@ ctl_i_t_nexus_reset(union ctl_io *io)
mtx_unlock(&lun->lun_lock);
}
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
return (0);
}
@ -11521,11 +11626,10 @@ ctl_abort_task(union ctl_io *io)
*/
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if ((targ_lun < CTL_MAX_LUNS)
&& (softc->ctl_luns[targ_lun] != NULL))
lun = softc->ctl_luns[targ_lun];
else {
if ((targ_lun >= CTL_MAX_LUNS) ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
return (1);
}
@ -11631,6 +11735,77 @@ ctl_abort_task(union ctl_io *io)
io->taskio.tag_type);
#endif
}
io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
return (0);
}
static int
ctl_query_task(union ctl_io *io, int task_set)
{
union ctl_io *xio;
struct ctl_lun *lun;
struct ctl_softc *softc;
int found = 0;
uint32_t targ_lun;
softc = control_softc;
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if ((targ_lun >= CTL_MAX_LUNS) ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
return (1);
}
mtx_lock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port)
|| (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid)
|| (xio->io_hdr.flags & CTL_FLAG_ABORT))
continue;
if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) {
found = 1;
break;
}
}
mtx_unlock(&lun->lun_lock);
if (found)
io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
else
io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
return (0);
}
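In SAM terms, the function above answers QUERY TASK (SET) by reporting FUNCTION SUCCEEDED only when the referenced task, or for a task-set query any non-aborted task from the same I_T nexus, is still present in the LUN's OOA queue; otherwise it reports FUNCTION COMPLETE. The iSCSI frontend later in this diff wires BHSTMR_FUNCTION_QUERY_TASK to this path and passes the referenced task tag through io->taskio.tag_num.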
static int
ctl_query_async_event(union ctl_io *io)
{
struct ctl_lun *lun;
struct ctl_softc *softc;
ctl_ua_type ua;
uint32_t targ_lun, initidx;
softc = control_softc;
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if ((targ_lun >= CTL_MAX_LUNS) ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
return (1);
}
mtx_lock(&lun->lun_lock);
mtx_unlock(&softc->ctl_lock);
initidx = ctl_get_initindex(&io->io_hdr.nexus);
ua = ctl_build_qae(lun, initidx, io->taskio.task_resp);
mtx_unlock(&lun->lun_lock);
if (ua != CTL_UA_NONE)
io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED;
else
io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE;
return (0);
}
@ -11639,41 +11814,12 @@ ctl_run_task(union ctl_io *io)
{
struct ctl_softc *softc = control_softc;
int retval = 1;
const char *task_desc;
CTL_DEBUG_PRINT(("ctl_run_task\n"));
KASSERT(io->io_hdr.io_type == CTL_IO_TASK,
("ctl_run_task: Unexpected io_type %d\n",
io->io_hdr.io_type));
task_desc = ctl_scsi_task_string(&io->taskio);
if (task_desc != NULL) {
#ifdef NEEDTOPORT
csevent_log(CSC_CTL | CSC_SHELF_SW |
CTL_TASK_REPORT,
csevent_LogType_Trace,
csevent_Severity_Information,
csevent_AlertLevel_Green,
csevent_FRU_Firmware,
csevent_FRU_Unknown,
"CTL: received task: %s",task_desc);
#endif
} else {
#ifdef NEEDTOPORT
csevent_log(CSC_CTL | CSC_SHELF_SW |
CTL_TASK_REPORT,
csevent_LogType_Trace,
csevent_Severity_Information,
csevent_AlertLevel_Green,
csevent_FRU_Firmware,
csevent_FRU_Unknown,
"CTL: received unknown task "
"type: %d (%#x)",
io->taskio.task_action,
io->taskio.task_action);
#endif
}
("ctl_run_task: Unexpected io_type %d\n", io->io_hdr.io_type));
io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED;
bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp));
switch (io->taskio.task_action) {
case CTL_TASK_ABORT_TASK:
retval = ctl_abort_task(io);
@ -11687,36 +11833,9 @@ ctl_run_task(union ctl_io *io)
case CTL_TASK_I_T_NEXUS_RESET:
retval = ctl_i_t_nexus_reset(io);
break;
case CTL_TASK_LUN_RESET: {
struct ctl_lun *lun;
uint32_t targ_lun;
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if ((targ_lun < CTL_MAX_LUNS)
&& (softc->ctl_luns[targ_lun] != NULL))
lun = softc->ctl_luns[targ_lun];
else {
mtx_unlock(&softc->ctl_lock);
retval = 1;
break;
}
retval = ctl_lun_reset(lun, io, CTL_UA_LUN_RESET);
mtx_unlock(&softc->ctl_lock);
if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) {
union ctl_ha_msg msg_info;
msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
msg_info.hdr.nexus = io->io_hdr.nexus;
msg_info.task.task_action = CTL_TASK_LUN_RESET;
msg_info.hdr.original_sc = NULL;
msg_info.hdr.serializing_sc = NULL;
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
sizeof(msg_info.task), M_WAITOK);
}
case CTL_TASK_LUN_RESET:
retval = ctl_lun_reset(softc, io);
break;
}
case CTL_TASK_TARGET_RESET:
retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET);
break;
@ -11727,9 +11846,18 @@ ctl_run_task(union ctl_io *io)
break;
case CTL_TASK_PORT_LOGOUT:
break;
case CTL_TASK_QUERY_TASK:
retval = ctl_query_task(io, 0);
break;
case CTL_TASK_QUERY_TASK_SET:
retval = ctl_query_task(io, 1);
break;
case CTL_TASK_QUERY_ASYNC_EVENT:
retval = ctl_query_async_event(io);
break;
default:
printf("ctl_run_task: got unknown task management event %d\n",
io->taskio.task_action);
printf("%s: got unknown task management event %d\n",
__func__, io->taskio.task_action);
break;
}
if (retval == 0)
@ -11975,12 +12103,14 @@ ctl_datamove_timer_wakeup(void *arg)
void
ctl_datamove(union ctl_io *io)
{
struct ctl_lun *lun;
void (*fe_datamove)(union ctl_io *io);
mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);
CTL_DEBUG_PRINT(("ctl_datamove\n"));
lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
#ifdef CTL_TIME_IO
if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
char str[256];
@ -12021,9 +12151,6 @@ ctl_datamove(union ctl_io *io)
if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
} else {
struct ctl_lun *lun;
lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
if ((lun != NULL)
&& (lun->delay_info.datamove_delay > 0)) {
@ -12084,6 +12211,7 @@ ctl_datamove(union ctl_io *io)
msg.hdr.original_sc = io->io_hdr.original_sc;
msg.hdr.serializing_sc = io;
msg.hdr.nexus = io->io_hdr.nexus;
msg.hdr.status = io->io_hdr.status;
msg.dt.flags = io->io_hdr.flags;
/*
* We convert everything into a S/G list here. We can't
@ -12198,7 +12326,24 @@ ctl_datamove(union ctl_io *io)
msg.dt.sent_sg_entries = sg_entries_sent;
}
/*
* Officially hand over the request from us to the peer.
* If failover has just happened, then we must return an error.
* If failover happens just after, then it is not our problem.
*/
if (lun)
mtx_lock(&lun->lun_lock);
if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
if (lun)
mtx_unlock(&lun->lun_lock);
io->io_hdr.port_status = 31342;
io->scsiio.be_move_done(io);
return;
}
io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
if (lun)
mtx_unlock(&lun->lun_lock);
} else {
/*
@ -12467,10 +12612,12 @@ ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
* failure.
*/
if ((rq == NULL)
&& ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE))
&& ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
(io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS))
ctl_set_busy(&io->scsiio);
if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {
if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
(io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) {
if (rq != NULL)
ctl_dt_req_free(rq);
@ -12851,15 +12998,6 @@ ctl_process_done(union ctl_io *io)
msg.scsi.residual = io->scsiio.residual;
memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
io->scsiio.sense_len);
/*
* We copy this whether or not this is an I/O-related
* command. Otherwise, we'd have to go and check to see
* whether it's a read/write command, and it really isn't
* worth it.
*/
memcpy(&msg.scsi.lbalen,
&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
sizeof(msg.scsi.lbalen));
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) +

View File

@ -120,6 +120,7 @@ typedef enum {
CTL_UA_LUN_CHANGE = 0x0020,
CTL_UA_MODE_CHANGE = 0x0040,
CTL_UA_LOG_CHANGE = 0x0080,
CTL_UA_INQ_CHANGE = 0x0100,
CTL_UA_RES_PREEMPT = 0x0400,
CTL_UA_RES_RELEASE = 0x0800,
CTL_UA_REG_PREEMPT = 0x1000,
@ -138,6 +139,10 @@ struct ctl_page_index;
SYSCTL_DECL(_kern_cam_ctl);
#endif
struct ctl_lun;
struct ctl_port;
struct ctl_softc;
/*
* Put a string into an sbuf, escaping characters that are illegal or not
* recommended in XML. Note this doesn't escape everything, just > < and &.
@ -174,9 +179,17 @@ void ctl_config_write_done(union ctl_io *io);
void ctl_portDB_changed(int portnum);
int ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct thread *td);
struct ctl_lun;
void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua);
void ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except,
ctl_ua_type ua);
void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua);
void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua);
void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua);
void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx,
ctl_ua_type ua_type);
void ctl_isc_announce_lun(struct ctl_lun *lun);
struct ctl_port;
void ctl_isc_announce_port(struct ctl_port *port);
/*

View File

@ -351,6 +351,48 @@ ctl_complete_beio(struct ctl_be_block_io *beio)
}
}
static size_t
cmp(uint8_t *a, uint8_t *b, size_t size)
{
size_t i;
for (i = 0; i < size; i++) {
if (a[i] != b[i])
break;
}
return (i);
}
static void
ctl_be_block_compare(union ctl_io *io)
{
struct ctl_be_block_io *beio;
uint64_t off, res;
int i;
uint8_t info[8];
beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
off = 0;
for (i = 0; i < beio->num_segs; i++) {
res = cmp(beio->sg_segs[i].addr,
beio->sg_segs[i + CTLBLK_HALF_SEGS].addr,
beio->sg_segs[i].len);
off += res;
if (res < beio->sg_segs[i].len)
break;
}
if (i < beio->num_segs) {
scsi_u64to8b(off, info);
ctl_set_sense(&io->scsiio, /*current_error*/ 1,
/*sense_key*/ SSD_KEY_MISCOMPARE,
/*asc*/ 0x1D, /*ascq*/ 0x00,
/*type*/ SSD_ELEM_INFO,
/*size*/ sizeof(info), /*data*/ &info,
/*type*/ SSD_ELEM_NONE);
} else
ctl_set_success(&io->scsiio);
}
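The cmp() helper above returns the offset of the first differing byte, which ctl_be_block_compare() accumulates across scatter/gather segments so the MISCOMPARE sense data can carry the exact byte offset in its INFORMATION field. The same idea in standalone form, as an illustrative sketch rather than the driver's code:

#include <stdio.h>

/* Return the index of the first byte where a and b differ, or size. */
static size_t
first_mismatch(const unsigned char *a, const unsigned char *b, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (a[i] != b[i])
			break;
	return (i);
}

int
main(void)
{
	unsigned char x[] = "abcdef", y[] = "abcXef";

	printf("first mismatch at byte %zu\n", first_mismatch(x, y, 6));
	return (0);
}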
static int
ctl_be_block_move_done(union ctl_io *io)
{
@ -360,7 +402,6 @@ ctl_be_block_move_done(union ctl_io *io)
#ifdef CTL_TIME_IO
struct bintime cur_bt;
#endif
int i;
beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
be_lun = beio->lun;
@ -388,21 +429,7 @@ ctl_be_block_move_done(union ctl_io *io)
ctl_set_success(&io->scsiio);
} else if (lbalen->flags & CTL_LLF_COMPARE) {
/* We have two data blocks ready for comparison. */
for (i = 0; i < beio->num_segs; i++) {
if (memcmp(beio->sg_segs[i].addr,
beio->sg_segs[i + CTLBLK_HALF_SEGS].addr,
beio->sg_segs[i].len) != 0)
break;
}
if (i < beio->num_segs)
ctl_set_sense(&io->scsiio,
/*current_error*/ 1,
/*sense_key*/ SSD_KEY_MISCOMPARE,
/*asc*/ 0x1D,
/*ascq*/ 0x00,
SSD_ELEM_NONE);
else
ctl_set_success(&io->scsiio);
ctl_be_block_compare(io);
}
} else if ((io->io_hdr.port_status != 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
@ -508,6 +535,8 @@ ctl_be_block_biodone(struct bio *bio)
ctl_set_invalid_opcode(&io->scsiio);
} else if (error == ENOSPC || error == EDQUOT) {
ctl_set_space_alloc_fail(&io->scsiio);
} else if (error == EROFS || error == EACCES) {
ctl_set_hw_write_protected(&io->scsiio);
} else if (beio->bio_cmd == BIO_FLUSH) {
/* XXX KDM is there is a better error here? */
ctl_set_internal_failure(&io->scsiio,
@ -603,8 +632,8 @@ ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
union ctl_io *io;
struct uio xuio;
struct iovec *xiovec;
int flags;
int error, i;
size_t s;
int error, flags, i;
DPRINTF("entered\n");
@ -665,6 +694,22 @@ ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
VOP_UNLOCK(be_lun->vn, 0);
SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
if (error == 0 && xuio.uio_resid > 0) {
/*
* If we read less than requested (EOF), then
* we should zero the rest of the buffer.
*/
s = beio->io_len - xuio.uio_resid;
for (i = 0; i < beio->num_segs; i++) {
if (s >= beio->sg_segs[i].len) {
s -= beio->sg_segs[i].len;
continue;
}
bzero((uint8_t *)beio->sg_segs[i].addr + s,
beio->sg_segs[i].len - s);
s = 0;
}
}
} else {
struct mount *mountpoint;
int lock_flags;
@ -720,6 +765,8 @@ ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
(beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error);
if (error == ENOSPC || error == EDQUOT) {
ctl_set_space_alloc_fail(&io->scsiio);
} else if (error == EROFS || error == EACCES) {
ctl_set_hw_write_protected(&io->scsiio);
} else
ctl_set_medium_error(&io->scsiio);
ctl_complete_beio(beio);
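The short-read handling added above walks the scatter/gather list and zeroes every byte past the amount actually read, so stale buffer contents are never returned to the initiator on EOF. The traversal logic in isolation, assuming a simplified flat array of address/length segments instead of the driver's types:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct seg {
	uint8_t	*addr;
	size_t	 len;
};

/* Zero everything past 'valid' bytes across the segment list. */
static void
zero_tail(struct seg *segs, int nsegs, size_t valid)
{
	int i;

	for (i = 0; i < nsegs; i++) {
		if (valid >= segs[i].len) {
			valid -= segs[i].len;	/* segment fully valid */
			continue;
		}
		memset(segs[i].addr + valid, 0, segs[i].len - valid);
		valid = 0;			/* the rest is zeroed whole */
	}
}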
@ -885,6 +932,8 @@ ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun,
if (error != 0) {
if (error == ENOSPC || error == EDQUOT) {
ctl_set_space_alloc_fail(&io->scsiio);
} else if (error == EROFS || error == EACCES) {
ctl_set_hw_write_protected(&io->scsiio);
} else
ctl_set_medium_error(&io->scsiio);
ctl_complete_beio(beio);
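Note the same errno-to-sense mapping now appears in all three backend completion paths (bio, file and zvol): ENOSPC and EDQUOT become a space-allocation failure, and the new EROFS/EACCES cases become "hardware write protected". Folded into one helper it might look like this (a hypothetical consolidation; the driver open-codes it at each site, and the bio path has extra cases for EOPNOTSUPP and flush errors):

/* Hypothetical consolidation of the repeated errno mapping above. */
static void
cbb_set_errno_sense(struct ctl_scsiio *ctsio, int error)
{
	if (error == ENOSPC || error == EDQUOT)
		ctl_set_space_alloc_fail(ctsio);
	else if (error == EROFS || error == EACCES)
		ctl_set_hw_write_protected(ctsio);
	else
		ctl_set_medium_error(ctsio);
}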

View File

@ -486,7 +486,7 @@ const struct ctl_cmd_entry ctl_cmd_table_a3[32] =
CTL_FLAG_DATA_IN |
CTL_CMD_FLAG_ALLOW_ON_PR_RESV,
CTL_LUN_PAT_NONE,
12, {0x0a, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
12, {0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},
/* 0B */
{NULL, CTL_SERIDX_INVLD, CTL_CMD_FLAG_NONE, CTL_LUN_PAT_NONE},
@ -768,7 +768,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
/* 35 SYNCHRONIZE CACHE(10) */
{ctl_sync_cache, CTL_SERIDX_SYNC, CTL_CMD_FLAG_OK_ON_SLUN |
CTL_FLAG_DATA_NONE,
CTL_LUN_PAT_NONE,
CTL_LUN_PAT_WRITE,
10, {0x02, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0x07}},
/* 36 LOCK UNLOCK CACHE(10) */
@ -1117,7 +1117,7 @@ const struct ctl_cmd_entry ctl_cmd_table[256] =
/* 91 SYNCHRONIZE CACHE(16) */
{ctl_sync_cache, CTL_SERIDX_SYNC, CTL_CMD_FLAG_OK_ON_SLUN |
CTL_FLAG_DATA_NONE,
CTL_LUN_PAT_NONE,
CTL_LUN_PAT_WRITE,
16, {0x02, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0x07}},

View File

@ -365,6 +365,132 @@ ctl_set_ua(struct ctl_scsiio *ctsio, int asc, int ascq)
SSD_ELEM_NONE);
}
static void
ctl_ua_to_acsq(ctl_ua_type ua_to_build, int *asc, int *ascq,
ctl_ua_type *ua_to_clear)
{
switch (ua_to_build) {
case CTL_UA_POWERON:
/* 29h/01h POWER ON OCCURRED */
*asc = 0x29;
*ascq = 0x01;
*ua_to_clear = ~0;
break;
case CTL_UA_BUS_RESET:
/* 29h/02h SCSI BUS RESET OCCURRED */
*asc = 0x29;
*ascq = 0x02;
*ua_to_clear = ~0;
break;
case CTL_UA_TARG_RESET:
/* 29h/03h BUS DEVICE RESET FUNCTION OCCURRED */
*asc = 0x29;
*ascq = 0x03;
*ua_to_clear = ~0;
break;
case CTL_UA_I_T_NEXUS_LOSS:
/* 29h/07h I_T NEXUS LOSS OCCURRED */
*asc = 0x29;
*ascq = 0x07;
*ua_to_clear = ~0;
break;
case CTL_UA_LUN_RESET:
/* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
/*
* Since we don't have a specific ASC/ASCQ pair for a LUN
* reset, just return the generic reset code.
*/
*asc = 0x29;
*ascq = 0x00;
break;
case CTL_UA_LUN_CHANGE:
/* 3Fh/0Eh REPORTED LUNS DATA HAS CHANGED */
*asc = 0x3F;
*ascq = 0x0E;
break;
case CTL_UA_MODE_CHANGE:
/* 2Ah/01h MODE PARAMETERS CHANGED */
*asc = 0x2A;
*ascq = 0x01;
break;
case CTL_UA_LOG_CHANGE:
/* 2Ah/02h LOG PARAMETERS CHANGED */
*asc = 0x2A;
*ascq = 0x02;
break;
case CTL_UA_INQ_CHANGE:
/* 3Fh/03h INQUIRY DATA HAS CHANGED */
*asc = 0x3F;
*ascq = 0x03;
break;
case CTL_UA_RES_PREEMPT:
/* 2Ah/03h RESERVATIONS PREEMPTED */
*asc = 0x2A;
*ascq = 0x03;
break;
case CTL_UA_RES_RELEASE:
/* 2Ah/04h RESERVATIONS RELEASED */
*asc = 0x2A;
*ascq = 0x04;
break;
case CTL_UA_REG_PREEMPT:
/* 2Ah/05h REGISTRATIONS PREEMPTED */
*asc = 0x2A;
*ascq = 0x05;
break;
case CTL_UA_ASYM_ACC_CHANGE:
/* 2Ah/06h ASYMMETRIC ACCESS STATE CHANGED */
*asc = 0x2A;
*ascq = 0x06;
break;
case CTL_UA_CAPACITY_CHANGED:
/* 2Ah/09h CAPACITY DATA HAS CHANGED */
*asc = 0x2A;
*ascq = 0x09;
break;
case CTL_UA_THIN_PROV_THRES:
/* 38h/07h THIN PROVISIONING SOFT THRESHOLD REACHED */
*asc = 0x38;
*ascq = 0x07;
break;
default:
panic("%s: Unknown UA %x", __func__, ua_to_build);
}
}
ctl_ua_type
ctl_build_qae(struct ctl_lun *lun, uint32_t initidx, uint8_t *resp)
{
ctl_ua_type ua;
ctl_ua_type ua_to_build, ua_to_clear;
int asc, ascq;
uint32_t p, i;
mtx_assert(&lun->lun_lock, MA_OWNED);
p = initidx / CTL_MAX_INIT_PER_PORT;
i = initidx % CTL_MAX_INIT_PER_PORT;
if (lun->pending_ua[p] == NULL)
ua = CTL_UA_POWERON;
else
ua = lun->pending_ua[p][i];
if (ua == CTL_UA_NONE)
return (CTL_UA_NONE);
ua_to_build = (1 << (ffs(ua) - 1));
ua_to_clear = ua_to_build;
ctl_ua_to_acsq(ua_to_build, &asc, &ascq, &ua_to_clear);
resp[0] = SSD_KEY_UNIT_ATTENTION;
if (ua_to_build == ua)
resp[0] |= 0x10;
else
resp[0] |= 0x20;
resp[1] = asc;
resp[2] = ascq;
return (ua);
}
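Reading ctl_build_qae(), the three response bytes carry the unit-attention sense key plus a flag in the high nibble, where 0x10 appears to mean the reported UA is the only one pending and 0x20 that more are queued, followed by the ASC and ASCQ. A quick decode under that reading, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Decode the 3-byte QUERY ASYNC EVENT response built above. */
static void
decode_qae(const uint8_t resp[3])
{
	printf("sense key %#x, %s UA pending, ASC/ASCQ %02x/%02x\n",
	    resp[0] & 0x0f, (resp[0] & 0x10) ? "single" : "multiple",
	    resp[1], resp[2]);
}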
ctl_ua_type
ctl_build_ua(struct ctl_lun *lun, uint32_t initidx,
struct scsi_sense_data *sense, scsi_sense_data_type sense_format)
@ -396,89 +522,7 @@ ctl_build_ua(struct ctl_lun *lun, uint32_t initidx,
ua_to_build = (1 << (ffs(ua[i]) - 1));
ua_to_clear = ua_to_build;
switch (ua_to_build) {
case CTL_UA_POWERON:
/* 29h/01h POWER ON OCCURRED */
asc = 0x29;
ascq = 0x01;
ua_to_clear = ~0;
break;
case CTL_UA_BUS_RESET:
/* 29h/02h SCSI BUS RESET OCCURRED */
asc = 0x29;
ascq = 0x02;
ua_to_clear = ~0;
break;
case CTL_UA_TARG_RESET:
/* 29h/03h BUS DEVICE RESET FUNCTION OCCURRED */
asc = 0x29;
ascq = 0x03;
ua_to_clear = ~0;
break;
case CTL_UA_I_T_NEXUS_LOSS:
/* 29h/07h I_T NEXUS LOSS OCCURRED */
asc = 0x29;
ascq = 0x07;
ua_to_clear = ~0;
break;
case CTL_UA_LUN_RESET:
/* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
/*
* Since we don't have a specific ASC/ASCQ pair for a LUN
* reset, just return the generic reset code.
*/
asc = 0x29;
ascq = 0x00;
break;
case CTL_UA_LUN_CHANGE:
/* 3Fh/0Eh REPORTED LUNS DATA HAS CHANGED */
asc = 0x3F;
ascq = 0x0E;
break;
case CTL_UA_MODE_CHANGE:
/* 2Ah/01h MODE PARAMETERS CHANGED */
asc = 0x2A;
ascq = 0x01;
break;
case CTL_UA_LOG_CHANGE:
/* 2Ah/02h LOG PARAMETERS CHANGED */
asc = 0x2A;
ascq = 0x02;
break;
case CTL_UA_RES_PREEMPT:
/* 2Ah/03h RESERVATIONS PREEMPTED */
asc = 0x2A;
ascq = 0x03;
break;
case CTL_UA_RES_RELEASE:
/* 2Ah/04h RESERVATIONS RELEASED */
asc = 0x2A;
ascq = 0x04;
break;
case CTL_UA_REG_PREEMPT:
/* 2Ah/05h REGISTRATIONS PREEMPTED */
asc = 0x2A;
ascq = 0x05;
break;
case CTL_UA_ASYM_ACC_CHANGE:
/* 2Ah/06h ASYMMETRIC ACCESS STATE CHANGED */
asc = 0x2A;
ascq = 0x06;
break;
case CTL_UA_CAPACITY_CHANGED:
/* 2Ah/09h CAPACITY DATA HAS CHANGED */
asc = 0x2A;
ascq = 0x09;
break;
case CTL_UA_THIN_PROV_THRES:
/* 38h/07h THIN PROVISIONING SOFT THRESHOLD REACHED */
asc = 0x38;
ascq = 0x07;
break;
default:
panic("ctl_build_ua: Unknown UA %x", ua_to_build);
}
ctl_ua_to_acsq(ua_to_build, &asc, &ascq, &ua_to_clear);
ctl_set_sense_data(sense,
/*lun*/ NULL,
@ -842,6 +886,18 @@ ctl_set_task_aborted(struct ctl_scsiio *ctsio)
ctsio->io_hdr.status = CTL_CMD_ABORTED;
}
void
ctl_set_hw_write_protected(struct ctl_scsiio *ctsio)
{
/* "Hardware write protected" */
ctl_set_sense(ctsio,
/*current_error*/ 1,
/*sense_key*/ SSD_KEY_DATA_PROTECT,
/*asc*/ 0x27,
/*ascq*/ 0x01,
SSD_ELEM_NONE);
}
void
ctl_set_space_alloc_fail(struct ctl_scsiio *ctsio)
{

View File

@ -57,6 +57,7 @@ void ctl_sense_to_desc(struct scsi_sense_data_fixed *sense_src,
void ctl_sense_to_fixed(struct scsi_sense_data_desc *sense_src,
struct scsi_sense_data_fixed *sense_dest);
void ctl_set_ua(struct ctl_scsiio *ctsio, int asc, int ascq);
ctl_ua_type ctl_build_qae(struct ctl_lun *lun, uint32_t initidx, uint8_t *resp);
ctl_ua_type ctl_build_ua(struct ctl_lun *lun, uint32_t initidx,
struct scsi_sense_data *sense, scsi_sense_data_type sense_format);
void ctl_set_overlapped_cmd(struct ctl_scsiio *ctsio);
@ -85,6 +86,7 @@ void ctl_set_reservation_conflict(struct ctl_scsiio *ctsio);
void ctl_set_queue_full(struct ctl_scsiio *ctsio);
void ctl_set_busy(struct ctl_scsiio *ctsio);
void ctl_set_task_aborted(struct ctl_scsiio *ctsio);
void ctl_set_hw_write_protected(struct ctl_scsiio *ctsio);
void ctl_set_space_alloc_fail(struct ctl_scsiio *ctsio);
void ctl_set_success(struct ctl_scsiio *ctsio);

View File

@ -328,8 +328,16 @@ ctl_port_online(struct ctl_port *port)
}
if (port->port_online != NULL)
port->port_online(port->onoff_arg);
/* XXX KDM need a lock here? */
mtx_lock(&softc->ctl_lock);
port->status |= CTL_PORT_STATUS_ONLINE;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
continue;
mtx_lock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
mtx_unlock(&lun->lun_lock);
}
mtx_unlock(&softc->ctl_lock);
ctl_isc_announce_port(port);
}
@ -355,8 +363,16 @@ ctl_port_offline(struct ctl_port *port)
port->lun_disable(port->targ_lun_arg, lun->lun);
}
}
/* XXX KDM need a lock here? */
mtx_lock(&softc->ctl_lock);
port->status &= ~CTL_PORT_STATUS_ONLINE;
STAILQ_FOREACH(lun, &softc->lun_list, links) {
if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
continue;
mtx_lock(&lun->lun_lock);
ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
mtx_unlock(&lun->lun_lock);
}
mtx_unlock(&softc->ctl_lock);
ctl_isc_announce_port(port);
}

View File

@ -435,6 +435,14 @@ cfcs_datamove(union ctl_io *io)
io->scsiio.ext_data_filled += len_copied;
if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) {
io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = NULL;
io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
ccb->ccb_h.status |= CAM_REQ_CMP;
xpt_done(ccb);
}
io->scsiio.be_move_done(io);
}
@ -458,12 +466,13 @@ cfcs_done(union ctl_io *io)
/*
* Translate CTL status to CAM status.
*/
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
switch (io->io_hdr.status & CTL_STATUS_MASK) {
case CTL_SUCCESS:
ccb->ccb_h.status = CAM_REQ_CMP;
ccb->ccb_h.status |= CAM_REQ_CMP;
break;
case CTL_SCSI_ERROR:
ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
ccb->csio.scsi_status = io->scsiio.scsi_status;
bcopy(&io->scsiio.sense_data, &ccb->csio.sense_data,
min(io->scsiio.sense_len, ccb->csio.sense_len));
@ -479,14 +488,18 @@ cfcs_done(union ctl_io *io)
}
break;
case CTL_CMD_ABORTED:
ccb->ccb_h.status = CAM_REQ_ABORTED;
ccb->ccb_h.status |= CAM_REQ_ABORTED;
break;
case CTL_ERROR:
default:
ccb->ccb_h.status = CAM_REQ_CMP_ERR;
ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
break;
}
if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
(ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
xpt_freeze_devq(ccb->ccb_h.path, 1);
ccb->ccb_h.status |= CAM_DEV_QFRZN;
}
xpt_done(ccb);
ctl_free_io(io);
}

View File

@ -157,11 +157,8 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
M_WAITOK);
ext_sglist_malloced = 1;
if (copyin(ctsio->ext_data_ptr, ext_sglist,
ext_sglen) != 0) {
ctl_set_internal_failure(ctsio,
/*sks_valid*/ 0,
/*retry_count*/ 0);
if (copyin(ctsio->ext_data_ptr, ext_sglist, ext_sglen) != 0) {
ctsio->io_hdr.port_status = 31343;
goto bailout;
}
ext_sg_entries = ctsio->ext_sg_entries;
@ -229,9 +226,7 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
"to %p\n", kern_ptr, ext_ptr));
if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
ctl_set_internal_failure(ctsio,
/*sks_valid*/ 0,
/*retry_count*/ 0);
ctsio->io_hdr.port_status = 31344;
goto bailout;
}
} else {
@ -240,9 +235,7 @@ ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
"to %p\n", ext_ptr, kern_ptr));
if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){
ctl_set_internal_failure(ctsio,
/*sks_valid*/ 0,
/*retry_count*/0);
ctsio->io_hdr.port_status = 31345;
goto bailout;
}
}

View File

@ -639,6 +639,12 @@ cfiscsi_pdu_handle_task_request(struct icl_pdu *request)
#endif
io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
break;
case BHSTMR_FUNCTION_CLEAR_TASK_SET:
#if 0
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_CLEAR_TASK_SET");
#endif
io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET;
break;
case BHSTMR_FUNCTION_LOGICAL_UNIT_RESET:
#if 0
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_LOGICAL_UNIT_RESET");
@ -651,6 +657,37 @@ cfiscsi_pdu_handle_task_request(struct icl_pdu *request)
#endif
io->taskio.task_action = CTL_TASK_TARGET_RESET;
break;
case BHSTMR_FUNCTION_TARGET_COLD_RESET:
#if 0
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_TARGET_COLD_RESET");
#endif
io->taskio.task_action = CTL_TASK_TARGET_RESET;
break;
case BHSTMR_FUNCTION_QUERY_TASK:
#if 0
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_TASK");
#endif
io->taskio.task_action = CTL_TASK_QUERY_TASK;
io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag;
break;
case BHSTMR_FUNCTION_QUERY_TASK_SET:
#if 0
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_TASK_SET");
#endif
io->taskio.task_action = CTL_TASK_QUERY_TASK_SET;
break;
case BHSTMR_FUNCTION_I_T_NEXUS_RESET:
#if 0
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_I_T_NEXUS_RESET");
#endif
io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
break;
case BHSTMR_FUNCTION_QUERY_ASYNC_EVENT:
#if 0
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_ASYNC_EVENT");
#endif
io->taskio.task_action = CTL_TASK_QUERY_ASYNC_EVENT;
break;
default:
CFISCSI_SESSION_DEBUG(cs, "unsupported function 0x%x",
bhstmr->bhstmr_function & ~0x80);
@ -2842,7 +2879,9 @@ cfiscsi_task_management_done(union ctl_io *io)
struct iscsi_bhs_task_management_request *bhstmr;
struct iscsi_bhs_task_management_response *bhstmr2;
struct cfiscsi_data_wait *cdw, *tmpcdw;
struct cfiscsi_session *cs;
struct cfiscsi_session *cs, *tcs;
struct cfiscsi_softc *softc;
int cold_reset = 0;
request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
cs = PDU_SESSION(request);
@ -2880,29 +2919,48 @@ cfiscsi_task_management_done(union ctl_io *io)
}
CFISCSI_SESSION_UNLOCK(cs);
}
if ((bhstmr->bhstmr_function & ~0x80) ==
BHSTMR_FUNCTION_TARGET_COLD_RESET &&
io->io_hdr.status == CTL_SUCCESS)
cold_reset = 1;
response = cfiscsi_pdu_new_response(request, M_WAITOK);
bhstmr2 = (struct iscsi_bhs_task_management_response *)
response->ip_bhs;
bhstmr2->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_RESPONSE;
bhstmr2->bhstmr_flags = 0x80;
if (io->io_hdr.status == CTL_SUCCESS) {
switch (io->taskio.task_status) {
case CTL_TASK_FUNCTION_COMPLETE:
bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_COMPLETE;
} else {
/*
* XXX: How to figure out what exactly went wrong? iSCSI spec
* expects us to provide detailed error, e.g. "Task does
* not exist" or "LUN does not exist".
*/
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED");
bhstmr2->bhstmr_response =
BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED;
break;
case CTL_TASK_FUNCTION_SUCCEEDED:
bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_SUCCEEDED;
break;
case CTL_TASK_LUN_DOES_NOT_EXIST:
bhstmr2->bhstmr_response = BHSTMR_RESPONSE_LUN_DOES_NOT_EXIST;
break;
case CTL_TASK_FUNCTION_NOT_SUPPORTED:
default:
bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED;
break;
}
memcpy(bhstmr2->bhstmr_additional_reponse_information,
io->taskio.task_resp, sizeof(io->taskio.task_resp));
bhstmr2->bhstmr_initiator_task_tag = bhstmr->bhstmr_initiator_task_tag;
ctl_free_io(io);
icl_pdu_free(request);
cfiscsi_pdu_queue(response);
if (cold_reset) {
softc = cs->cs_target->ct_softc;
mtx_lock(&softc->lock);
TAILQ_FOREACH(tcs, &softc->sessions, cs_next) {
if (tcs->cs_target == cs->cs_target)
cfiscsi_session_terminate(tcs);
}
mtx_unlock(&softc->lock);
}
}
static void

View File

@ -622,28 +622,33 @@ ctl_ha_peer_sysctl(SYSCTL_HANDLER_ARGS)
struct ha_softc *softc = (struct ha_softc *)arg1;
struct sockaddr_in *sa;
int error, b1, b2, b3, b4, p, num;
char buf[128];
error = sysctl_handle_string(oidp, softc->ha_peer,
sizeof(softc->ha_peer), req);
if ((error != 0) || (req->newptr == NULL))
strlcpy(buf, softc->ha_peer, sizeof(buf));
error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
if ((error != 0) || (req->newptr == NULL) ||
strncmp(buf, softc->ha_peer, sizeof(buf)) == 0)
return (error);
sa = &softc->ha_peer_in;
mtx_lock(&softc->ha_lock);
if ((num = sscanf(softc->ha_peer, "connect %d.%d.%d.%d:%d",
if ((num = sscanf(buf, "connect %d.%d.%d.%d:%d",
&b1, &b2, &b3, &b4, &p)) >= 4) {
softc->ha_connect = 1;
softc->ha_listen = 0;
} else if ((num = sscanf(softc->ha_peer, "listen %d.%d.%d.%d:%d",
} else if ((num = sscanf(buf, "listen %d.%d.%d.%d:%d",
&b1, &b2, &b3, &b4, &p)) >= 4) {
softc->ha_connect = 0;
softc->ha_listen = 1;
} else {
softc->ha_connect = 0;
softc->ha_listen = 0;
if (softc->ha_peer[0] != 0)
if (buf[0] != 0) {
buf[0] = 0;
error = EINVAL;
}
}
strlcpy(softc->ha_peer, buf, sizeof(softc->ha_peer));
if (softc->ha_connect || softc->ha_listen) {
memset(sa, 0, sizeof(*sa));
sa->sin_len = sizeof(struct sockaddr_in);
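The handler above accepts peer specifications of the form "connect a.b.c.d:port" or "listen a.b.c.d:port"; an empty or unparseable string now clears the setting and returns EINVAL instead of being silently kept. From userland this would presumably be driven through sysctlbyname(3); the OID name below is an assumption, since the SYSCTL declaration is not part of this hunk:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <string.h>

/* Hypothetical: point this node's CTL HA code at its peer. */
static int
set_ha_peer(const char *spec)	/* e.g. "connect 10.0.0.2:999" */
{
	return (sysctlbyname("kern.cam.ctl.ha_peer", NULL, NULL,
	    spec, strlen(spec) + 1));
}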

View File

@ -328,9 +328,20 @@ typedef enum {
CTL_TASK_TARGET_RESET,
CTL_TASK_BUS_RESET,
CTL_TASK_PORT_LOGIN,
CTL_TASK_PORT_LOGOUT
CTL_TASK_PORT_LOGOUT,
CTL_TASK_QUERY_TASK,
CTL_TASK_QUERY_TASK_SET,
CTL_TASK_QUERY_ASYNC_EVENT
} ctl_task_type;
typedef enum {
CTL_TASK_FUNCTION_COMPLETE,
CTL_TASK_FUNCTION_SUCCEEDED,
CTL_TASK_FUNCTION_REJECTED,
CTL_TASK_LUN_DOES_NOT_EXIST,
CTL_TASK_FUNCTION_NOT_SUPPORTED
} ctl_task_status;
/*
* Task management I/O structure. Aborts, bus resets, etc., are sent using
* this structure.
@ -343,6 +354,8 @@ struct ctl_taskio {
ctl_task_type task_action; /* Target Reset, Abort, etc. */
uint32_t tag_num; /* tag number */
ctl_tag_type tag_type; /* simple, ordered, etc. */
uint8_t task_status; /* Complete, Succeeded, etc. */
uint8_t task_resp[3];/* Response information */
};
typedef enum {
@ -439,7 +452,6 @@ struct ctl_ha_msg_scsi {
uint32_t residual; /* data residual length */
uint32_t fetd_status; /* trans status, set by FETD,
0 = good*/
struct ctl_lba_len lbalen; /* used for stats */
struct scsi_sense_data sense_data; /* sense data */
};

View File

@ -394,8 +394,7 @@ ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len)
scsi_ulto2b(0, rtfb_ptr->optimal_length_granularity);
scsi_u64to8b(0, rtfb_ptr->maximum_bytes);
scsi_u64to8b(0, rtfb_ptr->optimal_bytes);
scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
rtfb_ptr->optimal_bytes_to_token_per_segment);
scsi_u64to8b(UINT64_MAX, rtfb_ptr->optimal_bytes_to_token_per_segment);
scsi_u64to8b(TPC_MAX_IOCHUNK_SIZE,
rtfb_ptr->optimal_bytes_from_token_per_segment);
@ -1590,6 +1589,10 @@ ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
cdb = (struct scsi_extended_copy *)ctsio->cdb;
len = scsi_4btoul(cdb->length);
if (len == 0) {
ctl_set_success(ctsio);
goto done;
}
if (len < sizeof(struct scsi_extended_copy_lid1_data) ||
len > sizeof(struct scsi_extended_copy_lid1_data) +
TPC_MAX_LIST + TPC_MAX_INLINE) {
@ -1620,20 +1623,22 @@ ctl_extended_copy_lid1(struct ctl_scsiio *ctsio)
lencscd = scsi_2btoul(data->cscd_list_length);
lenseg = scsi_4btoul(data->segment_list_length);
leninl = scsi_4btoul(data->inline_data_length);
if (len < sizeof(struct scsi_extended_copy_lid1_data) +
lencscd + lenseg + leninl ||
leninl > TPC_MAX_INLINE) {
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
/*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
goto done;
}
if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
ctl_set_sense(ctsio, /*current_error*/ 1,
/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
/*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
goto done;
}
if (lencscd + lenseg > TPC_MAX_LIST) {
if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
ctl_set_sense(ctsio, /*current_error*/ 1,
/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
/*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
goto done;
}
if (lencscd + lenseg > TPC_MAX_LIST ||
leninl > TPC_MAX_INLINE ||
len < sizeof(struct scsi_extended_copy_lid1_data) +
lencscd + lenseg + leninl) {
ctl_set_param_len_error(ctsio);
goto done;
}
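The reworked checks above split the old catch-all into three distinct failures: too many CSCD descriptors (ASC/ASCQ 26h/06h), too many segment descriptors (26h/08h), and a plain parameter-list length error for everything else. Restated as a standalone predicate with the CTL limits stubbed out (the numeric values are placeholders, not the real TPC_MAX_* constants):

#include <stddef.h>

#define MAX_CSCD_BYTES	1024	/* placeholder for TPC_MAX_CSCDS * sizeof() */
#define MAX_SEG_BYTES	1024	/* placeholder for TPC_MAX_SEGS * sizeof() */
#define MAX_LIST	8192	/* placeholder for TPC_MAX_LIST */
#define MAX_INLINE	512	/* placeholder for TPC_MAX_INLINE */
#define HDR_LEN		16	/* placeholder for the header size */

enum xcopy_check { XC_OK, XC_TOO_MANY_CSCDS, XC_TOO_MANY_SEGS, XC_BAD_LEN };

static enum xcopy_check
check_xcopy_lengths(size_t len, size_t lencscd, size_t lenseg, size_t leninl)
{
	if (lencscd > MAX_CSCD_BYTES)
		return (XC_TOO_MANY_CSCDS);	/* 26h/06h */
	if (lenseg > MAX_SEG_BYTES)
		return (XC_TOO_MANY_SEGS);	/* 26h/08h */
	if (lencscd + lenseg > MAX_LIST || leninl > MAX_INLINE ||
	    len < HDR_LEN + lencscd + lenseg + leninl)
		return (XC_BAD_LEN);
	return (XC_OK);
}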
@ -1717,6 +1722,10 @@ ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
cdb = (struct scsi_extended_copy *)ctsio->cdb;
len = scsi_4btoul(cdb->length);
if (len == 0) {
ctl_set_success(ctsio);
goto done;
}
if (len < sizeof(struct scsi_extended_copy_lid4_data) ||
len > sizeof(struct scsi_extended_copy_lid4_data) +
TPC_MAX_LIST + TPC_MAX_INLINE) {
@ -1747,20 +1756,22 @@ ctl_extended_copy_lid4(struct ctl_scsiio *ctsio)
lencscd = scsi_2btoul(data->cscd_list_length);
lenseg = scsi_2btoul(data->segment_list_length);
leninl = scsi_2btoul(data->inline_data_length);
if (len < sizeof(struct scsi_extended_copy_lid4_data) +
lencscd + lenseg + leninl ||
leninl > TPC_MAX_INLINE) {
ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0,
/*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0);
goto done;
}
if (lencscd > TPC_MAX_CSCDS * sizeof(struct scsi_ec_cscd)) {
ctl_set_sense(ctsio, /*current_error*/ 1,
/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
/*asc*/ 0x26, /*ascq*/ 0x06, SSD_ELEM_NONE);
goto done;
}
if (lencscd + lenseg > TPC_MAX_LIST) {
if (lenseg > TPC_MAX_SEGS * sizeof(struct scsi_ec_segment)) {
ctl_set_sense(ctsio, /*current_error*/ 1,
/*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
/*asc*/ 0x26, /*ascq*/ 0x08, SSD_ELEM_NONE);
goto done;
}
if (lencscd + lenseg > TPC_MAX_LIST ||
leninl > TPC_MAX_INLINE ||
len < sizeof(struct scsi_extended_copy_lid4_data) +
lencscd + lenseg + leninl) {
ctl_set_param_len_error(ctsio);
goto done;
}

View File

@ -281,7 +281,9 @@ tpcl_resolve(struct ctl_softc *softc, int init_port,
struct ctl_lun *lun;
uint64_t lunid = UINT64_MAX;
if (cscd->type_code != EC_CSCD_ID)
if (cscd->type_code != EC_CSCD_ID ||
(cscd->luidt_pdt & EC_LUIDT_MASK) != EC_LUIDT_LUN ||
(cscd->luidt_pdt & EC_NUL) != 0)
return (lunid);
cscdid = (struct scsi_ec_cscd_id *)cscd;
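The tightened check above only resolves a CSCD when it is an identification descriptor that actually designates a logical unit: the LU ID type field must be EC_LUIDT_LUN and the NUL bit must be clear. As a standalone predicate using the same identifiers, with the struct layout assumed from the header hunk later in this diff:

/* Sketch: does this CSCD descriptor name a resolvable logical unit? */
static int
cscd_is_resolvable_lun(const struct scsi_ec_cscd *cscd)
{
	return (cscd->type_code == EC_CSCD_ID &&
	    (cscd->luidt_pdt & EC_LUIDT_MASK) == EC_LUIDT_LUN &&
	    (cscd->luidt_pdt & EC_NUL) == 0);
}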

View File

@ -89,7 +89,10 @@ static struct ctl_task_desc ctl_task_table[] = {
{CTL_TASK_TARGET_RESET, "Target Reset"},
{CTL_TASK_BUS_RESET, "Bus Reset"},
{CTL_TASK_PORT_LOGIN, "Port Login"},
{CTL_TASK_PORT_LOGOUT, "Port Logout"}
{CTL_TASK_PORT_LOGOUT, "Port Logout"},
{CTL_TASK_QUERY_TASK, "Query Task"},
{CTL_TASK_QUERY_TASK_SET, "Query Task Set"},
{CTL_TASK_QUERY_ASYNC_EVENT, "Query Async Event"}
};
void

View File

@ -509,7 +509,8 @@ static struct op_table_entry scsi_op_codes[] = {
/* 99 */
/* 9A */
/* 9B */
/* 9C */
/* 9C O WRITE ATOMIC(16) */
{ 0x9C, D, "WRITE ATOMIC(16)" },
/* 9D */
/* XXX KDM ALL for this? op-num.txt defines it for none.. */
/* 9E SERVICE ACTION IN(16) */
@ -1079,7 +1080,7 @@ static struct asc_table_entry asc_table[] = {
{ SST(0x04, 0x00, SS_RDEF,
"Logical unit not ready, cause not reportable") },
/* DTLPWROMAEBKVF */
{ SST(0x04, 0x01, SS_TUR | SSQ_MANY | SSQ_DECREMENT_COUNT | EBUSY,
{ SST(0x04, 0x01, SS_WAIT | EBUSY,
"Logical unit is in process of becoming ready") },
/* DTLPWROMAEBKVF */
{ SST(0x04, 0x02, SS_START | SSQ_DECREMENT_COUNT | ENXIO,
@ -1106,7 +1107,7 @@ static struct asc_table_entry asc_table[] = {
{ SST(0x04, 0x09, SS_RDEF, /* XXX TBD */
"Logical unit not ready, self-test in progress") },
/* DTLPWROMAEBKVF */
{ SST(0x04, 0x0A, SS_TUR | SSQ_MANY | SSQ_DECREMENT_COUNT | ENXIO,
{ SST(0x04, 0x0A, SS_WAIT | ENXIO,
"Logical unit not accessible, asymmetric access state transition")},
/* DTLPWROMAEBKVF */
{ SST(0x04, 0x0B, SS_FATAL | ENXIO,
@ -1121,7 +1122,7 @@ static struct asc_table_entry asc_table[] = {
{ SST(0x04, 0x10, SS_RDEF, /* XXX TBD */
"Logical unit not ready, auxiliary memory not accessible") },
/* DT WRO AEB VF */
{ SST(0x04, 0x11, SS_TUR | SSQ_MANY | SSQ_DECREMENT_COUNT | EBUSY,
{ SST(0x04, 0x11, SS_WAIT | EBUSY,
"Logical unit not ready, notify (enable spinup) required") },
/* M V */
{ SST(0x04, 0x12, SS_RDEF, /* XXX TBD */
@ -3803,8 +3804,6 @@ scsi_set_sense_data_va(struct scsi_sense_data *sense_data,
*/
sense->extra_len = 10;
sense_len = (int)va_arg(ap, int);
len_to_copy = MIN(sense_len, SSD_EXTRA_MAX -
sense->extra_len);
data = (uint8_t *)va_arg(ap, uint8_t *);
switch (elem_type) {
@ -3822,10 +3821,14 @@ scsi_set_sense_data_va(struct scsi_sense_data *sense_data,
uint8_t *data_dest;
int i;
if (elem_type == SSD_ELEM_COMMAND)
if (elem_type == SSD_ELEM_COMMAND) {
data_dest = &sense->cmd_spec_info[0];
else {
len_to_copy = MIN(sense_len,
sizeof(sense->cmd_spec_info));
} else {
data_dest = &sense->info[0];
len_to_copy = MIN(sense_len,
sizeof(sense->info));
/*
* We're setting the info field, so
* set the valid bit.

View File

@ -103,6 +103,9 @@ typedef enum {
/* The retyable, error action, with table specified error code */
#define SS_RET SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE
/* Wait for transient error status to change */
#define SS_WAIT SS_TUR|SSQ_MANY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE
/* Fatal error action, with table specified error code */
#define SS_FATAL SS_FAIL|SSQ_PRINT_SENSE
@ -1666,6 +1669,7 @@ struct scsi_ec_cscd
uint8_t type_code;
#define EC_CSCD_EXT 0xff
uint8_t luidt_pdt;
#define EC_NUL 0x20
#define EC_LUIDT_MASK 0xc0
#define EC_LUIDT_LUN 0x00
#define EC_LUIDT_PROXY_TOKEN 0x40

View File

@ -655,11 +655,13 @@ chdone(struct cam_periph *periph, union ccb *done_ccb)
*/
return;
} else if (error != 0) {
int retry_scheduled;
struct scsi_mode_sense_6 *sms;
int frozen, retry_scheduled;
sms = (struct scsi_mode_sense_6 *)
done_ccb->csio.cdb_io.cdb_bytes;
frozen = (done_ccb->ccb_h.status &
CAM_DEV_QFRZN) != 0;
/*
* Check to see if block descriptors were
@ -670,7 +672,8 @@ chdone(struct cam_periph *periph, union ccb *done_ccb)
* block descriptors were disabled, enable
* them and re-send the command.
*/
if (sms->byte2 & SMS_DBD) {
if ((sms->byte2 & SMS_DBD) != 0 &&
(periph->flags & CAM_PERIPH_INVALID) == 0) {
sms->byte2 &= ~SMS_DBD;
xpt_action(done_ccb);
softc->quirks |= CH_Q_NO_DBD;
@ -679,7 +682,7 @@ chdone(struct cam_periph *periph, union ccb *done_ccb)
retry_scheduled = 0;
/* Don't wedge this device's queue */
if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
if (frozen)
cam_release_devq(done_ccb->ccb_h.path,
/*relsim_flags*/0,
/*reduction*/0,

View File

@ -25,6 +25,7 @@
/*
* Copyright (c) 2014 by Delphix. All rights reserved.
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
*/
/*
@ -635,14 +636,17 @@ avl_add(avl_tree_t *tree, void *new_node)
/*
* This is unfortunate. We want to call panic() here, even for
* non-DEBUG kernels. In userland, however, we can't depend on anything
* in libc or else the rtld build process gets confused. So, all we can
* do in userland is resort to a normal ASSERT().
* in libc or else the rtld build process gets confused.
* Thankfully, rtld provides us with its own assfail() so we can use
* that here. We use assfail() directly to get a nice error message
* in the core - much like what panic() does for crashdumps.
*/
if (avl_find(tree, new_node, &where) != NULL)
#ifdef _KERNEL
panic("avl_find() succeeded inside avl_add()");
#else
ASSERT(0);
(void) assfail("avl_find() succeeded inside avl_add()",
__FILE__, __LINE__);
#endif
avl_insert(tree, new_node, where);
}

View File

@ -213,7 +213,7 @@ static int arc_min_prefetch_lifespan;
int arc_lotsfree_percent = 10;
static int arc_dead;
extern int zfs_prefetch_disable;
extern boolean_t zfs_prefetch_disable;
/*
* The arc has filled available memory and has now warmed up.
@ -582,6 +582,8 @@ typedef struct arc_stats {
kstat_named_t arcstat_meta_limit;
kstat_named_t arcstat_meta_max;
kstat_named_t arcstat_meta_min;
kstat_named_t arcstat_sync_wait_for_async;
kstat_named_t arcstat_demand_hit_predictive_prefetch;
} arc_stats_t;
static arc_stats_t arc_stats = {
@ -680,7 +682,9 @@ static arc_stats_t arc_stats = {
{ "arc_meta_used", KSTAT_DATA_UINT64 },
{ "arc_meta_limit", KSTAT_DATA_UINT64 },
{ "arc_meta_max", KSTAT_DATA_UINT64 },
{ "arc_meta_min", KSTAT_DATA_UINT64 }
{ "arc_meta_min", KSTAT_DATA_UINT64 },
{ "sync_wait_for_async", KSTAT_DATA_UINT64 },
{ "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
};
#define ARCSTAT(stat) (arc_stats.stat.value.ui64)
@ -844,6 +848,7 @@ typedef struct l2arc_buf_hdr {
uint64_t b_daddr; /* disk address, offset byte */
/* real alloc'd buffer size depending on b_compress applied */
int32_t b_asize;
uint8_t b_compress;
list_node_t b_l2node;
} l2arc_buf_hdr_t;
@ -923,15 +928,6 @@ static arc_buf_hdr_t arc_eviction_hdr;
#define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
/* For storing compression mode in b_flags */
#define HDR_COMPRESS_OFFSET 24
#define HDR_COMPRESS_NBITS 7
#define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET(hdr->b_flags, \
HDR_COMPRESS_OFFSET, HDR_COMPRESS_NBITS))
#define HDR_SET_COMPRESS(hdr, cmp) BF32_SET(hdr->b_flags, \
HDR_COMPRESS_OFFSET, HDR_COMPRESS_NBITS, (cmp))
/*
* Other sizes
*/
@ -2222,7 +2218,7 @@ arc_buf_l2_cdata_free(arc_buf_hdr_t *hdr)
* separately compressed buffer, so there's nothing to free (it
* points to the same buffer as the arc_buf_t's b_data field).
*/
if (HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) {
if (hdr->b_l2hdr.b_compress == ZIO_COMPRESS_OFF) {
hdr->b_l1hdr.b_tmp_cdata = NULL;
return;
}
@ -2231,12 +2227,12 @@ arc_buf_l2_cdata_free(arc_buf_hdr_t *hdr)
* There's nothing to free since the buffer was all zero's and
* compressed to a zero length buffer.
*/
if (HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_EMPTY) {
if (hdr->b_l2hdr.b_compress == ZIO_COMPRESS_EMPTY) {
ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
return;
}
ASSERT(L2ARC_IS_VALID_COMPRESS(HDR_GET_COMPRESS(hdr)));
ASSERT(L2ARC_IS_VALID_COMPRESS(hdr->b_l2hdr.b_compress));
arc_buf_free_on_write(hdr->b_l1hdr.b_tmp_cdata,
hdr->b_size, zio_data_buf_free);
@ -4250,6 +4246,36 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
if (HDR_IO_IN_PROGRESS(hdr)) {
if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
priority == ZIO_PRIORITY_SYNC_READ) {
/*
* This sync read must wait for an
* in-progress async read (e.g. a predictive
* prefetch). Async reads are queued
* separately at the vdev_queue layer, so
* this is a form of priority inversion.
* Ideally, we would "inherit" the demand
* i/o's priority by moving the i/o from
* the async queue to the synchronous queue,
* but there is currently no mechanism to do
* so. Track this so that we can evaluate
* the magnitude of this potential performance
* problem.
*
* Note that if the prefetch i/o is already
* active (has been issued to the device),
* the prefetch improved performance, because
* we issued it sooner than we would have
* without the prefetch.
*/
DTRACE_PROBE1(arc__sync__wait__for__async,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_sync_wait_for_async);
}
if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
hdr->b_flags &= ~ARC_FLAG_PREDICTIVE_PREFETCH;
}
if (*arc_flags & ARC_FLAG_WAIT) {
cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
mutex_exit(hash_lock);
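The probe and counter introduced in this hunk make the async-prefetch priority inversion measurable rather than fixing it: the SDT probe should presumably surface as something like sdt:::arc-sync-wait-for-async and the counter as the sync_wait_for_async entry of the arcstats kstat, both names inferred from the definitions above rather than verified against the probe namespace.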
@ -4258,7 +4284,7 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
if (done) {
arc_callback_t *acb = NULL;
arc_callback_t *acb = NULL;
acb = kmem_zalloc(sizeof (arc_callback_t),
KM_SLEEP);
@ -4283,6 +4309,19 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
hdr->b_l1hdr.b_state == arc_mfu);
if (done) {
if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
/*
* This is a demand read which does not have to
* wait for i/o because we did a predictive
* prefetch i/o for it, which has completed.
*/
DTRACE_PROBE1(
arc__demand__hit__predictive__prefetch,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(
arcstat_demand_hit_predictive_prefetch);
hdr->b_flags &= ~ARC_FLAG_PREDICTIVE_PREFETCH;
}
add_reference(hdr, hash_lock, private);
/*
* If this block is already in use, create a new
@ -4345,12 +4384,16 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
goto top; /* restart the IO request */
}
/* if this is a prefetch, we don't have a reference */
if (*arc_flags & ARC_FLAG_PREFETCH) {
/*
* If there is a callback, we pass our reference to
* it; otherwise we remove our reference.
*/
if (done == NULL) {
(void) remove_reference(hdr, hash_lock,
private);
hdr->b_flags |= ARC_FLAG_PREFETCH;
}
if (*arc_flags & ARC_FLAG_PREFETCH)
hdr->b_flags |= ARC_FLAG_PREFETCH;
if (*arc_flags & ARC_FLAG_L2CACHE)
hdr->b_flags |= ARC_FLAG_L2CACHE;
if (*arc_flags & ARC_FLAG_L2COMPRESS)
@ -4373,11 +4416,13 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
/* if this is a prefetch, we don't have a reference */
/*
* If there is a callback, we pass a reference to it.
*/
if (done != NULL)
add_reference(hdr, hash_lock, private);
if (*arc_flags & ARC_FLAG_PREFETCH)
hdr->b_flags |= ARC_FLAG_PREFETCH;
else
add_reference(hdr, hash_lock, private);
if (*arc_flags & ARC_FLAG_L2CACHE)
hdr->b_flags |= ARC_FLAG_L2CACHE;
if (*arc_flags & ARC_FLAG_L2COMPRESS)
@ -4395,6 +4440,8 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
arc_access(hdr, hash_lock);
}
if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH)
hdr->b_flags |= ARC_FLAG_PREDICTIVE_PREFETCH;
ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
@ -4409,7 +4456,7 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
(vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
devw = hdr->b_l2hdr.b_dev->l2ad_writing;
addr = hdr->b_l2hdr.b_daddr;
b_compress = HDR_GET_COMPRESS(hdr);
b_compress = hdr->b_l2hdr.b_compress;
b_asize = hdr->b_l2hdr.b_asize;
/*
* Lock out device removal.
@ -4437,6 +4484,11 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
curthread->td_ru.ru_inblock++;
#endif
if (priority == ZIO_PRIORITY_ASYNC_READ)
hdr->b_flags |= ARC_FLAG_PRIO_ASYNC_READ;
else
hdr->b_flags &= ~ARC_FLAG_PRIO_ASYNC_READ;
if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
/*
* Read from the L2ARC if the following are true:
@ -5965,6 +6017,8 @@ l2arc_read_done(zio_t *zio)
if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
ASSERT(zio->io_data != NULL);
ASSERT3U(zio->io_size, ==, hdr->b_size);
ASSERT3U(BP_GET_LSIZE(&cb->l2rcb_bp), ==, hdr->b_size);
/*
* Check this survived the L2ARC journey.
@ -6001,7 +6055,7 @@ l2arc_read_done(zio_t *zio)
ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
buf->b_data, zio->io_size, arc_read_done, buf,
buf->b_data, hdr->b_size, arc_read_done, buf,
zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
}
}
@ -6318,7 +6372,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
* can't access without holding the ARC list locks
* (which we want to avoid during compression/writing).
*/
HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
hdr->b_l2hdr.b_compress = ZIO_COMPRESS_OFF;
hdr->b_l2hdr.b_asize = hdr->b_size;
hdr->b_l1hdr.b_tmp_cdata = hdr->b_l1hdr.b_buf->b_data;
@ -6520,7 +6574,7 @@ l2arc_compress_buf(arc_buf_hdr_t *hdr)
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF);
ASSERT3S(l2hdr->b_compress, ==, ZIO_COMPRESS_OFF);
ASSERT(hdr->b_l1hdr.b_tmp_cdata != NULL);
len = l2hdr->b_asize;
@ -6532,7 +6586,7 @@ l2arc_compress_buf(arc_buf_hdr_t *hdr)
if (csize == 0) {
/* zero block, indicate that there's nothing to write */
zio_data_buf_free(cdata, len);
HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_EMPTY);
l2hdr->b_compress = ZIO_COMPRESS_EMPTY;
l2hdr->b_asize = 0;
hdr->b_l1hdr.b_tmp_cdata = NULL;
ARCSTAT_BUMP(arcstat_l2_compress_zeros);
@ -6550,7 +6604,7 @@ l2arc_compress_buf(arc_buf_hdr_t *hdr)
bzero((char *)cdata + csize, rounded - csize);
csize = rounded;
}
HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_LZ4);
l2hdr->b_compress = ZIO_COMPRESS_LZ4;
l2hdr->b_asize = csize;
hdr->b_l1hdr.b_tmp_cdata = cdata;
ARCSTAT_BUMP(arcstat_l2_compress_successes);
@ -6637,7 +6691,8 @@ l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
static void
l2arc_release_cdata_buf(arc_buf_hdr_t *hdr)
{
enum zio_compress comp = HDR_GET_COMPRESS(hdr);
ASSERT(HDR_HAS_L2HDR(hdr));
enum zio_compress comp = hdr->b_l2hdr.b_compress;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(comp == ZIO_COMPRESS_OFF || L2ARC_IS_VALID_COMPRESS(comp));

View File

@ -618,7 +618,7 @@ dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
}
static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
dnode_t *dn;
zbookmark_phys_t zb;
@ -664,7 +664,6 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
db->db.db_size, db, type));
bzero(db->db.db_data, db->db.db_size);
db->db_state = DB_CACHED;
*flags |= DB_RF_CACHED;
mutex_exit(&db->db_mtx);
return;
}
@ -687,10 +686,8 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
(void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
(*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
(flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
&aflags, &zb);
if (aflags & ARC_FLAG_CACHED)
*flags |= DB_RF_CACHED;
}
int
@ -723,8 +720,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
if (db->db_state == DB_CACHED) {
mutex_exit(&db->db_mtx);
if (prefetch)
dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
db->db.db_size, TRUE);
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_exit(&dn->dn_struct_rwlock);
DB_DNODE_EXIT(db);
@ -733,13 +729,12 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
if (zio == NULL)
zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
dbuf_read_impl(db, zio, &flags);
dbuf_read_impl(db, zio, flags);
/* dbuf_read_impl has dropped db_mtx for us */
if (prefetch)
dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
db->db.db_size, flags & DB_RF_CACHED);
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_exit(&dn->dn_struct_rwlock);
@ -758,8 +753,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
*/
mutex_exit(&db->db_mtx);
if (prefetch)
dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
db->db.db_size, TRUE);
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1);
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_exit(&dn->dn_struct_rwlock);
DB_DNODE_EXIT(db);
@ -2059,6 +2053,9 @@ dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
if (blkid > dn->dn_maxblkid)
return;
if (dnode_block_freed(dn, blkid))
return;

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2014 by Delphix. All rights reserved.
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
*/
/* Copyright (c) 2013 by Saso Kiselkov. All rights reserved. */
/* Copyright (c) 2013, Joyent, Inc. All rights reserved. */
@ -389,7 +389,7 @@ dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
*/
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
{
dmu_buf_t **dbp;
uint64_t blkid, nblks, i;
@ -399,15 +399,19 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
ASSERT(length <= DMU_MAX_ACCESS);
dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT;
if (flags & DMU_READ_NO_PREFETCH || length > zfetch_array_rd_sz)
dbuf_flags |= DB_RF_NOPREFETCH;
/*
* Note: We directly notify the prefetch code of this read, so that
* we can tell it about the multi-block read. dbuf_read() only knows
* about the one block it is accessing.
*/
dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
DB_RF_NOPREFETCH;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_datablkshift) {
int blkshift = dn->dn_datablkshift;
nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
} else {
if (offset + length > dn->dn_datablksz) {
zfs_panic_recover("zfs: accessing past end of object "
@ -426,13 +430,14 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
blkid = dbuf_whichblock(dn, 0, offset);
for (i = 0; i < nblks; i++) {
dmu_buf_impl_t *db = dbuf_hold(dn, blkid+i, tag);
dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
if (db == NULL) {
rw_exit(&dn->dn_struct_rwlock);
dmu_buf_rele_array(dbp, nblks, tag);
zio_nowait(zio);
return (SET_ERROR(EIO));
}
/* initiate async i/o */
if (read)
(void) dbuf_read(db, zio, dbuf_flags);
@ -442,6 +447,11 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
#endif
dbp[i] = &db->db;
}
if ((flags & DMU_READ_NO_PREFETCH) == 0 && read &&
length < zfetch_array_rd_sz) {
dmu_zfetch(&dn->dn_zfetch, blkid, nblks);
}
rw_exit(&dn->dn_struct_rwlock);
/* wait for async i/o */
@ -495,7 +505,8 @@ dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
uint64_t length, boolean_t read, void *tag, int *numbufsp,
dmu_buf_t ***dbpp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
@ -543,9 +554,6 @@ dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
uint64_t blkid;
int nblks, err;
if (zfs_prefetch_disable)
return;
if (len == 0) { /* they're interested in the bonus buffer */
dn = DMU_META_DNODE(os);

View File

@ -24,7 +24,7 @@
*/
/*
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright (c) 2013, 2014 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -36,19 +36,20 @@
#include <sys/kstat.h>
/*
* I'm against tune-ables, but these should probably exist as tweakable globals
* until we can get this working the way we want it to.
* This tunable disables predictive prefetch. Note that it leaves "prescient"
* prefetch (e.g. prefetch for zfs send) intact. Unlike predictive prefetch,
* prescient prefetch never issues i/os that end up not being needed,
* so it can't hurt performance.
*/
int zfs_prefetch_disable = 0;
boolean_t zfs_prefetch_disable = B_FALSE;
/* max # of streams per zfetch */
uint32_t zfetch_max_streams = 8;
/* min time before stream reclaim */
uint32_t zfetch_min_sec_reap = 2;
/* max number of blocks to fetch at a time */
uint32_t zfetch_block_cap = 256;
/* number of bytes in a array_read at which we stop prefetching (1Mb) */
/* max bytes to prefetch per stream (default 8MB) */
uint32_t zfetch_max_distance = 8 * 1024 * 1024;
/* number of bytes in an array_read at which we stop prefetching (1MB) */
uint64_t zfetch_array_rd_sz = 1024 * 1024;
SYSCTL_DECL(_vfs_zfs);
@ -59,198 +60,32 @@ SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_streams, CTLFLAG_RWTUN,
&zfetch_max_streams, 0, "Max # of streams per zfetch");
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, min_sec_reap, CTLFLAG_RWTUN,
&zfetch_min_sec_reap, 0, "Min time before stream reclaim");
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, block_cap, CTLFLAG_RWTUN,
&zfetch_block_cap, 0, "Max number of blocks to fetch at a time");
SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_distance, CTLFLAG_RWTUN,
&zfetch_max_distance, 0, "Max bytes to prefetch per stream");
SYSCTL_UQUAD(_vfs_zfs_zfetch, OID_AUTO, array_rd_sz, CTLFLAG_RWTUN,
&zfetch_array_rd_sz, 0,
"Number of bytes in a array_read at which we stop prefetching");
/* forward decls for static routines */
static boolean_t dmu_zfetch_colinear(zfetch_t *, zstream_t *);
static void dmu_zfetch_dofetch(zfetch_t *, zstream_t *);
static uint64_t dmu_zfetch_fetch(dnode_t *, uint64_t, uint64_t);
static uint64_t dmu_zfetch_fetchsz(dnode_t *, uint64_t, uint64_t);
static boolean_t dmu_zfetch_find(zfetch_t *, zstream_t *, int);
static int dmu_zfetch_stream_insert(zfetch_t *, zstream_t *);
static zstream_t *dmu_zfetch_stream_reclaim(zfetch_t *);
static void dmu_zfetch_stream_remove(zfetch_t *, zstream_t *);
static int dmu_zfetch_streams_equal(zstream_t *, zstream_t *);
typedef struct zfetch_stats {
kstat_named_t zfetchstat_hits;
kstat_named_t zfetchstat_misses;
kstat_named_t zfetchstat_colinear_hits;
kstat_named_t zfetchstat_colinear_misses;
kstat_named_t zfetchstat_stride_hits;
kstat_named_t zfetchstat_stride_misses;
kstat_named_t zfetchstat_reclaim_successes;
kstat_named_t zfetchstat_reclaim_failures;
kstat_named_t zfetchstat_stream_resets;
kstat_named_t zfetchstat_stream_noresets;
kstat_named_t zfetchstat_bogus_streams;
kstat_named_t zfetchstat_max_streams;
} zfetch_stats_t;
static zfetch_stats_t zfetch_stats = {
{ "hits", KSTAT_DATA_UINT64 },
{ "misses", KSTAT_DATA_UINT64 },
{ "colinear_hits", KSTAT_DATA_UINT64 },
{ "colinear_misses", KSTAT_DATA_UINT64 },
{ "stride_hits", KSTAT_DATA_UINT64 },
{ "stride_misses", KSTAT_DATA_UINT64 },
{ "reclaim_successes", KSTAT_DATA_UINT64 },
{ "reclaim_failures", KSTAT_DATA_UINT64 },
{ "streams_resets", KSTAT_DATA_UINT64 },
{ "streams_noresets", KSTAT_DATA_UINT64 },
{ "bogus_streams", KSTAT_DATA_UINT64 },
{ "max_streams", KSTAT_DATA_UINT64 },
};
#define ZFETCHSTAT_INCR(stat, val) \
atomic_add_64(&zfetch_stats.stat.value.ui64, (val));
#define ZFETCHSTAT_BUMP(stat) ZFETCHSTAT_INCR(stat, 1);
#define ZFETCHSTAT_BUMP(stat) \
atomic_inc_64(&zfetch_stats.stat.value.ui64);
kstat_t *zfetch_ksp;
/*
* Given a zfetch structure and a zstream structure, determine whether the
* blocks to be read are part of a co-linear pair of existing prefetch
* streams. If a set is found, coalesce the streams, removing one, and
* configure the prefetch so it looks for a strided access pattern.
*
* In other words: if we find two sequential access streams that are
* the same length and distance N apart, and this read is N from the
* last stream, then we are probably in a strided access pattern. So
* combine the two sequential streams into a single strided stream.
*
* Returns whether co-linear streams were found.
*/
static boolean_t
dmu_zfetch_colinear(zfetch_t *zf, zstream_t *zh)
{
zstream_t *z_walk;
zstream_t *z_comp;
if (! rw_tryenter(&zf->zf_rwlock, RW_WRITER))
return (0);
if (zh == NULL) {
rw_exit(&zf->zf_rwlock);
return (0);
}
for (z_walk = list_head(&zf->zf_stream); z_walk;
z_walk = list_next(&zf->zf_stream, z_walk)) {
for (z_comp = list_next(&zf->zf_stream, z_walk); z_comp;
z_comp = list_next(&zf->zf_stream, z_comp)) {
int64_t diff;
if (z_walk->zst_len != z_walk->zst_stride ||
z_comp->zst_len != z_comp->zst_stride) {
continue;
}
diff = z_comp->zst_offset - z_walk->zst_offset;
if (z_comp->zst_offset + diff == zh->zst_offset) {
z_walk->zst_offset = zh->zst_offset;
z_walk->zst_direction = diff < 0 ? -1 : 1;
z_walk->zst_stride =
diff * z_walk->zst_direction;
z_walk->zst_ph_offset =
zh->zst_offset + z_walk->zst_stride;
dmu_zfetch_stream_remove(zf, z_comp);
mutex_destroy(&z_comp->zst_lock);
kmem_free(z_comp, sizeof (zstream_t));
dmu_zfetch_dofetch(zf, z_walk);
rw_exit(&zf->zf_rwlock);
return (1);
}
diff = z_walk->zst_offset - z_comp->zst_offset;
if (z_walk->zst_offset + diff == zh->zst_offset) {
z_walk->zst_offset = zh->zst_offset;
z_walk->zst_direction = diff < 0 ? -1 : 1;
z_walk->zst_stride =
diff * z_walk->zst_direction;
z_walk->zst_ph_offset =
zh->zst_offset + z_walk->zst_stride;
dmu_zfetch_stream_remove(zf, z_comp);
mutex_destroy(&z_comp->zst_lock);
kmem_free(z_comp, sizeof (zstream_t));
dmu_zfetch_dofetch(zf, z_walk);
rw_exit(&zf->zf_rwlock);
return (1);
}
}
}
rw_exit(&zf->zf_rwlock);
return (0);
}
/*
* Given a zstream_t, determine the bounds of the prefetch. Then call the
* routine that actually prefetches the individual blocks.
*/
static void
dmu_zfetch_dofetch(zfetch_t *zf, zstream_t *zs)
{
uint64_t prefetch_tail;
uint64_t prefetch_limit;
uint64_t prefetch_ofst;
uint64_t prefetch_len;
uint64_t blocks_fetched;
zs->zst_stride = MAX((int64_t)zs->zst_stride, zs->zst_len);
zs->zst_cap = MIN(zfetch_block_cap, 2 * zs->zst_cap);
prefetch_tail = MAX((int64_t)zs->zst_ph_offset,
(int64_t)(zs->zst_offset + zs->zst_stride));
/*
* XXX: use a faster division method?
*/
prefetch_limit = zs->zst_offset + zs->zst_len +
(zs->zst_cap * zs->zst_stride) / zs->zst_len;
while (prefetch_tail < prefetch_limit) {
prefetch_ofst = zs->zst_offset + zs->zst_direction *
(prefetch_tail - zs->zst_offset);
prefetch_len = zs->zst_len;
/*
* Don't prefetch beyond the end of the file, if working
* backwards.
*/
if ((zs->zst_direction == ZFETCH_BACKWARD) &&
(prefetch_ofst > prefetch_tail)) {
prefetch_len += prefetch_ofst;
prefetch_ofst = 0;
}
/* don't prefetch more than we're supposed to */
if (prefetch_len > zs->zst_len)
break;
blocks_fetched = dmu_zfetch_fetch(zf->zf_dnode,
prefetch_ofst, zs->zst_len);
prefetch_tail += zs->zst_stride;
/* stop if we've run out of stuff to prefetch */
if (blocks_fetched < zs->zst_len)
break;
}
zs->zst_ph_offset = prefetch_tail;
zs->zst_last = ddi_get_lbolt();
}
void
zfetch_init(void)
{
zfetch_ksp = kstat_create("zfs", 0, "zfetchstats", "misc",
KSTAT_TYPE_NAMED, sizeof (zfetch_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
@ -278,285 +113,41 @@ zfetch_fini(void)
void
dmu_zfetch_init(zfetch_t *zf, dnode_t *dno)
{
if (zf == NULL) {
if (zf == NULL)
return;
}
zf->zf_dnode = dno;
zf->zf_stream_cnt = 0;
zf->zf_alloc_fail = 0;
list_create(&zf->zf_stream, sizeof (zstream_t),
offsetof(zstream_t, zst_node));
offsetof(zstream_t, zs_node));
rw_init(&zf->zf_rwlock, NULL, RW_DEFAULT, NULL);
}
/*
* This function computes the actual size, in blocks, that can be prefetched,
* and fetches it.
*/
static uint64_t
dmu_zfetch_fetch(dnode_t *dn, uint64_t blkid, uint64_t nblks)
static void
dmu_zfetch_stream_remove(zfetch_t *zf, zstream_t *zs)
{
uint64_t fetchsz;
uint64_t i;
fetchsz = dmu_zfetch_fetchsz(dn, blkid, nblks);
for (i = 0; i < fetchsz; i++) {
dbuf_prefetch(dn, 0, blkid + i, ZIO_PRIORITY_ASYNC_READ,
ARC_FLAG_PREFETCH);
}
return (fetchsz);
ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));
list_remove(&zf->zf_stream, zs);
mutex_destroy(&zs->zs_lock);
kmem_free(zs, sizeof (*zs));
}
/*
* this function returns the number of blocks that would be prefetched, based
* upon the supplied dnode, blockid, and nblks. This is used so that we can
* update streams in place, and then prefetch with their old value after the
* fact. This way, we can delay the prefetch, but subsequent accesses to the
* stream won't result in the same data being prefetched multiple times.
*/
static uint64_t
dmu_zfetch_fetchsz(dnode_t *dn, uint64_t blkid, uint64_t nblks)
{
uint64_t fetchsz;
if (blkid > dn->dn_maxblkid) {
return (0);
}
/* compute fetch size */
if (blkid + nblks + 1 > dn->dn_maxblkid) {
fetchsz = (dn->dn_maxblkid - blkid) + 1;
ASSERT(blkid + fetchsz - 1 <= dn->dn_maxblkid);
} else {
fetchsz = nblks;
}
return (fetchsz);
}
/*
* given a zfetch and a zstream structure, see if there is an associated zstream
* for this block read. If so, it starts a prefetch for the stream it
* located and returns true, otherwise it returns false
*/
static boolean_t
dmu_zfetch_find(zfetch_t *zf, zstream_t *zh, int prefetched)
{
zstream_t *zs;
int64_t diff;
int reset = !prefetched;
int rc = 0;
if (zh == NULL)
return (0);
/*
* XXX: This locking strategy is a bit coarse; however, its impact has
* yet to be tested. If this turns out to be an issue, it can be
* modified in a number of different ways.
*/
rw_enter(&zf->zf_rwlock, RW_READER);
top:
for (zs = list_head(&zf->zf_stream); zs;
zs = list_next(&zf->zf_stream, zs)) {
/*
* XXX - should this be an assert?
*/
if (zs->zst_len == 0) {
/* bogus stream */
ZFETCHSTAT_BUMP(zfetchstat_bogus_streams);
continue;
}
/*
* We hit this case when we are in a strided prefetch stream:
* we will read "len" blocks before "striding".
*/
if (zh->zst_offset >= zs->zst_offset &&
zh->zst_offset < zs->zst_offset + zs->zst_len) {
if (prefetched) {
/* already fetched */
ZFETCHSTAT_BUMP(zfetchstat_stride_hits);
rc = 1;
goto out;
} else {
ZFETCHSTAT_BUMP(zfetchstat_stride_misses);
}
}
/*
* This is the forward sequential read case: we increment
* len by one each time we hit here, so we will enter this
* case on every read.
*/
if (zh->zst_offset == zs->zst_offset + zs->zst_len) {
reset = !prefetched && zs->zst_len > 1;
if (mutex_tryenter(&zs->zst_lock) == 0) {
rc = 1;
goto out;
}
if (zh->zst_offset != zs->zst_offset + zs->zst_len) {
mutex_exit(&zs->zst_lock);
goto top;
}
zs->zst_len += zh->zst_len;
diff = zs->zst_len - zfetch_block_cap;
if (diff > 0) {
zs->zst_offset += diff;
zs->zst_len = zs->zst_len > diff ?
zs->zst_len - diff : 0;
}
zs->zst_direction = ZFETCH_FORWARD;
break;
/*
* Same as above, but reading backwards through the file.
*/
} else if (zh->zst_offset == zs->zst_offset - zh->zst_len) {
/* backwards sequential access */
reset = !prefetched && zs->zst_len > 1;
if (mutex_tryenter(&zs->zst_lock) == 0) {
rc = 1;
goto out;
}
if (zh->zst_offset != zs->zst_offset - zh->zst_len) {
mutex_exit(&zs->zst_lock);
goto top;
}
zs->zst_offset = zs->zst_offset > zh->zst_len ?
zs->zst_offset - zh->zst_len : 0;
zs->zst_ph_offset = zs->zst_ph_offset > zh->zst_len ?
zs->zst_ph_offset - zh->zst_len : 0;
zs->zst_len += zh->zst_len;
diff = zs->zst_len - zfetch_block_cap;
if (diff > 0) {
zs->zst_ph_offset = zs->zst_ph_offset > diff ?
zs->zst_ph_offset - diff : 0;
zs->zst_len = zs->zst_len > diff ?
zs->zst_len - diff : zs->zst_len;
}
zs->zst_direction = ZFETCH_BACKWARD;
break;
} else if ((zh->zst_offset - zs->zst_offset - zs->zst_stride <
zs->zst_len) && (zs->zst_len != zs->zst_stride)) {
/* strided forward access */
if (mutex_tryenter(&zs->zst_lock) == 0) {
rc = 1;
goto out;
}
if ((zh->zst_offset - zs->zst_offset - zs->zst_stride >=
zs->zst_len) || (zs->zst_len == zs->zst_stride)) {
mutex_exit(&zs->zst_lock);
goto top;
}
zs->zst_offset += zs->zst_stride;
zs->zst_direction = ZFETCH_FORWARD;
break;
} else if ((zh->zst_offset - zs->zst_offset + zs->zst_stride <
zs->zst_len) && (zs->zst_len != zs->zst_stride)) {
/* strided reverse access */
if (mutex_tryenter(&zs->zst_lock) == 0) {
rc = 1;
goto out;
}
if ((zh->zst_offset - zs->zst_offset + zs->zst_stride >=
zs->zst_len) || (zs->zst_len == zs->zst_stride)) {
mutex_exit(&zs->zst_lock);
goto top;
}
zs->zst_offset = zs->zst_offset > zs->zst_stride ?
zs->zst_offset - zs->zst_stride : 0;
zs->zst_ph_offset = (zs->zst_ph_offset >
(2 * zs->zst_stride)) ?
(zs->zst_ph_offset - (2 * zs->zst_stride)) : 0;
zs->zst_direction = ZFETCH_BACKWARD;
break;
}
}
if (zs) {
if (reset) {
zstream_t *remove = zs;
ZFETCHSTAT_BUMP(zfetchstat_stream_resets);
rc = 0;
mutex_exit(&zs->zst_lock);
rw_exit(&zf->zf_rwlock);
rw_enter(&zf->zf_rwlock, RW_WRITER);
/*
* Relocate the stream, in case someone removes
* it while we were acquiring the WRITER lock.
*/
for (zs = list_head(&zf->zf_stream); zs;
zs = list_next(&zf->zf_stream, zs)) {
if (zs == remove) {
dmu_zfetch_stream_remove(zf, zs);
mutex_destroy(&zs->zst_lock);
kmem_free(zs, sizeof (zstream_t));
break;
}
}
} else {
ZFETCHSTAT_BUMP(zfetchstat_stream_noresets);
rc = 1;
dmu_zfetch_dofetch(zf, zs);
mutex_exit(&zs->zst_lock);
}
}
out:
rw_exit(&zf->zf_rwlock);
return (rc);
}
/*
* Clean-up state associated with a zfetch structure. This frees allocated
* structure members, empties the zf_stream tree, and generally makes things
* nice. This doesn't free the zfetch_t itself, that's left to the caller.
* Clean-up state associated with a zfetch structure (e.g. destroy the
* streams). This doesn't free the zfetch_t itself, that's left to the caller.
*/
void
dmu_zfetch_rele(zfetch_t *zf)
dmu_zfetch_fini(zfetch_t *zf)
{
zstream_t *zs;
zstream_t *zs_next;
zstream_t *zs;
ASSERT(!RW_LOCK_HELD(&zf->zf_rwlock));
for (zs = list_head(&zf->zf_stream); zs; zs = zs_next) {
zs_next = list_next(&zf->zf_stream, zs);
list_remove(&zf->zf_stream, zs);
mutex_destroy(&zs->zst_lock);
kmem_free(zs, sizeof (zstream_t));
}
rw_enter(&zf->zf_rwlock, RW_WRITER);
while ((zs = list_head(&zf->zf_stream)) != NULL)
dmu_zfetch_stream_remove(zf, zs);
rw_exit(&zf->zf_rwlock);
list_destroy(&zf->zf_stream);
rw_destroy(&zf->zf_rwlock);
@ -564,103 +155,55 @@ dmu_zfetch_rele(zfetch_t *zf)
}
/*
* Given a zfetch and zstream structure, insert the zstream structure into the
* AVL tree contained within the zfetch structure. Perform the appropriate
* book-keeping. It is possible that another thread has inserted a stream which
* matches one that we are about to insert, so we must be sure to check for this
* case. If one is found, return failure, and let the caller cleanup the
* duplicates.
*/
static int
dmu_zfetch_stream_insert(zfetch_t *zf, zstream_t *zs)
{
zstream_t *zs_walk;
zstream_t *zs_next;
ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));
for (zs_walk = list_head(&zf->zf_stream); zs_walk; zs_walk = zs_next) {
zs_next = list_next(&zf->zf_stream, zs_walk);
if (dmu_zfetch_streams_equal(zs_walk, zs)) {
return (0);
}
}
list_insert_head(&zf->zf_stream, zs);
zf->zf_stream_cnt++;
return (1);
}
/*
* Walk the list of zstreams in the given zfetch, find an old one (by time), and
* reclaim it for use by the caller.
*/
static zstream_t *
dmu_zfetch_stream_reclaim(zfetch_t *zf)
{
zstream_t *zs;
clock_t ticks;
ticks = zfetch_min_sec_reap * hz;
if (! rw_tryenter(&zf->zf_rwlock, RW_WRITER))
return (0);
for (zs = list_head(&zf->zf_stream); zs;
zs = list_next(&zf->zf_stream, zs)) {
if (ddi_get_lbolt() - zs->zst_last > ticks)
break;
}
if (zs) {
dmu_zfetch_stream_remove(zf, zs);
mutex_destroy(&zs->zst_lock);
bzero(zs, sizeof (zstream_t));
} else {
zf->zf_alloc_fail++;
}
rw_exit(&zf->zf_rwlock);
return (zs);
}
/*
* Given a zfetch and zstream structure, remove the zstream structure from its
* container in the zfetch structure. Perform the appropriate book-keeping.
* If there aren't too many streams already, create a new stream.
* The "blkid" argument is the next block that we expect this stream to access.
* While we're here, clean up old streams (which haven't been
* accessed for at least zfetch_min_sec_reap seconds).
*/
static void
dmu_zfetch_stream_remove(zfetch_t *zf, zstream_t *zs)
dmu_zfetch_stream_create(zfetch_t *zf, uint64_t blkid)
{
zstream_t *zs_next;
int numstreams = 0;
ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));
list_remove(&zf->zf_stream, zs);
zf->zf_stream_cnt--;
}
/*
* Clean up old streams.
*/
for (zstream_t *zs = list_head(&zf->zf_stream);
zs != NULL; zs = zs_next) {
zs_next = list_next(&zf->zf_stream, zs);
if (((gethrtime() - zs->zs_atime) / NANOSEC) >
zfetch_min_sec_reap)
dmu_zfetch_stream_remove(zf, zs);
else
numstreams++;
}
static int
dmu_zfetch_streams_equal(zstream_t *zs1, zstream_t *zs2)
{
if (zs1->zst_offset != zs2->zst_offset)
return (0);
/*
* The maximum number of streams is normally zfetch_max_streams,
* but for small files we lower it such that it's at least possible
* for all the streams to be non-overlapping.
*
* If we are already at the maximum number of streams for this file,
* even after removing old streams, then don't create this stream.
*/
uint32_t max_streams = MAX(1, MIN(zfetch_max_streams,
zf->zf_dnode->dn_maxblkid * zf->zf_dnode->dn_datablksz /
zfetch_max_distance));
if (numstreams >= max_streams) {
ZFETCHSTAT_BUMP(zfetchstat_max_streams);
return;
}
if (zs1->zst_len != zs2->zst_len)
return (0);
zstream_t *zs = kmem_zalloc(sizeof (*zs), KM_SLEEP);
zs->zs_blkid = blkid;
zs->zs_pf_blkid = blkid;
zs->zs_atime = gethrtime();
mutex_init(&zs->zs_lock, NULL, MUTEX_DEFAULT, NULL);
if (zs1->zst_stride != zs2->zst_stride)
return (0);
if (zs1->zst_ph_offset != zs2->zst_ph_offset)
return (0);
if (zs1->zst_cap != zs2->zst_cap)
return (0);
if (zs1->zst_direction != zs2->zst_direction)
return (0);
return (1);
list_insert_head(&zf->zf_stream, zs);
}
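A worked example of the max_streams clamp above, as a standalone sketch with representative values: a 25-block file of 128 KB records spans roughly 3 MB, too small to hold even one non-overlapping 8 MB prefetch stream, so the MAX(1, ...) floor still permits a single stream:

#include <stdio.h>
#include <stdint.h>

#define	MAX(a, b)	((a) > (b) ? (a) : (b))
#define	MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t zfetch_max_streams = 8;
	uint64_t zfetch_max_distance = 8 * 1024 * 1024;	/* 8 MB */
	uint64_t dn_maxblkid = 24;			/* ~25-block file */
	uint64_t dn_datablksz = 128 * 1024;		/* 128 KB records */

	uint64_t max_streams = MAX(1, MIN(zfetch_max_streams,
	    dn_maxblkid * dn_datablksz / zfetch_max_distance));
	printf("max_streams = %ju\n", (uintmax_t)max_streams);	/* prints 1 */
	return (0);
}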
/*
@ -668,91 +211,86 @@ dmu_zfetch_streams_equal(zstream_t *zs1, zstream_t *zs2)
* routines to create, delete, find, or operate upon prefetch streams.
*/
void
dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks)
{
zstream_t zst;
zstream_t *newstream;
boolean_t fetched;
int inserted;
unsigned int blkshft;
uint64_t blksz;
zstream_t *zs;
if (zfs_prefetch_disable)
return;
/* files that aren't ln2 blocksz are only one block -- nothing to do */
if (!zf->zf_dnode->dn_datablkshift)
/*
* As a fast path for small (single-block) files, ignore access
* to the first block.
*/
if (blkid == 0)
return;
/* convert offset and size, into blockid and nblocks */
blkshft = zf->zf_dnode->dn_datablkshift;
blksz = (1 << blkshft);
rw_enter(&zf->zf_rwlock, RW_READER);
bzero(&zst, sizeof (zstream_t));
zst.zst_offset = offset >> blkshft;
zst.zst_len = (P2ROUNDUP(offset + size, blksz) -
P2ALIGN(offset, blksz)) >> blkshft;
fetched = dmu_zfetch_find(zf, &zst, prefetched);
if (fetched) {
ZFETCHSTAT_BUMP(zfetchstat_hits);
} else {
ZFETCHSTAT_BUMP(zfetchstat_misses);
fetched = dmu_zfetch_colinear(zf, &zst);
if (fetched) {
ZFETCHSTAT_BUMP(zfetchstat_colinear_hits);
} else {
ZFETCHSTAT_BUMP(zfetchstat_colinear_misses);
for (zs = list_head(&zf->zf_stream); zs != NULL;
zs = list_next(&zf->zf_stream, zs)) {
if (blkid == zs->zs_blkid) {
mutex_enter(&zs->zs_lock);
/*
* zs_blkid could have changed before we
* acquired zs_lock; re-check it here.
*/
if (blkid != zs->zs_blkid) {
mutex_exit(&zs->zs_lock);
continue;
}
break;
}
}
if (!fetched) {
newstream = dmu_zfetch_stream_reclaim(zf);
if (zs == NULL) {
/*
* we still couldn't find a stream, drop the lock, and allocate
* one if possible. Otherwise, give up and go home.
* This access is not part of any existing stream. Create
* a new stream for it.
*/
if (newstream) {
ZFETCHSTAT_BUMP(zfetchstat_reclaim_successes);
} else {
uint64_t maxblocks;
uint32_t max_streams;
uint32_t cur_streams;
ZFETCHSTAT_BUMP(zfetchstat_reclaim_failures);
cur_streams = zf->zf_stream_cnt;
maxblocks = zf->zf_dnode->dn_maxblkid;
max_streams = MIN(zfetch_max_streams,
(maxblocks / zfetch_block_cap));
if (max_streams == 0) {
max_streams++;
}
if (cur_streams >= max_streams) {
return;
}
newstream = kmem_zalloc(sizeof (zstream_t), KM_SLEEP);
}
newstream->zst_offset = zst.zst_offset;
newstream->zst_len = zst.zst_len;
newstream->zst_stride = zst.zst_len;
newstream->zst_ph_offset = zst.zst_len + zst.zst_offset;
newstream->zst_cap = zst.zst_len;
newstream->zst_direction = ZFETCH_FORWARD;
newstream->zst_last = ddi_get_lbolt();
mutex_init(&newstream->zst_lock, NULL, MUTEX_DEFAULT, NULL);
rw_enter(&zf->zf_rwlock, RW_WRITER);
inserted = dmu_zfetch_stream_insert(zf, newstream);
ZFETCHSTAT_BUMP(zfetchstat_misses);
if (rw_tryupgrade(&zf->zf_rwlock))
dmu_zfetch_stream_create(zf, blkid + nblks);
rw_exit(&zf->zf_rwlock);
if (!inserted) {
mutex_destroy(&newstream->zst_lock);
kmem_free(newstream, sizeof (zstream_t));
}
return;
}
/*
* This access was to a block that we issued a prefetch for on
* behalf of this stream. Issue further prefetches for this stream.
*
* Normally, we start prefetching where we stopped
* prefetching last (zs_pf_blkid). But when we get our first
* hit on this stream, zs_pf_blkid == zs_blkid, we don't
* want to prefetch the block we just accessed. In this case,
* start just after the block we just accessed.
*/
int64_t pf_start = MAX(zs->zs_pf_blkid, blkid + nblks);
/*
* Double our amount of prefetched data, but don't let the
* prefetch get further ahead than zfetch_max_distance.
*/
int pf_nblks =
MIN((int64_t)zs->zs_pf_blkid - zs->zs_blkid + nblks,
zs->zs_blkid + nblks +
(zfetch_max_distance >> zf->zf_dnode->dn_datablkshift) - pf_start);
zs->zs_pf_blkid = pf_start + pf_nblks;
zs->zs_atime = gethrtime();
zs->zs_blkid = blkid + nblks;
/*
* dbuf_prefetch() issues the prefetch i/o
* asynchronously, but it may need to wait for an
* indirect block to be read from disk. Therefore
* we do not want to hold any locks while we call it.
*/
mutex_exit(&zs->zs_lock);
rw_exit(&zf->zf_rwlock);
for (int i = 0; i < pf_nblks; i++) {
dbuf_prefetch(zf->zf_dnode, 0, pf_start + i,
ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH);
}
ZFETCHSTAT_BUMP(zfetchstat_hits);
}
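To make the growth of the prefetch window concrete, here is a standalone simulation of just the pf_start/pf_nblks arithmetic above, assuming a strictly sequential single-block reader and 128 KB records (so the 8 MB cap is 64 blocks); real access patterns, nblks values, and record sizes will of course vary. The distance ahead doubles on each hit (1, 2, 4, ...) until it pins at 64 blocks:

#include <stdio.h>
#include <stdint.h>

#define	MAX(a, b)	((a) > (b) ? (a) : (b))
#define	MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	int64_t zs_blkid = 1, zs_pf_blkid = 1;	/* first hit: equal */
	int64_t max_dist_blks = (8 * 1024 * 1024) >> 17;	/* 64 blocks */
	int64_t nblks = 1;

	for (int64_t blkid = 1; blkid <= 10; blkid++) {
		/* Same update rules as dmu_zfetch() above. */
		int64_t pf_start = MAX(zs_pf_blkid, blkid + nblks);
		int64_t pf_nblks = MIN(zs_pf_blkid - zs_blkid + nblks,
		    zs_blkid + nblks + max_dist_blks - pf_start);

		zs_pf_blkid = pf_start + pf_nblks;
		zs_blkid = blkid + nblks;
		printf("access %2jd: prefetch %jd blocks, now %jd ahead\n",
		    (intmax_t)blkid, (intmax_t)pf_nblks,
		    (intmax_t)(zs_pf_blkid - zs_blkid));
	}
	return (0);
}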

View File

@ -526,7 +526,7 @@ dnode_destroy(dnode_t *dn)
dn->dn_id_flags = 0;
dn->dn_unlisted_l0_blkid = 0;
dmu_zfetch_rele(&dn->dn_zfetch);
dmu_zfetch_fini(&dn->dn_zfetch);
kmem_cache_free(dnode_cache, dn);
arc_space_return(sizeof (dnode_t), ARC_SPACE_OTHER);
@ -774,8 +774,6 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
dmu_zfetch_init(&ndn->dn_zfetch, NULL);
list_move_tail(&ndn->dn_zfetch.zf_stream, &odn->dn_zfetch.zf_stream);
ndn->dn_zfetch.zf_dnode = odn->dn_zfetch.zf_dnode;
ndn->dn_zfetch.zf_stream_cnt = odn->dn_zfetch.zf_stream_cnt;
ndn->dn_zfetch.zf_alloc_fail = odn->dn_zfetch.zf_alloc_fail;
/*
* Update back pointers. Updating the handle fixes the back pointer of

View File

@ -22,7 +22,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2014 by Delphix. All rights reserved.
* Copyright (c) 2013, 2014, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
@ -1633,6 +1633,7 @@ load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
if (error != 0)
return (error);
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
@ -3773,6 +3774,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
txg_wait_synced(spa->spa_dsl_pool, txg);
spa_config_sync(spa, B_FALSE, B_TRUE);
spa_event_notify(spa, NULL, ESC_ZFS_POOL_CREATE);
spa_history_log_version(spa, "create");
@ -4233,6 +4235,7 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
spa_configfile_set(spa, props, B_FALSE);
spa_config_sync(spa, B_FALSE, B_TRUE);
spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT);
mutex_exit(&spa_namespace_lock);
return (0);
@ -4363,9 +4366,12 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
*/
spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
mutex_exit(&spa_namespace_lock);
spa_history_log_version(spa, "import");
spa_event_notify(spa, NULL, ESC_ZFS_POOL_IMPORT);
mutex_exit(&spa_namespace_lock);
#ifdef __FreeBSD__
#ifdef _KERNEL
zvol_create_minors(pool);
@ -4711,6 +4717,7 @@ spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
mutex_enter(&spa_namespace_lock);
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
spa_event_notify(spa, NULL, ESC_ZFS_VDEV_ADD);
mutex_exit(&spa_namespace_lock);
return (0);
@ -4905,6 +4912,11 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
*/
dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
if (spa->spa_bootfs)
spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH);
spa_event_notify(spa, newvd, ESC_ZFS_VDEV_ATTACH);
/*
* Commit the config
*/
@ -4919,9 +4931,6 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
spa_strfree(oldvdpath);
spa_strfree(newvdpath);
if (spa->spa_bootfs)
spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH);
return (0);
}

View File

@ -64,41 +64,30 @@ typedef enum arc_flags
ARC_FLAG_CACHED = 1 << 4, /* I/O was in cache */
ARC_FLAG_L2CACHE = 1 << 5, /* cache in L2ARC */
ARC_FLAG_L2COMPRESS = 1 << 6, /* compress in L2ARC */
ARC_FLAG_PREDICTIVE_PREFETCH = 1 << 7, /* I/O from zfetch */
/*
* Private ARC flags. These flags are private ARC only flags that
* will show up in b_flags in the arc_hdr_buf_t. These flags should
* only be set by ARC code.
*/
ARC_FLAG_IN_HASH_TABLE = 1 << 7, /* buffer is hashed */
ARC_FLAG_IO_IN_PROGRESS = 1 << 8, /* I/O in progress */
ARC_FLAG_IO_ERROR = 1 << 9, /* I/O failed for buf */
ARC_FLAG_FREED_IN_READ = 1 << 10, /* freed during read */
ARC_FLAG_BUF_AVAILABLE = 1 << 11, /* block not in use */
ARC_FLAG_INDIRECT = 1 << 12, /* indirect block */
ARC_FLAG_L2_WRITING = 1 << 13, /* write in progress */
ARC_FLAG_L2_EVICTED = 1 << 14, /* evicted during I/O */
ARC_FLAG_L2_WRITE_HEAD = 1 << 15, /* head of write list */
ARC_FLAG_IN_HASH_TABLE = 1 << 8, /* buffer is hashed */
ARC_FLAG_IO_IN_PROGRESS = 1 << 9, /* I/O in progress */
ARC_FLAG_IO_ERROR = 1 << 10, /* I/O failed for buf */
ARC_FLAG_FREED_IN_READ = 1 << 11, /* freed during read */
ARC_FLAG_BUF_AVAILABLE = 1 << 12, /* block not in use */
ARC_FLAG_INDIRECT = 1 << 13, /* indirect block */
/* Indicates that block was read with ASYNC priority. */
ARC_FLAG_PRIO_ASYNC_READ = 1 << 14,
ARC_FLAG_L2_WRITING = 1 << 15, /* write in progress */
ARC_FLAG_L2_EVICTED = 1 << 16, /* evicted during I/O */
ARC_FLAG_L2_WRITE_HEAD = 1 << 17, /* head of write list */
/* indicates that the buffer contains metadata (otherwise, data) */
ARC_FLAG_BUFC_METADATA = 1 << 16,
ARC_FLAG_BUFC_METADATA = 1 << 18,
/* Flags specifying whether optional hdr struct fields are defined */
ARC_FLAG_HAS_L1HDR = 1 << 17,
ARC_FLAG_HAS_L2HDR = 1 << 18,
/*
* The arc buffer's compression mode is stored in the top 7 bits of the
* flags field, so these dummy flags are included so that MDB can
* interpret the enum properly.
*/
ARC_FLAG_COMPRESS_0 = 1 << 24,
ARC_FLAG_COMPRESS_1 = 1 << 25,
ARC_FLAG_COMPRESS_2 = 1 << 26,
ARC_FLAG_COMPRESS_3 = 1 << 27,
ARC_FLAG_COMPRESS_4 = 1 << 28,
ARC_FLAG_COMPRESS_5 = 1 << 29,
ARC_FLAG_COMPRESS_6 = 1 << 30
ARC_FLAG_HAS_L1HDR = 1 << 19,
ARC_FLAG_HAS_L2HDR = 1 << 20,
} arc_flags_t;
struct arc_buf {

View File

@ -492,7 +492,8 @@ uint64_t dmu_buf_refcount(dmu_buf_t *db);
* individually with dmu_buf_rele.
*/
int dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp);
uint64_t length, boolean_t read, void *tag,
int *numbufsp, dmu_buf_t ***dbpp);
void dmu_buf_rele_array(dmu_buf_t **, int numbufs, void *tag);
typedef void dmu_buf_evict_func_t(void *user_ptr);
@ -743,7 +744,7 @@ void dmu_xuio_clear(struct xuio *uio, int i);
void xuio_stat_wbuf_copied();
void xuio_stat_wbuf_nocopy();
extern int zfs_prefetch_disable;
extern boolean_t zfs_prefetch_disable;
extern int zfs_max_recordsize;
/*

View File

@ -23,8 +23,12 @@
* Use is subject to license terms.
*/
#ifndef _DFETCH_H
#define _DFETCH_H
/*
* Copyright (c) 2014 by Delphix. All rights reserved.
*/
#ifndef _DMU_ZFETCH_H
#define _DMU_ZFETCH_H
#include <sys/zfs_context.h>
@ -36,41 +40,30 @@ extern uint64_t zfetch_array_rd_sz;
struct dnode; /* so we can reference dnode */
typedef enum zfetch_dirn {
ZFETCH_FORWARD = 1, /* prefetch increasing block numbers */
ZFETCH_BACKWARD = -1 /* prefetch decreasing block numbers */
} zfetch_dirn_t;
typedef struct zstream {
uint64_t zst_offset; /* offset of starting block in range */
uint64_t zst_len; /* length of range, in blocks */
zfetch_dirn_t zst_direction; /* direction of prefetch */
uint64_t zst_stride; /* length of stride, in blocks */
uint64_t zst_ph_offset; /* prefetch offset, in blocks */
uint64_t zst_cap; /* prefetch limit (cap), in blocks */
kmutex_t zst_lock; /* protects stream */
clock_t zst_last; /* lbolt of last prefetch */
avl_node_t zst_node; /* embed avl node here */
uint64_t zs_blkid; /* expect next access at this blkid */
uint64_t zs_pf_blkid; /* next block to prefetch */
kmutex_t zs_lock; /* protects stream */
hrtime_t zs_atime; /* time last prefetch issued */
list_node_t zs_node; /* link for zf_stream */
} zstream_t;
typedef struct zfetch {
krwlock_t zf_rwlock; /* protects zfetch structure */
list_t zf_stream; /* AVL tree of zstream_t's */
list_t zf_stream; /* list of zstream_t's */
struct dnode *zf_dnode; /* dnode that owns this zfetch */
uint32_t zf_stream_cnt; /* # of active streams */
uint64_t zf_alloc_fail; /* # of failed attempts to alloc strm */
} zfetch_t;
void zfetch_init(void);
void zfetch_fini(void);
void dmu_zfetch_init(zfetch_t *, struct dnode *);
void dmu_zfetch_rele(zfetch_t *);
void dmu_zfetch(zfetch_t *, uint64_t, uint64_t, int);
void dmu_zfetch_fini(zfetch_t *);
void dmu_zfetch(zfetch_t *, uint64_t, uint64_t);
#ifdef __cplusplus
}
#endif
#endif /* _DFETCH_H */
#endif /* _DMU_ZFETCH_H */

View File

@ -2503,6 +2503,7 @@ int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
boolean_t postevent = B_FALSE;
spa_vdev_state_enter(spa, SCL_NONE);
@ -2512,6 +2513,10 @@ vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
postevent =
(vd->vdev_offline == B_TRUE || vd->vdev_tmpoffline == B_TRUE) ?
B_TRUE : B_FALSE;
tvd = vd->vdev_top;
vd->vdev_offline = B_FALSE;
vd->vdev_tmpoffline = B_FALSE;
@ -2547,6 +2552,10 @@ vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
return (spa_vdev_state_exit(spa, vd, ENOTSUP));
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
if (postevent)
spa_event_notify(spa, vd, ESC_ZFS_VDEV_ONLINE);
return (spa_vdev_state_exit(spa, vd, 0));
}

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
*/
#ifndef _SYS_SYSEVENT_EVENTDEFS_H
@ -249,9 +249,14 @@ extern "C" {
#define ESC_ZFS_RESILVER_START "ESC_ZFS_resilver_start"
#define ESC_ZFS_RESILVER_FINISH "ESC_ZFS_resilver_finish"
#define ESC_ZFS_VDEV_REMOVE "ESC_ZFS_vdev_remove"
#define ESC_ZFS_POOL_CREATE "ESC_ZFS_pool_create"
#define ESC_ZFS_POOL_DESTROY "ESC_ZFS_pool_destroy"
#define ESC_ZFS_POOL_IMPORT "ESC_ZFS_pool_import"
#define ESC_ZFS_VDEV_ADD "ESC_ZFS_vdev_add"
#define ESC_ZFS_VDEV_ATTACH "ESC_ZFS_vdev_attach"
#define ESC_ZFS_VDEV_CLEAR "ESC_ZFS_vdev_clear"
#define ESC_ZFS_VDEV_CHECK "ESC_ZFS_vdev_check"
#define ESC_ZFS_VDEV_ONLINE "ESC_ZFS_vdev_online"
#define ESC_ZFS_CONFIG_SYNC "ESC_ZFS_config_sync"
#define ESC_ZFS_SCRUB_START "ESC_ZFS_scrub_start"
#define ESC_ZFS_SCRUB_FINISH "ESC_ZFS_scrub_finish"

View File

@ -59,6 +59,7 @@ dev/acpica/acpi_if.m optional acpi
dev/fdt/fdt_arm64.c optional fdt
dev/hwpmc/hwpmc_arm64.c optional hwpmc
dev/hwpmc/hwpmc_arm64_md.c optional hwpmc
dev/kbd/kbd.c optional atkbd | sc | ukbd | vt
dev/mmc/host/dwmmc.c optional dwmmc
dev/mmc/host/dwmmc_hisi.c optional dwmmc soc_hisi_hi6220
dev/ofw/ofw_cpu.c optional fdt

View File

@ -82,6 +82,7 @@ sparc64/isa/isa_dma.c optional isa
sparc64/isa/ofw_isa.c optional ebus | isa
sparc64/pci/apb.c optional pci
sparc64/pci/fire.c optional pci
sparc64/pci/ofw_pci.c optional pci
sparc64/pci/ofw_pcib.c optional pci
sparc64/pci/ofw_pcib_subr.c optional pci
sparc64/pci/ofw_pcibus.c optional pci

View File

@ -821,10 +821,8 @@ dwc_intr(void *arg)
DWC_LOCK(sc);
reg = READ4(sc, INTERRUPT_STATUS);
if (reg) {
mii_mediachg(sc->mii_softc);
if (reg)
READ4(sc, SGMII_RGMII_SMII_CTRL_STATUS);
}
reg = READ4(sc, DMA_STATUS);
if (reg & DMA_STATUS_NIS) {

View File

@ -851,11 +851,17 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
e1000_release_phy_80003es2lan(hw);
/* Disable IBIST slave mode (far-end loopback) */
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
&kum_reg_data);
kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
kum_reg_data);
ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_INBAND_PARAM, &kum_reg_data);
if (!ret_val) {
kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_INBAND_PARAM,
kum_reg_data);
if (ret_val)
DEBUGOUT("Error disabling far-end loopback\n");
} else
DEBUGOUT("Error disabling far-end loopback\n");
ret_val = e1000_get_auto_rd_done_generic(hw);
if (ret_val)
@ -911,11 +917,18 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
return ret_val;
/* Disable IBIST slave mode (far-end loopback) */
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
&kum_reg_data);
kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
kum_reg_data);
ret_val =
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
&kum_reg_data);
if (!ret_val) {
kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_INBAND_PARAM,
kum_reg_data);
if (ret_val)
DEBUGOUT("Error disabling far-end loopback\n");
} else
DEBUGOUT("Error disabling far-end loopback\n");
/* Set the transmit descriptor write-back policy */
reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0));

View File

@ -66,7 +66,7 @@ static s32 e1000_read_mac_addr_82540(struct e1000_hw *hw);
static s32 e1000_init_phy_params_82540(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = E1000_SUCCESS;
s32 ret_val;
phy->addr = 1;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
@ -329,7 +329,7 @@ static s32 e1000_init_hw_82540(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
u32 txdctl, ctrl_ext;
s32 ret_val = E1000_SUCCESS;
s32 ret_val;
u16 i;
DEBUGFUNC("e1000_init_hw_82540");
@ -411,7 +411,7 @@ static s32 e1000_init_hw_82540(struct e1000_hw *hw)
static s32 e1000_setup_copper_link_82540(struct e1000_hw *hw)
{
u32 ctrl;
s32 ret_val = E1000_SUCCESS;
s32 ret_val;
u16 data;
DEBUGFUNC("e1000_setup_copper_link_82540");
@ -498,7 +498,7 @@ static s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw)
**/
static s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
s32 ret_val;
u16 nvm_data;
DEBUGFUNC("e1000_adjust_serdes_amplitude_82540");
@ -528,7 +528,7 @@ static s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw)
**/
static s32 e1000_set_vco_speed_82540(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
s32 ret_val;
u16 default_page = 0;
u16 phy_data;

View File

@ -85,7 +85,7 @@ static const u16 e1000_igp_cable_length_table[] = {
static s32 e1000_init_phy_params_82541(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = E1000_SUCCESS;
s32 ret_val;
DEBUGFUNC("e1000_init_phy_params_82541");
@ -295,7 +295,7 @@ void e1000_init_function_pointers_82541(struct e1000_hw *hw)
**/
static s32 e1000_reset_hw_82541(struct e1000_hw *hw)
{
u32 ledctl, ctrl, icr, manc;
u32 ledctl, ctrl, manc;
DEBUGFUNC("e1000_reset_hw_82541");
@ -317,6 +317,7 @@ static s32 e1000_reset_hw_82541(struct e1000_hw *hw)
/* Must reset the Phy before resetting the MAC */
if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) {
E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_PHY_RST));
E1000_WRITE_FLUSH(hw);
msec_delay(5);
}
@ -359,7 +360,7 @@ static s32 e1000_reset_hw_82541(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
/* Clear any pending interrupt events. */
icr = E1000_READ_REG(hw, E1000_ICR);
E1000_READ_REG(hw, E1000_ICR);
return E1000_SUCCESS;
}

View File

@ -317,7 +317,7 @@ static s32 e1000_init_hw_82542(struct e1000_hw *hw)
static s32 e1000_setup_link_82542(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val = E1000_SUCCESS;
s32 ret_val;
DEBUGFUNC("e1000_setup_link_82542");
@ -565,7 +565,7 @@ static void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw)
*
* Reads the device MAC address from the EEPROM and stores the value.
**/
static s32 e1000_read_mac_addr_82542(struct e1000_hw *hw)
s32 e1000_read_mac_addr_82542(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u16 offset, nvm_data, i;

View File

@ -900,7 +900,7 @@ static s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw)
**/
static s32 e1000_reset_hw_82543(struct e1000_hw *hw)
{
u32 ctrl, icr;
u32 ctrl;
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_reset_hw_82543");
@ -942,7 +942,7 @@ static s32 e1000_reset_hw_82543(struct e1000_hw *hw)
/* Masking off and clearing any pending interrupts */
E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
icr = E1000_READ_REG(hw, E1000_ICR);
E1000_READ_REG(hw, E1000_ICR);
return ret_val;
}

View File

@ -50,9 +50,10 @@
#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
#define E1000_EIAC_MASK_82574 0x01F00000
#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
#define E1000_IVAR_INT_ALLOC_VALID 0x8
#define E1000_RXCFGL 0x0B634 /* TimeSync Rx EtherType & Msg Type Reg - RW */
/* Manageability Operation Mode mask */
#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
#define E1000_BASE1000T_STATUS 10
#define E1000_IDLE_ERROR_COUNT_MASK 0xFF

View File

@ -1235,7 +1235,7 @@ static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
DEBUGFUNC("e1000_check_for_link_media_swap");
/* Check the copper medium. */
/* Check for copper. */
ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
if (ret_val)
return ret_val;
@ -1247,7 +1247,7 @@ static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
if (data & E1000_M88E1112_STATUS_LINK)
port = E1000_MEDIA_PORT_COPPER;
/* Check the other medium. */
/* Check for other. */
ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
if (ret_val)
return ret_val;
@ -1256,11 +1256,6 @@ static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* reset page to 0 */
ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
if (ret_val)
return ret_val;
if (data & E1000_M88E1112_STATUS_LINK)
port = E1000_MEDIA_PORT_OTHER;
@ -1268,8 +1263,20 @@ static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
if (port && (hw->dev_spec._82575.media_port != port)) {
hw->dev_spec._82575.media_port = port;
hw->dev_spec._82575.media_changed = TRUE;
}
if (port == E1000_MEDIA_PORT_COPPER) {
/* reset page to 0 */
ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
if (ret_val)
return ret_val;
e1000_check_for_link_82575(hw);
} else {
ret_val = e1000_check_for_link_82575(hw);
e1000_check_for_link_82575(hw);
/* reset page to 0 */
ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
if (ret_val)
return ret_val;
}
return E1000_SUCCESS;
@ -2136,7 +2143,13 @@ void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
int i, ms_wait;
DEBUGFUNC("e1000_rx_fifo_workaround_82575");
DEBUGFUNC("e1000_rx_fifo_flush_82575");
/* disable IPv6 options as per hardware errata */
rfctl = E1000_READ_REG(hw, E1000_RFCTL);
rfctl |= E1000_RFCTL_IPV6_EX_DIS;
E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
if (hw->mac.type != e1000_82575 ||
!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
return;
@ -2164,7 +2177,6 @@ void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
* incoming packets are rejected. Set enable and wait 2ms so that
* any packet that was coming in as RCTL.EN was set is flushed
*/
rfctl = E1000_READ_REG(hw, E1000_RFCTL);
E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
rlpml = E1000_READ_REG(hw, E1000_RLPML);
@ -2894,11 +2906,13 @@ s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw)
/**
* e1000_set_eee_i350 - Enable/disable EEE support
* @hw: pointer to the HW structure
* @adv1G: boolean flag enabling 1G EEE advertisement
* @adv100M: boolean flag enabling 100M EEE advertisement
*
* Enable/disable EEE based on setting in dev_spec structure.
*
**/
s32 e1000_set_eee_i350(struct e1000_hw *hw)
s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
{
u32 ipcnfg, eeer;
@ -2914,7 +2928,16 @@ s32 e1000_set_eee_i350(struct e1000_hw *hw)
if (!(hw->dev_spec._82575.eee_disable)) {
u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU);
ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
if (adv100M)
ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
else
ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
if (adv1G)
ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
else
ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
E1000_EEER_LPI_FC);
@ -2938,11 +2961,13 @@ s32 e1000_set_eee_i350(struct e1000_hw *hw)
/**
* e1000_set_eee_i354 - Enable/disable EEE support
* @hw: pointer to the HW structure
* @adv1G: boolean flag enabling 1G EEE advertisement
* @adv100M: boolean flag enabling 100M EEE advertisement
*
* Enable/disable EEE legacy mode based on setting in dev_spec structure.
*
**/
s32 e1000_set_eee_i354(struct e1000_hw *hw)
s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val = E1000_SUCCESS;
@ -2984,8 +3009,16 @@ s32 e1000_set_eee_i354(struct e1000_hw *hw)
if (ret_val)
goto out;
phy_data |= E1000_EEE_ADV_100_SUPPORTED |
E1000_EEE_ADV_1000_SUPPORTED;
if (adv100M)
phy_data |= E1000_EEE_ADV_100_SUPPORTED;
else
phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
if (adv1G)
phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
else
phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
E1000_EEE_ADV_DEV_I354,
phy_data);

View File

@ -495,8 +495,8 @@ void e1000_rlpml_set_vf(struct e1000_hw *, u16);
s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type);
u16 e1000_rxpbs_adjust_82580(u32 data);
s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
s32 e1000_set_eee_i350(struct e1000_hw *);
s32 e1000_set_eee_i354(struct e1000_hw *);
s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M);
s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M);
s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *);
s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw);
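A hypothetical call site for the new signatures (the helper name is an assumption, not part of this change): passing TRUE for both flags preserves the previous behavior of advertising EEE at both speeds, while a 1G-only policy would pass FALSE for adv100M.

#include "e1000_api.h"
#include "e1000_82575.h"

/* Hypothetical helper, for illustration only. */
static s32
example_enable_eee(struct e1000_hw *hw)
{
	/* i354 parts use the legacy-mode variant. */
	if (hw->mac.type == e1000_i354)
		return (e1000_set_eee_i354(hw, TRUE, TRUE));
	return (e1000_set_eee_i350(hw, TRUE, TRUE));
}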

View File

@ -299,6 +299,12 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_PCH_I218_V3:
mac->type = e1000_pch_lpt;
break;
case E1000_DEV_ID_PCH_SPT_I219_LM:
case E1000_DEV_ID_PCH_SPT_I219_V:
case E1000_DEV_ID_PCH_SPT_I219_LM2:
case E1000_DEV_ID_PCH_SPT_I219_V2:
mac->type = e1000_pch_spt;
break;
case E1000_DEV_ID_82575EB_COPPER:
case E1000_DEV_ID_82575EB_FIBER_SERDES:
case E1000_DEV_ID_82575GB_QUAD_COPPER:
@ -449,6 +455,7 @@ s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
e1000_init_function_pointers_ich8lan(hw);
break;
case e1000_82575:
@ -928,21 +935,6 @@ s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
return e1000_mng_enable_host_if_generic(hw);
}
/**
* e1000_set_obff_timer - Set Optimized Buffer Flush/Fill timer
* @hw: pointer to the HW structure
* @itr: u32 indicating itr value
*
* Set the OBFF timer based on the given interrupt rate.
**/
s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr)
{
if (hw->mac.ops.set_obff_timer)
return hw->mac.ops.set_obff_timer(hw, itr);
return E1000_SUCCESS;
}
/**
* e1000_check_reset_block - Verifies PHY can be reset
* @hw: pointer to the HW structure
@ -1215,6 +1207,21 @@ s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size)
return e1000_read_pba_length_generic(hw, pba_num_size);
}
/**
* e1000_read_pba_num - Read device part number
* @hw: pointer to the HW structure
* @pba_num: pointer to device part number
*
* Reads the product board assembly (PBA) number from the EEPROM and stores
* the value in pba_num.
* Currently no func pointer exists and all implementations are handled in the
* generic version of this function.
**/
s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
{
return e1000_read_pba_num_generic(hw, pba_num);
}
/**
* e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
* @hw: pointer to the HW structure

View File

@ -97,6 +97,7 @@ s32 e1000_phy_commit(struct e1000_hw *hw);
void e1000_power_up_phy(struct e1000_hw *hw);
void e1000_power_down_phy(struct e1000_hw *hw);
s32 e1000_read_mac_addr(struct e1000_hw *hw);
s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *part_num);
s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size);
s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
void e1000_reload_nvm(struct e1000_hw *hw);

View File

@ -197,6 +197,8 @@
#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
#define E1000_RCTL_RDMTS_HEX 0x00010000
#define E1000_RCTL_RDMTS1_HEX E1000_RCTL_RDMTS_HEX
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
@ -565,9 +567,6 @@
#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/
#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */
#define E1000_ITR_MASK 0x000FFFFF /* ITR value bitfield */
#define E1000_ITR_MULT 256 /* ITR multiplier in nsec */
/* PBA ECC Register */
#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */
#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */
@ -753,6 +752,12 @@
#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
/* HH Time Sync */
#define E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */
#define E1000_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */
#define E1000_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */
#define E1000_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
@ -1020,9 +1025,7 @@
/* NVM Addressing bits based on type 0=small, 1=large */
#define E1000_EECD_ADDR_BITS 0x00000400
#define E1000_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */
#ifndef E1000_NVM_GRANT_ATTEMPTS
#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
#endif
#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
#define E1000_EECD_SIZE_EX_SHIFT 11
@ -1059,11 +1062,44 @@
/* NVM Word Offsets */
#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004
#define NVM_VERSION 0x0005
#define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */
#define NVM_PHY_CLASS_WORD 0x0007
#define E1000_I210_NVM_FW_MODULE_PTR 0x0010
#define E1000_I350_NVM_FW_MODULE_PTR 0x0051
#define NVM_FUTURE_INIT_WORD1 0x0019
#define NVM_ETRACK_WORD 0x0042
#define NVM_ETRACK_HIWORD 0x0043
#define NVM_COMB_VER_OFF 0x0083
#define NVM_COMB_VER_PTR 0x003d
/* NVM version defines */
#define NVM_MAJOR_MASK 0xF000
#define NVM_MINOR_MASK 0x0FF0
#define NVM_IMAGE_ID_MASK 0x000F
#define NVM_COMB_VER_MASK 0x00FF
#define NVM_MAJOR_SHIFT 12
#define NVM_MINOR_SHIFT 4
#define NVM_COMB_VER_SHFT 8
#define NVM_VER_INVALID 0xFFFF
#define NVM_ETRACK_SHIFT 16
#define NVM_ETRACK_VALID 0x8000
#define NVM_NEW_DEC_MASK 0x0F00
#define NVM_HEX_CONV 16
#define NVM_HEX_TENS 10
/* FW version defines */
/* Offset of "Loader patch ptr" in Firmware Header */
#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01
/* Patch generation hour & minutes */
#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04
/* Patch generation month & day */
#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05
/* Patch generation year */
#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06
/* Patch major & minor numbers */
#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07
#define NVM_MAC_ADDR 0x0000
#define NVM_SUB_DEV_ID 0x000B
#define NVM_SUB_VEN_ID 0x000C
@ -1440,8 +1476,6 @@
#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
#define E1000_DOBFFCTL_OBFFTHR_MASK 0x000000FF /* OBFF threshold */
#define E1000_DOBFFCTL_EXIT_ACT_MASK 0x01000000 /* Exit active CB */
/* Proxy Filter Control */
#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */

View File

@ -137,6 +137,10 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_I218_V2 0x15A1
#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */
#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */
#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* Sunrise Point PCH */
#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* Sunrise Point PCH */
#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* Sunrise Point-H PCH */
#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* Sunrise Point-H PCH */
#define E1000_DEV_ID_82576 0x10C9
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
@ -222,6 +226,7 @@ enum e1000_mac_type {
e1000_pchlan,
e1000_pch2lan,
e1000_pch_lpt,
e1000_pch_spt,
e1000_82575,
e1000_82576,
e1000_82580,
@ -703,7 +708,6 @@ struct e1000_mac_operations {
int (*rar_set)(struct e1000_hw *, u8*, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
s32 (*validate_mdi_setting)(struct e1000_hw *);
s32 (*set_obff_timer)(struct e1000_hw *, u32);
s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
void (*release_swfw_sync)(struct e1000_hw *, u16);
};
@ -805,7 +809,7 @@ struct e1000_mac_info {
enum e1000_serdes_link_state serdes_link_state;
bool serdes_has_link;
bool tx_pkt_filtering;
u32 max_frame_size;
u32 max_frame_size;
};
struct e1000_phy_info {

View File

@ -488,6 +488,105 @@ static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
return ret_val;
}
/**
* e1000_read_invm_version - Reads iNVM version and image type
* @hw: pointer to the HW structure
* @invm_ver: version structure for the version read
*
* Reads iNVM version and image type.
**/
s32 e1000_read_invm_version(struct e1000_hw *hw,
struct e1000_fw_version *invm_ver)
{
u32 *record = NULL;
u32 *next_record = NULL;
u32 i = 0;
u32 invm_dword = 0;
u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
E1000_INVM_RECORD_SIZE_IN_BYTES);
u32 buffer[E1000_INVM_SIZE];
s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
u16 version = 0;
DEBUGFUNC("e1000_read_invm_version");
/* Read iNVM memory */
for (i = 0; i < E1000_INVM_SIZE; i++) {
invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
buffer[i] = invm_dword;
}
/* Read version number */
for (i = 1; i < invm_blocks; i++) {
record = &buffer[invm_blocks - i];
next_record = &buffer[invm_blocks - i + 1];
/* Check if we have first version location used */
if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
version = 0;
status = E1000_SUCCESS;
break;
}
/* Check if we have second version location used */
else if ((i == 1) &&
((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
status = E1000_SUCCESS;
break;
}
/*
* Check if we have odd version location
* used and it is the last one used
*/
else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
(i != 1))) {
version = (*next_record & E1000_INVM_VER_FIELD_TWO)
>> 13;
status = E1000_SUCCESS;
break;
}
/*
* Check if we have even version location
* used and it is the last one used
*/
else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
((*record & 0x3) == 0)) {
version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
status = E1000_SUCCESS;
break;
}
}
if (status == E1000_SUCCESS) {
invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
>> E1000_INVM_MAJOR_SHIFT;
invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
}
/* Read Image Type */
for (i = 1; i < invm_blocks; i++) {
record = &buffer[invm_blocks - i];
next_record = &buffer[invm_blocks - i + 1];
/* Check if we have image type in first location used */
if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
invm_ver->invm_img_type = 0;
status = E1000_SUCCESS;
break;
}
/* Check if we have image type in a later location used */
else if ((((*record & 0x3) == 0) &&
((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
((((*record & 0x3) != 0) && (i != 1)))) {
invm_ver->invm_img_type =
(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
status = E1000_SUCCESS;
break;
}
}
return status;
}
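A hypothetical consumer of the routine above (the helper is an assumption for illustration; only the e1000_fw_version fields it reads are defined by the shared code):

#include "e1000_api.h"
#include "e1000_i210.h"

/* Hypothetical caller: log the iNVM image version on an i210/i211 part. */
static void
example_log_invm_version(struct e1000_hw *hw)
{
	struct e1000_fw_version ver;

	if (e1000_read_invm_version(hw, &ver) == E1000_SUCCESS)
		DEBUGOUT2("iNVM version %d.%d\n", ver.invm_major,
		    ver.invm_minor);
}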
/**
* e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
* @hw: pointer to the HW structure

View File

@ -43,6 +43,8 @@ s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
s32 e1000_read_invm_version(struct e1000_hw *hw,
struct e1000_fw_version *invm_ver);
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,

File diff suppressed because it is too large

View File

@ -107,9 +107,25 @@
#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000
/* bit for disabling packet buffer read */
#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000
#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004
#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800
#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000
#define E1000_FEXTNVM11_DISABLE_PB_READ 0x00000200
#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000
/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
#define NVM_SIZE_MULTIPLIER 4096 /* multiplier for NVMS field */
#define E1000_FLASH_BASE_ADDR 0xE000 /* offset of NVM access regs */
#define E1000_CTRL_EXT_NVMVS 0x3 /* NVM valid sector */
#define E1000_SPT_B_STEP_REV 0x10 /* SPT B step Rev ID */
#define E1000_TARC0_CB_MULTIQ_2_REQ (1 << 29)
#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
@ -171,6 +187,8 @@
#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
#define K1_ENTRY_LATENCY 0
#define K1_MIN_TIME 1
/* SMBus Control Phy Register */
#define CV_SMB_CTRL PHY_REG(769, 23)
@ -281,36 +299,13 @@
/* Receive Address Initial CRC Calculation */
#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
/* Latency Tolerance Reporting */
#define E1000_LTRV 0x000F8
#define E1000_LTRV_VALUE_MASK 0x000003FF
#define E1000_LTRV_SCALE_MAX 5
#define E1000_LTRV_SCALE_FACTOR 5
#define E1000_LTRV_SCALE_SHIFT 10
#define E1000_LTRV_SCALE_MASK 0x00001C00
#define E1000_LTRV_REQ_SHIFT 15
#define E1000_LTRV_NOSNOOP_SHIFT 16
#define E1000_LTRV_SEND (1 << 30)
/* Proprietary Latency Tolerance Reporting PCI Capability */
#define E1000_PCI_LTR_CAP_LPT 0xA8
/* OBFF Control & Threshold Defines */
#define E1000_SVCR_OFF_EN 0x00000001
#define E1000_SVCR_OFF_MASKINT 0x00001000
#define E1000_SVCR_OFF_TIMER_MASK 0xFFFF0000
#define E1000_SVCR_OFF_TIMER_SHIFT 16
#define E1000_SVT_OFF_HWM_MASK 0x0000001F
#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
#define E1000_PCI_REVISION_ID_REG 0x08
#endif /* defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) */
void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);

View File

@ -70,7 +70,6 @@ void e1000_init_mac_ops_generic(struct e1000_hw *hw)
mac->ops.setup_link = e1000_null_ops_generic;
mac->ops.get_link_up_info = e1000_null_link_info;
mac->ops.check_for_link = e1000_null_ops_generic;
mac->ops.set_obff_timer = e1000_null_set_obff_timer;
/* Management */
mac->ops.check_mng_mode = e1000_null_mng_mode;
/* VLAN, MC, etc. */
@ -155,17 +154,6 @@ int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw,
return E1000_SUCCESS;
}
/**
* e1000_null_set_obff_timer - No-op function, return 0
* @hw: pointer to the HW structure
**/
s32 e1000_null_set_obff_timer(struct e1000_hw E1000_UNUSEDARG *hw,
u32 E1000_UNUSEDARG a)
{
DEBUGFUNC("e1000_null_set_obff_timer");
return E1000_SUCCESS;
}
/**
* e1000_get_bus_info_pci_generic - Get PCI(x) bus information
* @hw: pointer to the HW structure

View File

@ -36,9 +36,7 @@
#define _E1000_MAC_H_
void e1000_init_mac_ops_generic(struct e1000_hw *hw);
#ifndef E1000_REMOVED
#define E1000_REMOVED(a) (0)
#endif /* E1000_REMOVED */
void e1000_null_mac_generic(struct e1000_hw *hw);
s32 e1000_null_ops_generic(struct e1000_hw *hw);
s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
@ -46,7 +44,6 @@ bool e1000_null_mng_mode(struct e1000_hw *hw);
void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
s32 e1000_null_set_obff_timer(struct e1000_hw *hw, u32 a);
s32 e1000_blink_led_generic(struct e1000_hw *hw);
s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw);
s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw);

View File

@ -930,6 +930,41 @@ s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size)
return E1000_SUCCESS;
}
/**
* e1000_read_pba_num_generic - Read device part number
* @hw: pointer to the HW structure
* @pba_num: pointer to device part number
*
* Reads the product board assembly (PBA) number from the EEPROM and stores
* the value in pba_num.
**/
s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num)
{
s32 ret_val;
u16 nvm_data;
DEBUGFUNC("e1000_read_pba_num_generic");
ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
if (ret_val) {
DEBUGOUT("NVM Read Error\n");
return ret_val;
} else if (nvm_data == NVM_PBA_PTR_GUARD) {
DEBUGOUT("NVM Not Supported\n");
return -E1000_NOT_IMPLEMENTED;
}
*pba_num = (u32)(nvm_data << 16);
ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
if (ret_val) {
DEBUGOUT("NVM Read Error\n");
return ret_val;
}
*pba_num |= nvm_data;
return E1000_SUCCESS;
}
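For context, a minimal caller sketch for the restored routine; the variable names and the printf are illustrative only and not taken from the driver, and "hw" is assumed to be an initialized struct e1000_hw:

	/* Hypothetical usage sketch, not part of this commit. */
	u32 pba_num;

	if (e1000_read_pba_num_generic(hw, &pba_num) == E1000_SUCCESS)
		printf("PBA number: 0x%08x\n", pba_num);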
/**
* e1000_read_pba_raw
@ -1232,4 +1267,115 @@ static void e1000_reload_nvm_generic(struct e1000_hw *hw)
E1000_WRITE_FLUSH(hw);
}
/**
* e1000_get_fw_version - Get firmware version information
* @hw: pointer to the HW structure
* @fw_vers: pointer to output version structure
*
* Features that are unsupported or not present report 0 in the version structure.
**/
void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
{
u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
u8 q, hval, rem, result;
u16 comb_verh, comb_verl, comb_offset;
memset(fw_vers, 0, sizeof(struct e1000_fw_version));
/* basic eeprom version numbers, bits used vary by part and by tool
* used to create the nvm images */
/* Check which data format we have */
switch (hw->mac.type) {
case e1000_i211:
e1000_read_invm_version(hw, fw_vers);
return;
case e1000_82575:
case e1000_82576:
case e1000_82580:
hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
/* Use this format, unless EETRACK ID exists,
* then use alternate format
*/
if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
>> NVM_MAJOR_SHIFT;
fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
>> NVM_MINOR_SHIFT;
fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
goto etrack_id;
}
break;
case e1000_i210:
if (!(e1000_get_flash_presence_i210(hw))) {
e1000_read_invm_version(hw, fw_vers);
return;
}
/* fall through */
case e1000_i350:
hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
/* find combo image version */
hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
if ((comb_offset != 0x0) &&
(comb_offset != NVM_VER_INVALID)) {
hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ 1), 1, &comb_verh);
hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
1, &comb_verl);
/* get Option Rom version if it exists and is valid */
if ((comb_verh && comb_verl) &&
((comb_verh != NVM_VER_INVALID) &&
(comb_verl != NVM_VER_INVALID))) {
fw_vers->or_valid = TRUE;
fw_vers->or_major =
comb_verl >> NVM_COMB_VER_SHFT;
fw_vers->or_build =
(comb_verl << NVM_COMB_VER_SHFT)
| (comb_verh >> NVM_COMB_VER_SHFT);
fw_vers->or_patch =
comb_verh & NVM_COMB_VER_MASK;
}
}
break;
default:
hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
return;
}
hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
>> NVM_MAJOR_SHIFT;
/* check for old style version format in newer images */
if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
} else {
eeprom_verl = (fw_version & NVM_MINOR_MASK)
>> NVM_MINOR_SHIFT;
}
/* Convert minor value to hex before assigning to output struct
* Val to be converted will not be higher than 99, per tool output
*/
q = eeprom_verl / NVM_HEX_CONV;
hval = q * NVM_HEX_TENS;
rem = eeprom_verl % NVM_HEX_CONV;
result = hval + rem;
fw_vers->eep_minor = result;
etrack_id:
if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
| eeprom_verl;
} else if ((etrack_test & NVM_ETRACK_VALID) == 0) {
hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) |
eeprom_verl;
}
}
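The minor-version decode near the end of this function is easy to misread. The standalone sketch below inlines the constants (16 for NVM_HEX_CONV and 10 for NVM_HEX_TENS, an assumption based on the definitions this shared code uses elsewhere) to show that a BCD-style 0x23 read from the NVM becomes decimal 23:

	#include <stdio.h>

	/*
	 * Sketch of the eep_minor conversion; 16 and 10 are assumed
	 * values of NVM_HEX_CONV and NVM_HEX_TENS.
	 */
	int
	main(void)
	{
		unsigned int eeprom_verl = 0x23; /* minor version from NVM */
		unsigned int q = eeprom_verl / 16;   /* tens digit, high nibble */
		unsigned int rem = eeprom_verl % 16; /* ones digit, low nibble */

		printf("eep_minor = %u\n", q * 10 + rem); /* prints 23 */
		return (0);
	}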

View File

@ -35,12 +35,26 @@
#ifndef _E1000_NVM_H_
#define _E1000_NVM_H_
#if !defined(NO_READ_PBA_RAW) || !defined(NO_WRITE_PBA_RAW)
struct e1000_pba {
u16 word[2];
u16 *pba_block;
};
#endif
struct e1000_fw_version {
u32 etrack_id;
u16 eep_major;
u16 eep_minor;
u16 eep_build;
u8 invm_major;
u8 invm_minor;
u8 invm_img_type;
bool or_valid;
u16 or_major;
u16 or_build;
u16 or_patch;
};
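A hedged consumer sketch for the new structure; the field selection and output format are illustrative and not part of the commit, and "hw" is assumed to be a set-up struct e1000_hw:

	/* Hypothetical caller of e1000_get_fw_version(). */
	struct e1000_fw_version fw;

	e1000_get_fw_version(hw, &fw);
	printf("EEPROM %u.%u, eTrack 0x%08x\n",
	    fw.eep_major, fw.eep_minor, fw.etrack_id);
	if (fw.or_valid)
		printf("Option ROM %u.%u.%u\n",
		    fw.or_major, fw.or_build, fw.or_patch);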
void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
@ -52,6 +66,7 @@ s32 e1000_acquire_nvm_generic(struct e1000_hw *hw);
s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num);
s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
u32 pba_num_size);
s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
@ -76,6 +91,8 @@ s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
void e1000_stop_nvm(struct e1000_hw *hw);
void e1000_release_nvm_generic(struct e1000_hw *hw);
void e1000_get_fw_version(struct e1000_hw *hw,
struct e1000_fw_version *fw_vers);
#define E1000_STM_OPCODE 0xDB00

View File

@ -74,10 +74,6 @@
#define STATIC static
#define FALSE 0
#define TRUE 1
#ifndef __bool_true_false_are_defined
#define false FALSE
#define true TRUE
#endif
#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
#define PCI_COMMAND_REGISTER PCIR_COMMAND
@ -99,9 +95,6 @@ typedef int64_t s64;
typedef int32_t s32;
typedef int16_t s16;
typedef int8_t s8;
#ifndef __bool_true_false_are_defined
typedef boolean_t bool;
#endif
#define __le16 u16
#define __le32 u32

View File

@ -1827,9 +1827,9 @@ s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
phy_data);
if (ret_val)
return ret_val;
}
DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
}
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
if (ret_val)

View File

@ -65,6 +65,9 @@
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
@ -107,6 +110,8 @@
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
#define E1000_IOSFPC 0x00F28 /* TX corrupted data */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
@ -588,6 +593,10 @@
#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */
#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */
#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
#define E1000_SYSSTMPL 0x0B648 /* HH Timesync system stamp low register */
#define E1000_SYSSTMPH 0x0B64C /* HH Timesync system stamp hi register */
#define E1000_PLTSTMPL 0x0B640 /* HH Timesync platform stamp low register */
#define E1000_PLTSTMPH 0x0B644 /* HH Timesync platform stamp hi register */
#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
@ -680,7 +689,6 @@
#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
#define E1000_DOBFFCTL 0x3F24 /* DMA OBFF Control Register */
#endif

View File

@ -541,9 +541,9 @@ igb_attach(device_t dev)
"Disable Energy Efficient Ethernet");
if (adapter->hw.phy.media_type == e1000_media_type_copper) {
if (adapter->hw.mac.type == e1000_i354)
e1000_set_eee_i354(&adapter->hw);
e1000_set_eee_i354(&adapter->hw, TRUE, TRUE);
else
e1000_set_eee_i350(&adapter->hw);
e1000_set_eee_i350(&adapter->hw, TRUE, TRUE);
}
}
@ -1330,9 +1330,9 @@ igb_init_locked(struct adapter *adapter)
/* Set Energy Efficient Ethernet */
if (adapter->hw.phy.media_type == e1000_media_type_copper) {
if (adapter->hw.mac.type == e1000_i354)
e1000_set_eee_i354(&adapter->hw);
e1000_set_eee_i354(&adapter->hw, TRUE, TRUE);
else
e1000_set_eee_i350(&adapter->hw);
e1000_set_eee_i350(&adapter->hw, TRUE, TRUE);
}
}
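The EEE setters gained two boolean parameters in this update. Judging from the matching shared-code prototypes they select 1 Gb/s and 100 Mb/s EEE advertisement respectively (an assumption; the diff itself does not name them), so a driver wanting EEE only at gigabit speed might do:

	/* Sketch assuming the signature is
	 * e1000_set_eee_i350(hw, adv1G, adv100M). */
	if (adapter->hw.mac.type == e1000_i354)
		e1000_set_eee_i354(&adapter->hw, TRUE, FALSE);
	else
		e1000_set_eee_i350(&adapter->hw, TRUE, FALSE);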

View File

@ -3351,7 +3351,7 @@ rt2860_read_eeprom(struct rt2860_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
/* read EEPROM version */
val = rt2860_srom_read(sc, RT2860_EEPROM_VERSION);
DPRINTF(("EEPROM rev=%d, FAE=%d\n", val & 0xff, val >> 8));
DPRINTF(("EEPROM rev=%d, FAE=%d\n", val >> 8, val & 0xff));
/* read MAC address */
val = rt2860_srom_read(sc, RT2860_EEPROM_MAC01);
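The one-line fix above swaps the two bytes of the EEPROM version word. A restatement of the corrected decode, with the byte assignment inferred from the fixed DPRINTF rather than from a datasheet:

	uint16_t val = rt2860_srom_read(sc, RT2860_EEPROM_VERSION);
	uint8_t rev = val >> 8;   /* EEPROM revision (high byte) */
	uint8_t fae = val & 0xff; /* FAE code (low byte) */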

View File

@ -238,6 +238,7 @@ static const struct rl_hwrev re_hwrevs[] = {
{ RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K},
{ RL_HWREV_8168G, RL_8169, "8168G/8111G", RL_JUMBO_MTU_9K},
{ RL_HWREV_8168GU, RL_8169, "8168GU/8111GU", RL_JUMBO_MTU_9K},
{ RL_HWREV_8168H, RL_8169, "8168H/8111H", RL_JUMBO_MTU_9K},
{ RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K},
{ RL_HWREV_8411B, RL_8169, "8411B", RL_JUMBO_MTU_9K},
{ 0, 0, NULL, 0 }
@ -1485,6 +1486,7 @@ re_attach(device_t dev)
break;
case RL_HWREV_8168EP:
case RL_HWREV_8168G:
case RL_HWREV_8168H:
case RL_HWREV_8411B:
sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |

View File

@ -195,6 +195,7 @@
#define RL_HWREV_8168G 0x4C000000
#define RL_HWREV_8168EP 0x50000000
#define RL_HWREV_8168GU 0x50800000
#define RL_HWREV_8168H 0x54000000
#define RL_HWREV_8411B 0x5C800000
#define RL_HWREV_8139 0x60000000
#define RL_HWREV_8139A 0x70000000

View File

@ -127,6 +127,7 @@ static const struct pci_id pci_ns8250_ids[] = {
24 * DEFAULT_RCLK, 2 },
{ 0x8086, 0x1c3d, 0xffff, 0, "Intel AMT - KT Controller", 0x10 },
{ 0x8086, 0x1d3d, 0xffff, 0, "Intel C600/X79 Series Chipset KT Controller", 0x10 },
{ 0x8086, 0x1e3d, 0xffff, 0, "Intel Panther Point KT Controller", 0x10 },
{ 0x8086, 0x2a07, 0xffff, 0, "Intel AMT - PM965/GM965 KT Controller", 0x10 },
{ 0x8086, 0x2a47, 0xffff, 0, "Mobile 4 Series Chipset KT Controller", 0x10 },
{ 0x8086, 0x2e17, 0xffff, 0, "4 Series Chipset Serial KT Controller", 0x10 },

View File

@ -223,7 +223,7 @@ uhci_pci_match(device_t self)
case 0x76028086:
return ("Intel 82372FB/82468GX USB controller");
case 0x3309103c:
case 0x3300103c:
return ("HP iLO Standard Virtual USB controller");
case 0x30381106:

View File

@ -113,6 +113,9 @@ xhci_pci_match(device_t self)
case 0x8cb18086:
return ("Intel Wildcat Point USB 3.0 controller");
case 0xa01b177d:
return ("Cavium ThunderX USB 3.0 controller");
default:
break;
}

View File

@ -525,6 +525,7 @@ static const STRUCT_USB_HOST_ID u3g_devs[] = {
U3G_DEV(SIERRA, MC5727_2, 0),
U3G_DEV(SIERRA, MC5728, 0),
U3G_DEV(SIERRA, MC7354, 0),
U3G_DEV(SIERRA, MC7355, 0),
U3G_DEV(SIERRA, MC8700, 0),
U3G_DEV(SIERRA, MC8755, 0),
U3G_DEV(SIERRA, MC8755_2, 0),

Some files were not shown because too many files have changed in this diff