diff --git a/cddl/contrib/dtracetoolkit/Proc/pidpersec.d b/cddl/contrib/dtracetoolkit/Proc/pidpersec.d index 71080b9e7527..ab57b66ee581 100755 --- a/cddl/contrib/dtracetoolkit/Proc/pidpersec.d +++ b/cddl/contrib/dtracetoolkit/Proc/pidpersec.d @@ -41,7 +41,7 @@ dtrace:::BEGIN { - printf("%-22s %8s %6s\n", "TIME", "LASTPID", "PID/s"); + printf("%-22s %6s\n", "TIME", "PID/s"); pids = 0; } @@ -52,6 +52,6 @@ proc:::exec-success profile:::tick-1sec { - printf("%-22Y %8d %6d\n", walltimestamp, `mpid, pids); + printf("%-22Y %6d\n", walltimestamp, pids); pids = 0; } diff --git a/cddl/contrib/opensolaris/common/ctf/ctf_create.c b/cddl/contrib/opensolaris/common/ctf/ctf_create.c index 736481482751..1c0988a38783 100644 --- a/cddl/contrib/opensolaris/common/ctf/ctf_create.c +++ b/cddl/contrib/opensolaris/common/ctf/ctf_create.c @@ -583,10 +583,10 @@ ctf_discard(ctf_file_t *fp) return (0); /* no update required */ for (dtd = ctf_list_prev(&fp->ctf_dtdefs); dtd != NULL; dtd = ntd) { - if (dtd->dtd_type <= fp->ctf_dtoldid) + ntd = ctf_list_prev(dtd); + if (CTF_TYPE_TO_INDEX(dtd->dtd_type) <= fp->ctf_dtoldid) continue; /* skip types that have been committed */ - ntd = ctf_list_prev(dtd); ctf_dtd_delete(fp, dtd); } @@ -1313,10 +1313,13 @@ ctf_add_type(ctf_file_t *dst_fp, ctf_file_t *src_fp, ctf_id_t src_type) * unless dst_type is a forward declaration and src_type is a struct, * union, or enum (i.e. the definition of the previous forward decl). */ - if (dst_type != CTF_ERR && dst_kind != kind && ( - dst_kind != CTF_K_FORWARD || (kind != CTF_K_ENUM && - kind != CTF_K_STRUCT && kind != CTF_K_UNION))) - return (ctf_set_errno(dst_fp, ECTF_CONFLICT)); + if (dst_type != CTF_ERR && dst_kind != kind) { + if (dst_kind != CTF_K_FORWARD || (kind != CTF_K_ENUM && + kind != CTF_K_STRUCT && kind != CTF_K_UNION)) + return (ctf_set_errno(dst_fp, ECTF_CONFLICT)); + else + dst_type = CTF_ERR; + } /* * If the non-empty name was not found in the appropriate hash, search @@ -1328,7 +1331,7 @@ ctf_add_type(ctf_file_t *dst_fp, ctf_file_t *src_fp, ctf_id_t src_type) */ if (dst_type == CTF_ERR && name[0] != '\0') { for (dtd = ctf_list_prev(&dst_fp->ctf_dtdefs); dtd != NULL && - dtd->dtd_type > dst_fp->ctf_dtoldid; + CTF_TYPE_TO_INDEX(dtd->dtd_type) > dst_fp->ctf_dtoldid; dtd = ctf_list_prev(dtd)) { if (CTF_INFO_KIND(dtd->dtd_data.ctt_info) == kind && dtd->dtd_name != NULL && diff --git a/cddl/contrib/opensolaris/tools/ctf/cvt/output.c b/cddl/contrib/opensolaris/tools/ctf/cvt/output.c index 3131e7fb85ee..3385e4b2d09f 100644 --- a/cddl/contrib/opensolaris/tools/ctf/cvt/output.c +++ b/cddl/contrib/opensolaris/tools/ctf/cvt/output.c @@ -23,8 +23,6 @@ * Use is subject to license terms. */ -#pragma ident "%Z%%M% %I% %E% SMI" - /* * Routines for preparing tdata trees for conversion into CTF data, and * for placing the resulting data into an output file. 
diff --git a/lib/libc/net/sctp_sys_calls.c b/lib/libc/net/sctp_sys_calls.c index 6971c606dbcd..f07aa4321ca8 100644 --- a/lib/libc/net/sctp_sys_calls.c +++ b/lib/libc/net/sctp_sys_calls.c @@ -383,6 +383,9 @@ sctp_opt_info(int sd, sctp_assoc_t id, int opt, void *arg, socklen_t * size) case SCTP_PR_ASSOC_STATUS: ((struct sctp_prstatus *)arg)->sprstat_assoc_id = id; break; + case SCTP_MAX_CWND: + ((struct sctp_assoc_value *)arg)->assoc_id = id; + break; default: break; } diff --git a/lib/libc/powerpc/gen/_setjmp.S b/lib/libc/powerpc/gen/_setjmp.S index e28386c24b9c..f7f3d64e1b73 100644 --- a/lib/libc/powerpc/gen/_setjmp.S +++ b/lib/libc/powerpc/gen/_setjmp.S @@ -56,12 +56,54 @@ ENTRY(_setjmp) mr %r10,%r1 mr %r9,%r2 stmw %r9,20(%r3) + + /* FPRs */ + stfd %f14,92+0*8(%r3) + stfd %f15,92+1*8(%r3) + stfd %f16,92+2*8(%r3) + stfd %f17,92+3*8(%r3) + stfd %f18,92+4*8(%r3) + stfd %f19,92+5*8(%r3) + stfd %f20,92+6*8(%r3) + stfd %f21,92+7*8(%r3) + stfd %f22,92+8*8(%r3) + stfd %f23,92+9*8(%r3) + stfd %f24,92+10*8(%r3) + stfd %f25,92+11*8(%r3) + stfd %f26,92+12*8(%r3) + stfd %f27,92+13*8(%r3) + stfd %f28,93+13*8(%r3) + stfd %f29,93+14*8(%r3) + stfd %f30,93+15*8(%r3) + stfd %f31,93+16*8(%r3) + li %r3,0 blr END(_setjmp) ENTRY(_longjmp) lmw %r9,20(%r3) + + /* FPRs */ + lfd %f14,92+0*8(%r3) + lfd %f15,92+1*8(%r3) + lfd %f16,92+2*8(%r3) + lfd %f17,92+3*8(%r3) + lfd %f18,92+4*8(%r3) + lfd %f19,92+5*8(%r3) + lfd %f20,92+6*8(%r3) + lfd %f21,92+7*8(%r3) + lfd %f22,92+8*8(%r3) + lfd %f23,92+9*8(%r3) + lfd %f24,92+10*8(%r3) + lfd %f25,92+11*8(%r3) + lfd %f26,92+12*8(%r3) + lfd %f27,92+13*8(%r3) + lfd %f28,93+13*8(%r3) + lfd %f29,93+14*8(%r3) + lfd %f30,93+15*8(%r3) + lfd %f31,93+16*8(%r3) + mtlr %r11 mtcr %r12 mr %r1,%r10 diff --git a/lib/libc/powerpc/gen/setjmp.S b/lib/libc/powerpc/gen/setjmp.S index 9325fc2378cb..881c24e1e813 100644 --- a/lib/libc/powerpc/gen/setjmp.S +++ b/lib/libc/powerpc/gen/setjmp.S @@ -66,6 +66,27 @@ ENTRY(setjmp) mr %r10,%r1 /* r10 <- stackptr */ mr %r9,%r2 /* r9 <- global ptr */ stmw %r9,20(%r6) + + /* FPRs */ + stfd %f14,92+0*8(%r6) + stfd %f15,92+1*8(%r6) + stfd %f16,92+2*8(%r6) + stfd %f17,92+3*8(%r6) + stfd %f18,92+4*8(%r6) + stfd %f19,92+5*8(%r6) + stfd %f20,92+6*8(%r6) + stfd %f21,92+7*8(%r6) + stfd %f22,92+8*8(%r6) + stfd %f23,92+9*8(%r6) + stfd %f24,92+10*8(%r6) + stfd %f25,92+11*8(%r6) + stfd %f26,92+12*8(%r6) + stfd %f27,92+13*8(%r6) + stfd %f28,93+13*8(%r6) + stfd %f29,93+14*8(%r6) + stfd %f30,93+15*8(%r6) + stfd %f31,93+16*8(%r6) + li %r3,0 /* return (0) */ blr END(setjmp) @@ -73,6 +94,27 @@ END(setjmp) WEAK_REFERENCE(CNAME(__longjmp), longjmp) ENTRY(__longjmp) lmw %r9,20(%r3) /* restore regs */ + + /* FPRs */ + lfd %f14,92+0*8(%r3) + lfd %f15,92+1*8(%r3) + lfd %f16,92+2*8(%r3) + lfd %f17,92+3*8(%r3) + lfd %f18,92+4*8(%r3) + lfd %f19,92+5*8(%r3) + lfd %f20,92+6*8(%r3) + lfd %f21,92+7*8(%r3) + lfd %f22,92+8*8(%r3) + lfd %f23,92+9*8(%r3) + lfd %f24,92+10*8(%r3) + lfd %f25,92+11*8(%r3) + lfd %f26,92+12*8(%r3) + lfd %f27,92+13*8(%r3) + lfd %f28,93+13*8(%r3) + lfd %f29,93+14*8(%r3) + lfd %f30,93+15*8(%r3) + lfd %f31,93+16*8(%r3) + mr %r6,%r4 /* save val param */ mtlr %r11 /* r11 -> link reg */ mtcr %r12 /* r12 -> condition reg */ diff --git a/lib/libc/powerpc/gen/sigsetjmp.S b/lib/libc/powerpc/gen/sigsetjmp.S index c67afc610316..ec7460aacaa3 100644 --- a/lib/libc/powerpc/gen/sigsetjmp.S +++ b/lib/libc/powerpc/gen/sigsetjmp.S @@ -71,12 +71,54 @@ ENTRY(sigsetjmp) mr %r10,%r1 mr %r9,%r2 stmw %r9,20(%r6) + + /* FPRs */ + stfd %f14,92+0*8(%r6) + stfd %f15,92+1*8(%r6) + stfd 
%f16,92+2*8(%r6) + stfd %f17,92+3*8(%r6) + stfd %f18,92+4*8(%r6) + stfd %f19,92+5*8(%r6) + stfd %f20,92+6*8(%r6) + stfd %f21,92+7*8(%r6) + stfd %f22,92+8*8(%r6) + stfd %f23,92+9*8(%r6) + stfd %f24,92+10*8(%r6) + stfd %f25,92+11*8(%r6) + stfd %f26,92+12*8(%r6) + stfd %f27,92+13*8(%r6) + stfd %f28,93+13*8(%r6) + stfd %f29,93+14*8(%r6) + stfd %f30,93+15*8(%r6) + stfd %f31,93+16*8(%r6) + li %r3,0 blr END(sigsetjmp) ENTRY(siglongjmp) lmw %r9,20(%r3) + + /* FPRs */ + lfd %f14,92+0*8(%r3) + lfd %f15,92+1*8(%r3) + lfd %f16,92+2*8(%r3) + lfd %f17,92+3*8(%r3) + lfd %f18,92+4*8(%r3) + lfd %f19,92+5*8(%r3) + lfd %f20,92+6*8(%r3) + lfd %f21,92+7*8(%r3) + lfd %f22,92+8*8(%r3) + lfd %f23,92+9*8(%r3) + lfd %f24,92+10*8(%r3) + lfd %f25,92+11*8(%r3) + lfd %f26,92+12*8(%r3) + lfd %f27,92+13*8(%r3) + lfd %f28,93+13*8(%r3) + lfd %f29,93+14*8(%r3) + lfd %f30,93+15*8(%r3) + lfd %f31,93+16*8(%r3) + lwz %r7,0(%r3) mr %r6,%r4 mtlr %r11 diff --git a/lib/libc/powerpc64/gen/_setjmp.S b/lib/libc/powerpc64/gen/_setjmp.S index 207c4f7d10d9..f7689aeba350 100644 --- a/lib/libc/powerpc64/gen/_setjmp.S +++ b/lib/libc/powerpc64/gen/_setjmp.S @@ -56,23 +56,41 @@ ENTRY(_setjmp) mr %r10,%r1 mr %r9,%r2 std %r9,40 + 0*8(%r3) + stfd %f14,40 + 23*8(%r3) std %r10,40 + 1*8(%r3) + stfd %f15,40 + 24*8(%r3) std %r11,40 + 2*8(%r3) + stfd %f16,40 + 25*8(%r3) std %r12,40 + 3*8(%r3) + stfd %f17,40 + 26*8(%r3) std %r13,40 + 4*8(%r3) + stfd %f18,40 + 27*8(%r3) std %r14,40 + 5*8(%r3) + stfd %f19,40 + 28*8(%r3) std %r15,40 + 6*8(%r3) + stfd %f20,40 + 29*8(%r3) std %r16,40 + 7*8(%r3) + stfd %f21,40 + 30*8(%r3) std %r17,40 + 8*8(%r3) + stfd %f22,40 + 31*8(%r3) std %r18,40 + 9*8(%r3) + stfd %f23,40 + 32*8(%r3) std %r19,40 + 10*8(%r3) + stfd %f24,40 + 33*8(%r3) std %r20,40 + 11*8(%r3) + stfd %f25,40 + 34*8(%r3) std %r21,40 + 12*8(%r3) + stfd %f26,40 + 35*8(%r3) std %r22,40 + 13*8(%r3) + stfd %f27,40 + 36*8(%r3) std %r23,40 + 14*8(%r3) + stfd %f28,40 + 37*8(%r3) std %r24,40 + 15*8(%r3) + stfd %f29,40 + 38*8(%r3) std %r25,40 + 16*8(%r3) + stfd %f30,40 + 39*8(%r3) std %r26,40 + 17*8(%r3) + stfd %f31,40 + 40*8(%r3) std %r27,40 + 18*8(%r3) std %r28,40 + 19*8(%r3) std %r29,40 + 20*8(%r3) @@ -84,23 +102,41 @@ END(_setjmp) ENTRY(_longjmp) ld %r9,40 + 0*8(%r3) + lfd %f14,40 + 23*8(%r3) ld %r10,40 + 1*8(%r3) + lfd %f15,40 + 24*8(%r3) ld %r11,40 + 2*8(%r3) + lfd %f16,40 + 25*8(%r3) ld %r12,40 + 3*8(%r3) + lfd %f17,40 + 26*8(%r3) ld %r14,40 + 5*8(%r3) + lfd %f18,40 + 27*8(%r3) ld %r15,40 + 6*8(%r3) + lfd %f19,40 + 28*8(%r3) ld %r16,40 + 7*8(%r3) + lfd %f20,40 + 29*8(%r3) ld %r17,40 + 8*8(%r3) + lfd %f21,40 + 30*8(%r3) ld %r18,40 + 9*8(%r3) + lfd %f22,40 + 31*8(%r3) ld %r19,40 + 10*8(%r3) + lfd %f23,40 + 32*8(%r3) ld %r20,40 + 11*8(%r3) + lfd %f24,40 + 33*8(%r3) ld %r21,40 + 12*8(%r3) + lfd %f25,40 + 34*8(%r3) ld %r22,40 + 13*8(%r3) + lfd %f26,40 + 35*8(%r3) ld %r23,40 + 14*8(%r3) + lfd %f27,40 + 36*8(%r3) ld %r24,40 + 15*8(%r3) + lfd %f28,40 + 37*8(%r3) ld %r25,40 + 16*8(%r3) + lfd %f29,40 + 38*8(%r3) ld %r26,40 + 17*8(%r3) + lfd %f30,40 + 39*8(%r3) ld %r27,40 + 18*8(%r3) + lfd %f31,40 + 40*8(%r3) ld %r28,40 + 19*8(%r3) ld %r29,40 + 20*8(%r3) ld %r30,40 + 21*8(%r3) diff --git a/lib/libc/powerpc64/gen/setjmp.S b/lib/libc/powerpc64/gen/setjmp.S index 14954667b9b4..5eb395e7c30d 100644 --- a/lib/libc/powerpc64/gen/setjmp.S +++ b/lib/libc/powerpc64/gen/setjmp.S @@ -67,29 +67,49 @@ ENTRY(setjmp) mr %r9,%r2 /* r9 <- global ptr */ std %r9,40 + 0*8(%r6) + stfd %f14,40 + 23*8(%r6) std %r10,40 + 1*8(%r6) + stfd %f15,40 + 24*8(%r6) std %r11,40 + 2*8(%r6) + stfd %f16,40 + 
25*8(%r6) std %r12,40 + 3*8(%r6) + stfd %f17,40 + 26*8(%r6) std %r13,40 + 4*8(%r6) + stfd %f18,40 + 27*8(%r6) std %r14,40 + 5*8(%r6) + stfd %f19,40 + 28*8(%r6) std %r15,40 + 6*8(%r6) + stfd %f20,40 + 29*8(%r6) std %r16,40 + 7*8(%r6) + stfd %f21,40 + 30*8(%r6) std %r17,40 + 8*8(%r6) + stfd %f22,40 + 31*8(%r6) std %r18,40 + 9*8(%r6) + stfd %f23,40 + 32*8(%r6) std %r19,40 + 10*8(%r6) + stfd %f24,40 + 33*8(%r6) std %r20,40 + 11*8(%r6) + stfd %f25,40 + 34*8(%r6) std %r21,40 + 12*8(%r6) + stfd %f26,40 + 35*8(%r6) std %r22,40 + 13*8(%r6) + stfd %f27,40 + 36*8(%r6) std %r23,40 + 14*8(%r6) + stfd %f28,40 + 37*8(%r6) std %r24,40 + 15*8(%r6) + stfd %f29,40 + 38*8(%r6) std %r25,40 + 16*8(%r6) + stfd %f30,40 + 39*8(%r6) std %r26,40 + 17*8(%r6) + stfd %f31,40 + 40*8(%r6) std %r27,40 + 18*8(%r6) std %r28,40 + 19*8(%r6) std %r29,40 + 20*8(%r6) std %r30,40 + 21*8(%r6) std %r31,40 + 22*8(%r6) + /* XXX Altivec regs */ + li %r3,0 /* return (0) */ blr END(setjmp) @@ -97,23 +117,41 @@ END(setjmp) WEAK_REFERENCE(__longjmp, longjmp) ENTRY(__longjmp) ld %r9,40 + 0*8(%r3) + lfd %f14,40 + 23*8(%r3) ld %r10,40 + 1*8(%r3) + lfd %f15,40 + 24*8(%r3) ld %r11,40 + 2*8(%r3) + lfd %f16,40 + 25*8(%r3) ld %r12,40 + 3*8(%r3) + lfd %f17,40 + 26*8(%r3) ld %r14,40 + 5*8(%r3) + lfd %f18,40 + 27*8(%r3) ld %r15,40 + 6*8(%r3) + lfd %f19,40 + 28*8(%r3) ld %r16,40 + 7*8(%r3) + lfd %f20,40 + 29*8(%r3) ld %r17,40 + 8*8(%r3) + lfd %f21,40 + 30*8(%r3) ld %r18,40 + 9*8(%r3) + lfd %f22,40 + 31*8(%r3) ld %r19,40 + 10*8(%r3) + lfd %f23,40 + 32*8(%r3) ld %r20,40 + 11*8(%r3) + lfd %f24,40 + 33*8(%r3) ld %r21,40 + 12*8(%r3) + lfd %f25,40 + 34*8(%r3) ld %r22,40 + 13*8(%r3) + lfd %f26,40 + 35*8(%r3) ld %r23,40 + 14*8(%r3) + lfd %f27,40 + 36*8(%r3) ld %r24,40 + 15*8(%r3) + lfd %f28,40 + 37*8(%r3) ld %r25,40 + 16*8(%r3) + lfd %f29,40 + 38*8(%r3) ld %r26,40 + 17*8(%r3) + lfd %f30,40 + 39*8(%r3) ld %r27,40 + 18*8(%r3) + lfd %f31,40 + 40*8(%r3) ld %r28,40 + 19*8(%r3) ld %r29,40 + 20*8(%r3) ld %r30,40 + 21*8(%r3) diff --git a/lib/libc/powerpc64/gen/sigsetjmp.S b/lib/libc/powerpc64/gen/sigsetjmp.S index 5cfd684d38da..c0648a632f1e 100644 --- a/lib/libc/powerpc64/gen/sigsetjmp.S +++ b/lib/libc/powerpc64/gen/sigsetjmp.S @@ -72,23 +72,41 @@ ENTRY(sigsetjmp) mr %r9,%r2 std %r9,40 + 0*8(%r6) + stfd %f14,40 + 23*8(%r6) std %r10,40 + 1*8(%r6) + stfd %f15,40 + 24*8(%r6) std %r11,40 + 2*8(%r6) + stfd %f16,40 + 25*8(%r6) std %r12,40 + 3*8(%r6) + stfd %f17,40 + 26*8(%r6) std %r13,40 + 4*8(%r6) + stfd %f18,40 + 27*8(%r6) std %r14,40 + 5*8(%r6) + stfd %f19,40 + 28*8(%r6) std %r15,40 + 6*8(%r6) + stfd %f20,40 + 29*8(%r6) std %r16,40 + 7*8(%r6) + stfd %f21,40 + 30*8(%r6) std %r17,40 + 8*8(%r6) + stfd %f22,40 + 31*8(%r6) std %r18,40 + 9*8(%r6) + stfd %f23,40 + 32*8(%r6) std %r19,40 + 10*8(%r6) + stfd %f24,40 + 33*8(%r6) std %r20,40 + 11*8(%r6) + stfd %f25,40 + 34*8(%r6) std %r21,40 + 12*8(%r6) + stfd %f26,40 + 35*8(%r6) std %r22,40 + 13*8(%r6) + stfd %f27,40 + 36*8(%r6) std %r23,40 + 14*8(%r6) + stfd %f28,40 + 37*8(%r6) std %r24,40 + 15*8(%r6) + stfd %f29,40 + 38*8(%r6) std %r25,40 + 16*8(%r6) + stfd %f30,40 + 39*8(%r6) std %r26,40 + 17*8(%r6) + stfd %f31,40 + 40*8(%r6) std %r27,40 + 18*8(%r6) std %r28,40 + 19*8(%r6) std %r29,40 + 20*8(%r6) @@ -101,23 +119,41 @@ END(sigsetjmp) ENTRY(siglongjmp) ld %r9,40 + 0*8(%r3) + lfd %f14,40 + 23*8(%r3) ld %r10,40 + 1*8(%r3) + lfd %f15,40 + 24*8(%r3) ld %r11,40 + 2*8(%r3) + lfd %f16,40 + 25*8(%r3) ld %r12,40 + 3*8(%r3) + lfd %f17,40 + 26*8(%r3) ld %r14,40 + 5*8(%r3) + lfd %f18,40 + 27*8(%r3) ld %r15,40 + 6*8(%r3) + lfd %f19,40 + 
28*8(%r3) ld %r16,40 + 7*8(%r3) + lfd %f20,40 + 29*8(%r3) ld %r17,40 + 8*8(%r3) + lfd %f21,40 + 30*8(%r3) ld %r18,40 + 9*8(%r3) + lfd %f22,40 + 31*8(%r3) ld %r19,40 + 10*8(%r3) + lfd %f23,40 + 32*8(%r3) ld %r20,40 + 11*8(%r3) + lfd %f24,40 + 33*8(%r3) ld %r21,40 + 12*8(%r3) + lfd %f25,40 + 34*8(%r3) ld %r22,40 + 13*8(%r3) + lfd %f26,40 + 35*8(%r3) ld %r23,40 + 14*8(%r3) + lfd %f27,40 + 36*8(%r3) ld %r24,40 + 15*8(%r3) + lfd %f28,40 + 37*8(%r3) ld %r25,40 + 16*8(%r3) + lfd %f29,40 + 38*8(%r3) ld %r26,40 + 17*8(%r3) + lfd %f30,40 + 39*8(%r3) ld %r27,40 + 18*8(%r3) + lfd %f31,40 + 40*8(%r3) ld %r28,40 + 19*8(%r3) ld %r29,40 + 20*8(%r3) ld %r30,40 + 21*8(%r3) diff --git a/lib/libgpio/gpio.3 b/lib/libgpio/gpio.3 index 12b2d1eeed1f..70f6b38c0622 100644 --- a/lib/libgpio/gpio.3 +++ b/lib/libgpio/gpio.3 @@ -25,7 +25,7 @@ .\" .\" $FreeBSD$ .\" -.Dd November 17, 2014 +.Dd March 8, 2015 .Dt GPIO 3 .Os .Sh NAME @@ -43,41 +43,43 @@ .Ft void .Fn gpio_close "gpio_handle_t handle" .Ft int -.Fn gpio_pin_list "gpio_handle_t handle, gpio_config_t **pcfgs" +.Fn gpio_pin_list "gpio_handle_t handle" "gpio_config_t **pcfgs" .Ft int -.Fn gpio_pin_config "gpio_handle_t handle, gpio_config *cfg" +.Fn gpio_pin_config "gpio_handle_t handle" "gpio_config_t *cfg" .Ft int -.Fn gpio_pin_set_flags "gpio_handle_t handle, gpio_config_t *cfg" +.Fn gpio_pin_set_name "gpio_handle_t handle" "gpio_pin_t pin" "char *name" +.Ft int +.Fn gpio_pin_set_flags "gpio_handle_t handle" "gpio_config_t *cfg" .Ft gpio_value_t -.Fn gpio_pin_get "gpio_handle_t handle, gpio_pin_t pin" +.Fn gpio_pin_get "gpio_handle_t handle" "gpio_pin_t pin" .Ft int -.Fn gpio_pin_set "gpio_handle_t handle, gpio_pin_t pin, gpio_value_t value" +.Fn gpio_pin_set "gpio_handle_t handle" "gpio_pin_t pin" "gpio_value_t value" .Ft int -.Fn gpio_pin_toggle "gpio_handle_t handle, gpio_pin_t pin" +.Fn gpio_pin_toggle "gpio_handle_t handle" "gpio_pin_t pin" .Ft int -.Fn gpio_pin_low "gpio_handle_t handle, gpio_pin_t pin" +.Fn gpio_pin_low "gpio_handle_t handle" "gpio_pin_t pin" .Ft int -.Fn gpio_pin_high "gpio_handle_t handle, gpio_pin_t pin" +.Fn gpio_pin_high "gpio_handle_t handle" "gpio_pin_t pin" .Ft int -.Fn gpio_pin_input "gpio_handle_t handle, gpio_pin_t pin" +.Fn gpio_pin_input "gpio_handle_t handle" "gpio_pin_t pin" .Ft int -.Fn gpio_pin_output "gpio_handle_t handle, gpio_pin_t pin" +.Fn gpio_pin_output "gpio_handle_t handle" "gpio_pin_t pin" .Ft int -.Fn gpio_pin_opendrain "gpio_handle_t handle, gpio_pin_t pin" +.Fn gpio_pin_opendrain "gpio_handle_t handle" "gpio_pin_t pin" .Ft int -.Fn gpio_pin_pushpull "gpio_handle_t handle, gpio_pin_t pin" +.Fn gpio_pin_pushpull "gpio_handle_t handle" "gpio_pin_t pin" .Ft int -.Fn gpio_pin_tristate "gpio_handle_t handle, gpio_pin_t pin" +.Fn gpio_pin_tristate "gpio_handle_t handle" "gpio_pin_t pin" .Ft int -.Fn gpio_pin_pullup "gpio_handle_t handle, gpio_pin_t pin" +.Fn gpio_pin_pullup "gpio_handle_t handle" "gpio_pin_t pin" .Ft int -.Fn gpio_pin_pulldown "gpio_handle_t handle, gpio_pin_t pin" +.Fn gpio_pin_pulldown "gpio_handle_t handle" "gpio_pin_t pin" .Ft int -.Fn gpio_pin_invin "gpio_handle_t handle, gpio_pin_t pin" +.Fn gpio_pin_invin "gpio_handle_t handle" "gpio_pin_t pin" .Ft int -.Fn gpio_pin_invout "gpio_handle_t handle, gpio_pin_t pin" +.Fn gpio_pin_invout "gpio_handle_t handle" "gpio_pin_t pin" .Ft int -.Fn gpio_pin_pulsate "gpio_handle_t handle, gpio_pin_t pin" +.Fn gpio_pin_pulsate "gpio_handle_t handle" "gpio_pin_t pin" .Sh DESCRIPTION The .Nm libgpio @@ -99,7 +101,7 @@ This function takes a pointer to a which 
is dynamically allocated. This pointer should be freed with .Xr free 3 -when it's no longer necessary. +when it is no longer necessary. .Pp The function .Fn gpio_pin_config @@ -111,6 +113,10 @@ variable which is part of the structure. .Pp The function +.Fn gpio_pin_set_name +sets the name used to describe a pin. +.Pp +The function .Fn gpio_pin_set_flags configures a pin with the flags passed in by the .Ft gpio_config_t diff --git a/lib/libgpio/gpio.c b/lib/libgpio/gpio.c index 8eb68712057a..7ec0955b096b 100644 --- a/lib/libgpio/gpio.c +++ b/lib/libgpio/gpio.c @@ -118,6 +118,22 @@ gpio_pin_config(gpio_handle_t handle, gpio_config_t *cfg) return (0); } +int +gpio_pin_set_name(gpio_handle_t handle, gpio_pin_t pin, char *name) +{ + struct gpio_pin gppin; + + if (name == NULL) + return (-1); + bzero(&gppin, sizeof(gppin)); + gppin.gp_pin = pin; + strlcpy(gppin.gp_name, name, GPIOMAXNAME); + if (ioctl(handle, GPIOSETNAME, &gppin) < 0) + return (-1); + + return (0); +} + int gpio_pin_set_flags(gpio_handle_t handle, gpio_config_t *cfg) { diff --git a/lib/libgpio/libgpio.h b/lib/libgpio/libgpio.h index b7486ebc5472..a832234ca13e 100644 --- a/lib/libgpio/libgpio.h +++ b/lib/libgpio/libgpio.h @@ -70,6 +70,11 @@ int gpio_pin_list(gpio_handle_t, gpio_config_t **); * passed through the gpio_config_t structure. */ int gpio_pin_config(gpio_handle_t, gpio_config_t *); +/* + * Sets the GPIO pin name. The pin number and pin name to be set are passed + * as parameters. + */ +int gpio_pin_set_name(gpio_handle_t, gpio_pin_t, char *); /* * Sets the GPIO flags on a specific GPIO pin. The pin number and the flags * to be set are passed through the gpio_config_t structure. diff --git a/lib/libnv/tests/dnv_tests.cc b/lib/libnv/tests/dnv_tests.cc index 9d6987fa255d..ad26f38bb073 100644 --- a/lib/libnv/tests/dnv_tests.cc +++ b/lib/libnv/tests/dnv_tests.cc @@ -450,7 +450,7 @@ ATF_TEST_CASE_BODY(dnvlist_take_nvlist__empty) nvl = nvlist_create(0); actual_val = dnvlist_take_nvlist(nvl, "123", NULL); - ATF_REQUIRE_EQ(actual_val, NULL); + ATF_REQUIRE_EQ(actual_val, static_cast<nvlist_t *>(NULL)); free(actual_val); nvlist_destroy(nvl); diff --git a/lib/libnv/tests/nv_tests.cc b/lib/libnv/tests/nv_tests.cc index 94d8b56d297b..bfdc97231274 100644 --- a/lib/libnv/tests/nv_tests.cc +++ b/lib/libnv/tests/nv_tests.cc @@ -54,7 +54,7 @@ ATF_TEST_CASE_BODY(nvlist_create__is_empty) ATF_REQUIRE(nvlist_empty(nvl)); it = NULL; - ATF_REQUIRE_EQ(nvlist_next(nvl, &type, &it), NULL); + ATF_REQUIRE_EQ(nvlist_next(nvl, &type, &it), static_cast<const char *>(NULL)); nvlist_destroy(nvl); } @@ -85,7 +85,7 @@ ATF_TEST_CASE_BODY(nvlist_add_null__single_insert) it = NULL; ATF_REQUIRE_EQ(strcmp(nvlist_next(nvl, &type, &it), key), 0); ATF_REQUIRE_EQ(type, NV_TYPE_NULL); - ATF_REQUIRE_EQ(nvlist_next(nvl, &type,&it), NULL); + ATF_REQUIRE_EQ(nvlist_next(nvl, &type,&it), static_cast<const char *>(NULL)); nvlist_destroy(nvl); } @@ -118,7 +118,7 @@ ATF_TEST_CASE_BODY(nvlist_add_bool__single_insert) it = NULL; ATF_REQUIRE_EQ(strcmp(nvlist_next(nvl, &type, &it), key), 0); ATF_REQUIRE_EQ(type, NV_TYPE_BOOL); - ATF_REQUIRE_EQ(nvlist_next(nvl, &type,&it), NULL); + ATF_REQUIRE_EQ(nvlist_next(nvl, &type,&it), static_cast<const char *>(NULL)); nvlist_destroy(nvl); } @@ -153,7 +153,7 @@ ATF_TEST_CASE_BODY(nvlist_add_number__single_insert) it = NULL; ATF_REQUIRE_EQ(strcmp(nvlist_next(nvl, &type, &it), key), 0); ATF_REQUIRE_EQ(type, NV_TYPE_NUMBER); - ATF_REQUIRE_EQ(nvlist_next(nvl, &type,&it), NULL); + ATF_REQUIRE_EQ(nvlist_next(nvl, &type,&it), static_cast<const char *>(NULL)); nvlist_destroy(nvl); } @@ -191,7 +191,7 @@
ATF_TEST_CASE_BODY(nvlist_add_string__single_insert) it = NULL; ATF_REQUIRE_EQ(strcmp(nvlist_next(nvl, &type, &it), key), 0); ATF_REQUIRE_EQ(type, NV_TYPE_STRING); - ATF_REQUIRE_EQ(nvlist_next(nvl, &type,&it), NULL); + ATF_REQUIRE_EQ(nvlist_next(nvl, &type,&it), static_cast<const char *>(NULL)); nvlist_destroy(nvl); } @@ -237,7 +237,7 @@ ATF_TEST_CASE_BODY(nvlist_add_nvlist__single_insert) it = NULL; ATF_REQUIRE_EQ(strcmp(nvlist_next(nvl, &type, &it), key), 0); ATF_REQUIRE_EQ(type, NV_TYPE_NVLIST); - ATF_REQUIRE_EQ(nvlist_next(nvl, &type,&it), NULL); + ATF_REQUIRE_EQ(nvlist_next(nvl, &type,&it), static_cast<const char *>(NULL)); nvlist_destroy(sublist); nvlist_destroy(nvl); @@ -303,7 +303,7 @@ ATF_TEST_CASE_BODY(nvlist_add_binary__single_insert) it = NULL; ATF_REQUIRE_EQ(strcmp(nvlist_next(nvl, &type, &it), key), 0); ATF_REQUIRE_EQ(type, NV_TYPE_BINARY); - ATF_REQUIRE_EQ(nvlist_next(nvl, &type,&it), NULL); + ATF_REQUIRE_EQ(nvlist_next(nvl, &type,&it), static_cast<const char *>(NULL)); nvlist_destroy(nvl); free(value); @@ -352,7 +352,7 @@ ATF_TEST_CASE_BODY(nvlist_clone__nonempty_nvlist) it = NULL; ATF_REQUIRE_EQ(strcmp(nvlist_next(clone, &type, &it), key), 0); ATF_REQUIRE_EQ(type, NV_TYPE_NUMBER); - ATF_REQUIRE_EQ(nvlist_next(clone, &type, &it), NULL); + ATF_REQUIRE_EQ(nvlist_next(clone, &type, &it), static_cast<const char *>(NULL)); nvlist_destroy(clone); nvlist_destroy(nvl); @@ -400,13 +400,13 @@ verify_test_nvlist(const nvlist_t *nvl) ATF_REQUIRE_EQ(strcmp(nvlist_next(value, &type, &it), test_string_key), 0); ATF_REQUIRE_EQ(type, NV_TYPE_STRING); - ATF_REQUIRE_EQ(nvlist_next(value, &type, &it), NULL); + ATF_REQUIRE_EQ(nvlist_next(value, &type, &it), static_cast<const char *>(NULL)); it = NULL; ATF_REQUIRE_EQ(strcmp(nvlist_next(nvl, &type, &it), test_subnvlist_key), 0); ATF_REQUIRE_EQ(type, NV_TYPE_NVLIST); - ATF_REQUIRE_EQ(nvlist_next(nvl, &type, &it), NULL); + ATF_REQUIRE_EQ(nvlist_next(nvl, &type, &it), static_cast<const char *>(NULL)); } ATF_TEST_CASE_WITHOUT_HEAD(nvlist_clone__nested_nvlist); diff --git a/lib/libpmc/libpmc.c b/lib/libpmc/libpmc.c index 693e97711a21..17283d8f02d4 100644 --- a/lib/libpmc/libpmc.c +++ b/lib/libpmc/libpmc.c @@ -325,7 +325,7 @@ PMC_CLASS_TABLE_DESC(core2, IAP, core2, iap); PMC_CLASS_TABLE_DESC(corei7, IAP, corei7, iap); PMC_CLASS_TABLE_DESC(nehalem_ex, IAP, nehalem_ex, iap); PMC_CLASS_TABLE_DESC(haswell, IAP, haswell, iap); -PMC_CLASS_TABLE_DESC(haswell_xeon, IAP, haswell, iap); +PMC_CLASS_TABLE_DESC(haswell_xeon, IAP, haswell_xeon, iap); PMC_CLASS_TABLE_DESC(ivybridge, IAP, ivybridge, iap); PMC_CLASS_TABLE_DESC(ivybridge_xeon, IAP, ivybridge_xeon, iap); PMC_CLASS_TABLE_DESC(sandybridge, IAP, sandybridge, iap); diff --git a/lib/libpmc/pmc.3 b/lib/libpmc/pmc.3 index 8e6db485ae37..f51285e12586 100644 --- a/lib/libpmc/pmc.3 +++ b/lib/libpmc/pmc.3 @@ -527,6 +527,7 @@ API is .Xr pmc.core2 3 , .Xr pmc.haswell 3 , .Xr pmc.haswelluc 3 , +.Xr pmc.haswellxeon 3 , .Xr pmc.iaf 3 , .Xr pmc.ivybridge 3 , .Xr pmc.ivybridgexeon 3 , diff --git a/lib/libpmc/pmc.haswell.3 b/lib/libpmc/pmc.haswell.3 index e535d68824db..a85f7609c9df 100644 --- a/lib/libpmc/pmc.haswell.3 +++ b/lib/libpmc/pmc.haswell.3 @@ -529,73 +529,60 @@ instruction. .It Li ILD_STALL.IQ_FULL .Pq Event 87H , Umask 04H Stall cycles due to IQ is full. -.It Li BR_INST_EXEC.COND -.Pq Event 88H , Umask 01H -Qualify conditional near branch instructions -executed, but not necessarily retired. +.It Li BR_INST_EXEC.NONTAKEN_COND +.Pq Event 88H , Umask 41H +Count conditional near branch instructions that were executed (but not +necessarily retired) and not taken.
+.It Li BR_INST_EXEC.TAKEN_COND +.Pq Event 88H , Umask 81H +Count conditional near branch instructions that were executed (but not +necessarily retired) and taken. .It Li BR_INST_EXEC.DIRECT_JMP -.Pq Event 88H , Umask 02H -Qualify all unconditional near branch instructions -excluding calls and indirect branches. +.Pq Event 88H , Umask 82H +Count all unconditional near branch instructions excluding calls and +indirect branches. .It Li BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET -.Pq Event 88H , Umask 04H -Qualify executed indirect near branch instructions -that are not calls nor returns. +.Pq Event 88H , Umask 84H +Count executed indirect near branch instructions that are not calls nor +returns. .It Li BR_INST_EXEC.RETURN_NEAR -.Pq Event 88H , Umask 08H -Qualify indirect near branches that have a return -mnemonic. +.Pq Event 88H , Umask 88H +Count indirect near branches that have a return mnemonic. .It Li BR_INST_EXEC.DIRECT_NEAR_CALL -.Pq Event 88H , Umask 10H -Qualify unconditional near call branch instructions, -excluding non call branch, executed. +.Pq Event 88H , Umask 90H +Count unconditional near call branch instructions, excluding non call +branch, executed. .It Li BR_INST_EXEC.INDIRECT_NEAR_CALL -.Pq Event 88H , Umask 20H -Qualify indirect near calls, including both register and -memory indirect, executed. -.It Li BR_INST_EXEC.NONTAKEN -.Pq Event 88H , Umask 40H -Qualify non-taken near branches executed. -.It Li BR_INST_EXEC.TAKEN -.Pq Event 88H , Umask 80H -Qualify taken near branches executed. Must combine -with 01H,02H, 04H, 08H, 10H, 20H. +.Pq Event 88H , Umask A0H +Count indirect near calls, including both register and memory indirect, +executed. .It Li BR_INST_EXEC.ALL_BRANCHES .Pq Event 88H , Umask FFH -Counts all near executed branches (not necessarily -retired). -.It Li BR_MISP_EXEC.COND -.Pq Event 89H , Umask 01H -Qualify conditional near branch instructions -mispredicted. +Counts all near executed branches (not necessarily retired). +.It Li BR_MISP_EXEC.NONTAKEN_COND +.Pq Event 89H , Umask 41H +Count conditional near branch instructions mispredicted as nontaken. +.It Li BR_MISP_EXEC.TAKEN_COND +.Pq Event 89H , Umask 81H +Count conditional near branch instructions mispredicted as taken. .It Li BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET -.Pq Event 89H , Umask 04H -Qualify mispredicted indirect near branch -instructions that are not calls nor returns. +.Pq Event 89H , Umask 84H +Count mispredicted indirect near branch instructions that are not calls +nor returns. .It Li BR_MISP_EXEC.RETURN_NEAR -.Pq Event 89H , Umask 08H -Qualify mispredicted indirect near branches that -have a return mnemonic. +.Pq Event 89H , Umask 88H +Count mispredicted indirect near branches that have a return mnemonic. .It Li BR_MISP_EXEC.DIRECT_NEAR_CALL -.Pq Event 89H , Umask 10H -Qualify mispredicted unconditional near call branch -instructions, excluding non call branch, executed. +.Pq Event 89H , Umask 90H +Count mispredicted unconditional near call branch instructions, excluding +non call branch, executed. .It Li BR_MISP_EXEC.INDIRECT_NEAR_CALL -.Pq Event 89H , Umask 20H -Qualify mispredicted indirect near calls, including -both register and memory indirect, executed. -.It Li BR_MISP_EXEC.NONTAKEN -.Pq Event 89H , Umask 40H -Qualify mispredicted non-taken near branches -executed. -.It Li BR_MISP_EXEC.TAKEN -.Pq Event 89H , Umask 80H -Qualify mispredicted taken near branches executed. -Must combine with 01H,02H, 04H, 08H, 10H, 20H. 
+.Pq Event 89H , Umask A0H +Count mispredicted indirect near calls, including both register and memory +indirect, executed. .It Li BR_MISP_EXEC.ALL_BRANCHES .Pq Event 89H , Umask FFH -Counts all near executed branches (not necessarily -retired). +Counts all mispredicted near executed branches (not necessarily retired). .It Li IDQ_UOPS_NOT_DELIVERED.CORE .Pq Event 9CH , Umask 01H Count number of non-delivered uops to RAT per @@ -821,30 +808,24 @@ Count cases of saving new LBR records by hardware. Randomly sampled loads whose latency is above a user defined threshold. A small fraction of the overall loads are sampled due to randomization. -.It Li MEM_UOP_RETIRED.LOADS -.Pq Event D0H , Umask 01H -Qualify retired memory uops that are loads. Combine Supports PEBS and -with umask 10H, 20H, 40H, 80H. -.It Li MEM_UOP_RETIRED.STORES -.Pq Event D0H , Umask 02H -Qualify retired memory uops that are stores. -Combine with umask 10H, 20H, 40H, 80H. -.It Li MEM_UOP_RETIRED.STLB_MISS -.Pq Event D0H , Umask 10H -Qualify retired memory uops with STLB miss. Must -combine with umask 01H, 02H, to produce counts. -.It Li MEM_UOP_RETIRED.LOCK -.Pq Event D0H , Umask 20H -Qualify retired memory uops with lock. Must combine Supports PEBS and -with umask 01H, 02H, to produce counts. -.It Li MEM_UOP_RETIRED.SPLIT -.Pq Event D0H , Umask 40H -Qualify retired memory uops with line split. Must -combine with umask 01H, 02H, to produce counts. -.It Li MEM_UOP_RETIRED.ALL -.Pq Event D0H , Umask 80H -Qualify any retired memory uops. Must combine with Supports PEBS and -umask 01H, 02H, to produce counts. +.It Li MEM_UOPS_RETIRED.STLB_MISS_LOADS +.Pq Event D0H , Umask 11H +Count retired load uops that missed the STLB. +.It Li MEM_UOPS_RETIRED.STLB_MISS_STORES +.Pq Event D0H , Umask 12H +Count retired store uops that missed the STLB. +.It Li MEM_UOPS_RETIRED.SPLIT_LOADS +.Pq Event D0H , Umask 41H +Count retired load uops that were split across a cache line. +.It Li MEM_UOPS_RETIRED.SPLIT_STORES +.Pq Event D0H , Umask 42H +Count retired store uops that were split across a cache line. +.It Li MEM_UOPS_RETIRED.ALL_LOADS +.Pq Event D0H , Umask 81H +Count all retired load uops. +.It Li MEM_UOPS_RETIRED.ALL_STORES +.Pq Event D0H , Umask 82H +Count all retired store uops. .It Li MEM_LOAD_UOPS_RETIRED.L1_HIT .Pq Event D1H , Umask 01H Retired load uops with L1 cache hits as data sources. diff --git a/lib/libpmc/pmc.haswellxeon.3 b/lib/libpmc/pmc.haswellxeon.3 new file mode 100644 index 000000000000..8eb5b7e8018f --- /dev/null +++ b/lib/libpmc/pmc.haswellxeon.3 @@ -0,0 +1,956 @@ +.\" +.\" Copyright (c) 2013 Hiren Panchasara +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.\" $FreeBSD$ +.\" +.Dd 21 November, 2014 +.Dt PMC.HASWELLXEON 3 +.Os +.Sh NAME +.Nm pmc.haswellxeon +.Nd measurement events for +.Tn Intel +.Tn Haswell Xeon +family CPUs +.Sh LIBRARY +.Lb libpmc +.Sh SYNOPSIS +.In pmc.h +.Sh DESCRIPTION +.Tn Intel +.Tn "Haswell" +CPUs contain PMCs conforming to version 2 of the +.Tn Intel +performance measurement architecture. +These CPUs may contain up to two classes of PMCs: +.Bl -tag -width "Li PMC_CLASS_IAP" +.It Li PMC_CLASS_IAF +Fixed-function counters that count only one hardware event per counter. +.It Li PMC_CLASS_IAP +Programmable counters that may be configured to count one of a defined +set of hardware events. +.El +.Pp +The number of PMCs available in each class and their widths need to be +determined at run time by calling +.Xr pmc_cpuinfo 3 . +.Pp +Intel Haswell Xeon PMCs are documented in +.Rs +.%B "Intel(R) 64 and IA-32 Architectures Software Developer's Manual" +.%T "Combined Volumes: 1, 2A, 2B, 2C, 3A, 3B and 3C" +.%N "Order Number: 325462-052US" +.%D September 2014 +.%Q "Intel Corporation" +.Re +.Ss HASWELL FIXED FUNCTION PMCS +These PMCs and their supported events are documented in +.Xr pmc.iaf 3 . +.Ss HASWELL PROGRAMMABLE PMCS +The programmable PMCs support the following capabilities: +.Bl -column "PMC_CAP_INTERRUPT" "Support" +.It Em Capability Ta Em Support +.It PMC_CAP_CASCADE Ta \&No +.It PMC_CAP_EDGE Ta Yes +.It PMC_CAP_INTERRUPT Ta Yes +.It PMC_CAP_INVERT Ta Yes +.It PMC_CAP_READ Ta Yes +.It PMC_CAP_PRECISE Ta \&No +.It PMC_CAP_SYSTEM Ta Yes +.It PMC_CAP_TAGGING Ta \&No +.It PMC_CAP_THRESHOLD Ta Yes +.It PMC_CAP_USER Ta Yes +.It PMC_CAP_WRITE Ta Yes +.El +.Ss Event Qualifiers +Event specifiers for these PMCs support the following common +qualifiers: +.Bl -tag -width indent +.It Li rsp= Ns Ar value +Configure the Off-core Response bits. +.Bl -tag -width indent +.It Li DMND_DATA_RD +Counts the number of demand and DCU prefetch data reads of full +and partial cachelines as well as demand data page table entry +cacheline reads. Does not count L2 data read prefetches or +instruction fetches. +.It Li REQ_DMND_RFO +Counts the number of demand and DCU prefetch reads for ownership (RFO) +requests generated by a write to data cacheline. Does not count L2 RFO +prefetches. +.It Li REQ_DMND_IFETCH +Counts the number of demand and DCU prefetch instruction cacheline reads. +Does not count L2 code read prefetches. +.It Li REQ_WB +Counts the number of writeback (modified to exclusive) transactions. +.It Li REQ_PF_DATA_RD +Counts the number of data cacheline reads generated by L2 prefetchers. +.It Li REQ_PF_RFO +Counts the number of RFO requests generated by L2 prefetchers. +.It Li REQ_PF_IFETCH +Counts the number of code reads generated by L2 prefetchers. +.It Li REQ_PF_LLC_DATA_RD +L2 prefetcher to L3 for loads. +.It Li REQ_PF_LLC_RFO +RFO requests generated by L2 prefetcher +.It Li REQ_PF_LLC_IFETCH +L2 prefetcher to L3 for instruction fetches. +.It Li REQ_BUS_LOCKS +Bus lock and split lock requests. 
+.It Li REQ_STRM_ST +Streaming store requests. +.It Li REQ_OTHER +Any other request that crosses IDI, including I/O. +.It Li RES_ANY +Catch all value for any response types. +.It Li RES_SUPPLIER_NO_SUPP +No Supplier Information available. +.It Li RES_SUPPLIER_LLC_HITM +M-state initial lookup stat in L3. +.It Li RES_SUPPLIER_LLC_HITE +E-state. +.It Li RES_SUPPLIER_LLC_HITS +S-state. +.It Li RES_SUPPLIER_LLC_HITF +F-state. +.It Li RES_SUPPLIER_LOCAL +Local DRAM Controller. +.It Li RES_SNOOP_SNP_NONE +No details on snoop-related information. +.It Li RES_SNOOP_SNP_NO_NEEDED +No snoop was needed to satisfy the request. +.It Li RES_SNOOP_SNP_MISS +A snoop was needed and it missed all snooped caches: +-For LLC Hit, ReslHitl was returned by all cores +-For LLC Miss, Rspl was returned by all sockets and data was returned from +DRAM. +.It Li RES_SNOOP_HIT_NO_FWD +A snoop was needed and it hits in at least one snooped cache. Hit denotes a +cache-line was valid before snoop effect. This includes: +-Snoop Hit w/ Invalidation (LLC Hit, RFO) +-Snoop Hit, Left Shared (LLC Hit/Miss, IFetch/Data_RD) +-Snoop Hit w/ Invalidation and No Forward (LLC Miss, RFO Hit S) +In the LLC Miss case, data is returned from DRAM. +.It Li RES_SNOOP_HIT_FWD +A snoop was needed and data was forwarded from a remote socket. +This includes: +-Snoop Forward Clean, Left Shared (LLC Hit/Miss, IFetch/Data_RD/RFT). +.It Li RES_SNOOP_HITM +A snoop was needed and it HitM-ed in local or remote cache. HitM denotes a +cache-line was in modified state before effect as a results of snoop. This +includes: +-Snoop HitM w/ WB (LLC miss, IFetch/Data_RD) +-Snoop Forward Modified w/ Invalidation (LLC Hit/Miss, RFO) +-Snoop MtoS (LLC Hit, IFetch/Data_RD). +.It Li RES_NON_DRAM +Target was non-DRAM system address. This includes MMIO transactions. +.El +.It Li cmask= Ns Ar value +Configure the PMC to increment only if the number of configured +events measured in a cycle is greater than or equal to +.Ar value . +.It Li edge +Configure the PMC to count the number of de-asserted to asserted +transitions of the conditions expressed by the other qualifiers. +If specified, the counter will increment only once whenever a +condition becomes true, irrespective of the number of clocks during +which the condition remains true. +.It Li inv +Invert the sense of comparison when the +.Dq Li cmask +qualifier is present, making the counter increment when the number of +events per cycle is less than the value specified by the +.Dq Li cmask +qualifier. +.It Li os +Configure the PMC to count events happening at processor privilege +level 0. +.It Li usr +Configure the PMC to count events occurring at privilege levels 1, 2 +or 3. +.El +.Pp +If neither of the +.Dq Li os +or +.Dq Li usr +qualifiers are specified, the default is to enable both. +.Ss Event Specifiers (Programmable PMCs) +Haswell programmable PMCs support the following events: +.Bl -tag -width indent +.It Li LD_BLOCKS.STORE_FORWARD +.Pq Event 03H , Umask 02H +Loads blocked by overlapping with store buffer that +cannot be forwarded. +.It Li MISALIGN_MEM_REF.LOADS +.Pq Event 05H , Umask 01H +Speculative cache-line split load uops dispatched to +L1D. +.It Li MISALIGN_MEM_REF.STORES +.Pq Event 05H , Umask 02H +Speculative cache-line split Store-address uops +dispatched to L1D. +.It Li LD_BLOCKS_PARTIAL.ADDRESS_ALIAS +.Pq Event 07H , Umask 01H +False dependencies in MOB due to partial compare +on address. 
+.It Li DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK +.Pq Event 08H , Umask 01H +Misses in all TLB levels that cause a page walk of any +page size. +.It Li DTLB_LOAD_MISSES.WALK_COMPLETED_4K +.Pq Event 08H , Umask 02H +Completed page walks due to demand load misses +that caused 4K page walks in any TLB levels. +.It Li DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4K +.Pq Event 08H , Umask 02H +Completed page walks due to demand load misses +that caused 2M/4M page walks in any TLB levels. +.It Li DTLB_LOAD_MISSES.WALK_COMPLETED +.Pq Event 08H , Umask 0EH +Completed page walks in any TLB of any page size +due to demand load misses +.It Li DTLB_LOAD_MISSES.WALK_DURATION +.Pq Event 08H , Umask 10H +Cycle PMH is busy with a walk. +.It Li DTLB_LOAD_MISSES.STLB_HIT_4K +.Pq Event 08H , Umask 20H +Load misses that missed DTLB but hit STLB (4K). +.It Li DTLB_LOAD_MISSES.STLB_HIT_2M +.Pq Event 08H , Umask 40H +Load misses that missed DTLB but hit STLB (2M). +.It Li DTLB_LOAD_MISSES.STLB_HIT +.Pq Event 08H , Umask 60H +Number of cache load STLB hits. No page walk. +.It Li DTLB_LOAD_MISSES.PDE_CACHE_MISS +.Pq Event 08H , Umask 80H +DTLB demand load misses with low part of linear-to- +physical address translation missed +.It Li INT_MISC.RECOVERY_CYCLES +.Pq Event 0DH , Umask 03H +Cycles waiting to recover after Machine Clears +except JEClear. Set Cmask= 1. +.It Li UOPS_ISSUED.ANY +.Pq Event 0EH , Umask 01H +ncrements each cycle the # of Uops issued by the +RAT to RS. +Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles +of this core. +.It Li UOPS_ISSUED.FLAGS_MERGE +.Pq Event 0EH , Umask 10H +Number of flags-merge uops allocated. Such uops +adds delay. +.It Li UOPS_ISSUED.SLOW_LEA +.Pq Event 0EH , Umask 20H +Number of slow LEA or similar uops allocated. Such +uop has 3 sources (e.g. 2 sources + immediate) +regardless if as a result of LEA instruction or not. +.It Li UOPS_ISSUED.SiNGLE_MUL +.Pq Event 0EH , Umask 40H +Number of multiply packed/scalar single precision +uops allocated. +.It Li L2_RQSTS.DEMAND_DATA_RD_MISS +.Pq Event 24H , Umask 21H +Demand Data Read requests that missed L2, no +rejects. +.It Li L2_RQSTS.DEMAND_DATA_RD_HIT +.Pq Event 24H , Umask 41H +Demand Data Read requests that hit L2 cache. +.It Li L2_RQSTS.ALL_DEMAND_DATA_RD +.Pq Event 24H , Umask E1H +Counts any demand and L1 HW prefetch data load +requests to L2. +.It Li L2_RQSTS.RFO_HIT +.Pq Event 24H , Umask 42H +Counts the number of store RFO requests that hit +the L2 cache. +.It Li L2_RQSTS.RFO_MISS +.Pq Event 24H , Umask 22H +Counts the number of store RFO requests that miss +the L2 cache. +.It Li L2_RQSTS.ALL_RFO +.Pq Event 24H , Umask E2H +Counts all L2 store RFO requests. +.It Li L2_RQSTS.CODE_RD_HIT +.Pq Event 24H , Umask 44H +Number of instruction fetches that hit the L2 cache. +.It Li L2_RQSTS.CODE_RD_MISS +.Pq Event 24H , Umask 24H +Number of instruction fetches that missed the L2 +cache. +.It Li L2_RQSTS.ALL_DEMAND_MISS +.Pq Event 24H , Umask 27H +Demand requests that miss L2 cache. +.It Li L2_RQSTS.ALL_DEMAND_REFERENCES +.Pq Event 24H , Umask E7H +Demand requests to L2 cache. +.It Li L2_RQSTS.ALL_CODE_RD +.Pq Event 24H , Umask E4H +Counts all L2 code requests. +.It Li L2_RQSTS.L2_PF_HIT +.Pq Event 24H , Umask 50H +Counts all L2 HW prefetcher requests that hit L2. +.It Li L2_RQSTS.L2_PF_MISS +.Pq Event 24H , Umask 30H +Counts all L2 HW prefetcher requests that missed +L2. +.It Li L2_RQSTS.ALL_PF +.Pq Event 24H , Umask F8H +Counts all L2 HW prefetcher requests. +.It Li L2_RQSTS.MISS +.Pq Event 24H , Umask 3FH +All requests that missed L2. 
+.It Li L2_RQSTS.REFERENCES +.Pq Event 24H , Umask FFH +All requests to L2 cache. +.It Li L2_DEMAND_RQSTS.WB_HIT +.Pq Event 27H , Umask 50H +Not rejected writebacks that hit L2 cache +.It Li LONGEST_LAT_CACHE.REFERENCE +.Pq Event 2EH , Umask 4FH +This event counts requests originating from the core +that reference a cache line in the last level cache. +.It Li LONGEST_LAT_CACHE.MISS +.Pq Event 2EH , Umask 41H +This event counts each cache miss condition for +references to the last level cache. +.It Li CPU_CLK_UNHALTED.THREAD_P +.Pq Event 3CH , Umask 00H +Counts the number of thread cycles while the thread +is not in a halt state. The thread enters the halt state +when it is running the HLT instruction. The core +frequency may change from time to time due to +power or thermal throttling. +.It Li CPU_CLK_THREAD_UNHALTED.REF_XCLK +.Pq Event 3CH , Umask 01H +Increments at the frequency of XCLK (100 MHz) +when not halted. +.It Li L1D_PEND_MISS.PENDING +.Pq Event 48H , Umask 01H +Increments the number of outstanding L1D misses +every cycle. Set Cmaks = 1 and Edge =1 to count +occurrences. +.It Li DTLB_STORE_MISSES.MISS_CAUSES_A_WALK +.Pq Event 49H , Umask 01H +Miss in all TLB levels causes an page walk of any +page size (4K/2M/4M/1G). +.It Li DTLB_STORE_MISSES.WALK_COMPLETED_4K +.Pq Event 49H , Umask 02H +Completed page walks due to store misses in one or +more TLB levels of 4K page structure. +.It Li DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M +.Pq Event 49H , Umask 04H +Completed page walks due to store misses in one or +more TLB levels of 2M/4M page structure. +.It Li DTLB_STORE_MISSES.WALK_COMPLETED +.Pq Event 49H , Umask 0EH +Completed page walks due to store miss in any TLB +levels of any page size (4K/2M/4M/1G). +.It Li DTLB_STORE_MISSES.WALK_DURATION +.Pq Event 49H , Umask 10H +Cycles PMH is busy with this walk. +.It Li DTLB_STORE_MISSES.STLB_HIT_4K +.Pq Event 49H , Umask 20H +Store misses that missed DTLB but hit STLB (4K). +.It Li DTLB_STORE_MISSES.STLB_HIT_2M +.Pq Event 49H , Umask 40H +Store misses that missed DTLB but hit STLB (2M). +.It Li DTLB_STORE_MISSES.STLB_HIT +.Pq Event 49H , Umask 60H +Store operations that miss the first TLB level but hit +the second and do not cause page walks. +.It Li DTLB_STORE_MISSES.PDE_CACHE_MISS +.Pq Event 49H , Umask 80H +DTLB store misses with low part of linear-to-physical +address translation missed. +.It Li LOAD_HIT_PRE.SW_PF +.Pq Event 4CH , Umask 01H +Non-SW-prefetch load dispatches that hit fill buffer +allocated for S/W prefetch. +.It Li LOAD_HIT_PRE.HW_PF +.Pq Event 4CH , Umask 02H +Non-SW-prefetch load dispatches that hit fill buffer +allocated for H/W prefetch. +.It Li L1D.REPLACEMENT +.Pq Event 51H , Umask 01H +Counts the number of lines brought into the L1 data +cache. +.It Li MOVE_ELIMINATION.INT_NOT_ELIMINATED +.Pq Event 58H , Umask 04H +Number of integer Move Elimination candidate uops +that were not eliminated. +.It Li MOVE_ELIMINATION.SMID_NOT_ELIMINATED +.Pq Event 58H , Umask 08H +Number of SIMD Move Elimination candidate uops +that were not eliminated. +.It Li MOVE_ELIMINATION.INT_ELIMINATED +.Pq Event 58H , Umask 01H +Unhalted core cycles when the thread is in ring 0. +.It Li MOVE_ELIMINATION.SMID_ELIMINATED +.Pq Event 58H , Umask 02H +Number of SIMD Move Elimination candidate uops +that were eliminated. +.It Li CPL_CYCLES.RING0 +.Pq Event 5CH , Umask 02H +Unhalted core cycles when the thread is in ring 0. +.It Li CPL_CYCLES.RING123 +.Pq Event 5CH , Umask 01H +Unhalted core cycles when the thread is not in ring 0. 
+.It Li RS_EVENTS.EMPTY_CYCLES +.Pq Event 5EH , Umask 01H +Cycles the RS is empty for the thread. +.It Li OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD +.Pq Event 60H , Umask 01H +Offcore outstanding Demand Data Read transactions +in SQ to uncore. Set Cmask=1 to count cycles. +.It Li OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CORE_RD +.Pq Event 60H , Umask 02H +Offcore outstanding Demand code Read transactions +in SQ to uncore. Set Cmask=1 to count cycles. +.It Li OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO +.Pq Event 60H , Umask 04H +Offcore outstanding RFO store transactions in SQ to +uncore. Set Cmask=1 to count cycles. +.It Li OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD +.Pq Event 60H , Umask 08H +Offcore outstanding cacheable data read +transactions in SQ to uncore. Set Cmask=1 to count +cycles. +.It Li LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION +.Pq Event 63H , Umask 01H +Cycles in which the L1D and L2 are locked, due to a +UC lock or split lock. +.It Li LOCK_CYCLES.CACHE_LOCK_DURATION +.Pq Event 63H , Umask 02H +Cycles in which the L1D is locked. +.It Li IDQ.EMPTY +.Pq Event 79H , Umask 02H +Counts cycles the IDQ is empty. +.It Li IDQ.MITE_UOPS +.Pq Event 79H , Umask 04H +Increment each cycle # of uops delivered to IDQ from +MITE path. +Set Cmask = 1 to count cycles. +.It Li IDQ.DSB_UOPS +.Pq Event 79H , Umask 08H +Increment each cycle. # of uops delivered to IDQ +from DSB path. +Set Cmask = 1 to count cycles. +.It Li IDQ.MS_DSB_UOPS +.Pq Event 79H , Umask 10H +Increment each cycle # of uops delivered to IDQ +when MS_busy by DSB. Set Cmask = 1 to count +cycles. Add Edge=1 to count # of delivery. +.It Li IDQ.MS_MITE_UOPS +.Pq Event 79H , Umask 20H +ncrement each cycle # of uops delivered to IDQ +when MS_busy by MITE. Set Cmask = 1 to count +cycles. +.It Li IDQ.MS_UOPS +.Pq Event 79H , Umask 30H +Increment each cycle # of uops delivered to IDQ from +MS by either DSB or MITE. Set Cmask = 1 to count +cycles. +.It Li IDQ.ALL_DSB_CYCLES_ANY_UOPS +.Pq Event 79H , Umask 18H +Counts cycles DSB is delivered at least one uops. Set +Cmask = 1. +.It Li IDQ.ALL_DSB_CYCLES_4_UOPS +.Pq Event 79H , Umask 18H +Counts cycles DSB is delivered four uops. Set Cmask +=4. +.It Li IDQ.ALL_MITE_CYCLES_ANY_UOPS +.Pq Event 79H , Umask 24H +Counts cycles MITE is delivered at least one uops. Set +Cmask = 1. +.It Li IDQ.ALL_MITE_CYCLES_4_UOPS +.Pq Event 79H , Umask 24H +Counts cycles MITE is delivered four uops. Set Cmask +=4. +.It Li IDQ.MITE_ALL_UOPS +.Pq Event 79H , Umask 3CH +# of uops delivered to IDQ from any path. +.It Li ICACHE.MISSES +.Pq Event 80H , Umask 02H +Number of Instruction Cache, Streaming Buffer and +Victim Cache Misses. Includes UC accesses. +.It Li ITLB_MISSES.MISS_CAUSES_A_WALK +.Pq Event 85H , Umask 01H +Misses in ITLB that causes a page walk of any page +size. +.It Li ITLB_MISSES.WALK_COMPLETED_4K +.Pq Event 85H , Umask 02H +Completed page walks due to misses in ITLB 4K page +entries. +.It Li TLB_MISSES.WALK_COMPLETED_2M_4M +.Pq Event 85H , Umask 04H +Completed page walks due to misses in ITLB 2M/4M +page entries. +.It Li ITLB_MISSES.WALK_COMPLETED +.Pq Event 85H , Umask 0EH +Completed page walks in ITLB of any page size. +.It Li ITLB_MISSES.WALK_DURATION +.Pq Event 85H , Umask 10H +Cycle PMH is busy with a walk. +.It Li ITLB_MISSES.STLB_HIT_4K +.Pq Event 85H , Umask 20H +ITLB misses that hit STLB (4K). +.It Li ITLB_MISSES.STLB_HIT_2M +.Pq Event 85H , Umask 40H +ITLB misses that hit STLB (2K). +.It Li ITLB_MISSES.STLB_HIT +.Pq Event 85H , Umask 60H +TLB misses that hit STLB. No page walk. 
+.It Li ILD_STALL.LCP +.Pq Event 87H , Umask 01H +Stalls caused by changing prefix length of the +instruction. +.It Li ILD_STALL.IQ_FULL +.Pq Event 87H , Umask 04H +Stall cycles due to IQ is full. +.It Li BR_INST_EXEC.NONTAKEN_COND +.Pq Event 88H , Umask 41H +Count conditional near branch instructions that were executed (but not +necessarily retired) and not taken. +.It Li BR_INST_EXEC.TAKEN_COND +.Pq Event 88H , Umask 81H +Count conditional near branch instructions that were executed (but not +necessarily retired) and taken. +.It Li BR_INST_EXEC.DIRECT_JMP +.Pq Event 88H , Umask 82H +Count all unconditional near branch instructions excluding calls and +indirect branches. +.It Li BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET +.Pq Event 88H , Umask 84H +Count executed indirect near branch instructions that are not calls nor +returns. +.It Li BR_INST_EXEC.RETURN_NEAR +.Pq Event 88H , Umask 88H +Count indirect near branches that have a return mnemonic. +.It Li BR_INST_EXEC.DIRECT_NEAR_CALL +.Pq Event 88H , Umask 90H +Count unconditional near call branch instructions, excluding non call +branch, executed. +.It Li BR_INST_EXEC.INDIRECT_NEAR_CALL +.Pq Event 88H , Umask A0H +Count indirect near calls, including both register and memory indirect, +executed. +.It Li BR_INST_EXEC.ALL_BRANCHES +.Pq Event 88H , Umask FFH +Counts all near executed branches (not necessarily retired). +.It Li BR_MISP_EXEC.NONTAKEN_COND +.Pq Event 89H , Umask 41H +Count conditional near branch instructions mispredicted as nontaken. +.It Li BR_MISP_EXEC.TAKEN_COND +.Pq Event 89H , Umask 81H +Count conditional near branch instructions mispredicted as taken. +.It Li BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET +.Pq Event 89H , Umask 84H +Count mispredicted indirect near branch instructions that are not calls +nor returns. +.It Li BR_MISP_EXEC.RETURN_NEAR +.Pq Event 89H , Umask 88H +Count mispredicted indirect near branches that have a return mnemonic. +.It Li BR_MISP_EXEC.DIRECT_NEAR_CALL +.Pq Event 89H , Umask 90H +Count mispredicted unconditional near call branch instructions, excluding +non call branch, executed. +.It Li BR_MISP_EXEC.INDIRECT_NEAR_CALL +.Pq Event 89H , Umask A0H +Count mispredicted indirect near calls, including both register and memory +indirect, executed. +.It Li BR_MISP_EXEC.ALL_BRANCHES +.Pq Event 89H , Umask FFH +Counts all mispredicted near executed branches (not necessarily retired). +.It Li IDQ_UOPS_NOT_DELIVERED.CORE +.Pq Event 9CH , Umask 01H +Count number of non-delivered uops to RAT per +thread. +.It Li UOPS_EXECUTED_PORT.PORT_0 +.Pq Event A1H , Umask 01H +Cycles which a Uop is dispatched on port 0 in this +thread. +.It Li UOPS_EXECUTED_PORT.PORT_1 +.Pq Event A1H , Umask 02H +Cycles which a Uop is dispatched on port 1 in this +thread. +.It Li UOPS_EXECUTED_PORT.PORT_2 +.Pq Event A1H , Umask 04H +Cycles which a Uop is dispatched on port 2 in this +thread. +.It Li UOPS_EXECUTED_PORT.PORT_3 +.Pq Event A1H , Umask 08H +Cycles which a Uop is dispatched on port 3 in this +thread. +.It Li UOPS_EXECUTED_PORT.PORT_4 +.Pq Event A1H , Umask 10H +Cycles which a Uop is dispatched on port 4 in this +thread. +.It Li UOPS_EXECUTED_PORT.PORT_5 +.Pq Event A1H , Umask 20H +Cycles which a Uop is dispatched on port 5 in this +thread. +.It Li UOPS_EXECUTED_PORT.PORT_6 +.Pq Event A1H , Umask 40H +Cycles which a Uop is dispatched on port 6 in this +thread. +.It Li UOPS_EXECUTED_PORT.PORT_7 +.Pq Event A1H , Umask 80H +Cycles which a Uop is dispatched on port 7 in this +thread. 
+.It Li RESOURCE_STALLS.ANY +.Pq Event A2H , Umask 01H +Cycles Allocation is stalled due to Resource Related +reason. +.It Li RESOURCE_STALLS.RS +.Pq Event A2H , Umask 04H +Cycles stalled due to no eligible RS entry available. +.It Li RESOURCE_STALLS.SB +.Pq Event A2H , Umask 08H +Cycles stalled due to no store buffers available (not +including draining form sync). +.It Li RESOURCE_STALLS.ROB +.Pq Event A2H , Umask 10H +Cycles stalled due to re-order buffer full. +.It Li CYCLE_ACTIVITY.CYCLES_L2_PENDING +.Pq Event A3H , Umask 01H +Cycles with pending L2 miss loads. Set Cmask=2 to +count cycle. +.It Li CYCLE_ACTIVITY.CYCLES_LDM_PENDING +.Pq Event A3H , Umask 02H +Cycles with pending memory loads. Set Cmask=2 to +count cycle. +.It Li CYCLE_ACTIVITY.STALLS_L2_PENDING +.Pq Event A3H , Umask 05H +Number of loads missed L2. +.It Li CYCLE_ACTIVITY.CYCLES_L1D_PENDING +.Pq Event A3H , Umask 08H +Cycles with pending L1 cache miss loads. Set +Cmask=8 to count cycle. +.It Li ITLB.ITLB_FLUSH +.Pq Event AEH , Umask 01H +Counts the number of ITLB flushes, includes +4k/2M/4M pages. +.It Li OFFCORE_REQUESTS.DEMAND_DATA_RD +.Pq Event B0H , Umask 01H +Demand data read requests sent to uncore. +.It Li OFFCORE_REQUESTS.DEMAND_CODE_RD +.Pq Event B0H , Umask 02H +Demand code read requests sent to uncore. +.It Li OFFCORE_REQUESTS.DEMAND_RFO +.Pq Event B0H , Umask 04H +Demand RFO read requests sent to uncore, including +regular RFOs, locks, ItoM. +.It Li OFFCORE_REQUESTS.ALL_DATA_RD +.Pq Event B0H , Umask 08H +Data read requests sent to uncore (demand and +prefetch). +.It Li UOPS_EXECUTED.CORE +.Pq Event B1H , Umask 02H +Counts total number of uops to be executed per-core +each cycle. +.It Li OFF_CORE_RESPONSE_0 +.Pq Event B7H , Umask 01H +Requires MSR 01A6H +.It Li OFF_CORE_RESPONSE_1 +.Pq Event BBH , Umask 01H +Requires MSR 01A7H +.It Li PAGE_WALKER_LOADS.DTLB_L1 +.Pq Event BCH , Umask 11H +Number of DTLB page walker loads that hit in the +L1+FB. +.It Li PAGE_WALKER_LOADS.ITLB_L1 +.Pq Event BCH , Umask 21H +Number of ITLB page walker loads that hit in the +L1+FB. +.It Li PAGE_WALKER_LOADS.DTLB_L2 +.Pq Event BCH , Umask 12H +Number of DTLB page walker loads that hit in the L2. +.It Li PAGE_WALKER_LOADS.ITLB_L2 +.Pq Event BCH , Umask 22H +Number of ITLB page walker loads that hit in the L2. +.It Li PAGE_WALKER_LOADS.DTLB_L3 +.Pq Event BCH , Umask 14H +Number of DTLB page walker loads that hit in the L3. +.It Li PAGE_WALKER_LOADS.ITLB_L3 +.Pq Event BCH , Umask 24H +Number of ITLB page walker loads that hit in the L3. +.It Li PAGE_WALKER_LOADS.DTLB_MEMORY +.Pq Event BCH , Umask 18H +Number of DTLB page walker loads from memory. +.It Li PAGE_WALKER_LOADS.ITLB_MEMORY +.Pq Event BCH , Umask 28H +Number of ITLB page walker loads from memory. +.It Li TLB_FLUSH.DTLB_THREAD +.Pq Event BDH , Umask 01H +DTLB flush attempts of the thread-specific entries. +.It Li TLB_FLUSH.STLB_ANY +.Pq Event BDH , Umask 20H +Count number of STLB flush attempts. +.It Li INST_RETIRED.ANY_P +.Pq Event C0H , Umask 00H +Number of instructions at retirement. +.It Li INST_RETIRED.ALL +.Pq Event C0H , Umask 01H +Precise instruction retired event with HW to reduce +effect of PEBS shadow in IP distribution. +.It Li OTHER_ASSISTS.AVX_TO_SSE +.Pq Event C1H , Umask 08H +Number of transitions from AVX-256 to legacy SSE +when penalty applicable. +.It Li OTHER_ASSISTS.SSE_TO_AVX +.Pq Event C1H , Umask 10H +Number of transitions from SSE to AVX-256 when +penalty applicable. 
+.It Li OTHER_ASSISTS.ANY_WB_ASSIST +.Pq Event C1H , Umask 40H +Number of microcode assists invoked by HW upon +uop writeback. +.It Li UOPS_RETIRED.ALL +.Pq Event C2H , Umask 01H +Counts the number of micro-ops retired, Use +cmask=1 and invert to count active cycles or stalled +cycles. +.It Li UOPS_RETIRED.RETIRE_SLOTS +.Pq Event C2H , Umask 02H +Counts the number of retirement slots used each +cycle. +.It Li MACHINE_CLEARS.MEMORY_ORDERING +.Pq Event C3H , Umask 02H +Counts the number of machine clears due to memory +order conflicts. +.It Li MACHINE_CLEARS.SMC +.Pq Event C3H , Umask 04H +Number of self-modifying-code machine clears +detected. +.It Li MACHINE_CLEARS.MASKMOV +.Pq Event C3H , Umask 20H +Counts the number of executed AVX masked load +operations that refer to an illegal address range with +the mask bits set to 0. +.It Li BR_INST_RETIRED.ALL_BRANCHES +.Pq Event C4H , Umask 00H +Branch instructions at retirement. +.It Li BR_INST_RETIRED.CONDITIONAL +.Pq Event C4H , Umask 01H +Counts the number of conditional branch instructions Supports PEBS +retired. +.It Li BR_INST_RETIRED.NEAR_CALL +.Pq Event C4H , Umask 02H +Direct and indirect near call instructions retired. +.It Li BR_INST_RETIRED.ALL_BRANCHES +.Pq Event C4H , Umask 04H +Counts the number of branch instructions retired. +.It Li BR_INST_RETIRED.NEAR_RETURN +.Pq Event C4H , Umask 08H +Counts the number of near return instructions +retired. +.It Li BR_INST_RETIRED.NOT_TAKEN +.Pq Event C4H , Umask 10H +Counts the number of not taken branch instructions +retired. + It Li BR_INST_RETIRED.NEAR_TAKEN +.Pq Event C4H , Umask 20H +Number of near taken branches retired. +.It Li BR_INST_RETIRED.FAR_BRANCH +.Pq Event C4H , Umask 40H +Number of far branches retired. +.It Li BR_MISP_RETIRED.ALL_BRANCHES +.Pq Event C5H , Umask 00H +Mispredicted branch instructions at retirement +.It Li BR_MISP_RETIRED.CONDITIONAL +.Pq Event C5H , Umask 01H +Mispredicted conditional branch instructions retired. +.It Li BR_MISP_RETIRED.CONDITIONAL +.Pq Event C5H , Umask 04H +Mispredicted macro branch instructions retired. +.It Li FP_ASSIST.X87_OUTPUT +.Pq Event CAH , Umask 02H +Number of X87 FP assists due to Output values. +.It Li FP_ASSIST.X87_INPUT +.Pq Event CAH , Umask 04H +Number of X87 FP assists due to input values. +.It Li FP_ASSIST.SIMD_OUTPUT +.Pq Event CAH , Umask 08H +Number of SIMD FP assists due to Output values. +.It Li FP_ASSIST.SIMD_INPUT +.Pq Event CAH , Umask 10H +Number of SIMD FP assists due to input values. +.It Li FP_ASSIST.ANY +.Pq Event CAH , Umask 1EH +Cycles with any input/output SSE* or FP assists. +.It Li ROB_MISC_EVENTS.LBR_INSERTS +.Pq Event CCH , Umask 20H +Count cases of saving new LBR records by hardware. +.It Li MEM_TRANS_RETIRED.LOAD_LATENCY +.Pq Event CDH , Umask 01H +Randomly sampled loads whose latency is above a +user defined threshold. A small fraction of the overall +loads are sampled due to randomization. +.It Li MEM_UOPS_RETIRED.STLB_MISS_LOADS +.Pq Event D0H , Umask 11H +Count retired load uops that missed the STLB. +.It Li MEM_UOPS_RETIRED.STLB_MISS_STORES +.Pq Event D0H , Umask 12H +Count retired store uops that missed the STLB. +.It Li MEM_UOPS_RETIRED.SPLIT_LOADS +.Pq Event D0H , Umask 41H +Count retired load uops that were split across a cache line. +.It Li MEM_UOPS_RETIRED.SPLIT_STORES +.Pq Event D0H , Umask 42H +Count retired store uops that were split across a cache line. +.It Li MEM_UOPS_RETIRED.ALL_LOADS +.Pq Event D0H , Umask 81H +Count all retired load uops. 
+.It Li MEM_UOPS_RETIRED.ALL_STORES +.Pq Event D0H , Umask 82H +Count all retired store uops. +.It Li MEM_LOAD_UOPS_RETIRED.L1_HIT +.Pq Event D1H , Umask 01H +Retired load uops with L1 cache hits as data sources. +.It Li MEM_LOAD_UOPS_RETIRED.L2_HIT +.Pq Event D1H , Umask 02H +Retired load uops with L2 cache hits as data sources. +.It Li MEM_LOAD_UOPS_RETIRED.LLC_HIT +.Pq Event D1H , Umask 04H +Retired load uops with LLC cache hits as data +sources. +.It Li MEM_LOAD_UOPS_RETIRED.L2_MISS +.Pq Event D1H , Umask 10H +Retired load uops missed L2. Unknown data source +excluded. +.It Li MEM_LOAD_UOPS_RETIRED.HIT_LFB +.Pq Event D1H , Umask 40H +Retired load uops which data sources were load uops +missed L1 but hit FB due to preceding miss to the +same cache line with data not ready. +.It Li MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS +.Pq Event D2H , Umask 01H +Retired load uops which data sources were LLC hit +and cross-core snoop missed in on-pkg core cache. +.It Li MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT +.Pq Event D2H , Umask 02H +Retired load uops which data sources were LLC and +cross-core snoop hits in on-pkg core cache. +.It Li MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM +.Pq Event D2H , Umask 04H +Retired load uops which data sources were HitM +responses from shared LLC. +.It Li MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE +.Pq Event D2H , Umask 08H +Retired load uops which data sources were hits in +LLC without snoops required. +.It Li MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM +.Pq Event D3H , Umask 01H +Retired load uops which data sources missed LLC but +serviced from local dram. +.It Li BACLEARS.ANY +.Pq Event E6H , Umask 1FH +Number of front end re-steers due to BPU +misprediction. +.It Li L2_TRANS.DEMAND_DATA_RD +.Pq Event F0H , Umask 01H +Demand Data Read requests that access L2 cache. +.It Li L2_TRANS.RFO +.Pq Event F0H , Umask 02H +RFO requests that access L2 cache. +.It Li L2_TRANS.CODE_RD +.Pq Event F0H , Umask 04H +L2 cache accesses when fetching instructions. +.It Li L2_TRANS.ALL_PF +.Pq Event F0H , Umask 08H +Any MLC or LLC HW prefetch accessing L2, including +rejects. +.It Li L2_TRANS.L1D_WB +.Pq Event F0H , Umask 10H +L1D writebacks that access L2 cache. +.It Li L2_TRANS.L2_FILL +.Pq Event F0H , Umask 20H +L2 fill requests that access L2 cache. +.It Li L2_TRANS.L2_WB +.Pq Event F0H , Umask 40H +L2 writebacks that access L2 cache. +.It Li L2_TRANS.ALL_REQUESTS +.Pq Event F0H , Umask 80H +Transactions accessing L2 pipe. +.It Li L2_LINES_IN.I +.Pq Event F1H , Umask 01H +L2 cache lines in I state filling L2. +.It Li L2_LINES_IN.S +.Pq Event F1H , Umask 02H +L2 cache lines in S state filling L2. +.It Li L2_LINES_IN.E +.Pq Event F1H , Umask 04H +L2 cache lines in E state filling L2. +.It Li L2_LINES_IN.ALL +.Pq Event F1H , Umask 07H +L2 cache lines filling L2. +.It Li L2_LINES_OUT.DEMAND_CLEAN +.Pq Event F2H , Umask 05H +Clean L2 cache lines evicted by demand. +.It Li L2_LINES_OUT.DEMAND_DIRTY +.Pq Event F2H , Umask 06H +Dirty L2 cache lines evicted by demand. 
+.El +.Sh SEE ALSO +.Xr pmc 3 , +.Xr pmc.atom 3 , +.Xr pmc.core 3 , +.Xr pmc.iaf 3 , +.Xr pmc.ucf 3 , +.Xr pmc.k7 3 , +.Xr pmc.k8 3 , +.Xr pmc.p4 3 , +.Xr pmc.p5 3 , +.Xr pmc.p6 3 , +.Xr pmc.corei7 3 , +.Xr pmc.corei7uc 3 , +.Xr pmc.haswell 3 , +.Xr pmc.haswelluc 3 , +.Xr pmc.ivybridge 3 , +.Xr pmc.ivybridgexeon 3 , +.Xr pmc.sandybridge 3 , +.Xr pmc.sandybridgeuc 3 , +.Xr pmc.sandybridgexeon 3 , +.Xr pmc.westmere 3 , +.Xr pmc.westmereuc 3 , +.Xr pmc.soft 3 , +.Xr pmc.tsc 3 , +.Xr pmc_cpuinfo 3 , +.Xr pmclog 3 , +.Xr hwpmc 4 +.Sh HISTORY +Support for the Haswell Xeon microarchitecture first appeared in +.Fx 10.2 . +.Sh AUTHORS +The +.Lb libpmc +library was written by +.An "Joseph Koshy" +.Aq jkoshy@FreeBSD.org . +The support for the Haswell Xeon +microarchitecture was written by +.An "Randall Stewart" +.Aq rrs@FreeBSD.org . diff --git a/lib/libpmc/pmc.ivybridge.3 b/lib/libpmc/pmc.ivybridge.3 index a4d9f949845b..b693b30ca73b 100644 --- a/lib/libpmc/pmc.ivybridge.3 +++ b/lib/libpmc/pmc.ivybridge.3 @@ -449,80 +449,60 @@ Stalls caused by changing prefix length of the instruction. .It Li ILD_STALL.IQ_FULL .Pq Event 87H , Umask 04H Stall cycles due to IQ is full. -.It Li BR_INST_EXEC.COND -.Pq Event 88H , Umask 01H -Qualify conditional near branch instructions executed, but not necessarily -retired. -Must combine with umask 40H, 80H. +.It Li BR_INST_EXEC.NONTAKEN_COND +.Pq Event 88H , Umask 41H +Count conditional near branch instructions that were executed (but not +necessarily retired) and not taken. +.It Li BR_INST_EXEC.TAKEN_COND +.Pq Event 88H , Umask 81H +Count conditional near branch instructions that were executed (but not +necessarily retired) and taken. .It Li BR_INST_EXEC.DIRECT_JMP -.Pq Event 88H , Umask 02H -Qualify all unconditional near branch instructions excluding calls and +.Pq Event 88H , Umask 82H +Count all unconditional near branch instructions excluding calls and indirect branches. -Must combine with umask 80H. .It Li BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET -.Pq Event 88H , Umask 04H -Qualify executed indirect near branch instructions that are not calls nor +.Pq Event 88H , Umask 84H +Count executed indirect near branch instructions that are not calls nor returns. -Must combine with umask 80H. .It Li BR_INST_EXEC.RETURN_NEAR -.Pq Event 88H , Umask 08H -Qualify indirect near branches that have a return mnemonic. -Must combine with umask 80H. +.Pq Event 88H , Umask 88H +Count indirect near branches that have a return mnemonic. .It Li BR_INST_EXEC.DIRECT_NEAR_CALL -.Pq Event 88H , Umask 10H -Qualify unconditional near call branch instructions, excluding non call +.Pq Event 88H , Umask 90H +Count unconditional near call branch instructions, excluding non call branch, executed. -Must combine with umask 80H. .It Li BR_INST_EXEC.INDIRECT_NEAR_CALL -.Pq Event 88H , Umask 20H -Qualify indirect near calls, including both register and memory indirect, +.Pq Event 88H , Umask A0H +Count indirect near calls, including both register and memory indirect, executed. -Must combine with umask 80H. -.It Li BR_INST_EXEC.NONTAKEN -.Pq Event 88H , Umask 40H -Qualify non-taken near branches executed. -Applicable to umask 01H only. -.It Li BR_INST_EXEC.TAKEN -.Pq Event 88H , Umask 80H -Qualify taken near branches executed. Must combine with 01H,02H, 04H, 08H, -10H, 20H. .It Li BR_INST_EXEC.ALL_BRANCHES .Pq Event 88H , Umask FFH Counts all near executed branches (not necessarily retired). -.It Li BR_MISP_EXEC.COND -.Pq Event 89H , Umask 01H -Qualify conditional near branch instructions mispredicted. 
-Must combine with umask 40H, 80H. +.It Li BR_MISP_EXEC.NONTAKEN_COND +.Pq Event 89H , Umask 41H +Count conditional near branch instructions mispredicted as nontaken. +.It Li BR_MISP_EXEC.TAKEN_COND +.Pq Event 89H , Umask 81H +Count conditional near branch instructions mispredicted as taken. .It Li BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET -.Pq Event 89H , Umask 04H -Qualify mispredicted indirect near branch instructions that are not calls +.Pq Event 89H , Umask 84H +Count mispredicted indirect near branch instructions that are not calls nor returns. -Must combine with umask 80H. .It Li BR_MISP_EXEC.RETURN_NEAR -.Pq Event 89H , Umask 08H -Qualify mispredicted indirect near branches that have a return mnemonic. -Must combine with umask 80H. +.Pq Event 89H , Umask 88H +Count mispredicted indirect near branches that have a return mnemonic. .It Li BR_MISP_EXEC.DIRECT_NEAR_CALL -.Pq Event 89H , Umask 10H -Qualify mispredicted unconditional near call branch instructions, excluding +.Pq Event 89H , Umask 90H +Count mispredicted unconditional near call branch instructions, excluding non call branch, executed. -Must combine with umask 80H. .It Li BR_MISP_EXEC.INDIRECT_NEAR_CALL -.Pq Event 89H , Umask 20H -Qualify mispredicted indirect near calls, including both register and memory +.Pq Event 89H , Umask A0H +Count mispredicted indirect near calls, including both register and memory indirect, executed. -Must combine with umask 80H. -.It Li BR_MISP_EXEC.NONTAKEN -.Pq Event 89H , Umask 40H -Qualify mispredicted non-taken near branches executed. -Applicable to umask 01H only. -.It Li BR_MISP_EXEC.TAKEN -.Pq Event 89H , Umask 80H -Qualify mispredicted taken near branches executed. Must combine with -01H,02H, 04H, 08H, 10H, 20H. .It Li BR_MISP_EXEC.ALL_BRANCHES .Pq Event 89H , Umask FFH -Counts all near executed branches (not necessarily retired). +Counts all mispredicted near executed branches (not necessarily retired). .It Li IDQ_UOPS_NOT_DELIVERED.CORE .Pq Event 9CH , Umask 01H Count number of non-delivered uops to RAT per thread. @@ -726,31 +706,24 @@ Specify threshold in MSR 0x3F6. .Pq Event CDH , Umask 02H Sample stores and collect precise store operation via PEBS record. PMC3 only. -.It Li MEM_UOP_RETIRED.LOADS -.Pq Event D0H , Umask 01H -Qualify retired memory uops that are loads. Combine with umask 10H, 20H, -40H, 80H. -Supports PEBS. -.It Li MEM_UOP_RETIRED.STORES -.Pq Event D0H , Umask 02H -Qualify retired memory uops that are stores. Combine with umask 10H, 20H, -40H, 80H. -.It Li MEM_UOP_RETIRED.STLB_MISS -.Pq Event D0H , Umask 10H -Qualify retired memory uops with STLB miss. Must combine with umask 01H, -02H, to produce counts. -.It Li MEM_UOP_RETIRED.LOCK -.Pq Event D0H , Umask 20H -Qualify retired memory uops with lock. Must combine with umask 01H, 02H, to -produce counts. -.It Li MEM_UOP_RETIRED.SPLIT -.Pq Event D0H , Umask 40H -Qualify retired memory uops with line split. Must combine with umask 01H, -02H, to produce counts. -.It Li MEM_UOP_RETIRED.ALL -.Pq Event D0H , Umask 80H -Qualify any retired memory uops. Must combine with umask 01H, 02H, to -produce counts. +.It Li MEM_UOPS_RETIRED.STLB_MISS_LOADS +.Pq Event D0H , Umask 11H +Count retired load uops that missed the STLB. +.It Li MEM_UOPS_RETIRED.STLB_MISS_STORES +.Pq Event D0H , Umask 12H +Count retired store uops that missed the STLB. +.It Li MEM_UOPS_RETIRED.SPLIT_LOADS +.Pq Event D0H , Umask 41H +Count retired load uops that were split across a cache line. 
+.It Li MEM_UOPS_RETIRED.SPLIT_STORES +.Pq Event D0H , Umask 42H +Count retired store uops that were split across a cache line. +.It Li MEM_UOPS_RETIRED.ALL_LOADS +.Pq Event D0H , Umask 81H +Count all retired load uops. +.It Li MEM_UOPS_RETIRED.ALL_STORES +.Pq Event D0H , Umask 82H +Count all retired store uops. .It Li MEM_LOAD_UOPS_RETIRED.L1_HIT .Pq Event D1H , Umask 01H Retired load uops with L1 cache hits as data sources. diff --git a/lib/libpmc/pmc.ivybridgexeon.3 b/lib/libpmc/pmc.ivybridgexeon.3 index 31a4caa6e342..2ee5b782cc42 100644 --- a/lib/libpmc/pmc.ivybridgexeon.3 +++ b/lib/libpmc/pmc.ivybridgexeon.3 @@ -449,80 +449,60 @@ Stalls caused by changing prefix length of the instruction. .It Li ILD_STALL.IQ_FULL .Pq Event 87H , Umask 04H Stall cycles due to IQ is full. -.It Li BR_INST_EXEC.COND -.Pq Event 88H , Umask 01H -Qualify conditional near branch instructions executed, but not necessarily -retired. -Must combine with umask 40H, 80H. +.It Li BR_INST_EXEC.NONTAKEN_COND +.Pq Event 88H , Umask 41H +Count conditional near branch instructions that were executed (but not +necessarily retired) and not taken. +.It Li BR_INST_EXEC.TAKEN_COND +.Pq Event 88H , Umask 81H +Count conditional near branch instructions that were executed (but not +necessarily retired) and taken. .It Li BR_INST_EXEC.DIRECT_JMP -.Pq Event 88H , Umask 02H -Qualify all unconditional near branch instructions excluding calls and +.Pq Event 88H , Umask 82H +Count all unconditional near branch instructions excluding calls and indirect branches. -Must combine with umask 80H. .It Li BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET -.Pq Event 88H , Umask 04H -Qualify executed indirect near branch instructions that are not calls nor +.Pq Event 88H , Umask 84H +Count executed indirect near branch instructions that are not calls nor returns. -Must combine with umask 80H. .It Li BR_INST_EXEC.RETURN_NEAR -.Pq Event 88H , Umask 08H -Qualify indirect near branches that have a return mnemonic. -Must combine with umask 80H. +.Pq Event 88H , Umask 88H +Count indirect near branches that have a return mnemonic. .It Li BR_INST_EXEC.DIRECT_NEAR_CALL -.Pq Event 88H , Umask 10H -Qualify unconditional near call branch instructions, excluding non call +.Pq Event 88H , Umask 90H +Count unconditional near call branch instructions, excluding non call branch, executed. -Must combine with umask 80H. .It Li BR_INST_EXEC.INDIRECT_NEAR_CALL -.Pq Event 88H , Umask 20H -Qualify indirect near calls, including both register and memory indirect, +.Pq Event 88H , Umask A0H +Count indirect near calls, including both register and memory indirect, executed. -Must combine with umask 80H. -.It Li BR_INST_EXEC.NONTAKEN -.Pq Event 88H , Umask 40H -Qualify non-taken near branches executed. -Applicable to umask 01H only. -.It Li BR_INST_EXEC.TAKEN -.Pq Event 88H , Umask 80H -Qualify taken near branches executed. Must combine with 01H,02H, 04H, 08H, -10H, 20H. .It Li BR_INST_EXEC.ALL_BRANCHES .Pq Event 88H , Umask FFH Counts all near executed branches (not necessarily retired). -.It Li BR_MISP_EXEC.COND -.Pq Event 89H , Umask 01H -Qualify conditional near branch instructions mispredicted. -Must combine with umask 40H, 80H. +.It Li BR_MISP_EXEC.NONTAKEN_COND +.Pq Event 89H , Umask 41H +Count conditional near branch instructions mispredicted as nontaken. +.It Li BR_MISP_EXEC.TAKEN_COND +.Pq Event 89H , Umask 81H +Count conditional near branch instructions mispredicted as taken. 
.It Li BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET -.Pq Event 89H , Umask 04H -Qualify mispredicted indirect near branch instructions that are not calls +.Pq Event 89H , Umask 84H +Count mispredicted indirect near branch instructions that are not calls nor returns. -Must combine with umask 80H. .It Li BR_MISP_EXEC.RETURN_NEAR -.Pq Event 89H , Umask 08H -Qualify mispredicted indirect near branches that have a return mnemonic. -Must combine with umask 80H. +.Pq Event 89H , Umask 88H +Count mispredicted indirect near branches that have a return mnemonic. .It Li BR_MISP_EXEC.DIRECT_NEAR_CALL -.Pq Event 89H , Umask 10H -Qualify mispredicted unconditional near call branch instructions, excluding +.Pq Event 89H , Umask 90H +Count mispredicted unconditional near call branch instructions, excluding non call branch, executed. -Must combine with umask 80H. .It Li BR_MISP_EXEC.INDIRECT_NEAR_CALL -.Pq Event 89H , Umask 20H -Qualify mispredicted indirect near calls, including both register and memory +.Pq Event 89H , Umask A0H +Count mispredicted indirect near calls, including both register and memory indirect, executed. -Must combine with umask 80H. -.It Li BR_MISP_EXEC.NONTAKEN -.Pq Event 89H , Umask 40H -Qualify mispredicted non-taken near branches executed. -Applicable to umask 01H only. -.It Li BR_MISP_EXEC.TAKEN -.Pq Event 89H , Umask 80H -Qualify mispredicted taken near branches executed. Must combine with -01H,02H, 04H, 08H, 10H, 20H. .It Li BR_MISP_EXEC.ALL_BRANCHES .Pq Event 89H , Umask FFH -Counts all near executed branches (not necessarily retired). +Counts all mispredicted near executed branches (not necessarily retired). .It Li IDQ_UOPS_NOT_DELIVERED.CORE .Pq Event 9CH , Umask 01H Count number of non-delivered uops to RAT per thread. @@ -738,31 +718,24 @@ Specify threshold in MSR 0x3F6. .Pq Event CDH , Umask 02H Sample stores and collect precise store operation via PEBS record. PMC3 only. -.It Li MEM_UOP_RETIRED.LOADS -.Pq Event D0H , Umask 01H -Qualify retired memory uops that are loads. Combine with umask 10H, 20H, -40H, 80H. -Supports PEBS. -.It Li MEM_UOP_RETIRED.STORES -.Pq Event D0H , Umask 02H -Qualify retired memory uops that are stores. Combine with umask 10H, 20H, -40H, 80H. -.It Li MEM_UOP_RETIRED.STLB_MISS -.Pq Event D0H , Umask 10H -Qualify retired memory uops with STLB miss. Must combine with umask 01H, -02H, to produce counts. -.It Li MEM_UOP_RETIRED.LOCK -.Pq Event D0H , Umask 20H -Qualify retired memory uops with lock. Must combine with umask 01H, 02H, to -produce counts. -.It Li MEM_UOP_RETIRED.SPLIT -.Pq Event D0H , Umask 40H -Qualify retired memory uops with line split. Must combine with umask 01H, -02H, to produce counts. -.It Li MEM_UOP_RETIRED.ALL -.Pq Event D0H , Umask 80H -Qualify any retired memory uops. Must combine with umask 01H, 02H, to -produce counts. +.It Li MEM_UOPS_RETIRED.STLB_MISS_LOADS +.Pq Event D0H , Umask 11H +Count retired load uops that missed the STLB. +.It Li MEM_UOPS_RETIRED.STLB_MISS_STORES +.Pq Event D0H , Umask 12H +Count retired store uops that missed the STLB. +.It Li MEM_UOPS_RETIRED.SPLIT_LOADS +.Pq Event D0H , Umask 41H +Count retired load uops that were split across a cache line. +.It Li MEM_UOPS_RETIRED.SPLIT_STORES +.Pq Event D0H , Umask 42H +Count retired store uops that were split across a cache line. +.It Li MEM_UOPS_RETIRED.ALL_LOADS +.Pq Event D0H , Umask 81H +Count all retired load uops. +.It Li MEM_UOPS_RETIRED.ALL_STORES +.Pq Event D0H , Umask 82H +Count all retired store uops. 
.It Li MEM_LOAD_UOPS_RETIRED.L1_HIT .Pq Event D1H , Umask 01H Retired load uops with L1 cache hits as data sources. diff --git a/lib/libpmc/pmc.sandybridge.3 b/lib/libpmc/pmc.sandybridge.3 index 0b8f6b2f09a9..0e219ae3aeaa 100644 --- a/lib/libpmc/pmc.sandybridge.3 +++ b/lib/libpmc/pmc.sandybridge.3 @@ -497,80 +497,60 @@ Stalls caused by changing prefix length of the instruction. .It Li ILD_STALL.IQ_FULL .Pq Event 87H, Umask 04H Stall cycles due to IQ is full. -.It Li BR_INST_EXEC.COND -.Pq Event 88H, Umask 01H -Qualify conditional near branch instructions executed, but not necessarily -retired. -Must combine with umask 40H, 80H +.It Li BR_INST_EXEC.NONTAKEN_COND +.Pq Event 88H , Umask 41H +Count conditional near branch instructions that were executed (but not +necessarily retired) and not taken. +.It Li BR_INST_EXEC.TAKEN_COND +.Pq Event 88H , Umask 81H +Count conditional near branch instructions that were executed (but not +necessarily retired) and taken. .It Li BR_INST_EXEC.DIRECT_JMP -.Pq Event 88H, Umask 02H -Qualify all unconditional near branch instructions excluding calls and indirect -branches. -Must combine with umask 80H +.Pq Event 88H , Umask 82H +Count all unconditional near branch instructions excluding calls and +indirect branches. .It Li BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET -.Pq Event 88H, Umask 04H -Qualify executed indirect near branch instructions that are not calls nor +.Pq Event 88H , Umask 84H +Count executed indirect near branch instructions that are not calls nor returns. -Must combine with umask 80H .It Li BR_INST_EXEC.RETURN_NEAR -.Pq Event 88H, Umask 08H -Qualify indirect near branches that have a return mnemonic. -Must combine with umask 80H +.Pq Event 88H , Umask 88H +Count indirect near branches that have a return mnemonic. .It Li BR_INST_EXEC.DIRECT_NEAR_CALL -.Pq Event 88H, Umask 10H -Qualify unconditional near call branch instructions, excluding non call branch, -executed. -Must combine with umask 80H +.Pq Event 88H , Umask 90H +Count unconditional near call branch instructions, excluding non call +branch, executed. .It Li BR_INST_EXEC.INDIRECT_NEAR_CALL -.Pq Event 88H, Umask 20H -Qualify indirect near calls, including both register and memory indirect, +.Pq Event 88H , Umask A0H +Count indirect near calls, including both register and memory indirect, executed. -Must combine with umask 80H -.It Li BR_INST_EXEC.NONTAKEN -.Pq Event 88H, Umask 40H -Qualify non-taken near branches executed. -Applicable to umask 01H only -.It Li BR_INST_EXEC.TAKEN -.Pq Event 88H, Umask 80H -Qualify taken near branches executed. -Must combine with 01H,02H, 04H, 08H, 10H, 20H -.It Li BR_INST_EXE.ALL_BRANCHES -.Pq Event 88H, Umask FFH +.It Li BR_INST_EXEC.ALL_BRANCHES +.Pq Event 88H , Umask FFH Counts all near executed branches (not necessarily retired). -.It Li BR_MISP_EXEC.COND -.Pq Event 89H, Umask 01H -Qualify conditional near branch instructions mispredicted. -Must combine with umask 40H, 80H +.It Li BR_MISP_EXEC.NONTAKEN_COND +.Pq Event 89H , Umask 41H +Count conditional near branch instructions mispredicted as nontaken. +.It Li BR_MISP_EXEC.TAKEN_COND +.Pq Event 89H , Umask 81H +Count conditional near branch instructions mispredicted as taken. .It Li BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET -.Pq Event 89H, Umask 04H -Qualify mispredicted indirect near branch instructions that are not calls nor -returns. -Must combine with umask 80H +.Pq Event 89H , Umask 84H +Count mispredicted indirect near branch instructions that are not calls +nor returns. 
.It Li BR_MISP_EXEC.RETURN_NEAR -.Pq Event 89H, Umask 08H -Qualify mispredicted indirect near branches that have a return mnemonic. -Must combine with umask 80H +.Pq Event 89H , Umask 88H +Count mispredicted indirect near branches that have a return mnemonic. .It Li BR_MISP_EXEC.DIRECT_NEAR_CALL -.Pq Event 89H, Umask 10H -Qualify mispredicted unconditional near call branch instructions, excluding non -call branch, executed. -Must combine with umask 80H +.Pq Event 89H , Umask 90H +Count mispredicted unconditional near call branch instructions, excluding +non call branch, executed. .It Li BR_MISP_EXEC.INDIRECT_NEAR_CALL -.Pq Event 89H, Umask 20H -Qualify mispredicted indirect near calls, including both register and memory +.Pq Event 89H , Umask A0H +Count mispredicted indirect near calls, including both register and memory indirect, executed. -Must combine with umask 80H -.It Li BR_MISP_EXEC.NONTAKEN -.Pq Event 89H, Umask 40H -Qualify mispredicted non-taken near branches executed. -Applicable to umask 01H only -.It Li BR_MISP_EXEC.TAKEN -.Pq Event 89H, Umask 80H -Qualify mispredicted taken near branches executed. -Must combine with 01H,02H, 04H, 08H, 10H, 20H .It Li BR_MISP_EXEC.ALL_BRANCHES -.Pq Event 89H, Umask FFH -Counts all near executed branches (not necessarily retired). +.Pq Event 89H , Umask FFH +Counts all mispredicted near executed branches (not necessarily retired). .It Li IDQ_UOPS_NOT_DELIVERED.CORE .Pq Event 9CH, Umask 01H Count number of non-delivered uops to RAT per thread. diff --git a/lib/libpmc/pmc.sandybridgexeon.3 b/lib/libpmc/pmc.sandybridgexeon.3 index 17a1bf9fcd67..b334c16265fa 100644 --- a/lib/libpmc/pmc.sandybridgexeon.3 +++ b/lib/libpmc/pmc.sandybridgexeon.3 @@ -543,73 +543,60 @@ instruction. .It Li ILD_STALL.IQ_FULL .Pq Event 87H , Umask 04H Stall cycles due to IQ is full. -.It Li BR_INST_EXEC.COND -.Pq Event 88H , Umask 01H -Qualify conditional near branch instructions -executed, but not necessarily retired. +.It Li BR_INST_EXEC.NONTAKEN_COND +.Pq Event 88H , Umask 41H +Count conditional near branch instructions that were executed (but not +necessarily retired) and not taken. +.It Li BR_INST_EXEC.TAKEN_COND +.Pq Event 88H , Umask 81H +Count conditional near branch instructions that were executed (but not +necessarily retired) and taken. .It Li BR_INST_EXEC.DIRECT_JMP -.Pq Event 88H , Umask 02H -Qualify all unconditional near branch instructions -excluding calls and indirect branches. +.Pq Event 88H , Umask 82H +Count all unconditional near branch instructions excluding calls and +indirect branches. .It Li BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET -.Pq Event 88H , Umask 04H -Qualify executed indirect near branch instructions -that are not calls nor returns. +.Pq Event 88H , Umask 84H +Count executed indirect near branch instructions that are not calls nor +returns. .It Li BR_INST_EXEC.RETURN_NEAR -.Pq Event 88H , Umask 08H -Qualify indirect near branches that have a return -mnemonic. +.Pq Event 88H , Umask 88H +Count indirect near branches that have a return mnemonic. .It Li BR_INST_EXEC.DIRECT_NEAR_CALL -.Pq Event 88H , Umask 10H -Qualify unconditional near call branch instructions, -excluding non call branch, executed. +.Pq Event 88H , Umask 90H +Count unconditional near call branch instructions, excluding non call +branch, executed. .It Li BR_INST_EXEC.INDIRECT_NEAR_CALL -.Pq Event 88H , Umask 20H -Qualify indirect near calls, including both register -and memory indirect, executed. 
-.It Li BR_INST_EXEC.NONTAKEN -.Pq Event 88H , Umask 40H -Qualify non-taken near branches executed. -.It Li BR_INST_EXEC.TAKEN -.Pq Event 88H , Umask 80H -Qualify taken near branches executed. Must -combine with 01H,02H, 04H, 08H, 10H, 20H. -.It Li BR_INST_EXE.ALL_BRANCHES +.Pq Event 88H , Umask A0H +Count indirect near calls, including both register and memory indirect, +executed. +.It Li BR_INST_EXEC.ALL_BRANCHES .Pq Event 88H , Umask FFH -Counts all near executed branches (not necessarily -retired). -.It Li BR_MISP_EXEC.COND -.Pq Event 89H , Umask 01H -Qualify conditional near branch instructions -mispredicted. +Counts all near executed branches (not necessarily retired). +.It Li BR_MISP_EXEC.NONTAKEN_COND +.Pq Event 89H , Umask 41H +Count conditional near branch instructions mispredicted as nontaken. +.It Li BR_MISP_EXEC.TAKEN_COND +.Pq Event 89H , Umask 81H +Count conditional near branch instructions mispredicted as taken. .It Li BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET -.Pq Event 89H , Umask 04H -Qualify mispredicted indirect near branch -instructions that are not calls nor returns. +.Pq Event 89H , Umask 84H +Count mispredicted indirect near branch instructions that are not calls +nor returns. .It Li BR_MISP_EXEC.RETURN_NEAR -.Pq Event 89H , Umask 08H -Qualify mispredicted indirect near branches that -have a return mnemonic. +.Pq Event 89H , Umask 88H +Count mispredicted indirect near branches that have a return mnemonic. .It Li BR_MISP_EXEC.DIRECT_NEAR_CALL -.Pq Event 89H , Umask 10H -Qualify mispredicted unconditional near call branch -instructions, excluding non call branch, executed. +.Pq Event 89H , Umask 90H +Count mispredicted unconditional near call branch instructions, excluding +non call branch, executed. .It Li BR_MISP_EXEC.INDIRECT_NEAR_CALL -.Pq Event 89H , Umask 20H -Qualify mispredicted indirect near calls, including -both register and memory indirect, executed. -.It Li BR_MISP_EXEC.NONTAKEN -.Pq Event 89H , Umask 40H -Qualify mispredicted non-taken near branches -executed,. -.It Li BR_MISP_EXEC.TAKEN -.Pq Event 89H , Umask 80H -Qualify mispredicted taken near branches executed. -Must combine with 01H,02H, 04H, 08H, 10H, 20H +.Pq Event 89H , Umask A0H +Count mispredicted indirect near calls, including both register and memory +indirect, executed. .It Li BR_MISP_EXEC.ALL_BRANCHES .Pq Event 89H , Umask FFH -Counts all near executed branches (not necessarily -retired). +Counts all mispredicted near executed branches (not necessarily retired). 
.It Li IDQ_UOPS_NOT_DELIVERED.CORE .Pq Event 9CH , Umask 01H Count number of non-delivered uops to RAT per diff --git a/lib/libstand/Makefile b/lib/libstand/Makefile index fad9aa603c45..4a1fa2cca729 100644 --- a/lib/libstand/Makefile +++ b/lib/libstand/Makefile @@ -80,7 +80,6 @@ SRCS+= aeabi_memcmp.S aeabi_memcpy.S aeabi_memmove.S aeabi_memset.S .if ${MACHINE_CPUARCH} == "powerpc" .PATH: ${.CURDIR}/../libc/quad SRCS+= ashldi3.c ashrdi3.c -.PATH: ${.CURDIR}/../libc/powerpc/gen SRCS+= syncicache.c .endif @@ -89,11 +88,7 @@ SRCS+= syncicache.c SRCS+= uuid_equal.c uuid_is_nil.c # _setjmp/_longjmp -.if ${MACHINE_ARCH} == "powerpc64" -.PATH: ${.CURDIR}/powerpc -.else .PATH: ${.CURDIR}/${MACHINE_CPUARCH} -.endif SRCS+= _setjmp.S # decompression functionality from libbz2 diff --git a/lib/libstand/powerpc/_setjmp.S b/lib/libstand/powerpc/_setjmp.S index 3884b114274c..7c7c24b1237c 100644 --- a/lib/libstand/powerpc/_setjmp.S +++ b/lib/libstand/powerpc/_setjmp.S @@ -42,7 +42,7 @@ #define JMP_xer 24*REGWIDTH #define JMP_sig 25*REGWIDTH -ASENTRY_NOPROF(setjmp) +ASENTRY_NOPROF(_setjmp) ST_REG 31, JMP_r31(3) /* r1, r2, r14-r30 */ ST_REG 1, JMP_r1 (3) @@ -79,7 +79,7 @@ ASENTRY_NOPROF(setjmp) .extern sigsetmask -ASENTRY_NOPROF(longjmp) +ASENTRY_NOPROF(_longjmp) LD_REG 31, JMP_r31(3) /* r1, r2, r14-r30 */ LD_REG 1, JMP_r1 (3) diff --git a/lib/libstand/powerpc/syncicache.c b/lib/libstand/powerpc/syncicache.c new file mode 100644 index 000000000000..434dcec63416 --- /dev/null +++ b/lib/libstand/powerpc/syncicache.c @@ -0,0 +1,103 @@ +/*- + * Copyright (C) 1995-1997, 1999 Wolfgang Solfrank. + * Copyright (C) 1995-1997, 1999 TooLs GmbH. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by TooLs GmbH. + * 4. The name of TooLs GmbH may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * $NetBSD: syncicache.c,v 1.2 1999/05/05 12:36:40 tsubai Exp $ + */ + +#ifndef lint +static const char rcsid[] = + "$FreeBSD$"; +#endif /* not lint */ + +#include +#if defined(_KERNEL) || defined(_STANDALONE) +#include +#include +#include +#endif +#include + +#include +#include + +#ifdef _STANDALONE +int cacheline_size = 32; +#endif + +#if !defined(_KERNEL) && !defined(_STANDALONE) +#include + +int cacheline_size = 0; + +static void getcachelinesize(void); + +static void +getcachelinesize() +{ + static int cachemib[] = { CTL_MACHDEP, CPU_CACHELINE }; + int clen; + + clen = sizeof(cacheline_size); + + if (sysctl(cachemib, sizeof(cachemib) / sizeof(cachemib[0]), + &cacheline_size, &clen, NULL, 0) < 0 || !cacheline_size) { + abort(); + } +} +#endif + +void +__syncicache(void *from, int len) +{ + int l, off; + char *p; + +#if !defined(_KERNEL) && !defined(_STANDALONE) + if (!cacheline_size) + getcachelinesize(); +#endif + + off = (u_int)from & (cacheline_size - 1); + l = len += off; + p = (char *)from - off; + + do { + __asm __volatile ("dcbst 0,%0" :: "r"(p)); + p += cacheline_size; + } while ((l -= cacheline_size) > 0); + __asm __volatile ("sync"); + p = (char *)from - off; + do { + __asm __volatile ("icbi 0,%0" :: "r"(p)); + p += cacheline_size; + } while ((len -= cacheline_size) > 0); + __asm __volatile ("sync; isync"); +} + diff --git a/lib/libthr/thread/thr_private.h b/lib/libthr/thread/thr_private.h index 3cfbc6333dc6..ed24c3899123 100644 --- a/lib/libthr/thread/thr_private.h +++ b/lib/libthr/thread/thr_private.h @@ -337,7 +337,7 @@ struct pthread_key { /* * lwpid_t is 32bit but kernel thr API exports tid as long type - * in very earily date. + * to preserve the ABI for M:N model in very early date (r131431). */ #define TID(thread) ((uint32_t) ((thread)->tid)) diff --git a/lib/msun/man/j0.3 b/lib/msun/man/j0.3 index 91849da47a2f..7e1b79058e2e 100644 --- a/lib/msun/man/j0.3 +++ b/lib/msun/man/j0.3 @@ -28,7 +28,7 @@ .\" from: @(#)j0.3 6.7 (Berkeley) 4/19/91 .\" $FreeBSD$ .\" -.Dd February 18, 2008 +.Dd March 10, 2015 .Dt J0 3 .Os .Sh NAME @@ -77,24 +77,17 @@ The functions .Fn j0 , .Fn j0f , -.Fn j1 +.Fn j1 , and .Fn j1f -compute the -.Em Bessel function of the first kind of the order -0 and the -.Em order -1, respectively, -for the -real value +compute the Bessel function of the first kind of orders +0 and 1 for the real value .Fa x ; the functions .Fn jn and .Fn jnf -compute the -.Em Bessel function of the first kind of the integer -.Em order +compute the Bessel function of the first kind of the integer order .Fa n for the real value .Fa x . @@ -105,13 +98,8 @@ The functions .Fn y1 , and .Fn y1f -compute the linearly independent -.Em Bessel function of the second kind of the order -0 and the -.Em order -1, respectively, -for the -positive +compute the linearly independent Bessel function of the second kind +of orders 0 and 1 for the positive .Em real value .Fa x ; @@ -119,9 +107,7 @@ the functions .Fn yn and .Fn ynf -compute the -.Em Bessel function of the second kind for the integer -.Em order +compute the Bessel function of the second kind for the integer order .Fa n for the positive .Em real @@ -141,11 +127,20 @@ and .Fn ynf . If .Fa x -is negative, these routines will generate an invalid exception and -return \*(Na. +is negative, including -\*(If, these routines will generate an invalid +exception and return \*(Na. If .Fa x -is 0 or a sufficiently small positive number, these routines +is \*(Pm0, these routines +will generate a divide-by-zero exception and return -\*(If. 
+If +.Fa x +is a sufficiently small positive number, then +.Fn y1 , +.Fn y1f , +.Fn yn , +and +.Fn ynf will generate an overflow exception and return -\*(If. .Sh SEE ALSO .Xr math 3 diff --git a/lib/msun/src/e_j0.c b/lib/msun/src/e_j0.c index 9e269e79cf1c..365ffe5ee4bb 100644 --- a/lib/msun/src/e_j0.c +++ b/lib/msun/src/e_j0.c @@ -64,6 +64,8 @@ __FBSDID("$FreeBSD$"); static double pzero(double), qzero(double); +static const volatile double vone = 1, vzero = 0; + static const double huge = 1e300, one = 1.0, @@ -150,10 +152,16 @@ __ieee754_y0(double x) EXTRACT_WORDS(hx,lx,x); ix = 0x7fffffff&hx; - /* Y0(NaN) is NaN, y0(-inf) is Nan, y0(inf) is 0 */ - if(ix>=0x7ff00000) return one/(x+x*x); - if((ix|lx)==0) return -one/zero; - if(hx<0) return zero/zero; + /* + * y0(NaN) = NaN. + * y0(Inf) = 0. + * y0(-Inf) = NaN and raise invalid exception. + */ + if(ix>=0x7ff00000) return vone/(x+x*x); + /* y0(+-0) = -inf and raise divide-by-zero exception. */ + if((ix|lx)==0) return -one/vzero; + /* y0(x<0) = NaN and raise invalid exception. */ + if(hx<0) return vzero/vzero; if(ix >= 0x40000000) { /* |x| >= 2.0 */ /* y0(x) = sqrt(2/(pi*x))*(p0(x)*sin(x0)+q0(x)*cos(x0)) * where x0 = x-pi/4 diff --git a/lib/msun/src/e_j0f.c b/lib/msun/src/e_j0f.c index 0479957c9dbf..e86ed640b3c4 100644 --- a/lib/msun/src/e_j0f.c +++ b/lib/msun/src/e_j0f.c @@ -16,11 +16,17 @@ #include __FBSDID("$FreeBSD$"); +/* + * See e_j0.c for complete comments. + */ + #include "math.h" #include "math_private.h" static float pzerof(float), qzerof(float); +static const volatile float vone = 1, vzero = 0; + static const float huge = 1e30, one = 1.0, @@ -107,10 +113,9 @@ __ieee754_y0f(float x) GET_FLOAT_WORD(hx,x); ix = 0x7fffffff&hx; - /* Y0(NaN) is NaN, y0(-inf) is Nan, y0(inf) is 0 */ - if(ix>=0x7f800000) return one/(x+x*x); - if(ix==0) return -one/zero; - if(hx<0) return zero/zero; + if(ix>=0x7f800000) return vone/(x+x*x); + if(ix==0) return -one/vzero; + if(hx<0) return vzero/vzero; if(ix >= 0x40000000) { /* |x| >= 2.0 */ /* y0(x) = sqrt(2/(pi*x))*(p0(x)*sin(x0)+q0(x)*cos(x0)) * where x0 = x-pi/4 diff --git a/lib/msun/src/e_j1.c b/lib/msun/src/e_j1.c index 2dc0ba1c7f2b..49220586122c 100644 --- a/lib/msun/src/e_j1.c +++ b/lib/msun/src/e_j1.c @@ -64,6 +64,8 @@ __FBSDID("$FreeBSD$"); static double pone(double), qone(double); +static const volatile double vone = 1, vzero = 0; + static const double huge = 1e300, one = 1.0, @@ -147,10 +149,16 @@ __ieee754_y1(double x) EXTRACT_WORDS(hx,lx,x); ix = 0x7fffffff&hx; - /* if Y1(NaN) is NaN, Y1(-inf) is NaN, Y1(inf) is 0 */ - if(ix>=0x7ff00000) return one/(x+x*x); - if((ix|lx)==0) return -one/zero; - if(hx<0) return zero/zero; + /* + * y1(NaN) = NaN. + * y1(Inf) = 0. + * y1(-Inf) = NaN and raise invalid exception. + */ + if(ix>=0x7ff00000) return vone/(x+x*x); + /* y1(+-0) = -inf and raise divide-by-zero exception. */ + if((ix|lx)==0) return -one/vzero; + /* y1(x<0) = NaN and raise invalid exception. */ + if(hx<0) return vzero/vzero; if(ix >= 0x40000000) { /* |x| >= 2.0 */ s = sin(x); c = cos(x); diff --git a/lib/msun/src/e_j1f.c b/lib/msun/src/e_j1f.c index 6077a6924d39..c39c548a0a2b 100644 --- a/lib/msun/src/e_j1f.c +++ b/lib/msun/src/e_j1f.c @@ -16,11 +16,17 @@ #include __FBSDID("$FreeBSD$"); +/* + * See e_j1.c for complete comments. 
+ */ + #include "math.h" #include "math_private.h" static float ponef(float), qonef(float); +static const volatile float vone = 1, vzero = 0; + static const float huge = 1e30, one = 1.0, @@ -104,10 +110,9 @@ __ieee754_y1f(float x) GET_FLOAT_WORD(hx,x); ix = 0x7fffffff&hx; - /* if Y1(NaN) is NaN, Y1(-inf) is NaN, Y1(inf) is 0 */ - if(ix>=0x7f800000) return one/(x+x*x); - if(ix==0) return -one/zero; - if(hx<0) return zero/zero; + if(ix>=0x7f800000) return vone/(x+x*x); + if(ix==0) return -one/vzero; + if(hx<0) return vzero/vzero; if(ix >= 0x40000000) { /* |x| >= 2.0 */ s = sinf(x); c = cosf(x); diff --git a/lib/msun/src/e_jn.c b/lib/msun/src/e_jn.c index 8b0bc62efc2c..eefd4ff751d0 100644 --- a/lib/msun/src/e_jn.c +++ b/lib/msun/src/e_jn.c @@ -43,6 +43,8 @@ __FBSDID("$FreeBSD$"); #include "math.h" #include "math_private.h" +static const volatile double vone = 1, vzero = 0; + static const double invsqrtpi= 5.64189583547756279280e-01, /* 0x3FE20DD7, 0x50429B6D */ two = 2.00000000000000000000e+00, /* 0x40000000, 0x00000000 */ @@ -220,10 +222,12 @@ __ieee754_yn(int n, double x) EXTRACT_WORDS(hx,lx,x); ix = 0x7fffffff&hx; - /* if Y(n,NaN) is NaN */ + /* yn(n,NaN) = NaN */ if((ix|((u_int32_t)(lx|-lx))>>31)>0x7ff00000) return x+x; - if((ix|lx)==0) return -one/zero; - if(hx<0) return zero/zero; + /* yn(n,+-0) = -inf and raise divide-by-zero exception. */ + if((ix|lx)==0) return -one/vzero; + /* yn(n,x<0) = NaN and raise invalid exception. */ + if(hx<0) return vzero/vzero; sign = 1; if(n<0){ n = -n; diff --git a/lib/msun/src/e_jnf.c b/lib/msun/src/e_jnf.c index f564aeccd655..965feeb666d4 100644 --- a/lib/msun/src/e_jnf.c +++ b/lib/msun/src/e_jnf.c @@ -16,9 +16,15 @@ #include __FBSDID("$FreeBSD$"); +/* + * See e_jn.c for complete comments. + */ + #include "math.h" #include "math_private.h" +static const volatile float vone = 1, vzero = 0; + static const float two = 2.0000000000e+00, /* 0x40000000 */ one = 1.0000000000e+00; /* 0x3F800000 */ @@ -172,10 +178,9 @@ __ieee754_ynf(int n, float x) GET_FLOAT_WORD(hx,x); ix = 0x7fffffff&hx; - /* if Y(n,NaN) is NaN */ if(ix>0x7f800000) return x+x; - if(ix==0) return -one/zero; - if(hx<0) return zero/zero; + if(ix==0) return -one/vzero; + if(hx<0) return vzero/vzero; sign = 1; if(n<0){ n = -n; diff --git a/share/dtrace/Makefile b/share/dtrace/Makefile index 7de5ccc77b99..e4f69d4cab7d 100644 --- a/share/dtrace/Makefile +++ b/share/dtrace/Makefile @@ -16,9 +16,10 @@ SCRIPTS= disklatency \ disklatencycmd \ hotopen \ nfsclienttime \ + udptrack \ tcpstate \ tcptrack \ - tcpconn + tcpconn SCRIPTSDIR= ${SHAREDIR}/dtrace diff --git a/share/dtrace/tcpstate b/share/dtrace/tcpstate index 0af68aeb25a6..4528bd7fe432 100755 --- a/share/dtrace/tcpstate +++ b/share/dtrace/tcpstate @@ -41,6 +41,6 @@ tcp:kernel::state-change { newstate = args[3]->tcps_state; oldstate = args[5]->tcps_state; - printf("%d %s\t\t%s\n", args[1]->pid, tcp_state_string[oldstate], + printf("%s\t\t%s\n", tcp_state_string[oldstate], tcp_state_string[newstate]); } diff --git a/share/dtrace/udptrack b/share/dtrace/udptrack new file mode 100755 index 000000000000..ca7e8e90e747 --- /dev/null +++ b/share/dtrace/udptrack @@ -0,0 +1,53 @@ +#!/usr/sbin/dtrace -s +/* + * Copyright (c) 2015 George V. Neville-Neil + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + * + * The udptrack D script shows various information about UDP + * data that are sent and received on the host. + * + * Usage: udptrack + */ + +#pragma D option quiet +udp:kernel::receive +{ + printf("Received %d bytes of data from %s:%d\n", + args[4]->udp_length, + args[2]->ip_saddr, + args[4]->udp_sport); + tracemem(args[4]->udp_hdr, 64); +} + +udp:kernel::send +{ + printf("Sent %d bytes of data to %s:%d\n", + args[4]->udp_length, + args[2]->ip_daddr, + args[4]->udp_dport); + tracemem(args[4]->udp_hdr, 64); +} + diff --git a/share/man/man5/core.5 b/share/man/man5/core.5 index 5a827e2993f4..3f54f890ef9d 100644 --- a/share/man/man5/core.5 +++ b/share/man/man5/core.5 @@ -28,7 +28,7 @@ .\" @(#)core.5 8.3 (Berkeley) 12/11/93 .\" $FreeBSD$ .\" -.Dd November 22, 2012 +.Dd March 8, 2015 .Dt CORE 5 .Os .Sh NAME @@ -101,25 +101,23 @@ variable .Va kern.sugid_coredump to 1. .Pp -Corefiles can be compressed by the kernel if the following items -are included in the kernel configuration file: +Corefiles can be compressed by the kernel if the following item +is included in the kernel configuration file: .Bl -tag -width "1234567890" -compact -offset "12345" .It options -COMPRESS_USER_CORES -.It devices -gzio +GZIO .El .Pp -When COMPRESS_USER_CORES is included the following sysctls can control -if core files will be compressed: +When the GZIO option is included, the following sysctls control whether core +files will be compressed: .Bl -tag -width "kern.compress_user_cores_gzlevel" -compact -offset "12345" .It Em kern.compress_user_cores_gzlevel Gzip compression level. -Defaults to -1. +Defaults to 6. .It Em kern.compress_user_cores Actually compress user cores. -Core files will have the suffix -.Em .gz +Compressed core files will have a suffix of +.Ql .gz appended to them. .El .Sh EXAMPLES diff --git a/share/man/man9/SDT.9 b/share/man/man9/SDT.9 index c65b82402ba6..9016b4261a39 100644 --- a/share/man/man9/SDT.9 +++ b/share/man/man9/SDT.9 @@ -24,7 +24,7 @@ .\" .\" $FreeBSD$ .\" -.Dd September 18, 2014 +.Dd March 8, 2015 .Dt SDT 9 .Os .Sh NAME @@ -196,13 +196,13 @@ They are meant to be added to executable code and can be used to instrument the code in which they are called. 
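As a brief illustration of the preceding paragraph, the sketch below declares a provider, defines a probe using the same four-part naming scheme, and fires it from the code being instrumented. The function and variable names are assumptions made for the example, and the provider is assumed to be defined elsewhere with SDT_PROVIDER_DEFINE(); the probe definition itself mirrors the one shown in the EXAMPLES section that follows.

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/sdt.h>

struct icmp;			/* only a pointer is passed to the probe */

SDT_PROVIDER_DECLARE(icmp);
SDT_PROBE_DEFINE1(icmp, , , receive__unreachable, "struct icmp *");

/*
 * Hypothetical call site.  The SDT_PROBE1() invocation is a cheap
 * no-op unless the probe has been enabled by dtrace(1).
 */
static void
icmp_handle_unreachable(struct icmp *icp)
{

	SDT_PROBE1(icmp, , , receive__unreachable, icp);
}

Once the containing code is loaded, the probe is visible to dtrace(1) as icmp:::receive-unreachable, the double underscore in the name having been converted to a hyphen.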
.Sh EXAMPLES The following probe definition will create a DTrace probe called -.Ql icmp::unreach:pkt-receive , +.Ql icmp:::receive-unreachable , which would hypothetically be triggered when the kernel receives an ICMP packet of type Destination Unreachable: .Bd -literal -offset indent SDT_PROVIDER_DECLARE(icmp); -SDT_PROBE_DEFINE1(icmp, , unreach, pkt__receive, +SDT_PROBE_DEFINE1(icmp, , , receive__unreachable, "struct icmp *"); .Ed @@ -286,10 +286,10 @@ This manual page was written by .Sh BUGS The .Nm -macros allow the module name of a probe to be specified as part of a probe -definition. -However, the DTrace framework uses the module name of probes to determine -which probes should be destroyed when a kernel module is unloaded, so the module +macros allow the module and function names of a probe to be specified as part of +a probe definition. +The DTrace framework uses the module name of probes to determine which probes +should be destroyed when a kernel module is unloaded, so the module name of a probe should match the name of the module in which its defined. .Nm will set the module name properly if it is left unspecified in the probe diff --git a/share/man/man9/VOP_VPTOCNP.9 b/share/man/man9/VOP_VPTOCNP.9 index 740426018109..3e680ad12413 100644 --- a/share/man/man9/VOP_VPTOCNP.9 +++ b/share/man/man9/VOP_VPTOCNP.9 @@ -28,7 +28,7 @@ .\" .\" $FreeBSD$ .\" -.Dd November 19, 2011 +.Dd March 8, 2015 .Dt VOP_VPTOCNP 9 .Os .Sh NAME @@ -36,9 +36,10 @@ .Nd translate a vnode to its component name .Sh SYNOPSIS .In sys/param.h +.In sys/ucred.h .In sys/vnode.h .Ft int -.Fn VOP_VPTOCNP "struct vnode *vp" "struct vnode **dvp" "char *buf" "int *buflen" +.Fn VOP_VPTOCNP "struct vnode *vp" "struct vnode **dvp" "struct ucred *cred" "char *buf" "int *buflen" .Sh DESCRIPTION This translates a vnode into its component name, and writes that name to the head of the buffer specified by @@ -49,6 +50,8 @@ The vnode to translate. .It Fa dvp The vnode of the parent directory of .Fa vp . +.It Fa cred +The caller credentials. .It Fa buf The buffer into which to prepend the component name. .It Fa buflen @@ -59,7 +62,8 @@ The default implementation of .Nm scans through .Fa vp Ns 's -parent directory looking for a dirent with a matching file number. If +parent directory looking for a dirent with a matching file number. +If .Fa vp is not a directory, then .Nm diff --git a/share/misc/committers-src.dot b/share/misc/committers-src.dot index 3bce79b3acae..0d7177f78b44 100644 --- a/share/misc/committers-src.dot +++ b/share/misc/committers-src.dot @@ -178,6 +178,7 @@ ian [label="Ian Lepore\nian@FreeBSD.org\n2013/01/07"] iedowse [label="Ian Dowse\niedowse@FreeBSD.org\n2000/12/01"] imp [label="Warner Losh\nimp@FreeBSD.org\n1996/09/20"] ivoras [label="Ivan Voras\nivoras@FreeBSD.org\n2008/06/10"] +jah [label="Jason A. 
Harmening\njah@FreeBSD.org\n2015/03/08"] jamie [label="Jamie Gritton\njamie@FreeBSD.org\n2009/01/28"] jasone [label="Jason Evans\njasone@FreeBSD.org\n1999/03/03"] jceel [label="Jakub Klama\njceel@FreeBSD.org\n2011/09/25"] @@ -296,6 +297,7 @@ tuexen [label="Michael Tuexen\ntuexen@FreeBSD.org\n2009/06/06"] tychon [label="Tycho Nightingale\ntychon@FreeBSD.org\n2014/01/21"] ume [label="Hajimu UMEMOTO\nume@FreeBSD.org\n2000/02/26"] uqs [label="Ulrich Spoerlein\nuqs@FreeBSD.org\n2010/01/28"] +vangyzen [label="Eric van Gyzen\nvangyzen@FreeBSD.org\n2015/03/08"] vanhu [label="Yvan Vanhullebus\nvanhu@FreeBSD.org\n2008/07/21"] versus [label="Konrad Jankowski\nversus@FreeBSD.org\n2008/10/27"] weongyo [label="Weongyo Jeong\nweongyo@FreeBSD.org\n2007/12/21"] @@ -548,6 +550,7 @@ ken -> slm kib -> ae kib -> dchagin kib -> gjb +kib -> jah kib -> jlh kib -> jpaetzel kib -> lulf @@ -560,6 +563,7 @@ kib -> rmh kib -> stas kib -> tijl kib -> trociny +kib -> vangyzen kib -> zont kmacy -> lstewart diff --git a/sys/arm/arm/cpufunc_asm_armv7.S b/sys/arm/arm/cpufunc_asm_armv7.S index dee9a9a87a7b..25f052fd5553 100644 --- a/sys/arm/arm/cpufunc_asm_armv7.S +++ b/sys/arm/arm/cpufunc_asm_armv7.S @@ -247,8 +247,8 @@ ENTRY(armv7_idcache_wbinv_range) add r0, r0, ip subs r1, r1, ip bhi .Larmv7_id_wbinv_next - isb /* instruction synchronization barrier */ dsb /* data synchronization barrier */ + isb /* instruction synchronization barrier */ RET END(armv7_idcache_wbinv_range) @@ -258,8 +258,8 @@ ENTRY_NP(armv7_icache_sync_all) #else mcr CP15_ICIALLU #endif - isb /* instruction synchronization barrier */ dsb /* data synchronization barrier */ + isb /* instruction synchronization barrier */ RET END(armv7_icache_sync_all) @@ -267,13 +267,13 @@ ENTRY_NP(armv7_icache_sync_range) ldr ip, .Larmv7_icache_line_size ldr ip, [ip] .Larmv7_sync_next: - mcr CP15_ICIMVAU(r0) mcr CP15_DCCMVAC(r0) + mcr CP15_ICIMVAU(r0) add r0, r0, ip subs r1, r1, ip bhi .Larmv7_sync_next - isb /* instruction synchronization barrier */ dsb /* data synchronization barrier */ + isb /* instruction synchronization barrier */ RET END(armv7_icache_sync_range) diff --git a/sys/arm/arm/cpuinfo.c b/sys/arm/arm/cpuinfo.c index b0b8a88f0222..46170033a0e6 100644 --- a/sys/arm/arm/cpuinfo.c +++ b/sys/arm/arm/cpuinfo.c @@ -34,7 +34,14 @@ __FBSDID("$FreeBSD$"); #include #include -struct cpuinfo cpuinfo; +struct cpuinfo cpuinfo = +{ + /* Use safe defaults for start */ + .dcache_line_size = 32, + .dcache_line_mask = 31, + .icache_line_size = 32, + .icache_line_mask = 31, +}; /* Read and parse CPU id scheme */ void @@ -122,4 +129,10 @@ cpuinfo_init(void) cpuinfo.generic_timer_ext = (cpuinfo.id_pfr1 >> 16) & 0xF; cpuinfo.virtualization_ext = (cpuinfo.id_pfr1 >> 12) & 0xF; cpuinfo.security_ext = (cpuinfo.id_pfr1 >> 4) & 0xF; + + /* L1 Cache sizes */ + cpuinfo.dcache_line_size = 1 << (CPU_CT_DMINLINE(cpuinfo.ctr ) + 2); + cpuinfo.dcache_line_mask = cpuinfo.dcache_line_size - 1; + cpuinfo.icache_line_size= 1 << (CPU_CT_IMINLINE(cpuinfo.ctr ) + 2); + cpuinfo.icache_line_mask = cpuinfo.icache_line_size - 1; } diff --git a/sys/arm/arm/genassym.c b/sys/arm/arm/genassym.c index 7f949e6c70b2..19c2ed3d3a28 100644 --- a/sys/arm/arm/genassym.c +++ b/sys/arm/arm/genassym.c @@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include @@ -146,3 +147,8 @@ ASSYM(MAXCOMLEN, MAXCOMLEN); ASSYM(MAXCPU, MAXCPU); ASSYM(NIRQ, NIRQ); ASSYM(PCPU_SIZE, sizeof(struct pcpu)); + +ASSYM(DCACHE_LINE_SIZE, offsetof(struct cpuinfo, dcache_line_size)); 
+ASSYM(DCACHE_LINE_MASK, offsetof(struct cpuinfo, dcache_line_mask)); +ASSYM(ICACHE_LINE_SIZE, offsetof(struct cpuinfo, icache_line_size)); +ASSYM(ICACHE_LINE_MASK, offsetof(struct cpuinfo, icache_line_mask)); diff --git a/sys/arm/broadcom/bcm2835/bcm2835_fb.c b/sys/arm/broadcom/bcm2835/bcm2835_fb.c index 88156ed084c4..3270da1a1a4c 100644 --- a/sys/arm/broadcom/bcm2835/bcm2835_fb.c +++ b/sys/arm/broadcom/bcm2835/bcm2835_fb.c @@ -240,7 +240,6 @@ bcm_fb_init(void *arg) } else { device_printf(sc->dev, "Failed to set framebuffer info\n"); - return; } config_intrhook_disestablish(&sc->init_hook); diff --git a/sys/arm/broadcom/bcm2835/bcm2835_fbd.c b/sys/arm/broadcom/bcm2835/bcm2835_fbd.c index ed625bc0f8f8..b3b0dd3686d8 100644 --- a/sys/arm/broadcom/bcm2835/bcm2835_fbd.c +++ b/sys/arm/broadcom/bcm2835/bcm2835_fbd.c @@ -186,17 +186,12 @@ bcm_fb_init(void *arg) fbd = device_add_child(sc->dev, "fbd", device_get_unit(sc->dev)); - if (fbd == NULL) { + if (fbd == NULL) device_printf(sc->dev, "Failed to add fbd child\n"); - return; - } - if (device_probe_and_attach(fbd) != 0) { + else if (device_probe_and_attach(fbd) != 0) device_printf(sc->dev, "Failed to attach fbd device\n"); - return; - } } else { device_printf(sc->dev, "Failed to set framebuffer info\n"); - return; } config_intrhook_disestablish(&sc->init_hook); diff --git a/sys/arm/broadcom/bcm2835/bcm283x_dwc_fdt.c b/sys/arm/broadcom/bcm2835/bcm283x_dwc_fdt.c new file mode 100644 index 000000000000..159820d5f856 --- /dev/null +++ b/sys/arm/broadcom/bcm2835/bcm283x_dwc_fdt.c @@ -0,0 +1,176 @@ +/* + * Copyright 2015 Andrew Turner. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "mbox_if.h" + +static device_probe_t bcm283x_dwc_otg_probe; +static device_attach_t bcm283x_dwc_otg_attach; + +static int +bcm283x_dwc_otg_probe(device_t dev) +{ + + if (!ofw_bus_status_okay(dev)) + return (ENXIO); + + if (!ofw_bus_is_compatible(dev, "broadcom,bcm2835-usb")) + return (ENXIO); + + device_set_desc(dev, "DWC OTG 2.0 integrated USB controller (bcm283x)"); + + return (BUS_PROBE_VENDOR); +} + +static void +bcm283x_dwc_otg_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) +{ + bus_addr_t *addr; + + if (err) + return; + addr = (bus_addr_t *)arg; + *addr = PHYS_TO_VCBUS(segs[0].ds_addr); +} + +static int +bcm283x_dwc_otg_attach(device_t dev) +{ + struct msg_set_power_state *msg; + bus_dma_tag_t msg_tag; + bus_dmamap_t msg_map; + bus_addr_t msg_phys; + void *msg_buf; + uint32_t reg; + device_t mbox; + int err; + + /* get mbox device */ + mbox = devclass_get_device(devclass_find("mbox"), 0); + if (mbox == NULL) { + device_printf(dev, "can't find mbox\n"); + return (ENXIO); + } + + err = bus_dma_tag_create(bus_get_dma_tag(dev), 16, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, + sizeof(struct msg_set_power_state), 1, + sizeof(struct msg_set_power_state), 0, + NULL, NULL, &msg_tag); + if (err != 0) { + device_printf(dev, "can't create DMA tag\n"); + return (ENXIO); + } + + err = bus_dmamem_alloc(msg_tag, (void **)&msg_buf, 0, &msg_map); + if (err != 0) { + bus_dma_tag_destroy(msg_tag); + device_printf(dev, "can't allocate dmamem\n"); + return (ENXIO); + } + + err = bus_dmamap_load(msg_tag, msg_map, msg_buf, + sizeof(struct msg_set_power_state), bcm283x_dwc_otg_cb, + &msg_phys, 0); + if (err != 0) { + bus_dmamem_free(msg_tag, msg_buf, msg_map); + bus_dma_tag_destroy(msg_tag); + device_printf(dev, "can't load DMA map\n"); + return (ENXIO); + } + + msg = msg_buf; + + memset(msg, 0, sizeof(*msg)); + msg->hdr.buf_size = sizeof(*msg); + msg->hdr.code = BCM2835_MBOX_CODE_REQ; + msg->tag_hdr.tag = BCM2835_MBOX_TAG_SET_POWER_STATE; + msg->tag_hdr.val_buf_size = sizeof(msg->body); + msg->tag_hdr.val_len = sizeof(msg->body.req); + msg->body.req.device_id = BCM2835_MBOX_POWER_ID_USB_HCD; + msg->body.req.state = BCM2835_MBOX_POWER_ON | BCM2835_MBOX_POWER_WAIT; + msg->end_tag = 0; + + bus_dmamap_sync(msg_tag, msg_map, + BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); + + MBOX_WRITE(mbox, BCM2835_MBOX_CHAN_PROP, (uint32_t)msg_phys); + MBOX_READ(mbox, BCM2835_MBOX_CHAN_PROP, ®); + + bus_dmamap_unload(msg_tag, msg_map); + bus_dmamem_free(msg_tag, msg_buf, msg_map); + bus_dma_tag_destroy(msg_tag); + + return (dwc_otg_attach(dev)); +} + +static device_method_t bcm283x_dwc_otg_methods[] = { + /* bus interface */ + DEVMETHOD(device_probe, bcm283x_dwc_otg_probe), + DEVMETHOD(device_attach, bcm283x_dwc_otg_attach), + + DEVMETHOD_END +}; + +static devclass_t bcm283x_dwc_otg_devclass; + +DEFINE_CLASS_1(bcm283x_dwcotg, bcm283x_dwc_otg_driver, bcm283x_dwc_otg_methods, + sizeof(struct dwc_otg_fdt_softc), dwc_otg_driver); +DRIVER_MODULE(bcm283x_dwcotg, simplebus, bcm283x_dwc_otg_driver, + bcm283x_dwc_otg_devclass, 0, 0); +MODULE_DEPEND(bcm283x_dwcotg, usb, 1, 1, 1); diff --git a/sys/arm/broadcom/bcm2835/files.bcm2835 b/sys/arm/broadcom/bcm2835/files.bcm2835 index 89d6584fa321..dd0c66053dc9 100644 --- 
a/sys/arm/broadcom/bcm2835/files.bcm2835 +++ b/sys/arm/broadcom/bcm2835/files.bcm2835 @@ -15,6 +15,8 @@ arm/broadcom/bcm2835/bcm2835_spi.c optional bcm2835_spi arm/broadcom/bcm2835/bcm2835_systimer.c standard arm/broadcom/bcm2835/bcm2835_wdog.c standard +arm/broadcom/bcm2835/bcm283x_dwc_fdt.c optional dwcotg fdt + arm/arm/bus_space_base.c standard arm/arm/bus_space_generic.c standard arm/arm/bus_space_asm_generic.S standard diff --git a/sys/arm/conf/ARMADAXP b/sys/arm/conf/ARMADAXP index 32c610c061c4..f611e09da882 100644 --- a/sys/arm/conf/ARMADAXP +++ b/sys/arm/conf/ARMADAXP @@ -22,7 +22,6 @@ ident MV-88F78XX0 include "../mv/armadaxp/std.mv78x60" options SOC_MV_ARMADAXP -makeoptions MODULES_OVERRIDE="" makeoptions WERROR="-Werror" diff --git a/sys/arm/conf/BEAGLEBONE b/sys/arm/conf/BEAGLEBONE index 65671bf783d8..8234533b87ef 100644 --- a/sys/arm/conf/BEAGLEBONE +++ b/sys/arm/conf/BEAGLEBONE @@ -25,13 +25,13 @@ ident BEAGLEBONE include "../ti/am335x/std.am335x" -makeoptions WITHOUT_MODULES="ahc" +makeoptions MODULES_EXTRA="dtb/am335x" # DTrace support options KDTRACE_HOOKS # Kernel DTrace hooks options DDB_CTF # all architectures - kernel ELF linker loads CTF data makeoptions WITH_CTF=1 -makeoptions MODULES_OVERRIDE="opensolaris dtrace dtrace/lockstat dtrace/profile dtrace/fbt" +makeoptions MODULES_EXTRA+="opensolaris dtrace dtrace/lockstat dtrace/profile dtrace/fbt" options HZ=100 options SCHED_4BSD # 4BSD scheduler @@ -165,5 +165,3 @@ device usfs # Flattened Device Tree options FDT # Configure using FDT/DTB data -options FDT_DTB_STATIC -makeoptions FDT_DTS_FILE=beaglebone.dts diff --git a/sys/arm/conf/CUBIEBOARD b/sys/arm/conf/CUBIEBOARD index 2ca501584d4e..f137b5ac06cc 100644 --- a/sys/arm/conf/CUBIEBOARD +++ b/sys/arm/conf/CUBIEBOARD @@ -23,9 +23,6 @@ ident CUBIEBOARD include "../allwinner/std.a10" -makeoptions MODULES_OVERRIDE="" -makeoptions WITHOUT_MODULES="ahc" - options HZ=100 options SCHED_4BSD # 4BSD scheduler options PREEMPTION # Enable kernel thread preemption diff --git a/sys/arm/conf/CUBIEBOARD2 b/sys/arm/conf/CUBIEBOARD2 index 2ccdb59dcfb8..0aefc5fcad7a 100644 --- a/sys/arm/conf/CUBIEBOARD2 +++ b/sys/arm/conf/CUBIEBOARD2 @@ -23,9 +23,6 @@ ident CUBIEBOARD2 include "../allwinner/a20/std.a20" -makeoptions MODULES_OVERRIDE="" -makeoptions WITHOUT_MODULES="ahc" - options HZ=100 options SCHED_ULE # ULE scheduler options PREEMPTION # Enable kernel thread preemption diff --git a/sys/arm/conf/DB-78XXX b/sys/arm/conf/DB-78XXX index 2cfd5da0611e..4fee206e8ff5 100644 --- a/sys/arm/conf/DB-78XXX +++ b/sys/arm/conf/DB-78XXX @@ -8,7 +8,6 @@ ident DB-88F78XX include "../mv/discovery/std.db78xxx" options SOC_MV_DISCOVERY -makeoptions MODULES_OVERRIDE="" #makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WERROR="-Werror" diff --git a/sys/arm/conf/DB-88F5XXX b/sys/arm/conf/DB-88F5XXX index c2b4b5889ea4..6ccb360bd4d9 100644 --- a/sys/arm/conf/DB-88F5XXX +++ b/sys/arm/conf/DB-88F5XXX @@ -8,7 +8,6 @@ ident DB-88F5XXX include "../mv/orion/std.db88f5xxx" options SOC_MV_ORION -makeoptions MODULES_OVERRIDE="" #makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WERROR="-Werror" diff --git a/sys/arm/conf/DB-88F6XXX b/sys/arm/conf/DB-88F6XXX index c39e4a1e369e..c59f1c1130da 100644 --- a/sys/arm/conf/DB-88F6XXX +++ b/sys/arm/conf/DB-88F6XXX @@ -8,7 +8,6 @@ ident DB-88F6XXX include "../mv/kirkwood/std.db88f6xxx" options SOC_MV_KIRKWOOD -makeoptions MODULES_OVERRIDE="" #makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions 
WERROR="-Werror" diff --git a/sys/arm/conf/DOCKSTAR b/sys/arm/conf/DOCKSTAR index ad1e74d6eafe..aea158ca8911 100644 --- a/sys/arm/conf/DOCKSTAR +++ b/sys/arm/conf/DOCKSTAR @@ -17,6 +17,7 @@ # # $FreeBSD$ # +#NO_UNIVERSE ident DOCKSTAR @@ -24,8 +25,6 @@ include "../mv/kirkwood/std.db88f6xxx" makeoptions FDT_DTS_FILE=dockstar.dts -makeoptions MODULES_OVERRIDE="" - options SOC_MV_KIRKWOOD options SCHED_4BSD # 4BSD scheduler diff --git a/sys/arm/conf/DREAMPLUG-1001 b/sys/arm/conf/DREAMPLUG-1001 index 135cba0cd6f6..0448d4f95646 100644 --- a/sys/arm/conf/DREAMPLUG-1001 +++ b/sys/arm/conf/DREAMPLUG-1001 @@ -20,6 +20,7 @@ # # $FreeBSD$ # +#NO_UNIVERSE ident DREAMPLUG-1001 @@ -27,8 +28,6 @@ include "../mv/kirkwood/std.db88f6xxx" makeoptions FDT_DTS_FILE=dreamplug-1001.dts -makeoptions MODULES_OVERRIDE="" - options SOC_MV_KIRKWOOD options SCHED_4BSD # 4BSD scheduler diff --git a/sys/arm/conf/EXYNOS5.common b/sys/arm/conf/EXYNOS5.common index d8fe5d6b68ff..cd6e76fb2b39 100644 --- a/sys/arm/conf/EXYNOS5.common +++ b/sys/arm/conf/EXYNOS5.common @@ -18,9 +18,6 @@ # # $FreeBSD$ -makeoptions MODULES_OVERRIDE="" -makeoptions WITHOUT_MODULES="ahc" - makeoptions WERROR="-Werror" options HZ=100 diff --git a/sys/arm/conf/PANDABOARD b/sys/arm/conf/PANDABOARD index 551de735cb24..e0b5d149d318 100644 --- a/sys/arm/conf/PANDABOARD +++ b/sys/arm/conf/PANDABOARD @@ -29,9 +29,6 @@ hints "PANDABOARD.hints" include "../ti/omap4/pandaboard/std.pandaboard" -makeoptions MODULES_OVERRIDE="" -makeoptions WITHOUT_MODULES="ahc" - options HZ=100 options SCHED_ULE # ULE scheduler options PREEMPTION # Enable kernel thread preemption diff --git a/sys/arm/conf/SHEEVAPLUG b/sys/arm/conf/SHEEVAPLUG index 10a46daa6535..9d8ea269892e 100644 --- a/sys/arm/conf/SHEEVAPLUG +++ b/sys/arm/conf/SHEEVAPLUG @@ -3,12 +3,12 @@ # # $FreeBSD$ # +#NO_UNIVERSE ident SHEEVAPLUG include "../mv/kirkwood/std.db88f6xxx" options SOC_MV_KIRKWOOD -makeoptions MODULES_OVERRIDE="" #makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WERROR="-Werror" diff --git a/sys/arm/conf/TS7800 b/sys/arm/conf/TS7800 index 484bc554bd60..87a40d588f8f 100644 --- a/sys/arm/conf/TS7800 +++ b/sys/arm/conf/TS7800 @@ -8,7 +8,6 @@ ident TS7800 include "../mv/orion/std.ts7800" options SOC_MV_ORION -makeoptions MODULES_OVERRIDE="" #makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WERROR="-Werror" diff --git a/sys/arm/conf/VYBRID b/sys/arm/conf/VYBRID index 6ac541e55161..eda4815d76ec 100644 --- a/sys/arm/conf/VYBRID +++ b/sys/arm/conf/VYBRID @@ -21,9 +21,6 @@ ident VYBRID include "../freescale/vybrid/std.vybrid" -makeoptions MODULES_OVERRIDE="" -makeoptions WITHOUT_MODULES="ahc" - makeoptions WERROR="-Werror" options HZ=100 diff --git a/sys/arm/conf/ZEDBOARD b/sys/arm/conf/ZEDBOARD index 355999178ecc..94148f4eb40f 100644 --- a/sys/arm/conf/ZEDBOARD +++ b/sys/arm/conf/ZEDBOARD @@ -23,9 +23,6 @@ ident ZEDBOARD include "../xilinx/zedboard/std.zedboard" -makeoptions MODULES_OVERRIDE="" -makeoptions WITHOUT_MODULES="ahc" - options SCHED_ULE # ULE scheduler options PREEMPTION # Enable kernel thread preemption options INET # InterNETworking diff --git a/sys/arm/include/cpu-v6.h b/sys/arm/include/cpu-v6.h index 1eafb62da7c3..a7f0b53e9a7b 100644 --- a/sys/arm/include/cpu-v6.h +++ b/sys/arm/include/cpu-v6.h @@ -37,6 +37,9 @@ #define CPU_ASID_KERNEL 0 +vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t); +vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t); + /* * Macros to generate CP15 (system control processor) read/write functions. 
*/ @@ -302,7 +305,7 @@ icache_sync(vm_offset_t sva, vm_size_t size) vm_offset_t eva = sva + size; dsb(); - for (va = sva; va < eva; va += arm_dcache_align) { + for (va = sva; va < eva; va += cpuinfo.dcache_line_size) { #if __ARM_ARCH >= 7 && defined SMP _CP15_DCCMVAU(va); #else @@ -332,6 +335,19 @@ icache_inv_all(void) isb(); } +/* Invalidate branch predictor buffer */ +static __inline void +bpb_inv_all(void) +{ +#if __ARM_ARCH >= 7 && defined SMP + _CP15_BPIALLIS(); +#else + _CP15_BPIALL(); +#endif + dsb(); + isb(); +} + /* Write back D-cache to PoU */ static __inline void dcache_wb_pou(vm_offset_t sva, vm_size_t size) @@ -340,7 +356,7 @@ dcache_wb_pou(vm_offset_t sva, vm_size_t size) vm_offset_t eva = sva + size; dsb(); - for (va = sva; va < eva; va += arm_dcache_align) { + for (va = sva; va < eva; va += cpuinfo.dcache_line_size) { #if __ARM_ARCH >= 7 && defined SMP _CP15_DCCMVAU(va); #else @@ -358,7 +374,7 @@ dcache_inv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size) vm_offset_t eva = sva + size; /* invalidate L1 first */ - for (va = sva; va < eva; va += arm_dcache_align) { + for (va = sva; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); @@ -368,7 +384,7 @@ dcache_inv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size) dsb(); /* then L1 again */ - for (va = sva; va < eva; va += arm_dcache_align) { + for (va = sva; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); @@ -383,7 +399,7 @@ dcache_wb_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size) dsb(); - for (va = sva; va < eva; va += arm_dcache_align) { + for (va = sva; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCCMVAC(va); } dsb(); @@ -401,7 +417,7 @@ dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size) dsb(); /* write back L1 first */ - for (va = sva; va < eva; va += arm_dcache_align) { + for (va = sva; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCCMVAC(va); } dsb(); @@ -410,7 +426,7 @@ dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size) cpu_l2cache_wbinv_range(pa, size); /* then invalidate L1 */ - for (va = sva; va < eva; va += arm_dcache_align) { + for (va = sva; va < eva; va += cpuinfo.dcache_line_size) { _CP15_DCIMVAC(va); } dsb(); diff --git a/sys/arm/include/cpuinfo.h b/sys/arm/include/cpuinfo.h index f347294e5683..ce0d8e6594b6 100644 --- a/sys/arm/include/cpuinfo.h +++ b/sys/arm/include/cpuinfo.h @@ -82,6 +82,12 @@ struct cpuinfo { int generic_timer_ext; int virtualization_ext; int security_ext; + + /* L1 cache info */ + int dcache_line_size; + int dcache_line_mask; + int icache_line_size; + int icache_line_mask; }; extern struct cpuinfo cpuinfo; diff --git a/sys/arm/ti/aintc.c b/sys/arm/ti/aintc.c index 675a5f35cd83..d54594c3c52e 100644 --- a/sys/arm/ti/aintc.c +++ b/sys/arm/ti/aintc.c @@ -78,6 +78,14 @@ static struct ti_aintc_softc *ti_aintc_sc = NULL; bus_space_write_4((_sc)->aintc_bst, (_sc)->aintc_bsh, (reg), (val)) +static void +aintc_post_filter(void *arg) +{ + + arm_irq_memory_barrier(0); + aintc_write_4(ti_aintc_sc, INTC_CONTROL, 1); /* EOI */ +} + static int ti_aintc_probe(device_t dev) { @@ -124,6 +132,8 @@ ti_aintc_attach(device_t dev) /*Set Priority Threshold */ aintc_write_4(sc, INTC_THRESHOLD, 0xFF); + arm_post_filter = aintc_post_filter; + return (0); } @@ -149,12 +159,6 @@ arm_get_next_irq(int last_irq) struct ti_aintc_softc *sc = ti_aintc_sc; uint32_t active_irq; - if (last_irq != -1) { - aintc_write_4(sc, INTC_ISR_CLEAR(last_irq >> 5), - 1UL << (last_irq & 0x1F)); - aintc_write_4(sc, INTC_CONTROL, 1); - } - /* Get the 
next active interrupt */ active_irq = aintc_read_4(sc, INTC_SIR_IRQ); @@ -178,6 +182,7 @@ arm_mask_irq(uintptr_t nb) struct ti_aintc_softc *sc = ti_aintc_sc; aintc_write_4(sc, INTC_MIR_SET(nb >> 5), (1UL << (nb & 0x1F))); + aintc_write_4(sc, INTC_CONTROL, 1); /* EOI */ } void diff --git a/sys/arm/ti/am335x/am335x_dmtimer.c b/sys/arm/ti/am335x/am335x_dmtimer.c index 240a7a77fa5b..09acb3d3ed02 100644 --- a/sys/arm/ti/am335x/am335x_dmtimer.c +++ b/sys/arm/ti/am335x/am335x_dmtimer.c @@ -200,7 +200,7 @@ am335x_dmtimer_et_write_4(struct am335x_dmtimer_softc *sc, uint32_t reg, */ #ifdef PPS_SYNC -#define PPS_CDEV_NAME "pps" +#define PPS_CDEV_NAME "dmtpps" static void am335x_dmtimer_set_capture_mode(struct am335x_dmtimer_softc *sc, bool force_off) diff --git a/sys/arm/ti/omap4/omap4_prcm_clks.c b/sys/arm/ti/omap4/omap4_prcm_clks.c index 589f15696e2f..5a8a327ed129 100644 --- a/sys/arm/ti/omap4/omap4_prcm_clks.c +++ b/sys/arm/ti/omap4/omap4_prcm_clks.c @@ -1423,5 +1423,6 @@ static driver_t omap4_prcm_driver = { static devclass_t omap4_prcm_devclass; -DRIVER_MODULE(omap4_prcm, simplebus, omap4_prcm_driver, omap4_prcm_devclass, 0, 0); +EARLY_DRIVER_MODULE(omap4_prcm, simplebus, omap4_prcm_driver, + omap4_prcm_devclass, 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_EARLY); MODULE_VERSION(omap4_prcm, 1); diff --git a/sys/boot/libstand32/Makefile b/sys/boot/libstand32/Makefile index 7e25fa69a711..a08a8e938739 100644 --- a/sys/boot/libstand32/Makefile +++ b/sys/boot/libstand32/Makefile @@ -83,7 +83,6 @@ SRCS+= aeabi_memcmp.S aeabi_memcpy.S aeabi_memmove.S aeabi_memset.S .if ${MACHINE_CPUARCH} == "powerpc" .PATH: ${LIBC}/quad SRCS+= ashldi3.c ashrdi3.c -.PATH: ${LIBC}/powerpc/gen SRCS+= syncicache.c .endif @@ -94,8 +93,6 @@ SRCS+= uuid_equal.c uuid_is_nil.c # _setjmp/_longjmp .if ${MACHINE_CPUARCH} == "amd64" .PATH: ${S}/i386 -.elif ${MACHINE_ARCH} == "powerpc64" -.PATH: ${S}/powerpc .else .PATH: ${S}/${MACHINE_CPUARCH} .endif diff --git a/sys/boot/ofw/common/main.c b/sys/boot/ofw/common/main.c index eb8ae0c08c81..0913f0c9c2cf 100644 --- a/sys/boot/ofw/common/main.c +++ b/sys/boot/ofw/common/main.c @@ -45,7 +45,7 @@ u_int32_t acells, scells; static char bootargs[128]; -#define HEAP_SIZE 0x80000 +#define HEAP_SIZE 0x100000 #define OF_puts(fd, text) OF_write(fd, text, strlen(text)) diff --git a/sys/boot/ofw/libofw/elf_freebsd.c b/sys/boot/ofw/libofw/elf_freebsd.c index 50fd8a66c58a..80ece7eeba46 100644 --- a/sys/boot/ofw/libofw/elf_freebsd.c +++ b/sys/boot/ofw/libofw/elf_freebsd.c @@ -67,7 +67,7 @@ int __elfN(ofw_exec)(struct preloaded_file *fp) { struct file_metadata *fmp; - vm_offset_t mdp; + vm_offset_t mdp, dtbp; Elf_Ehdr *e; int error; intptr_t entry; @@ -78,15 +78,21 @@ __elfN(ofw_exec)(struct preloaded_file *fp) e = (Elf_Ehdr *)&fmp->md_data; entry = e->e_entry; - if ((error = md_load(fp->f_args, &mdp)) != 0) + if ((error = md_load(fp->f_args, &mdp, &dtbp)) != 0) return (error); printf("Kernel entry at 0x%lx ...\n", e->e_entry); dev_cleanup(); ofw_release_heap(); - OF_chain((void *)reloc, end - (char *)reloc, (void *)entry, - (void *)mdp, sizeof(mdp)); + if (dtbp != 0) { + OF_quiesce(); + ((int (*)(u_long, u_long, u_long, void *, u_long))entry)(dtbp, 0, 0, + mdp, sizeof(mdp)); + } else { + OF_chain((void *)reloc, end - (char *)reloc, (void *)entry, + (void *)mdp, sizeof(mdp)); + } panic("exec returned"); } diff --git a/sys/boot/ofw/libofw/openfirm.c b/sys/boot/ofw/libofw/openfirm.c index d4eb9d6abbbb..a8626cc403fe 100644 --- a/sys/boot/ofw/libofw/openfirm.c +++ b/sys/boot/ofw/libofw/openfirm.c @@ -729,6 
+729,20 @@ OF_exit() ; } +void +OF_quiesce() +{ + static struct { + cell_t name; + cell_t nargs; + cell_t nreturns; + } args = { + (cell_t)"quiesce", + }; + + openfirmware(&args); +} + /* Free bytes starting at , then call with . */ #if 0 void diff --git a/sys/boot/ofw/libofw/openfirm.h b/sys/boot/ofw/libofw/openfirm.h index c70c2e9314c8..ecb1f328347d 100644 --- a/sys/boot/ofw/libofw/openfirm.h +++ b/sys/boot/ofw/libofw/openfirm.h @@ -82,6 +82,7 @@ void OF_init(int (*openfirm)(void *)); /* Generic functions */ int OF_test(char *); +void OF_quiesce(); /* Disable firmware */ /* Device tree functions */ phandle_t OF_peer(phandle_t); diff --git a/sys/boot/ofw/libofw/ppc64_elf_freebsd.c b/sys/boot/ofw/libofw/ppc64_elf_freebsd.c index 84ea40611844..f85a77acbf8b 100644 --- a/sys/boot/ofw/libofw/ppc64_elf_freebsd.c +++ b/sys/boot/ofw/libofw/ppc64_elf_freebsd.c @@ -67,7 +67,7 @@ int ppc64_ofw_elf_exec(struct preloaded_file *fp) { struct file_metadata *fmp; - vm_offset_t mdp; + vm_offset_t mdp, dtbp; Elf_Ehdr *e; int error; intptr_t entry; @@ -80,7 +80,7 @@ ppc64_ofw_elf_exec(struct preloaded_file *fp) /* Handle function descriptor */ entry = *(uint64_t *)e->e_entry; - if ((error = md_load64(fp->f_args, &mdp)) != 0) + if ((error = md_load64(fp->f_args, &mdp, &dtbp)) != 0) return (error); printf("Kernel entry at 0x%lx ...\n", entry); @@ -88,8 +88,14 @@ ppc64_ofw_elf_exec(struct preloaded_file *fp) dev_cleanup(); ofw_release_heap(); - OF_chain((void *)reloc, end - (char *)reloc, (void *)entry, - (void *)mdp, sizeof(mdp)); + if (dtbp != 0) { + OF_quiesce(); + ((int (*)(u_long, u_long, u_long, void *, u_long))entry)(dtbp, 0, 0, + mdp, sizeof(mdp)); + } else { + OF_chain((void *)reloc, end - (char *)reloc, (void *)entry, + (void *)mdp, sizeof(mdp)); + } panic("exec returned"); } diff --git a/sys/boot/powerpc/ofw/Makefile b/sys/boot/powerpc/ofw/Makefile index c0ed14514761..60705d043b2d 100644 --- a/sys/boot/powerpc/ofw/Makefile +++ b/sys/boot/powerpc/ofw/Makefile @@ -21,6 +21,7 @@ LOADER_NFS_SUPPORT?= yes LOADER_TFTP_SUPPORT?= no LOADER_GZIP_SUPPORT?= yes LOADER_BZIP2_SUPPORT?= no +LOADER_FDT_SUPPORT?= yes .if ${LOADER_DISK_SUPPORT} == "yes" CFLAGS+= -DLOADER_DISK_SUPPORT @@ -49,6 +50,14 @@ CFLAGS+= -DLOADER_NFS_SUPPORT .if ${LOADER_TFTP_SUPPORT} == "yes" CFLAGS+= -DLOADER_TFTP_SUPPORT .endif +.if ${LOADER_FDT_SUPPORT} == "yes" +SRCS+= ofwfdt.c +CFLAGS+= -I${.CURDIR}/../../fdt +CFLAGS+= -I${.OBJDIR}/../../fdt +CFLAGS+= -I${.CURDIR}/../../../contrib/libfdt +CFLAGS+= -DLOADER_FDT_SUPPORT +LIBFDT= ${.OBJDIR}/../../fdt/libfdt.a +.endif .if ${MK_FORTH} != "no" # Enable BootForth @@ -89,13 +98,13 @@ CFLAGS+= -I${.CURDIR}/../../ofw/libofw LIBSTAND= ${.OBJDIR}/../../libstand32/libstand.a CFLAGS+= -I${.CURDIR}/../../../../lib/libstand/ -DPADD= ${LIBFICL} ${LIBOFW} ${LIBSTAND} -LDADD= ${LIBFICL} ${LIBOFW} ${LIBSTAND} +DPADD= ${LIBFICL} ${LIBOFW} ${LIBFDT} ${LIBSTAND} +LDADD= ${LIBFICL} ${LIBOFW} ${LIBFDT} ${LIBSTAND} vers.c: ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version sh ${.CURDIR}/../../common/newvers.sh ${.CURDIR}/version ${NEWVERSWHAT} -loader.help: help.common help.ofw +loader.help: help.common help.ofw ${.CURDIR}/../../fdt/help.fdt cat ${.ALLSRC} | \ awk -f ${.CURDIR}/../../common/merge_help.awk > ${.TARGET} diff --git a/sys/boot/powerpc/ofw/metadata.c b/sys/boot/powerpc/ofw/metadata.c index 7db4030c7d1b..25d51f8251ce 100644 --- a/sys/boot/powerpc/ofw/metadata.c +++ b/sys/boot/powerpc/ofw/metadata.c @@ -34,11 +34,11 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include 
"bootstrap.h" -#include "libofw.h" int md_getboothowto(char *kargs) @@ -243,7 +243,7 @@ md_copymodules(vm_offset_t addr, int kern64) * - Module metadata are formatted and placed in kernel space. */ int -md_load_dual(char *args, vm_offset_t *modulep, int kern64) +md_load_dual(char *args, vm_offset_t *modulep, vm_offset_t *dtb, int kern64) { struct preloaded_file *kfp; struct preloaded_file *xp; @@ -251,6 +251,7 @@ md_load_dual(char *args, vm_offset_t *modulep, int kern64) vm_offset_t kernend; vm_offset_t addr; vm_offset_t envp; + vm_offset_t fdtp; vm_offset_t size; uint64_t scratch64; char *rootdevname; @@ -286,6 +287,14 @@ md_load_dual(char *args, vm_offset_t *modulep, int kern64) /* pad to a page boundary */ addr = roundup(addr, PAGE_SIZE); + /* Copy out FDT */ + *dtb = fdtp = 0; + if (getenv("usefdt") != NULL) { + size = fdt_copy(addr); + *dtb = fdtp = addr; + addr = roundup(addr + size, PAGE_SIZE); + } + kernend = 0; kfp = file_findfile(NULL, kern64 ? "elf64 kernel" : "elf32 kernel"); if (kfp == NULL) @@ -296,10 +305,16 @@ md_load_dual(char *args, vm_offset_t *modulep, int kern64) if (kern64) { scratch64 = envp; file_addmetadata(kfp, MODINFOMD_ENVP, sizeof scratch64, &scratch64); + if (fdtp != 0) { + scratch64 = fdtp; + file_addmetadata(kfp, MODINFOMD_DTBP, sizeof scratch64, &scratch64); + } scratch64 = kernend; file_addmetadata(kfp, MODINFOMD_KERNEND, sizeof scratch64, &scratch64); } else { file_addmetadata(kfp, MODINFOMD_ENVP, sizeof envp, &envp); + if (fdtp != 0) + file_addmetadata(kfp, MODINFOMD_DTBP, sizeof fdtp, &fdtp); file_addmetadata(kfp, MODINFOMD_KERNEND, sizeof kernend, &kernend); } @@ -321,14 +336,14 @@ md_load_dual(char *args, vm_offset_t *modulep, int kern64) } int -md_load(char *args, vm_offset_t *modulep) +md_load(char *args, vm_offset_t *modulep, vm_offset_t *dtb) { - return (md_load_dual(args, modulep, 0)); + return (md_load_dual(args, modulep, dtb, 0)); } int -md_load64(char *args, vm_offset_t *modulep) +md_load64(char *args, vm_offset_t *modulep, vm_offset_t *dtb) { - return (md_load_dual(args, modulep, 1)); + return (md_load_dual(args, modulep, dtb, 1)); } diff --git a/sys/boot/powerpc/ofw/ofwfdt.c b/sys/boot/powerpc/ofw/ofwfdt.c new file mode 100644 index 000000000000..71a5f059d78b --- /dev/null +++ b/sys/boot/powerpc/ofw/ofwfdt.c @@ -0,0 +1,202 @@ +/*- + * Copyright (C) 2014-2015 Nathan Whitehorn + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include "bootstrap.h" + +static int +OF_hasprop(phandle_t node, const char *prop) +{ + return (OF_getproplen(node, prop) > 0); +} + +static void +add_node_to_fdt(void *buffer, phandle_t node, int fdt_offset) +{ + int i, child_offset, error; + char name[2048], *lastprop, *subname; + void *propbuf; + size_t proplen; + + lastprop = NULL; + while (OF_nextprop(node, lastprop, name) > 0) { + proplen = OF_getproplen(node, name); + propbuf = malloc(proplen); + OF_getprop(node, name, propbuf, proplen); + error = fdt_setprop(buffer, fdt_offset, name, propbuf, proplen); + free(propbuf); + lastprop = name; + if (error) + printf("Error %d adding property %s to " + "node %d\n", error, name, fdt_offset); + } + + if (!OF_hasprop(node, "phandle") && !OF_hasprop(node, "linux,phandle") + && !OF_hasprop(node, "ibm,phandle")) + fdt_setprop(buffer, fdt_offset, "phandle", &node, sizeof(node)); + + for (node = OF_child(node); node > 0; node = OF_peer(node)) { + OF_package_to_path(node, name, sizeof(name)); + subname = strrchr(name, '/'); + subname++; + child_offset = fdt_add_subnode(buffer, fdt_offset, subname); + if (child_offset < 0) { + printf("Error %d adding node %s (%s), skipping\n", + child_offset, name, subname); + continue; + } + + add_node_to_fdt(buffer, node, child_offset); + } +} + +static void +ofwfdt_fixups(void *fdtp) +{ + int offset, len, i; + phandle_t node; + ihandle_t rtas; + const void *prop; + + /* + * Instantiate and add reservations for RTAS state if present + */ + + offset = fdt_path_offset(fdtp, "/rtas"); + if (offset > 0) { + uint32_t base; + void *rtasmem; + char path[255]; + + node = OF_finddevice("/rtas"); + OF_package_to_path(node, path, sizeof(path)); + OF_getprop(node, "rtas-size", &len, sizeof(len)); + + /* Allocate memory */ + rtasmem = OF_claim(0, len, 4096); + + /* Instantiate RTAS */ + rtas = OF_open(path); + base = 0; + OF_call_method("instantiate-rtas", rtas, 1, 1, (cell_t)rtas, + &base); + + /* Store info to FDT using Linux convention */ + base = cpu_to_fdt32(base); + fdt_setprop(fdtp, offset, "linux,rtas-entry", &base, + sizeof(base)); + base = cpu_to_fdt32((uint32_t)rtasmem); + offset = fdt_path_offset(fdtp, "/rtas"); + fdt_setprop(fdtp, offset, "linux,rtas-base", &base, + sizeof(base)); + + /* Mark RTAS private data area reserved */ + fdt_add_mem_rsv(fdtp, base, len); + } else { + /* + * Remove /memory/available properties, which reflect long-gone OF + * state. Note that this doesn't work if we need RTAS still, since + * that's part of the firmware. 
+ */ + + offset = fdt_path_offset(fdtp, "/memory@0"); + if (offset > 0) + fdt_delprop(fdtp, offset, "available"); + } + + /* + + /* + * Convert stored ihandles under /chosen to xref phandles + */ + offset = fdt_path_offset(fdtp, "/chosen"); + if (offset > 0) { + const char *chosenprops[] = {"stdout", "stdin", "mmu", "cpu", + NULL}; + const uint32_t *ihand; + for (i = 0; chosenprops[i] != NULL; i++) { + ihand = fdt_getprop(fdtp, offset, chosenprops[i], &len); + if (ihand != NULL && len == sizeof(*ihand)) { + node = OF_instance_to_package( + fdt32_to_cpu(*ihand)); + if (OF_hasprop(node, "phandle")) + OF_getprop(node, "phandle", &node, + sizeof(node)); + else if (OF_hasprop(node, "linux,phandle")) + OF_getprop(node, "linux,phandle", &node, + sizeof(node)); + else if (OF_hasprop(node, "ibm,phandle")) + OF_getprop(node, "ibm,phandle", &node, + sizeof(node)); + node = cpu_to_fdt32(node); + fdt_setprop(fdtp, offset, chosenprops[i], &node, sizeof(node)); + } + + /* Refind node in case it moved */ + offset = fdt_path_offset(fdtp, "/chosen"); + } + } +} + +int +fdt_platform_load_dtb(void) +{ + void *buffer; + size_t buflen = 409600; + + buffer = malloc(buflen); + fdt_create_empty_tree(buffer, buflen); + add_node_to_fdt(buffer, OF_peer(0), fdt_path_offset(buffer, "/")); + ofwfdt_fixups(buffer); + fdt_pack(buffer); + + fdt_load_dtb_addr(buffer); + free(buffer); + + return (0); +} + +void +fdt_platform_fixups(void) +{ + +} + +static int +command_fdt(int argc, char *argv[]) +{ + + return (command_fdt_internal(argc, argv)); +} + +COMMAND_SET(fdt, "fdt", "flattened device tree handling", command_fdt); + diff --git a/sys/boot/sparc64/loader/metadata.c b/sys/boot/sparc64/loader/metadata.c index d15a75a0a956..a3b3eb5cedff 100644 --- a/sys/boot/sparc64/loader/metadata.c +++ b/sys/boot/sparc64/loader/metadata.c @@ -276,7 +276,7 @@ md_copymodules(vm_offset_t addr) * - Module metadata are formatted and placed in kernel space. */ int -md_load(char *args, vm_offset_t *modulep) +md_load(char *args, vm_offset_t *modulep, vm_offset_t *dtbp) { struct preloaded_file *kfp; struct preloaded_file *xp; @@ -289,6 +289,7 @@ md_load(char *args, vm_offset_t *modulep) int howto; howto = md_getboothowto(args); + *dtbp = 0; /* * Allow the environment variable 'rootdev' to override the supplied device diff --git a/sys/conf/NOTES b/sys/conf/NOTES index 8b6a4c8b3c60..c0d587911825 100644 --- a/sys/conf/NOTES +++ b/sys/conf/NOTES @@ -2889,11 +2889,6 @@ options SHMMNI=33 # a single process at one time. options SHMSEG=9 -# Compress user core dumps. -options COMPRESS_USER_CORES -# required to compress file output from kernel for COMPRESS_USER_CORES. -device gzio - # Set the amount of time (in seconds) the system will wait before # rebooting automatically when a kernel panic occurs. If set to (-1), # the system will wait indefinitely until a key is pressed on the @@ -2983,3 +2978,7 @@ options RANDOM_DEBUG # Debugging messages # Module to enable execution of application via emulators like QEMU options IMAGACT_BINMISC + +# zlib I/O stream support +# This enables support for compressed core dumps. 
+options GZIO diff --git a/sys/conf/files b/sys/conf/files index 6820abd4323c..a351e9f4cd31 100644 --- a/sys/conf/files +++ b/sys/conf/files @@ -3140,6 +3140,7 @@ kern/uipc_debug.c optional ddb kern/uipc_domain.c standard kern/uipc_mbuf.c standard kern/uipc_mbuf2.c standard +kern/uipc_mbufhash.c standard kern/uipc_mqueue.c optional p1003_1b_mqueue kern/uipc_sem.c optional p1003_1b_semaphores kern/uipc_shm.c standard @@ -3857,9 +3858,6 @@ ofed/drivers/net/mlx4/sys_tune.c optional mlx4ib | mlxen \ ofed/drivers/net/mlx4/en_cq.c optional mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" -ofed/drivers/net/mlx4/utils.c optional mlxen \ - no-depend obj-prefix "mlx4_" \ - compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" ofed/drivers/net/mlx4/en_main.c optional mlxen \ no-depend obj-prefix "mlx4_" \ compile-with "${OFED_C_NOIMP} -I$S/ofed/drivers/net/mlx4/" diff --git a/sys/conf/options b/sys/conf/options index 2d6d54b2e24e..28d7950d6757 100644 --- a/sys/conf/options +++ b/sys/conf/options @@ -87,13 +87,13 @@ COMPAT_FREEBSD9 opt_compat.h COMPAT_FREEBSD10 opt_compat.h COMPAT_LINUXAPI opt_compat.h COMPILING_LINT opt_global.h -COMPRESS_USER_CORES opt_core.h CY_PCI_FASTINTR DEADLKRES opt_watchdog.h DIRECTIO FILEMON opt_dontuse.h FFCLOCK FULL_PREEMPTION opt_sched.h +GZIO opt_gzio.h IMAGACT_BINMISC opt_dontuse.h IPI_PREEMPTION opt_sched.h GEOM_AES opt_geom.h diff --git a/sys/dev/bge/if_bge.c b/sys/dev/bge/if_bge.c index af4fddb9fa06..007ec63e65d2 100644 --- a/sys/dev/bge/if_bge.c +++ b/sys/dev/bge/if_bge.c @@ -1637,6 +1637,7 @@ bge_setmulti(struct bge_softc *sc) if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) { for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); + free(mta, M_DEVBUF); return; } diff --git a/sys/dev/cadence/if_cgem.c b/sys/dev/cadence/if_cgem.c index 84b947dfba1f..0bb989330369 100644 --- a/sys/dev/cadence/if_cgem.c +++ b/sys/dev/cadence/if_cgem.c @@ -52,7 +52,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include #include @@ -98,7 +97,7 @@ __FBSDID("$FreeBSD$"); CSUM_TCP_IPV6 | CSUM_UDP_IPV6) struct cgem_softc { - struct ifnet *ifp; + if_t ifp; struct mtx sc_mtx; device_t dev; device_t miibus; @@ -298,9 +297,10 @@ cgem_mac_hash(u_char eaddr[]) static void cgem_rx_filter(struct cgem_softc *sc) { - struct ifnet *ifp = sc->ifp; - struct ifmultiaddr *ifma; - int index; + if_t ifp = sc->ifp; + u_char *mta; + + int index, i, mcnt; uint32_t hash_hi, hash_lo; uint32_t net_cfg; @@ -313,28 +313,34 @@ cgem_rx_filter(struct cgem_softc *sc) CGEM_NET_CFG_NO_BCAST | CGEM_NET_CFG_COPY_ALL); - if ((ifp->if_flags & IFF_PROMISC) != 0) + if ((if_getflags(ifp) & IFF_PROMISC) != 0) net_cfg |= CGEM_NET_CFG_COPY_ALL; else { - if ((ifp->if_flags & IFF_BROADCAST) == 0) + if ((if_getflags(ifp) & IFF_BROADCAST) == 0) net_cfg |= CGEM_NET_CFG_NO_BCAST; - if ((ifp->if_flags & IFF_ALLMULTI) != 0) { + if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) { hash_hi = 0xffffffff; hash_lo = 0xffffffff; } else { - if_maddr_rlock(ifp); - TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { - if (ifma->ifma_addr->sa_family != AF_LINK) - continue; + mcnt = if_multiaddr_count(ifp, -1); + mta = malloc(ETHER_ADDR_LEN * mcnt, M_DEVBUF, + M_NOWAIT); + if (mta == NULL) { + device_printf(sc->dev, + "failed to allocate temp mcast list\n"); + return; + } + if_multiaddr_array(ifp, mta, &mcnt, mcnt); + for (i = 0; i < mcnt; i++) { index = cgem_mac_hash( LLADDR((struct sockaddr_dl *) - ifma->ifma_addr)); + (mta + (i * 
ETHER_ADDR_LEN)))); if (index > 31) - hash_hi |= (1<<(index-32)); + hash_hi |= (1 << (index - 32)); else - hash_lo |= (1<rxring[i].addr = CGEM_RXDESC_OWN; sc->rxring[i].ctl = 0; sc->rxring_m[i] = NULL; - err = bus_dmamap_create(sc->mbuf_dma_tag, 0, - &sc->rxring_m_dmamap[i]); - if (err) - return (err); + sc->rxring_m_dmamap[i] = NULL; } sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP; @@ -451,10 +454,7 @@ cgem_setup_descs(struct cgem_softc *sc) sc->txring[i].addr = 0; sc->txring[i].ctl = CGEM_TXDESC_USED; sc->txring_m[i] = NULL; - err = bus_dmamap_create(sc->mbuf_dma_tag, 0, - &sc->txring_m_dmamap[i]); - if (err) - return (err); + sc->txring_m_dmamap[i] = NULL; } sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP; @@ -486,10 +486,19 @@ cgem_fill_rqueue(struct cgem_softc *sc) m->m_pkthdr.rcvif = sc->ifp; /* Load map and plug in physical address. */ + if (bus_dmamap_create(sc->mbuf_dma_tag, 0, + &sc->rxring_m_dmamap[sc->rxring_hd_ptr])) { + sc->rxdmamapfails++; + m_free(m); + break; + } if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag, sc->rxring_m_dmamap[sc->rxring_hd_ptr], m, segs, &nsegs, BUS_DMA_NOWAIT)) { sc->rxdmamapfails++; + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->rxring_m_dmamap[sc->rxring_hd_ptr]); + sc->rxring_m_dmamap[sc->rxring_hd_ptr] = NULL; m_free(m); break; } @@ -517,7 +526,7 @@ cgem_fill_rqueue(struct cgem_softc *sc) static void cgem_recv(struct cgem_softc *sc) { - struct ifnet *ifp = sc->ifp; + if_t ifp = sc->ifp; struct mbuf *m, *m_hd, **m_tl; uint32_t ctl; @@ -540,9 +549,12 @@ cgem_recv(struct cgem_softc *sc) sc->rxring_m_dmamap[sc->rxring_tl_ptr], BUS_DMASYNC_POSTREAD); - /* Unload dmamap. */ + /* Unload and destroy dmamap. */ bus_dmamap_unload(sc->mbuf_dma_tag, sc->rxring_m_dmamap[sc->rxring_tl_ptr]); + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->rxring_m_dmamap[sc->rxring_tl_ptr]); + sc->rxring_m_dmamap[sc->rxring_tl_ptr] = NULL; /* Increment tail pointer. */ if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS) @@ -571,7 +583,7 @@ cgem_recv(struct cgem_softc *sc) /* Are we using hardware checksumming? Check the * status in the receive descriptor. */ - if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { + if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) { /* TCP or UDP checks out, IP checks out too. */ if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) == CGEM_RXDESC_CKSUM_STAT_TCP_GOOD || @@ -605,7 +617,7 @@ cgem_recv(struct cgem_softc *sc) m_hd = m_hd->m_next; m->m_next = NULL; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); - (*ifp->if_input)(ifp, m); + if_input(ifp, m); } CGEM_LOCK(sc); } @@ -624,14 +636,17 @@ cgem_clean_tx(struct cgem_softc *sc) ((ctl = sc->txring[sc->txring_tl_ptr].ctl) & CGEM_TXDESC_USED) != 0) { - /* Sync cache. nop? */ + /* Sync cache. */ bus_dmamap_sync(sc->mbuf_dma_tag, sc->txring_m_dmamap[sc->txring_tl_ptr], BUS_DMASYNC_POSTWRITE); - /* Unload DMA map. */ + /* Unload and destroy DMA map. */ bus_dmamap_unload(sc->mbuf_dma_tag, sc->txring_m_dmamap[sc->txring_tl_ptr]); + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->txring_m_dmamap[sc->txring_tl_ptr]); + sc->txring_m_dmamap[sc->txring_tl_ptr] = NULL; /* Free up the mbuf. */ m = sc->txring_m[sc->txring_tl_ptr]; @@ -674,15 +689,15 @@ cgem_clean_tx(struct cgem_softc *sc) sc->txring_tl_ptr++; sc->txring_queued--; - sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + if_setdrvflagbits(sc->ifp, 0, IFF_DRV_OACTIVE); } } /* Start transmits. 
*/ static void -cgem_start_locked(struct ifnet *ifp) +cgem_start_locked(if_t ifp) { - struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc; + struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp); struct mbuf *m; bus_dma_segment_t segs[TX_MAX_DMA_SEGS]; uint32_t ctl; @@ -690,7 +705,7 @@ cgem_start_locked(struct ifnet *ifp) CGEM_ASSERT_LOCKED(sc); - if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) + if ((if_getdrvflags(ifp) & IFF_DRV_OACTIVE) != 0) return; for (;;) { @@ -704,18 +719,24 @@ cgem_start_locked(struct ifnet *ifp) /* Still no room? */ if (sc->txring_queued >= CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) { - ifp->if_drv_flags |= IFF_DRV_OACTIVE; + if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); sc->txfull++; break; } } /* Grab next transmit packet. */ - IFQ_DRV_DEQUEUE(&ifp->if_snd, m); + m = if_dequeue(ifp); if (m == NULL) break; - /* Load DMA map. */ + /* Create and load DMA map. */ + if (bus_dmamap_create(sc->mbuf_dma_tag, 0, + &sc->txring_m_dmamap[sc->txring_hd_ptr])) { + m_freem(m); + sc->txdmamapfails++; + continue; + } err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag, sc->txring_m_dmamap[sc->txring_hd_ptr], m, segs, &nsegs, BUS_DMA_NOWAIT); @@ -726,6 +747,9 @@ cgem_start_locked(struct ifnet *ifp) if (m2 == NULL) { sc->txdefragfails++; m_freem(m); + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->txring_m_dmamap[sc->txring_hd_ptr]); + sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL; continue; } m = m2; @@ -737,6 +761,9 @@ cgem_start_locked(struct ifnet *ifp) if (err) { /* Give up. */ m_freem(m); + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->txring_m_dmamap[sc->txring_hd_ptr]); + sc->txring_m_dmamap[sc->txring_hd_ptr] = NULL; sc->txdmamapfails++; continue; } @@ -788,9 +815,9 @@ cgem_start_locked(struct ifnet *ifp) } static void -cgem_start(struct ifnet *ifp) +cgem_start(if_t ifp) { - struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc; + struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp); CGEM_LOCK(sc); cgem_start_locked(ifp); @@ -902,11 +929,12 @@ static void cgem_intr(void *arg) { struct cgem_softc *sc = (struct cgem_softc *)arg; + if_t ifp = sc->ifp; uint32_t istatus; CGEM_LOCK(sc); - if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { + if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { CGEM_UNLOCK(sc); return; } @@ -945,8 +973,8 @@ cgem_intr(void *arg) } /* Restart transmitter if needed. */ - if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd)) - cgem_start_locked(sc->ifp); + if (!if_sendq_empty(ifp)) + cgem_start_locked(ifp); CGEM_UNLOCK(sc); } @@ -982,9 +1010,10 @@ cgem_reset(struct cgem_softc *sc) static void cgem_config(struct cgem_softc *sc) { + if_t ifp = sc->ifp; uint32_t net_cfg; uint32_t dma_cfg; - u_char *eaddr = IF_LLADDR(sc->ifp); + u_char *eaddr = if_getlladdr(ifp); CGEM_ASSERT_LOCKED(sc); @@ -999,7 +1028,7 @@ cgem_config(struct cgem_softc *sc) CGEM_NET_CFG_SPEED100; /* Enable receive checksum offloading? */ - if ((sc->ifp->if_capenable & IFCAP_RXCSUM) != 0) + if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) net_cfg |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN; WR4(sc, CGEM_NET_CFG, net_cfg); @@ -1012,7 +1041,7 @@ cgem_config(struct cgem_softc *sc) CGEM_DMA_CFG_DISC_WHEN_NO_AHB; /* Enable transmit checksum offloading? 
*/ - if ((sc->ifp->if_capenable & IFCAP_TXCSUM) != 0) + if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN; WR4(sc, CGEM_DMA_CFG, dma_cfg); @@ -1045,14 +1074,13 @@ cgem_init_locked(struct cgem_softc *sc) CGEM_ASSERT_LOCKED(sc); - if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) + if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0) return; cgem_config(sc); cgem_fill_rqueue(sc); - sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; - sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + if_setdrvflagbits(sc->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); mii = device_get_softc(sc->miibus); mii_mediachg(mii); @@ -1088,8 +1116,12 @@ cgem_stop(struct cgem_softc *sc) sc->txring[i].ctl = CGEM_TXDESC_USED; sc->txring[i].addr = 0; if (sc->txring_m[i]) { + /* Unload and destroy dmamap. */ bus_dmamap_unload(sc->mbuf_dma_tag, sc->txring_m_dmamap[i]); + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->txring_m_dmamap[i]); + sc->txring_m_dmamap[i] = NULL; m_freem(sc->txring_m[i]); sc->txring_m[i] = NULL; } @@ -1105,9 +1137,12 @@ cgem_stop(struct cgem_softc *sc) sc->rxring[i].addr = CGEM_RXDESC_OWN; sc->rxring[i].ctl = 0; if (sc->rxring_m[i]) { - /* Unload dmamap. */ + /* Unload and destroy dmamap. */ bus_dmamap_unload(sc->mbuf_dma_tag, - sc->rxring_m_dmamap[sc->rxring_tl_ptr]); + sc->rxring_m_dmamap[i]); + bus_dmamap_destroy(sc->mbuf_dma_tag, + sc->rxring_m_dmamap[i]); + sc->rxring_m_dmamap[i] = NULL; m_freem(sc->rxring_m[i]); sc->rxring_m[i] = NULL; @@ -1125,9 +1160,9 @@ cgem_stop(struct cgem_softc *sc) static int -cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) +cgem_ioctl(if_t ifp, u_long cmd, caddr_t data) { - struct cgem_softc *sc = ifp->if_softc; + struct cgem_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; int error = 0, mask; @@ -1135,27 +1170,27 @@ cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) switch (cmd) { case SIOCSIFFLAGS: CGEM_LOCK(sc); - if ((ifp->if_flags & IFF_UP) != 0) { - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { - if (((ifp->if_flags ^ sc->if_old_flags) & + if ((if_getflags(ifp) & IFF_UP) != 0) { + if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { + if (((if_getflags(ifp) ^ sc->if_old_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { cgem_rx_filter(sc); } } else { cgem_init_locked(sc); } - } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { + if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); cgem_stop(sc); } - sc->if_old_flags = ifp->if_flags; + sc->if_old_flags = if_getflags(ifp); CGEM_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* Set up multi-cast filters. */ - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { + if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { CGEM_LOCK(sc); cgem_rx_filter(sc); CGEM_UNLOCK(sc); @@ -1170,23 +1205,23 @@ cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) case SIOCSIFCAP: CGEM_LOCK(sc); - mask = ifp->if_capenable ^ ifr->ifr_reqcap; + mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap; if ((mask & IFCAP_TXCSUM) != 0) { if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) { /* Turn on TX checksumming. */ - ifp->if_capenable |= (IFCAP_TXCSUM | - IFCAP_TXCSUM_IPV6); - ifp->if_hwassist |= CGEM_CKSUM_ASSIST; + if_setcapenablebit(ifp, IFCAP_TXCSUM | + IFCAP_TXCSUM_IPV6, 0); + if_sethwassistbits(ifp, CGEM_CKSUM_ASSIST, 0); WR4(sc, CGEM_DMA_CFG, RD4(sc, CGEM_DMA_CFG) | CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN); } else { /* Turn off TX checksumming. 
*/ - ifp->if_capenable &= ~(IFCAP_TXCSUM | - IFCAP_TXCSUM_IPV6); - ifp->if_hwassist &= ~CGEM_CKSUM_ASSIST; + if_setcapenablebit(ifp, 0, IFCAP_TXCSUM | + IFCAP_TXCSUM_IPV6); + if_sethwassistbits(ifp, 0, CGEM_CKSUM_ASSIST); WR4(sc, CGEM_DMA_CFG, RD4(sc, CGEM_DMA_CFG) & @@ -1196,25 +1231,25 @@ cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) if ((mask & IFCAP_RXCSUM) != 0) { if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) { /* Turn on RX checksumming. */ - ifp->if_capenable |= (IFCAP_RXCSUM | - IFCAP_RXCSUM_IPV6); + if_setcapenablebit(ifp, IFCAP_RXCSUM | + IFCAP_RXCSUM_IPV6, 0); WR4(sc, CGEM_NET_CFG, RD4(sc, CGEM_NET_CFG) | CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN); } else { /* Turn off RX checksumming. */ - ifp->if_capenable &= ~(IFCAP_RXCSUM | - IFCAP_RXCSUM_IPV6); + if_setcapenablebit(ifp, 0, IFCAP_RXCSUM | + IFCAP_RXCSUM_IPV6); WR4(sc, CGEM_NET_CFG, RD4(sc, CGEM_NET_CFG) & ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN); } } - if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_TXCSUM)) == + if ((if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_TXCSUM)) == (IFCAP_RXCSUM | IFCAP_TXCSUM)) - ifp->if_capenable |= IFCAP_VLAN_HWCSUM; + if_setcapenablebit(ifp, IFCAP_VLAN_HWCSUM, 0); else - ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; + if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWCSUM); CGEM_UNLOCK(sc); break; @@ -1238,16 +1273,16 @@ cgem_child_detached(device_t dev, device_t child) } static int -cgem_ifmedia_upd(struct ifnet *ifp) +cgem_ifmedia_upd(if_t ifp) { - struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc; + struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp); struct mii_data *mii; struct mii_softc *miisc; int error = 0; mii = device_get_softc(sc->miibus); CGEM_LOCK(sc); - if ((ifp->if_flags & IFF_UP) != 0) { + if ((if_getflags(ifp) & IFF_UP) != 0) { LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); @@ -1258,9 +1293,9 @@ cgem_ifmedia_upd(struct ifnet *ifp) } static void -cgem_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) +cgem_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { - struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc; + struct cgem_softc *sc = (struct cgem_softc *) if_getsoftc(ifp); struct mii_data *mii; mii = device_get_softc(sc->miibus); @@ -1606,7 +1641,7 @@ static int cgem_attach(device_t dev) { struct cgem_softc *sc = device_get_softc(dev); - struct ifnet *ifp = NULL; + if_t ifp = NULL; phandle_t node; pcell_t cell; int rid, err; @@ -1647,23 +1682,23 @@ cgem_attach(device_t dev) cgem_detach(dev); return (ENOMEM); } - ifp->if_softc = sc; + if_setsoftc(ifp, sc); if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev)); - ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; - ifp->if_start = cgem_start; - ifp->if_ioctl = cgem_ioctl; - ifp->if_init = cgem_init; - ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | - IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM; - /* Disable hardware checksumming by default. 
*/ - ifp->if_hwassist = 0; - ifp->if_capenable = ifp->if_capabilities & - ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM); - ifp->if_snd.ifq_drv_maxlen = CGEM_NUM_TX_DESCS; - IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); - IFQ_SET_READY(&ifp->if_snd); + if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); + if_setinitfn(ifp, cgem_init); + if_setioctlfn(ifp, cgem_ioctl); + if_setstartfn(ifp, cgem_start); + if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | + IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM, 0); + if_setsendqlen(ifp, CGEM_NUM_TX_DESCS); + if_setsendqready(ifp); - sc->if_old_flags = ifp->if_flags; + /* Disable hardware checksumming by default. */ + if_sethwassist(ifp, 0); + if_setcapenable(ifp, if_getcapabilities(ifp) & + ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM)); + + sc->if_old_flags = if_getflags(ifp); sc->rxbufs = DEFAULT_NUM_RX_BUFS; sc->rxhangwar = 1; @@ -1726,7 +1761,7 @@ cgem_detach(device_t dev) cgem_stop(sc); CGEM_UNLOCK(sc); callout_drain(&sc->tick_ch); - sc->ifp->if_flags &= ~IFF_UP; + if_setflagbits(sc->ifp, 0, IFF_UP); ether_ifdetach(sc->ifp); } @@ -1752,7 +1787,8 @@ cgem_detach(device_t dev) /* Release DMA resources. */ if (sc->rxring != NULL) { if (sc->rxring_physaddr != 0) { - bus_dmamap_unload(sc->desc_dma_tag, sc->rxring_dma_map); + bus_dmamap_unload(sc->desc_dma_tag, + sc->rxring_dma_map); sc->rxring_physaddr = 0; } bus_dmamem_free(sc->desc_dma_tag, sc->rxring, @@ -1767,7 +1803,8 @@ cgem_detach(device_t dev) } if (sc->txring != NULL) { if (sc->txring_physaddr != 0) { - bus_dmamap_unload(sc->desc_dma_tag, sc->txring_dma_map); + bus_dmamap_unload(sc->desc_dma_tag, + sc->txring_dma_map); sc->txring_physaddr = 0; } bus_dmamem_free(sc->desc_dma_tag, sc->txring, diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c index 6908de93ff12..7c2cec42bd9a 100644 --- a/sys/dev/cxgbe/tom/t4_ddp.c +++ b/sys/dev/cxgbe/tom/t4_ddp.c @@ -65,6 +65,13 @@ __FBSDID("$FreeBSD$"); #include "common/t4_tcb.h" #include "tom/t4_tom.h" +VNET_DECLARE(int, tcp_do_autorcvbuf); +#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf) +VNET_DECLARE(int, tcp_autorcvbuf_inc); +#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc) +VNET_DECLARE(int, tcp_autorcvbuf_max); +#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max) + #define PPOD_SZ(n) ((n) * sizeof(struct pagepod)) #define PPOD_SIZE (PPOD_SZ(1)) @@ -411,6 +418,21 @@ handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len) else discourage_ddp(toep); + /* receive buffer autosize */ + if (sb->sb_flags & SB_AUTOSIZE && + V_tcp_do_autorcvbuf && + sb->sb_hiwat < V_tcp_autorcvbuf_max && + len > (sbspace(sb) / 8 * 7)) { + unsigned int hiwat = sb->sb_hiwat; + unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc, + V_tcp_autorcvbuf_max); + + if (!sbreserve_locked(sb, newsize, so, NULL)) + sb->sb_flags &= ~SB_AUTOSIZE; + else + toep->rx_credits += newsize - hiwat; + } + KASSERT(toep->sb_cc >= sbused(sb), ("%s: sb %p has more data (%d) than last time (%d).", __func__, sb, sbused(sb), toep->sb_cc)); diff --git a/sys/dev/etherswitch/arswitch/arswitch.c b/sys/dev/etherswitch/arswitch/arswitch.c index 129a77dad4a1..3b1d5750b0f4 100644 --- a/sys/dev/etherswitch/arswitch/arswitch.c +++ b/sys/dev/etherswitch/arswitch/arswitch.c @@ -149,8 +149,10 @@ arswitch_probe(device_t dev) DPRINTF(dev, "chipname=%s, id=%08x\n", chipname, id); if (chipname != NULL) { snprintf(desc, sizeof(desc), - "Atheros %s Ethernet Switch", - chipname); + "Atheros %s Ethernet Switch (ver %d rev %d)", + 
chipname, + sc->chip_ver, + sc->chip_rev); device_set_desc_copy(dev, desc); return (BUS_PROBE_DEFAULT); } @@ -177,9 +179,11 @@ arswitch_attach_phys(struct arswitch_softc *sc) err = mii_attach(sc->sc_dev, &sc->miibus[phy], sc->ifp[phy], arswitch_ifmedia_upd, arswitch_ifmedia_sts, \ BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); +#if 0 DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n", device_get_nameunit(sc->miibus[phy]), sc->ifp[phy]->if_xname); +#endif if (err != 0) { device_printf(sc->sc_dev, "attaching PHY %d failed\n", @@ -299,12 +303,26 @@ arswitch_attach(device_t dev) sc->hal.arswitch_port_vlan_setup = ar8xxx_port_vlan_setup; sc->hal.arswitch_port_vlan_get = ar8xxx_port_vlan_get; sc->hal.arswitch_vlan_init_hw = ar8xxx_reset_vlans; + sc->hal.arswitch_vlan_getvgroup = ar8xxx_getvgroup; sc->hal.arswitch_vlan_setvgroup = ar8xxx_setvgroup; + sc->hal.arswitch_vlan_get_pvid = ar8xxx_get_pvid; sc->hal.arswitch_vlan_set_pvid = ar8xxx_set_pvid; + + sc->hal.arswitch_get_dot1q_vlan = ar8xxx_get_dot1q_vlan; + sc->hal.arswitch_set_dot1q_vlan = ar8xxx_set_dot1q_vlan; + sc->hal.arswitch_flush_dot1q_vlan = ar8xxx_flush_dot1q_vlan; + sc->hal.arswitch_purge_dot1q_vlan = ar8xxx_purge_dot1q_vlan; + sc->hal.arswitch_get_port_vlan = ar8xxx_get_port_vlan; + sc->hal.arswitch_set_port_vlan = ar8xxx_set_port_vlan; + sc->hal.arswitch_atu_flush = ar8xxx_atu_flush; + sc->hal.arswitch_phy_read = arswitch_readphy_internal; + sc->hal.arswitch_phy_write = arswitch_writephy_internal; + + /* * Attach switch related functions */ @@ -320,8 +338,10 @@ arswitch_attach(device_t dev) ar8316_attach(sc); else if (AR8X16_IS_SWITCH(sc, AR8327)) ar8327_attach(sc); - else + else { + DPRINTF(dev, "%s: unknown switch (%d)?\n", __func__, sc->sc_switchtype); return (ENXIO); + } /* Common defaults. */ sc->info.es_nports = 5; /* XXX technically 6, but 6th not used */ @@ -348,14 +368,18 @@ arswitch_attach(device_t dev) sc->numphys = AR8X16_NUM_PHYS; /* Reset the switch. */ - if (arswitch_reset(dev)) + if (arswitch_reset(dev)) { + DPRINTF(dev, "%s: arswitch_reset: failed\n", __func__); return (ENXIO); + } err = sc->hal.arswitch_hw_setup(sc); + DPRINTF(dev, "%s: hw_setup: err=%d\n", __func__, err); if (err != 0) return (err); err = sc->hal.arswitch_hw_global_setup(sc); + DPRINTF(dev, "%s: hw_global_setup: err=%d\n", __func__, err); if (err != 0) return (err); @@ -368,17 +392,20 @@ arswitch_attach(device_t dev) * Attach the PHYs and complete the bus enumeration. */ err = arswitch_attach_phys(sc); + DPRINTF(dev, "%s: attach_phys: err=%d\n", __func__, err); if (err != 0) return (err); /* Default to ingress filters off. */ err = arswitch_set_vlan_mode(sc, 0); + DPRINTF(dev, "%s: set_vlan_mode: err=%d\n", __func__, err); if (err != 0) return (err); bus_generic_probe(dev); bus_enumerate_hinted_children(dev); err = bus_generic_attach(dev); + DPRINTF(dev, "%s: bus_generic_attach: err=%d\n", __func__, err); if (err != 0) return (err); @@ -611,6 +638,15 @@ ar8xxx_port_vlan_get(struct arswitch_softc *sc, etherswitch_port_t *p) return (0); } +static int +arswitch_is_cpuport(struct arswitch_softc *sc, int port) +{ + + return ((port == AR8X16_PORT_CPU) || + ((AR8X16_IS_SWITCH(sc, AR8327) && + port == AR8327_PORT_GMAC6))); +} + static int arswitch_getport(device_t dev, etherswitch_port_t *p) { @@ -620,7 +656,8 @@ arswitch_getport(device_t dev, etherswitch_port_t *p) int err; sc = device_get_softc(dev); - if (p->es_port < 0 || p->es_port > sc->numphys) + /* XXX +1 is for AR8327; should make this configurable! 
*/ + if (p->es_port < 0 || p->es_port > sc->info.es_nports) return (ENXIO); err = sc->hal.arswitch_port_vlan_get(sc, p); @@ -628,7 +665,7 @@ arswitch_getport(device_t dev, etherswitch_port_t *p) return (err); mii = arswitch_miiforport(sc, p->es_port); - if (p->es_port == AR8X16_PORT_CPU) { + if (arswitch_is_cpuport(sc, p->es_port)) { /* fill in fixed values for CPU port */ /* XXX is this valid in all cases? */ p->es_flags |= ETHERSWITCH_PORT_CPU; @@ -697,7 +734,7 @@ arswitch_setport(device_t dev, etherswitch_port_t *p) struct ifnet *ifp; sc = device_get_softc(dev); - if (p->es_port < 0 || p->es_port > sc->numphys) + if (p->es_port < 0 || p->es_port > sc->info.es_nports) return (ENXIO); /* Port flags. */ @@ -708,7 +745,7 @@ arswitch_setport(device_t dev, etherswitch_port_t *p) } /* Do not allow media changes on CPU port. */ - if (p->es_port == AR8X16_PORT_CPU) + if (arswitch_is_cpuport(sc, p->es_port)) return (0); mii = arswitch_miiforport(sc, p->es_port); @@ -803,6 +840,22 @@ arswitch_setvgroup(device_t dev, etherswitch_vlangroup_t *e) return (sc->hal.arswitch_vlan_setvgroup(sc, e)); } +static int +arswitch_readphy(device_t dev, int phy, int reg) +{ + struct arswitch_softc *sc = device_get_softc(dev); + + return (sc->hal.arswitch_phy_read(dev, phy, reg)); +} + +static int +arswitch_writephy(device_t dev, int phy, int reg, int val) +{ + struct arswitch_softc *sc = device_get_softc(dev); + + return (sc->hal.arswitch_phy_write(dev, phy, reg, val)); +} + static device_method_t arswitch_methods[] = { /* Device interface */ DEVMETHOD(device_probe, arswitch_probe), diff --git a/sys/dev/etherswitch/arswitch/arswitch_8327.c b/sys/dev/etherswitch/arswitch/arswitch_8327.c index 642827d9916b..8b46b0c262b5 100644 --- a/sys/dev/etherswitch/arswitch/arswitch_8327.c +++ b/sys/dev/etherswitch/arswitch/arswitch_8327.c @@ -57,6 +57,9 @@ #include #include #include +#include +#include + #include #include "mdio_if.h" @@ -290,7 +293,7 @@ ar8327_fetch_pdata_port(struct arswitch_softc *sc, sbuf, &val) == 0) pcfg->rxpause = val; -#if 0 +#if 1 device_printf(sc->sc_dev, "%s: port %d: speed=%d, duplex=%d, txpause=%d, rxpause=%d\n", __func__, @@ -562,6 +565,7 @@ ar8327_init_pdata(struct arswitch_softc *sc) /* SGMII config */ bzero(&scfg, sizeof(scfg)); if (ar8327_fetch_pdata_sgmii(sc, &scfg)) { + device_printf(sc->sc_dev, "%s: SGMII cfg?\n", __func__); t = scfg.sgmii_ctrl; if (sc->chip_rev == 1) t |= AR8327_SGMII_CTRL_EN_PLL | @@ -651,18 +655,23 @@ ar8327_hw_global_setup(struct arswitch_softc *sc) arswitch_writereg(sc->sc_dev, AR8327_REG_EEE_CTRL, t); /* Set the right number of ports */ - sc->info.es_nports = 6; + /* GMAC0 (CPU), GMAC1..5 (PHYs), GMAC6 (CPU) */ + sc->info.es_nports = 7; return (0); } /* - * Port setup. + * Port setup. Called at attach time. 
*/ static void ar8327_port_init(struct arswitch_softc *sc, int port) { uint32_t t; + int ports; + + /* For now, port can see all other ports */ + ports = 0x7f; if (port == AR8X16_PORT_CPU) t = sc->ar8327.port0_status; @@ -696,7 +705,7 @@ ar8327_port_init(struct arswitch_softc *sc, int port) t |= AR8X16_PORT_CTRL_STATE_FORWARD << AR8327_PORT_LOOKUP_STATE_S; /* So this allows traffic to any port except ourselves */ - t |= (0x7f & ~(1 << port)); + t |= (ports & ~(1 << port)); arswitch_writereg(sc->sc_dev, AR8327_REG_PORT_LOOKUP(port), t); } @@ -704,17 +713,43 @@ static int ar8327_port_vlan_setup(struct arswitch_softc *sc, etherswitch_port_t *p) { - /* XXX stub for now */ - device_printf(sc->sc_dev, "%s: called\n", __func__); + /* Check: ADDTAG/STRIPTAG - exclusive */ + + ARSWITCH_LOCK(sc); + + /* Set the PVID. */ + if (p->es_pvid != 0) + sc->hal.arswitch_vlan_set_pvid(sc, p->es_port, p->es_pvid); + + /* + * DOUBLE_TAG + * VLAN_MODE_ADD + * VLAN_MODE_STRIP + */ + ARSWITCH_UNLOCK(sc); return (0); } +/* + * Get the port VLAN configuration. + */ static int ar8327_port_vlan_get(struct arswitch_softc *sc, etherswitch_port_t *p) { - /* XXX stub for now */ - device_printf(sc->sc_dev, "%s: called\n", __func__); + ARSWITCH_LOCK(sc); + + /* Retrieve the PVID */ + sc->hal.arswitch_vlan_get_pvid(sc, p->es_port, &p->es_pvid); + + /* Retrieve the current port configuration */ + /* + * DOUBLE_TAG + * VLAN_MODE_ADD + * VLAN_MODE_STRIP + */ + + ARSWITCH_UNLOCK(sc); return (0); } @@ -723,6 +758,13 @@ ar8327_reset_vlans(struct arswitch_softc *sc) { int i; uint32_t mode, t; + int ports; + + ARSWITCH_LOCK_ASSERT(sc, MA_NOTOWNED); + ARSWITCH_LOCK(sc); + + /* Clear the existing VLAN configuration */ + memset(sc->vid, 0, sizeof(sc->vid)); /* * Disable mirroring. @@ -732,11 +774,25 @@ ar8327_reset_vlans(struct arswitch_softc *sc) (0xF << AR8327_FWD_CTRL0_MIRROR_PORT_S)); /* - * For now, let's default to one portgroup, just so traffic - * flows. All ports can see other ports. + * XXX TODO: disable any Q-in-Q port configuration, + * tagging, egress filters, etc. */ + + /* + * For now, let's default to one portgroup, just so traffic + * flows. All ports can see other ports. There are two CPU GMACs + * (GMAC0, GMAC6), GMAC1..GMAC5 are external PHYs. + * + * (ETHERSWITCH_VLAN_PORT) + */ + ports = 0x7f; + for (i = 0; i < AR8327_NUM_PORTS; i++) { - /* set pvid = 1; there's only one vlangroup */ + + if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) + sc->vid[i] = i | ETHERSWITCH_VID_VALID; + + /* set pvid = 1; there's only one vlangroup to start with */ t = 1 << AR8327_PORT_VLAN0_DEF_SVID_S; t |= 1 << AR8327_PORT_VLAN0_DEF_CVID_S; arswitch_writereg(sc->sc_dev, AR8327_REG_PORT_VLAN0(i), t); @@ -749,7 +805,7 @@ ar8327_reset_vlans(struct arswitch_softc *sc) arswitch_writereg(sc->sc_dev, AR8327_REG_PORT_VLAN1(i), t); /* Ports can see other ports */ - t = (0x7f & ~(1 << i)); /* all ports besides us */ + t = (ports & ~(1 << i)); /* all ports besides us */ t |= AR8327_PORT_LOOKUP_LEARN; /* in_port_only, forward */ @@ -769,36 +825,95 @@ ar8327_reset_vlans(struct arswitch_softc *sc) AR8327_PORT_HOL_CTRL1_EG_MIRROR_EN, 0); } + + ARSWITCH_UNLOCK(sc); +} + +static int +ar8327_vlan_get_port(struct arswitch_softc *sc, uint32_t *ports, int vid) +{ + int port; + uint32_t reg; + + ARSWITCH_LOCK_ASSERT(sc, MA_OWNED); + + /* For port based vlans the vlanid is the same as the port index. 
*/ + port = vid & ETHERSWITCH_VID_MASK; + reg = arswitch_readreg(sc->sc_dev, AR8327_REG_PORT_LOOKUP(port)); + *ports = reg & 0x7f; + return (0); +} + +static int +ar8327_vlan_set_port(struct arswitch_softc *sc, uint32_t ports, int vid) +{ + int err, port; + + ARSWITCH_LOCK_ASSERT(sc, MA_OWNED); + + /* For port based vlans the vlanid is the same as the port index. */ + port = vid & ETHERSWITCH_VID_MASK; + + err = arswitch_modifyreg(sc->sc_dev, AR8327_REG_PORT_LOOKUP(port), + 0x7f, /* vlan membership mask */ + (ports & 0x7f)); + + if (err) + return (err); + return (0); } static int ar8327_vlan_getvgroup(struct arswitch_softc *sc, etherswitch_vlangroup_t *vg) { - device_printf(sc->sc_dev, "%s: called\n", __func__); - return (0); + + /* XXX for now, no dot1q vlans */ + if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) + return (EINVAL); + return (ar8xxx_getvgroup(sc, vg)); } static int ar8327_vlan_setvgroup(struct arswitch_softc *sc, etherswitch_vlangroup_t *vg) { - device_printf(sc->sc_dev, "%s: called\n", __func__); - return (0); + /* XXX for now, no dot1q vlans */ + if (sc->vlan_mode == ETHERSWITCH_VLAN_DOT1Q) + return (EINVAL); + return (ar8xxx_setvgroup(sc, vg)); } static int ar8327_get_pvid(struct arswitch_softc *sc, int port, int *pvid) { + uint32_t reg; - device_printf(sc->sc_dev, "%s: called\n", __func__); + ARSWITCH_LOCK_ASSERT(sc, MA_OWNED); + + /* + * XXX for now, assuming it's CVID; likely very wrong! + */ + port = port & ETHERSWITCH_VID_MASK; + reg = arswitch_readreg(sc->sc_dev, AR8327_REG_PORT_VLAN0(port)); + reg = reg >> AR8327_PORT_VLAN0_DEF_CVID_S; + reg = reg & 0xfff; + + *pvid = reg; return (0); } static int ar8327_set_pvid(struct arswitch_softc *sc, int port, int pvid) { + uint32_t t; + + /* Limit pvid to valid values */ + pvid &= 0x7f; + + t = pvid << AR8327_PORT_VLAN0_DEF_SVID_S; + t |= pvid << AR8327_PORT_VLAN0_DEF_CVID_S; + arswitch_writereg(sc->sc_dev, AR8327_REG_PORT_VLAN0(port), t); - device_printf(sc->sc_dev, "%s: called\n", __func__); return (0); } @@ -832,17 +947,32 @@ ar8327_attach(struct arswitch_softc *sc) sc->hal.arswitch_hw_global_setup = ar8327_hw_global_setup; sc->hal.arswitch_port_init = ar8327_port_init; + + sc->hal.arswitch_vlan_getvgroup = ar8327_vlan_getvgroup; + sc->hal.arswitch_vlan_setvgroup = ar8327_vlan_setvgroup; sc->hal.arswitch_port_vlan_setup = ar8327_port_vlan_setup; sc->hal.arswitch_port_vlan_get = ar8327_port_vlan_get; sc->hal.arswitch_vlan_init_hw = ar8327_reset_vlans; - sc->hal.arswitch_vlan_getvgroup = ar8327_vlan_getvgroup; - sc->hal.arswitch_vlan_setvgroup = ar8327_vlan_setvgroup; sc->hal.arswitch_vlan_get_pvid = ar8327_get_pvid; sc->hal.arswitch_vlan_set_pvid = ar8327_set_pvid; + sc->hal.arswitch_get_port_vlan = ar8327_vlan_get_port; + sc->hal.arswitch_set_port_vlan = ar8327_vlan_set_port; + sc->hal.arswitch_atu_flush = ar8327_atu_flush; + /* + * Reading the PHY via the MDIO interface currently doesn't + * work correctly. + * + * So for now, just go direct to the PHY registers themselves. + * This has always worked on external devices, but not internal + * devices (AR934x, AR724x, AR933x.) + */ + sc->hal.arswitch_phy_read = arswitch_readphy_external; + sc->hal.arswitch_phy_write = arswitch_writephy_external; + /* Set the switch vlan capabilities. 
*/ sc->info.es_vlan_caps = ETHERSWITCH_VLAN_DOT1Q | ETHERSWITCH_VLAN_PORT | ETHERSWITCH_VLAN_DOUBLE_TAG; diff --git a/sys/dev/etherswitch/arswitch/arswitch_phy.c b/sys/dev/etherswitch/arswitch/arswitch_phy.c index 928ca03dbe43..43abaa12d076 100644 --- a/sys/dev/etherswitch/arswitch/arswitch_phy.c +++ b/sys/dev/etherswitch/arswitch/arswitch_phy.c @@ -67,11 +67,46 @@ static SYSCTL_NODE(_debug, OID_AUTO, arswitch, CTLFLAG_RD, 0, "arswitch"); #endif /* - * access PHYs integrated into the switch chip through the switch's MDIO + * Access PHYs integrated into the switch by going direct + * to the PHY space itself, rather than through the switch + * MDIO register. + */ +int +arswitch_readphy_external(device_t dev, int phy, int reg) +{ + int ret; + struct arswitch_softc *sc; + + sc = device_get_softc(dev); + + ARSWITCH_LOCK(sc); + ret = (MDIO_READREG(device_get_parent(dev), phy, reg)); + ARSWITCH_UNLOCK(sc); + + return (ret); +} + +int +arswitch_writephy_external(device_t dev, int phy, int reg, int data) +{ + struct arswitch_softc *sc; + + sc = device_get_softc(dev); + + ARSWITCH_LOCK(sc); + (void) MDIO_WRITEREG(device_get_parent(dev), phy, + reg, data); + ARSWITCH_UNLOCK(sc); + + return (0); +} + +/* + * Access PHYs integrated into the switch chip through the switch's MDIO * control register. */ int -arswitch_readphy(device_t dev, int phy, int reg) +arswitch_readphy_internal(device_t dev, int phy, int reg) { struct arswitch_softc *sc; uint32_t data = 0, ctrl; @@ -105,8 +140,10 @@ arswitch_readphy(device_t dev, int phy, int reg) if ((ctrl & AR8X16_MDIO_CTRL_BUSY) == 0) break; } - if (timeout < 0) + if (timeout < 0) { + DPRINTF(dev, "arswitch_readphy(): phy=%d.%02x; timeout=%d\n", phy, reg, timeout); goto fail; + } data = arswitch_readreg_lsb(dev, a) & AR8X16_MDIO_CTRL_DATA_MASK; ARSWITCH_UNLOCK(sc); @@ -118,7 +155,7 @@ arswitch_readphy(device_t dev, int phy, int reg) } int -arswitch_writephy(device_t dev, int phy, int reg, int data) +arswitch_writephy_internal(device_t dev, int phy, int reg, int data) { struct arswitch_softc *sc; uint32_t ctrl; diff --git a/sys/dev/etherswitch/arswitch/arswitch_phy.h b/sys/dev/etherswitch/arswitch/arswitch_phy.h index a3b762746df1..885be2711279 100644 --- a/sys/dev/etherswitch/arswitch/arswitch_phy.h +++ b/sys/dev/etherswitch/arswitch/arswitch_phy.h @@ -28,7 +28,10 @@ #ifndef __ARSWITCH_PHY_H__ #define __ARSWITCH_PHY_H__ -extern int arswitch_readphy(device_t dev, int phy, int reg); -extern int arswitch_writephy(device_t dev, int phy, int reg, int data); +extern int arswitch_readphy_external(device_t dev, int phy, int reg); +extern int arswitch_writephy_external(device_t dev, int phy, int reg, int data); + +extern int arswitch_readphy_internal(device_t dev, int phy, int reg); +extern int arswitch_writephy_internal(device_t dev, int phy, int reg, int data); #endif /* __ARSWITCH_PHY_H__ */ diff --git a/sys/dev/etherswitch/arswitch/arswitch_reg.c b/sys/dev/etherswitch/arswitch/arswitch_reg.c index 3251da8a2eec..d6797a8c572f 100644 --- a/sys/dev/etherswitch/arswitch/arswitch_reg.c +++ b/sys/dev/etherswitch/arswitch/arswitch_reg.c @@ -68,12 +68,13 @@ arswitch_split_setpage(device_t dev, uint32_t addr, uint16_t *phy, struct arswitch_softc *sc = device_get_softc(dev); uint16_t page; - page = ((addr) >> 9) & 0xffff; - *phy = (((addr) >> 6) & 0x07) | 0x10; - *reg = ((addr) >> 1) & 0x1f; + page = (addr >> 9) & 0x1ff; + *phy = (addr >> 6) & 0x7; + *reg = (addr >> 1) & 0x1f; if (sc->page != page) { MDIO_WRITEREG(device_get_parent(dev), 0x18, 0, page); + DELAY(2000); sc->page = 
page; } } @@ -87,9 +88,21 @@ static inline int arswitch_readreg16(device_t dev, int addr) { uint16_t phy, reg; - + arswitch_split_setpage(dev, addr, &phy, &reg); - return (MDIO_READREG(device_get_parent(dev), phy, reg)); + return (MDIO_READREG(device_get_parent(dev), 0x10 | phy, reg)); +} + +/* + * Write half a register. See above! + */ +static inline int +arswitch_writereg16(device_t dev, int addr, int data) +{ + uint16_t phy, reg; + + arswitch_split_setpage(dev, addr, &phy, &reg); + return (MDIO_WRITEREG(device_get_parent(dev), 0x10 | phy, reg, data)); } /* @@ -121,18 +134,70 @@ arswitch_writemmd(device_t dev, int phy, uint16_t dbg_addr, MII_ATH_MMD_DATA, dbg_data); } -/* - * Write half a register - */ -static inline int -arswitch_writereg16(device_t dev, int addr, int data) +static uint32_t +arswitch_reg_read32(device_t dev, int phy, int reg) { - uint16_t phy, reg; - - arswitch_split_setpage(dev, addr, &phy, &reg); - return (MDIO_WRITEREG(device_get_parent(dev), phy, reg, data)); + uint16_t lo, hi; + lo = MDIO_READREG(device_get_parent(dev), phy, reg); + hi = MDIO_READREG(device_get_parent(dev), phy, reg + 1); + + return (hi << 16) | lo; } +static int +arswitch_reg_write32(device_t dev, int phy, int reg, uint32_t value) +{ + struct arswitch_softc *sc; + int r; + uint16_t lo, hi; + + sc = device_get_softc(dev); + lo = value & 0xffff; + hi = (uint16_t) (value >> 16); + + if (sc->mii_lo_first) { + r = MDIO_WRITEREG(device_get_parent(dev), + phy, reg, lo); + r |= MDIO_WRITEREG(device_get_parent(dev), + phy, reg + 1, hi); + } else { + r = MDIO_WRITEREG(device_get_parent(dev), + phy, reg + 1, hi); + r |= MDIO_WRITEREG(device_get_parent(dev), + phy, reg, lo); + } + + return r; +} + +int +arswitch_readreg(device_t dev, int addr) +{ + uint16_t phy, reg; + + arswitch_split_setpage(dev, addr, &phy, &reg); + return arswitch_reg_read32(dev, 0x10 | phy, reg); +} + +int +arswitch_writereg(device_t dev, int addr, int value) +{ + struct arswitch_softc *sc; + uint16_t phy, reg; + + sc = device_get_softc(dev); + + arswitch_split_setpage(dev, addr, &phy, &reg); + return (arswitch_reg_write32(dev, 0x10 | phy, reg, value)); +} + +/* + * Read/write 16 bit values in the switch register space. + * + * Some of the registers are control registers (eg the MDIO + * data versus control space) and so need to be treated + * differently. + */ int arswitch_readreg_lsb(device_t dev, int addr) { @@ -161,53 +226,31 @@ arswitch_writereg_msb(device_t dev, int addr, int data) return (arswitch_writereg16(dev, addr + 2, (data >> 16) & 0xffff)); } -int -arswitch_readreg(device_t dev, int addr) -{ - - return (arswitch_readreg_lsb(dev, addr) | - arswitch_readreg_msb(dev, addr)); -} - -int -arswitch_writereg(device_t dev, int addr, int value) -{ - struct arswitch_softc *sc; - int r; - - sc = device_get_softc(dev); - - /* XXX Check the first write too? 
*/ - if (sc->mii_lo_first) { - r = arswitch_writereg_lsb(dev, addr, value); - r |= arswitch_writereg_msb(dev, addr, value); - } else { - r = arswitch_writereg_msb(dev, addr, value); - r |= arswitch_writereg_lsb(dev, addr, value); - } - - return r; -} - int arswitch_modifyreg(device_t dev, int addr, int mask, int set) { int value; - - value = arswitch_readreg(dev, addr); + uint16_t phy, reg; + + arswitch_split_setpage(dev, addr, &phy, &reg); + + value = arswitch_reg_read32(dev, 0x10 | phy, reg); value &= ~mask; value |= set; - return (arswitch_writereg(dev, addr, value)); + return (arswitch_reg_write32(dev, 0x10 | phy, reg, value)); } int arswitch_waitreg(device_t dev, int addr, int mask, int val, int timeout) { int err, v; + uint16_t phy, reg; + + arswitch_split_setpage(dev, addr, &phy, &reg); + err = -1; while (1) { - v = arswitch_readreg(dev, addr); + v = arswitch_reg_read32(dev, 0x10 | phy, reg); v &= mask; if (v == val) { err = 0; diff --git a/sys/dev/etherswitch/arswitch/arswitch_vlans.c b/sys/dev/etherswitch/arswitch/arswitch_vlans.c index 5e24e57fa13f..4cccde87a691 100644 --- a/sys/dev/etherswitch/arswitch/arswitch_vlans.c +++ b/sys/dev/etherswitch/arswitch/arswitch_vlans.c @@ -53,11 +53,10 @@ /* * XXX TODO: teach about the AR933x SoC switch * XXX TODO: teach about the AR934x SoC switch - * XXX TODO: teach about the AR8327 external switch */ static int -arswitch_vlan_op(struct arswitch_softc *sc, uint32_t op, uint32_t vid, +ar8xxx_vlan_op(struct arswitch_softc *sc, uint32_t op, uint32_t vid, uint32_t data) { int err; @@ -87,30 +86,30 @@ arswitch_vlan_op(struct arswitch_softc *sc, uint32_t op, uint32_t vid, return (0); } -static int -arswitch_flush_dot1q_vlan(struct arswitch_softc *sc) +int +ar8xxx_flush_dot1q_vlan(struct arswitch_softc *sc) { ARSWITCH_LOCK_ASSERT(sc, MA_OWNED); - return (arswitch_vlan_op(sc, AR8X16_VLAN_OP_FLUSH, 0, 0)); + return (ar8xxx_vlan_op(sc, AR8X16_VLAN_OP_FLUSH, 0, 0)); } -static int -arswitch_purge_dot1q_vlan(struct arswitch_softc *sc, int vid) +int +ar8xxx_purge_dot1q_vlan(struct arswitch_softc *sc, int vid) { ARSWITCH_LOCK_ASSERT(sc, MA_OWNED); - return (arswitch_vlan_op(sc, AR8X16_VLAN_OP_PURGE, vid, 0)); + return (ar8xxx_vlan_op(sc, AR8X16_VLAN_OP_PURGE, vid, 0)); } -static int -arswitch_get_dot1q_vlan(struct arswitch_softc *sc, uint32_t *ports, int vid) +int +ar8xxx_get_dot1q_vlan(struct arswitch_softc *sc, uint32_t *ports, int vid) { uint32_t reg; int err; ARSWITCH_LOCK_ASSERT(sc, MA_OWNED); - err = arswitch_vlan_op(sc, AR8X16_VLAN_OP_GET, vid, 0); + err = ar8xxx_vlan_op(sc, AR8X16_VLAN_OP_GET, vid, 0); if (err) return (err); @@ -124,20 +123,20 @@ arswitch_get_dot1q_vlan(struct arswitch_softc *sc, uint32_t *ports, int vid) return (0); } -static int -arswitch_set_dot1q_vlan(struct arswitch_softc *sc, uint32_t ports, int vid) +int +ar8xxx_set_dot1q_vlan(struct arswitch_softc *sc, uint32_t ports, int vid) { int err; ARSWITCH_LOCK_ASSERT(sc, MA_OWNED); - err = arswitch_vlan_op(sc, AR8X16_VLAN_OP_LOAD, vid, ports); + err = ar8xxx_vlan_op(sc, AR8X16_VLAN_OP_LOAD, vid, ports); if (err) return (err); return (0); } -static int -arswitch_get_port_vlan(struct arswitch_softc *sc, uint32_t *ports, int vid) +int +ar8xxx_get_port_vlan(struct arswitch_softc *sc, uint32_t *ports, int vid) { int port; uint32_t reg; @@ -151,8 +150,8 @@ arswitch_get_port_vlan(struct arswitch_softc *sc, uint32_t *ports, int vid) return (0); } -static int -arswitch_set_port_vlan(struct arswitch_softc *sc, uint32_t ports, int vid) +int +ar8xxx_set_port_vlan(struct arswitch_softc *sc, uint32_t 
ports, int vid) { int err, port; @@ -193,7 +192,7 @@ ar8xxx_reset_vlans(struct arswitch_softc *sc) } } - if (arswitch_flush_dot1q_vlan(sc)) { + if (sc->hal.arswitch_flush_dot1q_vlan(sc)) { ARSWITCH_UNLOCK(sc); return; } @@ -224,7 +223,7 @@ ar8xxx_reset_vlans(struct arswitch_softc *sc) ports = 0; for (i = 0; i <= sc->numphys; i++) ports |= (1 << i); - arswitch_set_dot1q_vlan(sc, ports, sc->vid[0]); + sc->hal.arswitch_set_dot1q_vlan(sc, ports, sc->vid[0]); sc->vid[0] |= ETHERSWITCH_VID_VALID; } else if (sc->vlan_mode == ETHERSWITCH_VLAN_PORT) { /* Initialize the port based vlans. */ @@ -286,11 +285,11 @@ ar8xxx_getvgroup(struct arswitch_softc *sc, etherswitch_vlangroup_t *vg) /* Member Ports. */ switch (sc->vlan_mode) { case ETHERSWITCH_VLAN_DOT1Q: - err = arswitch_get_dot1q_vlan(sc, &vg->es_member_ports, + err = sc->hal.arswitch_get_dot1q_vlan(sc, &vg->es_member_ports, vg->es_vid); break; case ETHERSWITCH_VLAN_PORT: - err = arswitch_get_port_vlan(sc, &vg->es_member_ports, + err = sc->hal.arswitch_get_port_vlan(sc, &vg->es_member_ports, vg->es_vid); break; default: @@ -323,7 +322,7 @@ ar8xxx_setvgroup(struct arswitch_softc *sc, etherswitch_vlangroup_t *vg) (vid & ETHERSWITCH_VID_VALID) != 0 && (vid & ETHERSWITCH_VID_MASK) != (vg->es_vid & ETHERSWITCH_VID_MASK)) { - err = arswitch_purge_dot1q_vlan(sc, vid); + err = sc->hal.arswitch_purge_dot1q_vlan(sc, vid); if (err) { ARSWITCH_UNLOCK(sc); return (err); @@ -345,10 +344,10 @@ ar8xxx_setvgroup(struct arswitch_softc *sc, etherswitch_vlangroup_t *vg) /* Member Ports. */ switch (sc->vlan_mode) { case ETHERSWITCH_VLAN_DOT1Q: - err = arswitch_set_dot1q_vlan(sc, vg->es_member_ports, vid); + err = sc->hal.arswitch_set_dot1q_vlan(sc, vg->es_member_ports, vid); break; case ETHERSWITCH_VLAN_PORT: - err = arswitch_set_port_vlan(sc, vg->es_member_ports, vid); + err = sc->hal.arswitch_set_port_vlan(sc, vg->es_member_ports, vid); break; default: err = -1; diff --git a/sys/dev/etherswitch/arswitch/arswitch_vlans.h b/sys/dev/etherswitch/arswitch/arswitch_vlans.h index 0cd1af832007..79853bd8fee7 100644 --- a/sys/dev/etherswitch/arswitch/arswitch_vlans.h +++ b/sys/dev/etherswitch/arswitch/arswitch_vlans.h @@ -35,4 +35,11 @@ int ar8xxx_setvgroup(struct arswitch_softc *, etherswitch_vlangroup_t *); int ar8xxx_get_pvid(struct arswitch_softc *, int, int *); int ar8xxx_set_pvid(struct arswitch_softc *, int, int); +int ar8xxx_flush_dot1q_vlan(struct arswitch_softc *sc); +int ar8xxx_purge_dot1q_vlan(struct arswitch_softc *sc, int vid); +int ar8xxx_get_dot1q_vlan(struct arswitch_softc *sc, uint32_t *ports, int vid); +int ar8xxx_set_dot1q_vlan(struct arswitch_softc *sc, uint32_t ports, int vid); +int ar8xxx_get_port_vlan(struct arswitch_softc *sc, uint32_t *ports, int vid); +int ar8xxx_set_port_vlan(struct arswitch_softc *sc, uint32_t ports, int vid); + #endif /* __ARSWITCH_VLANS_H__ */ diff --git a/sys/dev/etherswitch/arswitch/arswitchreg.h b/sys/dev/etherswitch/arswitch/arswitchreg.h index b341c2859988..80f95b6502ab 100644 --- a/sys/dev/etherswitch/arswitch/arswitchreg.h +++ b/sys/dev/etherswitch/arswitch/arswitchreg.h @@ -370,6 +370,9 @@ #define AR8327_NUM_PHYS 5 #define AR8327_PORTS_ALL 0x7f +#define AR8327_PORT_GMAC0 0 +#define AR8327_PORT_GMAC6 6 + #define AR8327_REG_MASK 0x000 #define AR8327_REG_PAD0_MODE 0x004 diff --git a/sys/dev/etherswitch/arswitch/arswitchvar.h b/sys/dev/etherswitch/arswitch/arswitchvar.h index e533e87e8ade..d9b4aaeb567d 100644 --- a/sys/dev/etherswitch/arswitch/arswitchvar.h +++ b/sys/dev/etherswitch/arswitch/arswitchvar.h @@ -60,7 +60,7 @@ 
struct arswitch_softc { int is_internal_switch; int chip_ver; int chip_rev; - int mii_lo_first; + int mii_lo_first; /* Send low data DWORD before high */ ar8x16_switch_type sc_switchtype; /* should be the max of both pre-AR8327 and AR8327 ports */ char *ifname[ARSWITCH_NUM_PHYS]; @@ -98,6 +98,22 @@ struct arswitch_softc { int *); int (* arswitch_vlan_set_pvid) (struct arswitch_softc *, int, int); + + int (* arswitch_flush_dot1q_vlan) (struct arswitch_softc *sc); + int (* arswitch_purge_dot1q_vlan) (struct arswitch_softc *sc, + int vid); + int (* arswitch_get_dot1q_vlan) (struct arswitch_softc *, + uint32_t *ports, int vid); + int (* arswitch_set_dot1q_vlan) (struct arswitch_softc *sc, + uint32_t ports, int vid); + int (* arswitch_get_port_vlan) (struct arswitch_softc *sc, + uint32_t *ports, int vid); + int (* arswitch_set_port_vlan) (struct arswitch_softc *sc, + uint32_t ports, int vid); + + /* PHY functions */ + int (* arswitch_phy_read) (device_t, int, int); + int (* arswitch_phy_write) (device_t, int, int, int); } hal; struct { diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c index 11a33d73e614..bf5632b42c80 100644 --- a/sys/dev/gpio/gpiobus.c +++ b/sys/dev/gpio/gpiobus.c @@ -185,9 +185,9 @@ gpiobus_init_softc(device_t dev) /* Pins = GPIO_PIN_MAX() + 1 */ sc->sc_npins++; - sc->sc_pins_mapped = malloc(sizeof(int) * sc->sc_npins, M_DEVBUF, + sc->sc_pins = malloc(sizeof(*sc->sc_pins) * sc->sc_npins, M_DEVBUF, M_NOWAIT | M_ZERO); - if (sc->sc_pins_mapped == NULL) + if (sc->sc_pins == NULL) return (ENOMEM); /* Initialize the bus lock. */ @@ -242,11 +242,11 @@ gpiobus_map_pin(device_t bus, uint32_t pin) return (-1); } /* Mark pin as mapped and give warning if it's already mapped. */ - if (sc->sc_pins_mapped[pin]) { + if (sc->sc_pins[pin].mapped) { device_printf(bus, "warning: pin %d is already mapped\n", pin); return (-1); } - sc->sc_pins_mapped[pin] = 1; + sc->sc_pins[pin].mapped = 1; return (0); } @@ -281,6 +281,9 @@ gpiobus_parse_pins(struct gpiobus_softc *sc, device_t child, int mask) return (EINVAL); } devi->pins[npins++] = i; + /* Use the child name as pin name. */ + GPIOBUS_PIN_SETNAME(sc->sc_busdev, i, + device_get_nameunit(child)); } return (0); @@ -340,10 +343,14 @@ gpiobus_detach(device_t dev) gpiobus_free_ivars(devi); } free(devlist, M_TEMP); - - if (sc->sc_pins_mapped) { - free(sc->sc_pins_mapped, M_DEVBUF); - sc->sc_pins_mapped = NULL; + if (sc->sc_pins) { + for (i = 0; i < sc->sc_npins; i++) { + if (sc->sc_pins[i].name != NULL) + free(sc->sc_pins[i].name, M_DEVBUF); + sc->sc_pins[i].name = NULL; + } + free(sc->sc_pins, M_DEVBUF); + sc->sc_pins = NULL; } return (0); @@ -664,6 +671,43 @@ gpiobus_pin_toggle(device_t dev, device_t child, uint32_t pin) return GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin]); } +static int +gpiobus_pin_getname(device_t dev, uint32_t pin, char *name) +{ + struct gpiobus_softc *sc; + + sc = GPIOBUS_SOFTC(dev); + if (pin > sc->sc_npins) + return (EINVAL); + /* Did we have a name for this pin ? */ + if (sc->sc_pins[pin].name != NULL) { + memcpy(name, sc->sc_pins[pin].name, GPIOMAXNAME); + return (0); + } + + /* Return the default pin name. */ + return (GPIO_PIN_GETNAME(device_get_parent(dev), pin, name)); +} + +static int +gpiobus_pin_setname(device_t dev, uint32_t pin, const char *name) +{ + struct gpiobus_softc *sc; + + sc = GPIOBUS_SOFTC(dev); + if (pin > sc->sc_npins) + return (EINVAL); + if (name == NULL) + return (EINVAL); + /* Save the pin name. 
*/ + if (sc->sc_pins[pin].name == NULL) + sc->sc_pins[pin].name = malloc(GPIOMAXNAME, M_DEVBUF, + M_WAITOK | M_ZERO); + strlcpy(sc->sc_pins[pin].name, name, GPIOMAXNAME); + + return (0); +} + static device_method_t gpiobus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gpiobus_probe), @@ -699,6 +743,8 @@ static device_method_t gpiobus_methods[] = { DEVMETHOD(gpiobus_pin_get, gpiobus_pin_get), DEVMETHOD(gpiobus_pin_set, gpiobus_pin_set), DEVMETHOD(gpiobus_pin_toggle, gpiobus_pin_toggle), + DEVMETHOD(gpiobus_pin_getname, gpiobus_pin_getname), + DEVMETHOD(gpiobus_pin_setname, gpiobus_pin_setname), DEVMETHOD_END }; diff --git a/sys/dev/gpio/gpiobus_if.m b/sys/dev/gpio/gpiobus_if.m index 242502005428..ee926d5b96f0 100644 --- a/sys/dev/gpio/gpiobus_if.m +++ b/sys/dev/gpio/gpiobus_if.m @@ -106,3 +106,21 @@ METHOD int pin_setflags { uint32_t pin_num; uint32_t flags; }; + +# +# Get the pin name +# +METHOD int pin_getname { + device_t dev; + uint32_t pin_num; + char *name; +}; + +# +# Set the pin name +# +METHOD int pin_setname { + device_t dev; + uint32_t pin_num; + const char *name; +}; diff --git a/sys/dev/gpio/gpiobusvar.h b/sys/dev/gpio/gpiobusvar.h index 315709a448ae..6a614cf8a6af 100644 --- a/sys/dev/gpio/gpiobusvar.h +++ b/sys/dev/gpio/gpiobusvar.h @@ -60,6 +60,12 @@ #define GPIOBUS_WAIT 1 #define GPIOBUS_DONTWAIT 2 +struct gpiobus_pin_data +{ + int mapped; /* pin is mapped/reserved. */ + char *name; /* pin name. */ +}; + struct gpiobus_softc { struct mtx sc_mtx; /* bus mutex */ @@ -68,7 +74,7 @@ struct gpiobus_softc device_t sc_owner; /* bus owner */ device_t sc_dev; /* driver device */ int sc_npins; /* total pins on bus */ - int *sc_pins_mapped; /* mark mapped pins */ + struct gpiobus_pin_data *sc_pins; /* pin data */ }; struct gpiobus_pin diff --git a/sys/dev/gpio/gpioc.c b/sys/dev/gpio/gpioc.c index 2fad4dffb286..708a3c2043a5 100644 --- a/sys/dev/gpio/gpioc.c +++ b/sys/dev/gpio/gpioc.c @@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$"); #include #include "gpio_if.h" +#include "gpiobus_if.h" #undef GPIOC_DEBUG #ifdef GPIOC_DEBUG @@ -112,12 +113,16 @@ static int gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag, struct thread *td) { + device_t bus; int max_pin, res; struct gpioc_softc *sc = cdev->si_drv1; struct gpio_pin pin; struct gpio_req req; uint32_t caps; + bus = GPIO_GET_BUS(sc->sc_pdev); + if (bus == NULL) + return (EINVAL); switch (cmd) { case GPIOMAXPIN: max_pin = -1; @@ -133,7 +138,7 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag, if (res) break; GPIO_PIN_GETCAPS(sc->sc_pdev, pin.gp_pin, &pin.gp_caps); - GPIO_PIN_GETNAME(sc->sc_pdev, pin.gp_pin, pin.gp_name); + GPIOBUS_PIN_GETNAME(bus, pin.gp_pin, pin.gp_name); bcopy(&pin, arg, sizeof(pin)); break; case GPIOSETCONFIG: @@ -167,6 +172,12 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag, req.gp_pin); res = GPIO_PIN_TOGGLE(sc->sc_pdev, req.gp_pin); break; + case GPIOSETNAME: + bcopy(arg, &pin, sizeof(pin)); + dprintf("set name on pin %d\n", pin.gp_pin); + res = GPIOBUS_PIN_SETNAME(bus, pin.gp_pin, + pin.gp_name); + break; default: return (ENOTTY); break; diff --git a/sys/dev/gpio/ofw_gpiobus.c b/sys/dev/gpio/ofw_gpiobus.c index da72d4dfed20..368528de18f0 100644 --- a/sys/dev/gpio/ofw_gpiobus.c +++ b/sys/dev/gpio/ofw_gpiobus.c @@ -39,6 +39,8 @@ __FBSDID("$FreeBSD$"); #include #include +#include "gpiobus_if.h" + static struct ofw_gpiobus_devinfo *ofw_gpiobus_setup_devinfo(device_t, device_t, phandle_t); static void ofw_gpiobus_destroy_devinfo(device_t, struct 
ofw_gpiobus_devinfo *); @@ -49,6 +51,8 @@ device_t ofw_gpiobus_add_fdt_child(device_t bus, const char *drvname, phandle_t child) { device_t childdev; + int i; + struct gpiobus_ivar *devi; struct ofw_gpiobus_devinfo *dinfo; /* @@ -67,6 +71,11 @@ ofw_gpiobus_add_fdt_child(device_t bus, const char *drvname, phandle_t child) device_delete_child(bus, childdev); return (NULL); } + /* Use the child name as pin name. */ + devi = &dinfo->opd_dinfo; + for (i = 0; i < devi->npins; i++) + GPIOBUS_PIN_SETNAME(bus, devi->pins[i], + device_get_nameunit(childdev)); return (childdev); } @@ -159,7 +168,7 @@ ofw_gpiobus_destroy_devinfo(device_t bus, struct ofw_gpiobus_devinfo *dinfo) for (i = 0; i < devi->npins; i++) { if (devi->pins[i] > sc->sc_npins) continue; - sc->sc_pins_mapped[devi->pins[i]] = 0; + sc->sc_pins[devi->pins[i]].mapped = 0; } gpiobus_free_ivars(devi); resource_list_free(&dinfo->opd_dinfo.rl); diff --git a/sys/dev/hwpmc/hwpmc_core.c b/sys/dev/hwpmc/hwpmc_core.c index 6c241422d9cd..50d51c7f7fe7 100644 --- a/sys/dev/hwpmc/hwpmc_core.c +++ b/sys/dev/hwpmc/hwpmc_core.c @@ -1126,6 +1126,10 @@ static struct iap_event_descr iap_events[] = { IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), IAPDESCR(79H_18H, 0x79, 0x18, IAP_F_FM | IAP_F_IB | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(79H_24H, 0x79, 0x24, IAP_F_FM | IAP_F_IB | IAP_F_IBX | IAP_F_HW | + IAP_F_HWX), + IAPDESCR(79H_3CH, 0x79, 0x3C, IAP_F_FM | IAP_F_IB | IAP_F_IBX | IAP_F_HW | + IAP_F_HWX), IAPDESCR(7AH, 0x7A, IAP_M_AGENT, IAP_F_CA | IAP_F_CC2), @@ -1188,46 +1192,57 @@ static struct iap_event_descr iap_events[] = { IAPDESCR(87H_0FH, 0x87, 0x0F, IAP_F_FM | IAP_F_I7 | IAP_F_WM), IAPDESCR(88H_00H, 0x88, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2), - IAPDESCR(88H_01H, 0x88, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM | - IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), - IAPDESCR(88H_02H, 0x88, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM | - IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), - IAPDESCR(88H_04H, 0x88, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM | - IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(88H_01H, 0x88, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM), + IAPDESCR(88H_02H, 0x88, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM), + IAPDESCR(88H_04H, 0x88, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM), IAPDESCR(88H_07H, 0x88, 0x07, IAP_F_FM | IAP_F_I7 | IAP_F_WM), - IAPDESCR(88H_08H, 0x88, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM | - IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), - IAPDESCR(88H_10H, 0x88, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM | - IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), - IAPDESCR(88H_20H, 0x88, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM | - IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(88H_08H, 0x88, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM), + IAPDESCR(88H_10H, 0x88, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM), + IAPDESCR(88H_20H, 0x88, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM), IAPDESCR(88H_30H, 0x88, 0x30, IAP_F_FM | IAP_F_I7 | IAP_F_WM), - IAPDESCR(88H_40H, 0x88, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM | - IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(88H_40H, 0x88, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM), IAPDESCR(88H_7FH, 0x88, 0x7F, IAP_F_FM | IAP_F_I7 | IAP_F_WM), - IAPDESCR(88H_80H, 0x88, 0x80, IAP_F_FM | IAP_F_SB | IAP_F_IB | + IAPDESCR(88H_41H, 0x88, 0x41, IAP_F_FM | IAP_F_SB | IAP_F_IB | + IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(88H_81H, 0x88, 0x81, IAP_F_FM | IAP_F_SB | 
IAP_F_IB | + IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(88H_82H, 0x88, 0x82, IAP_F_FM | IAP_F_SB | IAP_F_IB | + IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(88H_84H, 0x88, 0x84, IAP_F_FM | IAP_F_SB | IAP_F_IB | + IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(88H_88H, 0x88, 0x88, IAP_F_FM | IAP_F_SB | IAP_F_IB | + IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(88H_90H, 0x88, 0x90, IAP_F_FM | IAP_F_SB | IAP_F_IB | + IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(88H_A0H, 0x88, 0xA0, IAP_F_FM | IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), IAPDESCR(88H_FFH, 0x88, 0xFF, IAP_F_FM | IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), IAPDESCR(89H_00H, 0x89, 0x00, IAP_F_FM | IAP_F_ALLCPUSCORE2), - IAPDESCR(89H_01H, 0x89, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM | - IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(89H_01H, 0x89, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM), IAPDESCR(89H_02H, 0x89, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM), - IAPDESCR(89H_04H, 0x89, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM | - IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(89H_04H, 0x89, 0x04, IAP_F_FM | IAP_F_I7 | IAP_F_WM), IAPDESCR(89H_07H, 0x89, 0x07, IAP_F_FM | IAP_F_I7 | IAP_F_WM), - IAPDESCR(89H_08H, 0x89, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM | - IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), - IAPDESCR(89H_10H, 0x89, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM | - IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), - IAPDESCR(89H_20H, 0x89, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM | - IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(89H_08H, 0x89, 0x08, IAP_F_FM | IAP_F_I7 | IAP_F_WM), + IAPDESCR(89H_10H, 0x89, 0x10, IAP_F_FM | IAP_F_I7 | IAP_F_WM), + IAPDESCR(89H_20H, 0x89, 0x20, IAP_F_FM | IAP_F_I7 | IAP_F_WM), IAPDESCR(89H_30H, 0x89, 0x30, IAP_F_FM | IAP_F_I7 | IAP_F_WM), - IAPDESCR(89H_40H, 0x89, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM | - IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(89H_40H, 0x89, 0x40, IAP_F_FM | IAP_F_I7 | IAP_F_WM), IAPDESCR(89H_7FH, 0x89, 0x7F, IAP_F_FM | IAP_F_I7 | IAP_F_WM), - IAPDESCR(89H_80H, 0x89, 0x80, IAP_F_FM | IAP_F_SB | IAP_F_IB | + IAPDESCR(89H_41H, 0x89, 0x41, IAP_F_FM | IAP_F_SB | IAP_F_IB | + IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(89H_81H, 0x89, 0x81, IAP_F_FM | IAP_F_SB | IAP_F_IB | + IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(89H_82H, 0x89, 0x82, IAP_F_FM | IAP_F_SB | IAP_F_IB | + IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(89H_84H, 0x89, 0x84, IAP_F_FM | IAP_F_SB | IAP_F_IB | + IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(89H_88H, 0x89, 0x88, IAP_F_FM | IAP_F_SB | IAP_F_IB | + IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(89H_90H, 0x89, 0x90, IAP_F_FM | IAP_F_SB | IAP_F_IB | + IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(89H_A0H, 0x89, 0xA0, IAP_F_FM | IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), IAPDESCR(89H_FFH, 0x89, 0xFF, IAP_F_FM | IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), @@ -1261,7 +1276,8 @@ static struct iap_event_descr iap_events[] = { IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), IAPDESCR(A1H_08H, 0xA1, 0x08, IAP_F_FM | IAP_F_CA | IAP_F_CC2 | /* No desc in IB for this*/ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), - IAPDESCR(A1H_0CH, 0xA1, 
0x0C, IAP_F_IB | IAP_F_IBX), + IAPDESCR(A1H_0CH, 0xA1, 0x0C, IAP_F_FM | IAP_F_SB | IAP_F_IB | + IAP_F_SBX | IAP_F_IBX), IAPDESCR(A1H_10H, 0xA1, 0x10, IAP_F_FM | IAP_F_CA | IAP_F_CC2 | /* No desc in IB for this*/ IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), IAPDESCR(A1H_20H, 0xA1, 0x20, IAP_F_FM | IAP_F_CA | IAP_F_CC2 | /* No desc in IB for this*/ @@ -1586,29 +1602,21 @@ static struct iap_event_descr iap_events[] = { /* Sandy Bridge / Sandy Bridge Xeon - 11, 12, 21, 41, 42, 81, 82 */ IAPDESCR(D0H_00H, 0xD0, 0x00, IAP_F_FM | IAP_F_CC), - IAPDESCR(D0H_01H, 0xD0, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM | IAP_F_IB | - IAP_F_IBX | IAP_F_HW | IAP_F_HWX), - IAPDESCR(D0H_02H, 0xD0, 0x02, IAP_F_FM | IAP_F_IB | IAP_F_IBX | IAP_F_HW | - IAP_F_HWX), - IAPDESCR(D0H_10H, 0xD0, 0x10, IAP_F_FM | IAP_F_IB | IAP_F_IBX | IAP_F_HW | - IAP_F_HWX), - IAPDESCR(D0H_11H, 0xD0, 0x11, IAP_F_FM | IAP_F_SB | IAP_F_SBX), - IAPDESCR(D0H_12H, 0xD0, 0x12, IAP_F_FM | IAP_F_SB | IAP_F_SBX), - IAPDESCR(D0H_20H, 0xD0, 0x20, IAP_F_FM | IAP_F_IB | IAP_F_IBX | IAP_F_HW | - IAP_F_HWX), + IAPDESCR(D0H_01H, 0xD0, 0x01, IAP_F_FM | IAP_F_I7 | IAP_F_WM), + IAPDESCR(D0H_11H, 0xD0, 0x11, IAP_F_FM | IAP_F_SB | IAP_F_SBX | IAP_F_IB | + IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(D0H_12H, 0xD0, 0x12, IAP_F_FM | IAP_F_SB | IAP_F_SBX | IAP_F_IB | + IAP_F_IBX | IAP_F_HW | IAP_F_HWX), IAPDESCR(D0H_21H, 0xD0, 0x21, IAP_F_FM | IAP_F_SB | IAP_F_SBX), - IAPDESCR(D0H_40H, 0xD0, 0x40, IAP_F_FM | IAP_F_IB | IAP_F_IBX | IAP_F_HW | - IAP_F_HWX), - IAPDESCR(D0H_41H, 0xD0, 0x41, IAP_F_FM | IAP_F_SB | IAP_F_SBX | - IAP_F_IB | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), /* Not in spec but in linux and Vtune guide */ - IAPDESCR(D0H_42H, 0xD0, 0x42, IAP_F_FM | IAP_F_SB | IAP_F_SBX | - IAP_F_IB | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), /* Not in spec but in linux and Vtune guide */ - IAPDESCR(D0H_80H, 0xD0, 0x80, IAP_F_FM | IAP_F_IB | IAP_F_IBX | IAP_F_HW | - IAP_F_HWX), - IAPDESCR(D0H_81H, 0xD0, 0x81, IAP_F_FM | IAP_F_SB | IAP_F_SBX | - IAP_F_IB | IAP_F_IBX), /* Not in spec but in linux and Vtune guide */ - IAPDESCR(D0H_82H, 0xD0, 0x82, IAP_F_FM | IAP_F_SB | IAP_F_SBX | - IAP_F_IB | IAP_F_IBX), /* Not in spec but in linux and Vtune guide */ + IAPDESCR(D0H_41H, 0xD0, 0x41, IAP_F_FM | IAP_F_SB | IAP_F_SBX | IAP_F_IB | + IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(D0H_42H, 0xD0, 0x42, IAP_F_FM | IAP_F_SB | IAP_F_SBX | IAP_F_IB | + IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(D0H_81H, 0xD0, 0x81, IAP_F_FM | IAP_F_SB | IAP_F_SBX | IAP_F_IB | + IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(D0H_82H, 0xD0, 0x82, IAP_F_FM | IAP_F_SB | IAP_F_SBX | IAP_F_IB | + IAP_F_IBX | IAP_F_HW | IAP_F_HWX), + IAPDESCR(D1H_01H, 0xD1, 0x01, IAP_F_FM | IAP_F_WM | IAP_F_SB | IAP_F_IB | IAP_F_SBX | IAP_F_IBX | IAP_F_HW | IAP_F_HWX), IAPDESCR(D1H_02H, 0xD1, 0x02, IAP_F_FM | IAP_F_I7 | IAP_F_WM | @@ -1965,15 +1973,15 @@ iap_event_sb_sbx_ib_ibx_ok_on_counter(enum pmc_event pe, int ri) break; /* Events valid only on counter 1. */ case PMC_EV_IAP_EVENT_C0H_01H: - mask = 0x1; + mask = 0x2; break; /* Events valid only on counter 2. */ case PMC_EV_IAP_EVENT_48H_01H: case PMC_EV_IAP_EVENT_A2H_02H: + case PMC_EV_IAP_EVENT_A3H_08H: mask = 0x4; break; /* Events valid only on counter 3. 
*/ - case PMC_EV_IAP_EVENT_A3H_08H: case PMC_EV_IAP_EVENT_BBH_01H: case PMC_EV_IAP_EVENT_CDH_01H: case PMC_EV_IAP_EVENT_CDH_02H: diff --git a/sys/dev/hwpmc/pmc_events.h b/sys/dev/hwpmc/pmc_events.h index 5ecfd81b72da..400a39401fab 100644 --- a/sys/dev/hwpmc/pmc_events.h +++ b/sys/dev/hwpmc/pmc_events.h @@ -856,7 +856,14 @@ __PMC_EV(IAP, EVENT_88H_10H) \ __PMC_EV(IAP, EVENT_88H_20H) \ __PMC_EV(IAP, EVENT_88H_30H) \ __PMC_EV(IAP, EVENT_88H_40H) \ +__PMC_EV(IAP, EVENT_88H_41H) \ __PMC_EV(IAP, EVENT_88H_80H) \ +__PMC_EV(IAP, EVENT_88H_81H) \ +__PMC_EV(IAP, EVENT_88H_82H) \ +__PMC_EV(IAP, EVENT_88H_84H) \ +__PMC_EV(IAP, EVENT_88H_88H) \ +__PMC_EV(IAP, EVENT_88H_90H) \ +__PMC_EV(IAP, EVENT_88H_A0H) \ __PMC_EV(IAP, EVENT_88H_7FH) \ __PMC_EV(IAP, EVENT_88H_FFH) \ __PMC_EV(IAP, EVENT_89H_00H) \ @@ -869,7 +876,14 @@ __PMC_EV(IAP, EVENT_89H_10H) \ __PMC_EV(IAP, EVENT_89H_20H) \ __PMC_EV(IAP, EVENT_89H_30H) \ __PMC_EV(IAP, EVENT_89H_40H) \ +__PMC_EV(IAP, EVENT_89H_41H) \ __PMC_EV(IAP, EVENT_89H_80H) \ +__PMC_EV(IAP, EVENT_89H_81H) \ +__PMC_EV(IAP, EVENT_89H_82H) \ +__PMC_EV(IAP, EVENT_89H_84H) \ +__PMC_EV(IAP, EVENT_89H_88H) \ +__PMC_EV(IAP, EVENT_89H_90H) \ +__PMC_EV(IAP, EVENT_89H_A0H) \ __PMC_EV(IAP, EVENT_89H_7FH) \ __PMC_EV(IAP, EVENT_89H_FFH) \ __PMC_EV(IAP, EVENT_8AH_00H) \ @@ -2578,6 +2592,7 @@ __PMC_EV_ALIAS("SIMD_INT_64.SHUFFLE_MOVE", IAP_EVENT_FDH_40H) * Aliases for Haswell core PMC events */ #define __PMC_EV_ALIAS_HASWELL_XEON() \ +__PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \ __PMC_EV_ALIAS("LD_BLOCKS.STORE_FORWARD", IAP_EVENT_03H_02H) \ __PMC_EV_ALIAS("LD_BLOCKS.NO_SR", IAP_EVENT_03H_08H) \ __PMC_EV_ALIAS("MISALIGN_MEM_REF.LOADS", IAP_EVENT_05H_01H) \ @@ -2585,7 +2600,7 @@ __PMC_EV_ALIAS("MISALIGN_MEM_REF.STORES", IAP_EVENT_05H_02H) \ __PMC_EV_ALIAS("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", IAP_EVENT_07H_01H) \ __PMC_EV_ALIAS("DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK", IAP_EVENT_08H_01H)\ __PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_COMPLETED_4K", IAP_EVENT_08H_02H) \ -__PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4K", \ +__PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M", \ IAP_EVENT_08H_04H) \ __PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_COMPLETED", IAP_EVENT_08H_0EH) \ __PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_DURATION", IAP_EVENT_08H_10H) \ @@ -2641,8 +2656,8 @@ __PMC_EV_ALIAS("MOVE_ELIMINATION.SMID_NOT_ELIMINATED", \ IAP_EVENT_58H_08H) \ __PMC_EV_ALIAS("MOVE_ELIMINATION.INT_ELIMINATED", IAP_EVENT_58H_01H) \ __PMC_EV_ALIAS("MOVE_ELIMINATION.SMID_ELIMINATED", IAP_EVENT_58H_02H) \ -__PMC_EV_ALIAS("CPL_CYCLES.RING0", IAP_EVENT_5CH_02H) \ -__PMC_EV_ALIAS("CPL_CYCLES.RING123", IAP_EVENT_5CH_01H) \ +__PMC_EV_ALIAS("CPL_CYCLES.RING0", IAP_EVENT_5CH_01H) \ +__PMC_EV_ALIAS("CPL_CYCLES.RING123", IAP_EVENT_5CH_02H) \ __PMC_EV_ALIAS("RS_EVENTS.EMPTY_CYCLES", IAP_EVENT_5EH_01H) \ __PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", \ IAP_EVENT_60H_01H) \ @@ -2677,24 +2692,22 @@ __PMC_EV_ALIAS("ITLB_MISSES.STLB_HIT_2M", IAP_EVENT_85H_40H) \ __PMC_EV_ALIAS("ITLB_MISSES.STLB_HIT", IAP_EVENT_85H_60H) \ __PMC_EV_ALIAS("ILD_STALL.LCP", IAP_EVENT_87H_01H) \ __PMC_EV_ALIAS("ILD_STALL.IQ_FULL", IAP_EVENT_87H_04H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.COND", IAP_EVENT_88H_01H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_02H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN_COND", IAP_EVENT_88H_41H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN_COND", IAP_EVENT_88H_81H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_82H) \ __PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET", \ - IAP_EVENT_88H_04H) \ 
-__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_08H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_10H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_20H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN", IAP_EVENT_88H_40H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN", IAP_EVENT_88H_80H) \ + IAP_EVENT_88H_84H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_88H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_90H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_A0H) \ __PMC_EV_ALIAS("BR_INST_EXEC.ALL_BRANCHES", IAP_EVENT_88H_FFH) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.COND", IAP_EVENT_89H_01H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN_COND", IAP_EVENT_89H_41H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN_COND", IAP_EVENT_89H_81H) \ __PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET", \ - IAP_EVENT_89H_04H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_08H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_10H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_20H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN", IAP_EVENT_89H_40H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN", IAP_EVENT_89H_80H) \ + IAP_EVENT_89H_84H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_88H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_90H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_A0H) \ __PMC_EV_ALIAS("BR_MISP_EXEC.ALL_BRANCHES", IAP_EVENT_89H_FFH) \ __PMC_EV_ALIAS("IDQ_UOPS_NOT_DELIVERED.CORE", IAP_EVENT_9CH_01H) \ __PMC_EV_ALIAS("UOPS_EXECUTED_PORT.PORT_0", IAP_EVENT_A1H_01H) \ @@ -2762,14 +2775,12 @@ __PMC_EV_ALIAS("FP_ASSIST.SIMD_INPUT", IAP_EVENT_CAH_10H) \ __PMC_EV_ALIAS("FP_ASSIST.ANY", IAP_EVENT_CAH_1EH) \ __PMC_EV_ALIAS("ROB_MISC_EVENTS.LBR_INSERTS", IAP_EVENT_CCH_20H) \ __PMC_EV_ALIAS("MEM_TRANS_RETIRED.LOAD_LATENCY", IAP_EVENT_CDH_01H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOADS", IAP_EVENT_D0H_01H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.STORES", IAP_EVENT_D0H_02H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.STLB_MISS", IAP_EVENT_D0H_10H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOCK", IAP_EVENT_D0H_20H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.SPLIT", IAP_EVENT_D0H_40H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.SPLIT_LOADS", IAP_EVENT_D0H_41H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.SPLIT_STORES", IAP_EVENT_D0H_42H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.ALL", IAP_EVENT_D0H_80H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.STLB_MISS_LOADS", IAP_EVENT_D0H_11H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.STLB_MISS_STORES", IAP_EVENT_D0H_12H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.SPLIT_LOADS", IAP_EVENT_D0H_41H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.SPLIT_STORES", IAP_EVENT_D0H_42H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.ALL_LOADS", IAP_EVENT_D0H_81H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.ALL_STORES", IAP_EVENT_D0H_82H) \ __PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L1_HIT", IAP_EVENT_D1H_01H) \ __PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L2_HIT", IAP_EVENT_D1H_02H) \ __PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.LLC_HIT", IAP_EVENT_D1H_04H) \ @@ -2807,6 +2818,7 @@ __PMC_EV_ALIAS("L2_LINES_OUT.DEMAND_DIRTY", IAP_EVENT_F2H_06H) #define __PMC_EV_ALIAS_HASWELL() \ +__PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \ __PMC_EV_ALIAS("LD_BLOCKS.STORE_FORWARD", IAP_EVENT_03H_02H) \ __PMC_EV_ALIAS("LD_BLOCKS.NO_SR", IAP_EVENT_03H_08H) \ __PMC_EV_ALIAS("MISALIGN_MEM_REF.LOADS", IAP_EVENT_05H_01H) \ @@ -2814,7 +2826,7 @@ __PMC_EV_ALIAS("MISALIGN_MEM_REF.STORES", IAP_EVENT_05H_02H) \ __PMC_EV_ALIAS("LD_BLOCKS_PARTIAL.ADDRESS_ALIAS", IAP_EVENT_07H_01H) 
\ __PMC_EV_ALIAS("DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK", IAP_EVENT_08H_01H)\ __PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_COMPLETED_4K", IAP_EVENT_08H_02H) \ -__PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4K", \ +__PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M", \ IAP_EVENT_08H_04H) \ __PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_COMPLETED", IAP_EVENT_08H_0EH) \ __PMC_EV_ALIAS("DTLB_LOAD_MISSES.WALK_DURATION", IAP_EVENT_08H_10H) \ @@ -2870,8 +2882,8 @@ __PMC_EV_ALIAS("MOVE_ELIMINATION.SMID_NOT_ELIMINATED", \ IAP_EVENT_58H_08H) \ __PMC_EV_ALIAS("MOVE_ELIMINATION.INT_ELIMINATED", IAP_EVENT_58H_01H) \ __PMC_EV_ALIAS("MOVE_ELIMINATION.SMID_ELIMINATED", IAP_EVENT_58H_02H) \ -__PMC_EV_ALIAS("CPL_CYCLES.RING0", IAP_EVENT_5CH_02H) \ -__PMC_EV_ALIAS("CPL_CYCLES.RING123", IAP_EVENT_5CH_01H) \ +__PMC_EV_ALIAS("CPL_CYCLES.RING0", IAP_EVENT_5CH_01H) \ +__PMC_EV_ALIAS("CPL_CYCLES.RING123", IAP_EVENT_5CH_02H) \ __PMC_EV_ALIAS("RS_EVENTS.EMPTY_CYCLES", IAP_EVENT_5EH_01H) \ __PMC_EV_ALIAS("OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD", \ IAP_EVENT_60H_01H) \ @@ -2906,24 +2918,22 @@ __PMC_EV_ALIAS("ITLB_MISSES.STLB_HIT_2M", IAP_EVENT_85H_40H) \ __PMC_EV_ALIAS("ITLB_MISSES.STLB_HIT", IAP_EVENT_85H_60H) \ __PMC_EV_ALIAS("ILD_STALL.LCP", IAP_EVENT_87H_01H) \ __PMC_EV_ALIAS("ILD_STALL.IQ_FULL", IAP_EVENT_87H_04H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.COND", IAP_EVENT_88H_01H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_02H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN_COND", IAP_EVENT_88H_41H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN_COND", IAP_EVENT_88H_81H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_82H) \ __PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET", \ - IAP_EVENT_88H_04H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_08H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_10H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_20H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN", IAP_EVENT_88H_40H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN", IAP_EVENT_88H_80H) \ + IAP_EVENT_88H_84H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_88H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_90H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_A0H) \ __PMC_EV_ALIAS("BR_INST_EXEC.ALL_BRANCHES", IAP_EVENT_88H_FFH) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.COND", IAP_EVENT_89H_01H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN_COND", IAP_EVENT_89H_41H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN_COND", IAP_EVENT_89H_81H) \ __PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET", \ - IAP_EVENT_89H_04H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_08H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_10H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_20H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN", IAP_EVENT_89H_40H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN", IAP_EVENT_89H_80H) \ + IAP_EVENT_89H_84H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_88H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_90H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_A0H) \ __PMC_EV_ALIAS("BR_MISP_EXEC.ALL_BRANCHES", IAP_EVENT_89H_FFH) \ __PMC_EV_ALIAS("IDQ_UOPS_NOT_DELIVERED.CORE", IAP_EVENT_9CH_01H) \ __PMC_EV_ALIAS("UOPS_EXECUTED_PORT.PORT_0", IAP_EVENT_A1H_01H) \ @@ -2991,14 +3001,12 @@ __PMC_EV_ALIAS("FP_ASSIST.SIMD_INPUT", IAP_EVENT_CAH_10H) \ __PMC_EV_ALIAS("FP_ASSIST.ANY", IAP_EVENT_CAH_1EH) \ __PMC_EV_ALIAS("ROB_MISC_EVENTS.LBR_INSERTS", 
IAP_EVENT_CCH_20H) \ __PMC_EV_ALIAS("MEM_TRANS_RETIRED.LOAD_LATENCY", IAP_EVENT_CDH_01H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOADS", IAP_EVENT_D0H_01H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.STORES", IAP_EVENT_D0H_02H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.STLB_MISS", IAP_EVENT_D0H_10H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOCK", IAP_EVENT_D0H_20H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.SPLIT", IAP_EVENT_D0H_40H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.SPLIT_LOADS", IAP_EVENT_D0H_41H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.SPLIT_STORES", IAP_EVENT_D0H_42H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.ALL", IAP_EVENT_D0H_80H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.STLB_MISS_LOADS", IAP_EVENT_D0H_11H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.STLB_MISS_STORES", IAP_EVENT_D0H_12H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.SPLIT_LOADS", IAP_EVENT_D0H_41H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.SPLIT_STORES", IAP_EVENT_D0H_42H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.ALL_LOADS", IAP_EVENT_D0H_81H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.ALL_STORES", IAP_EVENT_D0H_82H) \ __PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L1_HIT", IAP_EVENT_D1H_01H) \ __PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L2_HIT", IAP_EVENT_D1H_02H) \ __PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.LLC_HIT", IAP_EVENT_D1H_04H) \ @@ -3036,6 +3044,7 @@ __PMC_EV_ALIAS("L2_LINES_OUT.DEMAND_DIRTY", IAP_EVENT_F2H_06H) #define __PMC_EV_ALIAS_IVYBRIDGE() \ +__PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \ __PMC_EV_ALIAS("LD_BLOCKS.STORE_FORWARD", IAP_EVENT_03H_02H) \ __PMC_EV_ALIAS("LD_BLOCKS.NO_SR", IAP_EVENT_03H_08H) \ __PMC_EV_ALIAS("MISALIGN_MEM_REF.LOADS", IAP_EVENT_05H_01H) \ @@ -3132,24 +3141,22 @@ __PMC_EV_ALIAS("ITLB_MISSES.WALK_DURATION", IAP_EVENT_85H_04H) \ __PMC_EV_ALIAS("ITLB_MISSES.STLB_HIT", IAP_EVENT_85H_10H) \ __PMC_EV_ALIAS("ILD_STALL.LCP", IAP_EVENT_87H_01H) \ __PMC_EV_ALIAS("ILD_STALL.IQ_FULL", IAP_EVENT_87H_04H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.COND", IAP_EVENT_88H_01H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_02H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN_COND", IAP_EVENT_88H_41H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN_COND", IAP_EVENT_88H_81H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_82H) \ __PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET", \ - IAP_EVENT_88H_04H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_08H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_10H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_20H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN", IAP_EVENT_88H_40H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN", IAP_EVENT_88H_80H) \ + IAP_EVENT_88H_84H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_88H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_90H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_A0H) \ __PMC_EV_ALIAS("BR_INST_EXEC.ALL_BRANCHES", IAP_EVENT_88H_FFH) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.COND", IAP_EVENT_89H_01H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN_COND", IAP_EVENT_89H_41H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN_COND", IAP_EVENT_89H_81H) \ __PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET", \ - IAP_EVENT_89H_04H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_08H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_10H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_20H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN", IAP_EVENT_89H_40H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN", IAP_EVENT_89H_80H) \ + IAP_EVENT_89H_84H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_88H) \ 
+__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_90H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_A0H) \ __PMC_EV_ALIAS("BR_MISP_EXEC.ALL_BRANCHES", IAP_EVENT_89H_FFH) \ __PMC_EV_ALIAS("IDQ_UOPS_NOT_DELIVERED.CORE", IAP_EVENT_9CH_01H) \ __PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_0", IAP_EVENT_A1H_01H) \ @@ -3218,15 +3225,12 @@ __PMC_EV_ALIAS("FP_ASSIST.ANY", IAP_EVENT_CAH_1EH) \ __PMC_EV_ALIAS("ROB_MISC_EVENTS.LBR_INSERTS", IAP_EVENT_CCH_20H) \ __PMC_EV_ALIAS("MEM_TRANS_RETIRED.LOAD_LATENCY", IAP_EVENT_CDH_01H) \ __PMC_EV_ALIAS("MEM_TRANS_RETIRED.PRECISE_STORE", IAP_EVENT_CDH_02H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOADS", IAP_EVENT_D0H_01H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.STORES", IAP_EVENT_D0H_02H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.STLB_MISS", IAP_EVENT_D0H_10H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOCK", IAP_EVENT_D0H_20H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.SPLIT", IAP_EVENT_D0H_40H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.SPLIT_STORES", IAP_EVENT_D0H_42H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.ALL", IAP_EVENT_D0H_80H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.ALL_LOADS", IAP_EVENT_D0H_81H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.ALL_STORES", IAP_EVENT_D0H_82H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.STLB_MISS_LOADS", IAP_EVENT_D0H_11H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.STLB_MISS_STORES", IAP_EVENT_D0H_12H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.SPLIT_LOADS", IAP_EVENT_D0H_41H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.SPLIT_STORES", IAP_EVENT_D0H_42H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.ALL_LOADS", IAP_EVENT_D0H_81H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.ALL_STORES", IAP_EVENT_D0H_82H) \ __PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L1_HIT", IAP_EVENT_D1H_01H) \ __PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L2_HIT", IAP_EVENT_D1H_02H) \ __PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.LLC_HIT", IAP_EVENT_D1H_04H) \ @@ -3268,6 +3272,7 @@ __PMC_EV_ALIAS("L2_LINES_OUT.PF_DIRTY", IAP_EVENT_F2H_08H) * Aliases for Ivy Bridge Xeon PMC events (325462-045US January 2013) */ #define __PMC_EV_ALIAS_IVYBRIDGE_XEON() \ +__PMC_EV_ALIAS_INTEL_ARCHITECTURAL() \ __PMC_EV_ALIAS("LD_BLOCKS.STORE_FORWARD", IAP_EVENT_03H_02H) \ __PMC_EV_ALIAS("LD_BLOCKS.NO_SR", IAP_EVENT_03H_08H) \ __PMC_EV_ALIAS("MISALIGN_MEM_REF.LOADS", IAP_EVENT_05H_01H) \ @@ -3363,24 +3368,22 @@ __PMC_EV_ALIAS("ITLB_MISSES.WALK_DURATION", IAP_EVENT_85H_04H) \ __PMC_EV_ALIAS("ITLB_MISSES.STLB_HIT", IAP_EVENT_85H_10H) \ __PMC_EV_ALIAS("ILD_STALL.LCP", IAP_EVENT_87H_01H) \ __PMC_EV_ALIAS("ILD_STALL.IQ_FULL", IAP_EVENT_87H_04H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.COND", IAP_EVENT_88H_01H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_02H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN_COND", IAP_EVENT_88H_41H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN_COND", IAP_EVENT_88H_81H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_82H) \ __PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET", \ - IAP_EVENT_88H_04H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_08H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_10H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_20H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN", IAP_EVENT_88H_40H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN", IAP_EVENT_88H_80H) \ + IAP_EVENT_88H_84H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_88H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_90H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_A0H) \ __PMC_EV_ALIAS("BR_INST_EXEC.ALL_BRANCHES", IAP_EVENT_88H_FFH) \ 
-__PMC_EV_ALIAS("BR_MISP_EXEC.COND", IAP_EVENT_89H_01H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN_COND", IAP_EVENT_89H_41H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN_COND", IAP_EVENT_89H_81H) \ __PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET", \ - IAP_EVENT_89H_04H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_08H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_10H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_20H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN", IAP_EVENT_89H_40H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN", IAP_EVENT_89H_80H) \ + IAP_EVENT_89H_84H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_88H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_90H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_A0H) \ __PMC_EV_ALIAS("BR_MISP_EXEC.ALL_BRANCHES", IAP_EVENT_89H_FFH) \ __PMC_EV_ALIAS("IDQ_UOPS_NOT_DELIVERED.CORE", IAP_EVENT_9CH_01H) \ __PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_0", IAP_EVENT_A1H_01H) \ @@ -3449,15 +3452,12 @@ __PMC_EV_ALIAS("FP_ASSIST.ANY", IAP_EVENT_CAH_1EH) \ __PMC_EV_ALIAS("ROB_MISC_EVENTS.LBR_INSERTS", IAP_EVENT_CCH_20H) \ __PMC_EV_ALIAS("MEM_TRANS_RETIRED.LOAD_LATENCY", IAP_EVENT_CDH_01H) \ __PMC_EV_ALIAS("MEM_TRANS_RETIRED.PRECISE_STORE", IAP_EVENT_CDH_02H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOADS", IAP_EVENT_D0H_01H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.STORES", IAP_EVENT_D0H_02H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.STLB_MISS", IAP_EVENT_D0H_10H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.LOCK", IAP_EVENT_D0H_20H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.SPLIT", IAP_EVENT_D0H_40H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.SPLIT_STORES", IAP_EVENT_D0H_42H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.ALL", IAP_EVENT_D0H_80H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.ALL_LOADS", IAP_EVENT_D0H_81H) \ -__PMC_EV_ALIAS("MEM_UOP_RETIRED.ALL_STORES", IAP_EVENT_D0H_82H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.STLB_MISS_LOADS", IAP_EVENT_D0H_11H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.STLB_MISS_STORES", IAP_EVENT_D0H_12H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.SPLIT_LOADS", IAP_EVENT_D0H_41H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.SPLIT_STORES", IAP_EVENT_D0H_42H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.ALL_LOADS", IAP_EVENT_D0H_81H) \ +__PMC_EV_ALIAS("MEM_UOPS_RETIRED.ALL_STORES", IAP_EVENT_D0H_82H) \ __PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L1_HIT", IAP_EVENT_D1H_01H) \ __PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.L2_HIT", IAP_EVENT_D1H_02H) \ __PMC_EV_ALIAS("MEM_LOAD_UOPS_RETIRED.LLC_HIT", IAP_EVENT_D1H_04H) \ @@ -3599,24 +3599,22 @@ __PMC_EV_ALIAS("ITLB_MISSES.WALK_DURATION", IAP_EVENT_85H_04H) \ __PMC_EV_ALIAS("ITLB_MISSES.STLB_HIT", IAP_EVENT_85H_10H) \ __PMC_EV_ALIAS("ILD_STALL.LCP", IAP_EVENT_87H_01H) \ __PMC_EV_ALIAS("ILD_STALL.IQ_FULL", IAP_EVENT_87H_04H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.COND", IAP_EVENT_88H_01H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_02H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN_COND", IAP_EVENT_88H_41H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN_COND", IAP_EVENT_88H_81H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_82H) \ __PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET", \ - IAP_EVENT_88H_04H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_08H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_10H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_20H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN", IAP_EVENT_88H_40H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN", IAP_EVENT_88H_80H) \ -__PMC_EV_ALIAS("BR_INST_EXE.ALL_BRANCHES", 
IAP_EVENT_88H_FFH) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.COND", IAP_EVENT_89H_01H) \ + IAP_EVENT_88H_84H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_88H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_90H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_A0H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.ALL_BRANCHES", IAP_EVENT_88H_FFH) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN_COND", IAP_EVENT_89H_41H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN_COND", IAP_EVENT_89H_81H) \ __PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET", \ - IAP_EVENT_89H_04H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_08H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_10H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_20H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN", IAP_EVENT_89H_40H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN", IAP_EVENT_89H_80H) \ + IAP_EVENT_89H_84H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_88H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_90H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_A0H) \ __PMC_EV_ALIAS("BR_MISP_EXEC.ALL_BRANCHES", IAP_EVENT_89H_FFH) \ __PMC_EV_ALIAS("IDQ_UOPS_NOT_DELIVERED.CORE", IAP_EVENT_9CH_01H) \ __PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_0", IAP_EVENT_A1H_01H) \ @@ -3829,25 +3827,23 @@ __PMC_EV_ALIAS("ITLB_MISSES.WALK_DURATION", IAP_EVENT_85H_04H) \ __PMC_EV_ALIAS("ITLB_MISSES.STLB_HIT", IAP_EVENT_85H_10H) \ __PMC_EV_ALIAS("ILD_STALL.LCP", IAP_EVENT_87H_01H) \ __PMC_EV_ALIAS("ILD_STALL.IQ_FULL", IAP_EVENT_87H_04H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.COND", IAP_EVENT_88H_01H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_02H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN_COND", IAP_EVENT_88H_41H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN_COND", IAP_EVENT_88H_81H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_JMP", IAP_EVENT_88H_82H) \ __PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_JMP_NON_CALL_RET", \ - IAP_EVENT_88H_04H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_08H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_10H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_20H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.NONTAKEN", IAP_EVENT_88H_40H) \ -__PMC_EV_ALIAS("BR_INST_EXEC.TAKEN", IAP_EVENT_88H_80H) \ -__PMC_EV_ALIAS("BR_INST_EXE.ALL_BRANCHES", IAP_EVENT_88H_FFH) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.COND", IAP_EVENT_89H_01H) \ + IAP_EVENT_88H_84H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.RETURN_NEAR", IAP_EVENT_88H_88H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_88H_90H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_88H_A0H) \ +__PMC_EV_ALIAS("BR_INST_EXEC.ALL_BRANCHES", IAP_EVENT_88H_FFH) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN_COND", IAP_EVENT_89H_41H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN_COND", IAP_EVENT_89H_81H) \ __PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_JMP_NON_CALL_RET", \ - IAP_EVENT_89H_04H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_08H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_10H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_20H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.NONTAKEN", IAP_EVENT_89H_40H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.TAKEN", IAP_EVENT_89H_80H) \ -__PMC_EV_ALIAS("BR_MISP_EXEC.ALL_BRANCHES", IAP_EVENT_89H_FFH) \ + IAP_EVENT_89H_84H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.RETURN_NEAR", IAP_EVENT_89H_88H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.DIRECT_NEAR_CALL", IAP_EVENT_89H_90H) \ 
+__PMC_EV_ALIAS("BR_MISP_EXEC.INDIRECT_NEAR_CALL", IAP_EVENT_89H_A0H) \ +__PMC_EV_ALIAS("BR_MISP_EXEC.ALL_BRANCHES", IAP_EVENT_89H_FFH) \ __PMC_EV_ALIAS("IDQ_UOPS_NOT_DELIVERED.CORE", IAP_EVENT_9CH_01H) \ __PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_0", IAP_EVENT_A1H_01H) \ __PMC_EV_ALIAS("UOPS_DISPATCHED_PORT.PORT_1", IAP_EVENT_A1H_02H) \ diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index 5f45e66bd1ce..fca37ba0324a 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -736,7 +736,14 @@ hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) switch(cmd) { case SIOCSIFADDR: - case SIOCGIFADDR: +#ifdef INET + if (ifa->ifa_addr->sa_family == AF_INET) { + ifp->if_flags |= IFF_UP; + if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) + hn_ifinit(sc); + arp_ifinit(ifp, ifa); + } else +#endif error = ether_ioctl(ifp, cmd, data); break; case SIOCSIFMTU: @@ -900,6 +907,7 @@ hn_stop(hn_softc_t *sc) printf(" Closing Device ...\n"); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + if_link_state_change(ifp, LINK_STATE_DOWN); sc->hn_initdone = 0; ret = hv_rf_on_close(device_ctx); @@ -949,6 +957,7 @@ hn_ifinit_locked(hn_softc_t *sc) } ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + if_link_state_change(ifp, LINK_STATE_UP); } /* diff --git a/sys/dev/iicbus/ds3231.c b/sys/dev/iicbus/ds3231.c index e6366bf3368e..690c92b05ee6 100644 --- a/sys/dev/iicbus/ds3231.c +++ b/sys/dev/iicbus/ds3231.c @@ -64,8 +64,6 @@ struct ds3231_softc { uint8_t sc_status; }; -static int ds3231_sqw_freq[] = { 1, 1024, 4096, 8192 }; - static void ds3231_start(void *); static int @@ -282,6 +280,7 @@ ds3231_bbsqw_sysctl(SYSCTL_HANDLER_ARGS) static int ds3231_sqw_freq_sysctl(SYSCTL_HANDLER_ARGS) { + int ds3231_sqw_freq[] = { 1, 1024, 4096, 8192 }; int error, freq, i, newf, tmp; struct ds3231_softc *sc; @@ -290,8 +289,8 @@ ds3231_sqw_freq_sysctl(SYSCTL_HANDLER_ARGS) if (error != 0) return (error); tmp = (sc->sc_ctrl & DS3231_CTRL_RS_MASK) >> DS3231_CTRL_RS_SHIFT; - if (tmp > nitems(ds3231_sqw_freq)) - tmp = nitems(ds3231_sqw_freq); + if (tmp >= nitems(ds3231_sqw_freq)) + tmp = nitems(ds3231_sqw_freq) - 1; freq = ds3231_sqw_freq[tmp]; error = sysctl_handle_int(oidp, &freq, 0, req); if (error != 0 || req->newptr == NULL) diff --git a/sys/dev/iicbus/lm75.c b/sys/dev/iicbus/lm75.c index 2b9b1a362508..cbcd8c44c48f 100644 --- a/sys/dev/iicbus/lm75.c +++ b/sys/dev/iicbus/lm75.c @@ -90,8 +90,6 @@ struct lm75_softc { uint32_t sc_conf; }; -static int lm75_faults[4] = { 1, 2, 4, 6 }; - /* Utility functions */ static int lm75_conf_read(struct lm75_softc *); static int lm75_conf_write(struct lm75_softc *); @@ -457,14 +455,15 @@ static int lm75_faults_sysctl(SYSCTL_HANDLER_ARGS) { device_t dev; + int lm75_faults[] = { 1, 2, 4, 6 }; int error, faults, i, newf, tmp; struct lm75_softc *sc; dev = (device_t)arg1; sc = device_get_softc(dev); tmp = (sc->sc_conf & LM75_CONF_FAULT) >> LM75_CONF_FSHIFT; - if (tmp > nitems(lm75_faults)) - tmp = nitems(lm75_faults); + if (tmp >= nitems(lm75_faults)) + tmp = nitems(lm75_faults) - 1; faults = lm75_faults[tmp]; error = sysctl_handle_int(oidp, &faults, 0, req); diff --git a/sys/dev/ixgbe/ixgbe.c b/sys/dev/ixgbe/ixgbe.c index 28d1271c868c..db202fdbc5b4 100644 --- a/sys/dev/ixgbe/ixgbe.c +++ b/sys/dev/ixgbe/ixgbe.c @@ -1699,6 +1699,7 @@ static void ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { struct adapter *adapter = ifp->if_softc; + struct 
ixgbe_hw *hw = &adapter->hw; INIT_DEBUGOUT("ixgbe_media_status: begin"); IXGBE_CORE_LOCK(adapter); @@ -1714,16 +1715,27 @@ ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) ifmr->ifm_status |= IFM_ACTIVE; - switch (adapter->link_speed) { - case IXGBE_LINK_SPEED_100_FULL: + /* + * Not all NIC are 1000baseSX as an example X540T. + * We must set properly the media based on NIC model. + */ + switch (hw->device_id) { + case IXGBE_DEV_ID_X540T: + if (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ifmr->ifm_active |= IFM_100_TX | IFM_FDX; - break; - case IXGBE_LINK_SPEED_1GB_FULL: - ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; - break; - case IXGBE_LINK_SPEED_10GB_FULL: + else if (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) + ifmr->ifm_active |= IFM_1000_T | IFM_FDX; + else if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ifmr->ifm_active |= adapter->optics | IFM_FDX; - break; + break; + default: + if (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) + ifmr->ifm_active |= IFM_100_TX | IFM_FDX; + else if (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) + ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; + else if (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) + ifmr->ifm_active |= adapter->optics | IFM_FDX; + break; } IXGBE_CORE_UNLOCK(adapter); @@ -1751,6 +1763,7 @@ ixgbe_media_change(struct ifnet * ifp) return (EINVAL); switch (IFM_SUBTYPE(ifm->ifm_media)) { + case IFM_10G_T: case IFM_AUTO: adapter->hw.phy.autoneg_advertised = IXGBE_LINK_SPEED_100_FULL | diff --git a/sys/dev/ixl/i40e_adminq_cmd.h b/sys/dev/ixl/i40e_adminq_cmd.h index 57b3ee1690ac..0d52883e70d7 100755 --- a/sys/dev/ixl/i40e_adminq_cmd.h +++ b/sys/dev/ixl/i40e_adminq_cmd.h @@ -42,7 +42,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0004 +#define I40E_FW_API_VERSION_MINOR 0x0002 struct i40e_aq_desc { __le16 flags; @@ -140,7 +140,12 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_list_func_capabilities = 0x000A, i40e_aqc_opc_list_dev_capabilities = 0x000B, + i40e_aqc_opc_set_cppm_configuration = 0x0103, + i40e_aqc_opc_set_arp_proxy_entry = 0x0104, + i40e_aqc_opc_set_ns_proxy_entry = 0x0105, + /* LAA */ + i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ i40e_aqc_opc_mac_address_read = 0x0107, i40e_aqc_opc_mac_address_write = 0x0108, @@ -265,6 +270,7 @@ enum i40e_admin_queue_opc { /* Tunnel commands */ i40e_aqc_opc_add_udp_tunnel = 0x0B00, i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_tunnel_key_structure = 0x0B10, /* Async Events */ i40e_aqc_opc_event_lan_overflow = 0x1001, @@ -276,6 +282,8 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, /* debug commands */ + i40e_aqc_opc_debug_get_deviceid = 0xFF00, + i40e_aqc_opc_debug_set_mode = 0xFF01, i40e_aqc_opc_debug_read_reg = 0xFF03, i40e_aqc_opc_debug_write_reg = 0xFF04, i40e_aqc_opc_debug_modify_reg = 0xFF07, @@ -509,8 +517,7 @@ struct i40e_aqc_mac_address_read { #define I40E_AQC_SAN_ADDR_VALID 0x20 #define I40E_AQC_PORT_ADDR_VALID 0x40 #define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_MC_MAG_EN_VALID 0x100 -#define I40E_AQC_ADDR_VALID_MASK 0x1F0 +#define I40E_AQC_ADDR_VALID_MASK 0xf0 u8 reserved[6]; __le32 addr_high; __le32 addr_low; @@ -533,9 +540,7 @@ struct i40e_aqc_mac_address_write { #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 #define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 -#define I40E_AQC_WRITE_TYPE_MASK 0xC000 - +#define I40E_AQC_WRITE_TYPE_MASK 0xc000 __le16 mac_sah; __le32 mac_sal; u8 
reserved[8]; @@ -1071,7 +1076,6 @@ struct i40e_aqc_set_vsi_promiscuous_modes { __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; -#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 u8 reserved[8]; }; @@ -2066,12 +2070,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); #define I40E_AQC_CEE_PFC_STATUS_MASK (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT) #define I40E_AQC_CEE_APP_STATUS_SHIFT 0x8 #define I40E_AQC_CEE_APP_STATUS_MASK (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT) -#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8 -#define I40E_AQC_CEE_FCOE_STATUS_MASK (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT) -#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT 0xA -#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT) -#define I40E_AQC_CEE_FIP_STATUS_SHIFT 0x10 -#define I40E_AQC_CEE_FIP_STATUS_MASK (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT) struct i40e_aqc_get_cee_dcb_cfg_v1_resp { u8 reserved1; u8 oper_num_tc; diff --git a/sys/dev/ixl/i40e_common.c b/sys/dev/ixl/i40e_common.c index f0f128e1ed4c..777ecef9343a 100755 --- a/sys/dev/ixl/i40e_common.c +++ b/sys/dev/ixl/i40e_common.c @@ -866,7 +866,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) return media; } -#define I40E_PF_RESET_WAIT_COUNT 110 +#define I40E_PF_RESET_WAIT_COUNT 200 /** * i40e_pf_reset - Reset the PF * @hw: pointer to the hardware structure @@ -1108,11 +1108,9 @@ u32 i40e_led_get(struct i40e_hw *hw) if (!gpio_val) continue; - /* ignore gpio LED src mode entries related to the activity - * LEDs - */ - current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) - >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + /* ignore gpio LED src mode entries related to the activity LEDs */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> + I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); switch (current_mode) { case I40E_COMBINED_ACTIVITY: case I40E_FILTER_ACTIVITY: @@ -1156,11 +1154,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) if (!gpio_val) continue; - /* ignore gpio LED src mode entries related to the activity - * LEDs - */ - current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) - >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); + /* ignore gpio LED src mode entries related to the activity LEDs */ + current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >> + I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT); switch (current_mode) { case I40E_COMBINED_ACTIVITY: case I40E_FILTER_ACTIVITY: @@ -1529,6 +1525,7 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw, return status; } + /** * i40e_aq_set_phy_int_mask * @hw: pointer to the hw struct @@ -2816,13 +2813,12 @@ enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, #define I40E_DEV_FUNC_CAP_MSIX_VF 0x44 #define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45 #define I40E_DEV_FUNC_CAP_IEEE_1588 0x46 -#define I40E_DEV_FUNC_CAP_FLEX10 0xF1 +#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1 #define I40E_DEV_FUNC_CAP_CEM 0xF2 #define I40E_DEV_FUNC_CAP_IWARP 0x51 #define I40E_DEV_FUNC_CAP_LED 0x61 #define I40E_DEV_FUNC_CAP_SDP 0x62 #define I40E_DEV_FUNC_CAP_MDIO 0x63 -#define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64 /** * i40e_parse_discover_capabilities @@ -2840,7 +2836,6 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, struct i40e_aqc_list_capabilities_element_resp *cap; u32 valid_functions, num_functions; u32 number, logical_id, phys_id; - u8 major_rev; struct i40e_hw_capabilities *p; u32 i = 0; u16 id; @@ -2859,7 +2854,6 @@ static void 
i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, number = LE32_TO_CPU(cap->number); logical_id = LE32_TO_CPU(cap->logical_id); phys_id = LE32_TO_CPU(cap->phys_id); - major_rev = cap->major_rev; switch (id) { case I40E_DEV_FUNC_CAP_SWITCH_MODE: @@ -2934,21 +2928,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, case I40E_DEV_FUNC_CAP_MSIX_VF: p->num_msix_vectors_vf = number; break; - case I40E_DEV_FUNC_CAP_FLEX10: - if (major_rev == 1) { - if (number == 1) { - p->flex10_enable = TRUE; - p->flex10_capable = TRUE; - } - } else { - /* Capability revision >= 2 */ - if (number & 1) - p->flex10_enable = TRUE; - if (number & 2) - p->flex10_capable = TRUE; - } - p->flex10_mode = logical_id; - p->flex10_status = phys_id; + case I40E_DEV_FUNC_CAP_MFP_MODE_1: + if (number == 1) + p->mfp_mode_1 = TRUE; break; case I40E_DEV_FUNC_CAP_CEM: if (number == 1) @@ -2981,18 +2963,11 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, p->fd_filters_guaranteed = number; p->fd_filters_best_effort = logical_id; break; - case I40E_DEV_FUNC_CAP_WR_CSR_PROT: - p->wr_csr_prot = (u64)number; - p->wr_csr_prot |= (u64)logical_id << 32; - break; default: break; } } - if (p->fcoe) - i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); - /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */ p->fcoe = FALSE; @@ -4947,63 +4922,6 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) } } -/** - * i40e_aq_debug_dump - * @hw: pointer to the hardware structure - * @cluster_id: specific cluster to dump - * @table_id: table id within cluster - * @start_index: index of line in the block to read - * @buff_size: dump buffer size - * @buff: dump buffer - * @ret_buff_size: actual buffer size returned - * @ret_next_table: next block to read - * @ret_next_index: next index to read - * - * Dump internal FW/HW data for debug purposes. 
- * - **/ -enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, - u8 table_id, u32 start_index, u16 buff_size, - void *buff, u16 *ret_buff_size, - u8 *ret_next_table, u32 *ret_next_index, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_debug_dump_internals *cmd = - (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; - struct i40e_aqc_debug_dump_internals *resp = - (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; - enum i40e_status_code status; - - if (buff_size == 0 || !buff) - return I40E_ERR_PARAM; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_debug_dump_internals); - /* Indirect Command */ - desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); - if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); - - cmd->cluster_id = cluster_id; - cmd->table_id = table_id; - cmd->idx = CPU_TO_LE32(start_index); - - desc.datalen = CPU_TO_LE16(buff_size); - - status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); - if (!status) { - if (ret_buff_size != NULL) - *ret_buff_size = LE16_TO_CPU(desc.datalen); - if (ret_next_table != NULL) - *ret_next_table = resp->table_id; - if (ret_next_index != NULL) - *ret_next_index = LE32_TO_CPU(resp->idx); - } - - return status; -} - /** * i40e_read_bw_from_alt_ram * @hw: pointer to the hardware structure diff --git a/sys/dev/ixl/i40e_prototype.h b/sys/dev/ixl/i40e_prototype.h index fc4907e96062..d49b52da66fb 100755 --- a/sys/dev/ixl/i40e_prototype.h +++ b/sys/dev/ixl/i40e_prototype.h @@ -445,9 +445,4 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, u16 vsi_seid, u16 queue, bool is_add, struct i40e_control_filter_stats *stats, struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, - u8 table_id, u32 start_index, u16 buff_size, - void *buff, u16 *ret_buff_size, - u8 *ret_next_table, u32 *ret_next_index, - struct i40e_asq_cmd_details *cmd_details); #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/sys/dev/ixl/i40e_type.h b/sys/dev/ixl/i40e_type.h index 7fe50e296416..1de747dfc9a4 100755 --- a/sys/dev/ixl/i40e_type.h +++ b/sys/dev/ixl/i40e_type.h @@ -287,17 +287,7 @@ struct i40e_hw_capabilities { bool dcb; bool fcoe; bool iscsi; /* Indicates iSCSI enabled */ - bool flex10_enable; - bool flex10_capable; - u32 flex10_mode; -#define I40E_FLEX10_MODE_UNKNOWN 0x0 -#define I40E_FLEX10_MODE_DCC 0x1 -#define I40E_FLEX10_MODE_DCI 0x2 - - u32 flex10_status; -#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 -#define I40E_FLEX10_STATUS_VC_MODE 0x2 - + bool mfp_mode_1; bool mgmt_cem; bool ieee_1588; bool iwarp; @@ -326,7 +316,6 @@ struct i40e_hw_capabilities { u8 rx_buf_chain_len; u32 enabled_tcmap; u32 maxtc; - u64 wr_csr_prot; }; struct i40e_mac_info { @@ -573,7 +562,7 @@ struct i40e_hw { u32 debug_mask; }; -static INLINE bool i40e_is_vf(struct i40e_hw *hw) +static inline bool i40e_is_vf(struct i40e_hw *hw) { return hw->mac.type == I40E_MAC_VF; } @@ -1274,9 +1263,6 @@ struct i40e_hw_port_stats { /* flow director stats */ u64 fd_atr_match; u64 fd_sb_match; - u64 fd_atr_tunnel_match; - u32 fd_atr_status; - u32 fd_sb_status; /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c index 9c3c161039bb..d26db6812b23 100755 --- a/sys/dev/ixl/if_ixl.c +++ b/sys/dev/ixl/if_ixl.c @@ -48,7 +48,7 @@ /********************************************************************* * Driver version 
*********************************************************************/ -char ixl_driver_version[] = "1.3.6"; +char ixl_driver_version[] = "1.4.1"; /********************************************************************* * PCI Device ID Table @@ -70,6 +70,7 @@ static ixl_vendor_info_t ixl_vendor_info_array[] = {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0}, + {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0}, /* required last entry */ {0, 0, 0, 0, 0} }; @@ -113,16 +114,17 @@ static void ixl_configure_legacy(struct ixl_pf *); static void ixl_free_pci_resources(struct ixl_pf *); static void ixl_local_timer(void *); static int ixl_setup_interface(device_t, struct ixl_vsi *); -static bool ixl_config_link(struct i40e_hw *); +static void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *); static void ixl_config_rss(struct ixl_vsi *); static void ixl_set_queue_rx_itr(struct ixl_queue *); static void ixl_set_queue_tx_itr(struct ixl_queue *); static int ixl_set_advertised_speeds(struct ixl_pf *, int); -static void ixl_enable_rings(struct ixl_vsi *); -static void ixl_disable_rings(struct ixl_vsi *); -static void ixl_enable_intr(struct ixl_vsi *); -static void ixl_disable_intr(struct ixl_vsi *); +static int ixl_enable_rings(struct ixl_vsi *); +static int ixl_disable_rings(struct ixl_vsi *); +static void ixl_enable_intr(struct ixl_vsi *); +static void ixl_disable_intr(struct ixl_vsi *); +static void ixl_disable_rings_intr(struct ixl_vsi *); static void ixl_enable_adminq(struct i40e_hw *); static void ixl_disable_adminq(struct i40e_hw *); @@ -139,6 +141,7 @@ static void ixl_unregister_vlan(void *, struct ifnet *, u16); static void ixl_setup_vlan_filters(struct ixl_vsi *); static void ixl_init_filters(struct ixl_vsi *); +static void ixl_reconfigure_filters(struct ixl_vsi *vsi); static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan); static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan); static void ixl_add_hw_filters(struct ixl_vsi *, int, int); @@ -146,6 +149,8 @@ static void ixl_del_hw_filters(struct ixl_vsi *, int); static struct ixl_mac_filter * ixl_find_filter(struct ixl_vsi *, u8 *, s16); static void ixl_add_mc_filter(struct ixl_vsi *, u8 *); +static void ixl_free_mac_filters(struct ixl_vsi *vsi); + /* Sysctl debug interface */ static int ixl_debug_info(SYSCTL_HANDLER_ARGS); @@ -175,6 +180,7 @@ static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *, struct i40e_eth_stats *); static void ixl_update_stats_counters(struct ixl_pf *); static void ixl_update_eth_stats(struct ixl_vsi *); +static void ixl_update_vsi_stats(struct ixl_vsi *); static void ixl_pf_reset_stats(struct ixl_pf *); static void ixl_vsi_reset_stats(struct ixl_vsi *); static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool, @@ -188,7 +194,21 @@ static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS); -static int ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS); +#endif + +#ifdef PCI_IOV +static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err); + +static int ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t*); +static void ixl_uninit_iov(device_t dev); +static int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*); + +static void ixl_handle_vf_msg(struct ixl_pf *, + struct i40e_arq_event_info 
*); +static void ixl_handle_vflr(void *arg, int pending); + +static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf); +static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf); #endif /********************************************************************* @@ -201,6 +221,11 @@ static device_method_t ixl_methods[] = { DEVMETHOD(device_attach, ixl_attach), DEVMETHOD(device_detach, ixl_detach), DEVMETHOD(device_shutdown, ixl_shutdown), +#ifdef PCI_IOV + DEVMETHOD(pci_init_iov, ixl_init_iov), + DEVMETHOD(pci_uninit_iov, ixl_uninit_iov), + DEVMETHOD(pci_add_vf, ixl_add_vf), +#endif {0, 0} }; @@ -213,10 +238,12 @@ DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0); MODULE_DEPEND(ixl, pci, 1, 1, 1); MODULE_DEPEND(ixl, ether, 1, 1, 1); + #ifdef DEV_NETMAP MODULE_DEPEND(ixl, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ + /* ** Global reset mutex */ @@ -304,6 +331,10 @@ static char *ixl_fc_string[6] = { "Default" }; +static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations"); + +static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = + {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; /********************************************************************* * Device identification routine @@ -380,6 +411,10 @@ ixl_attach(device_t dev) struct ixl_vsi *vsi; u16 bus; int error = 0; +#ifdef PCI_IOV + nvlist_t *pf_schema, *vf_schema; + int iov_error; +#endif INIT_DEBUGOUT("ixl_attach: begin"); @@ -467,11 +502,6 @@ ixl_attach(device_t dev) SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); - - SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), - OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR, - pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump"); #endif /* Save off the PCI information */ @@ -486,6 +516,8 @@ ixl_attach(device_t dev) hw->bus.device = pci_get_slot(dev); hw->bus.func = pci_get_function(dev); + pf->vc_debug_lvl = 1; + /* Do PCI setup - map BAR0, etc */ if (ixl_allocate_pci_resources(pf)) { device_printf(dev, "Allocation of PCI resources failed\n"); @@ -556,7 +588,8 @@ ixl_attach(device_t dev) } /* Set up host memory cache */ - error = i40e_init_lan_hmc(hw, vsi->num_queues, vsi->num_queues, 0, 0); + error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, + hw->func_caps.num_rx_qp, 0, 0); if (error) { device_printf(dev, "init_lan_hmc failed: %d\n", error); goto err_get_cap; @@ -608,16 +641,8 @@ ixl_attach(device_t dev) } /* Determine link state */ - vsi->link_up = ixl_config_link(hw); - - /* Report if Unqualified modules are found */ - if ((vsi->link_up == FALSE) && - (pf->hw.phy.link_info.link_info & - I40E_AQ_MEDIA_AVAILABLE) && - (!(pf->hw.phy.link_info.an_info & - I40E_AQ_QUALIFIED_MODULE))) - device_printf(dev, "Link failed because " - "an unqualified module was detected\n"); + i40e_aq_get_link_info(hw, TRUE, NULL, NULL); + pf->link_up = i40e_get_link_status(hw); /* Setup OS specific network interface */ if (ixl_setup_interface(dev, vsi) != 0) { @@ -653,10 +678,31 @@ ixl_attach(device_t dev) vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); +#ifdef PCI_IOV + /* SR-IOV is only supported when MSI-X is in use. 
*/ + if (pf->msix > 1) { + pf_schema = pci_iov_schema_alloc_node(); + vf_schema = pci_iov_schema_alloc_node(); + pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); + pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", + IOV_SCHEMA_HASDEFAULT, TRUE); + pci_iov_schema_add_bool(vf_schema, "allow-set-mac", + IOV_SCHEMA_HASDEFAULT, FALSE); + pci_iov_schema_add_bool(vf_schema, "allow-promisc", + IOV_SCHEMA_HASDEFAULT, FALSE); + + iov_error = pci_iov_attach(dev, pf_schema, vf_schema); + if (iov_error != 0) + device_printf(dev, + "Failed to initialize SR-IOV (error=%d)\n", + iov_error); + } +#endif #ifdef DEV_NETMAP ixl_netmap_attach(vsi); #endif /* DEV_NETMAP */ + INIT_DEBUGOUT("ixl_attach: end"); return (0); @@ -692,6 +738,9 @@ ixl_detach(device_t dev) struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *que = vsi->queues; i40e_status status; +#ifdef PCI_IOV + int error; +#endif INIT_DEBUGOUT("ixl_detach: begin"); @@ -701,6 +750,14 @@ ixl_detach(device_t dev) return (EBUSY); } +#ifdef PCI_IOV + error = pci_iov_detach(dev); + if (error != 0) { + device_printf(dev, "SR-IOV in use; detach first.\n"); + return (error); + } +#endif + ether_ifdetach(vsi->ifp); if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) { IXL_PF_LOCK(pf); @@ -738,8 +795,6 @@ ixl_detach(device_t dev) #ifdef DEV_NETMAP netmap_detach(vsi->ifp); #endif /* DEV_NETMAP */ - - ixl_free_pci_resources(pf); bus_generic_detach(dev); if_free(vsi->ifp); @@ -908,7 +963,7 @@ static int ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data) { struct ixl_vsi *vsi = ifp->if_softc; - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; + struct ixl_pf *pf = vsi->back; struct ifreq *ifr = (struct ifreq *) data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; @@ -1133,6 +1188,8 @@ ixl_init_locked(struct ixl_pf *pf) i40e_aq_set_default_vsi(hw, vsi->seid, NULL); + ixl_reconfigure_filters(vsi); + /* Set MTU in hardware*/ int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size, TRUE, 0, NULL); @@ -1227,6 +1284,11 @@ ixl_intr(void *arg) mask = rd32(hw, I40E_PFINT_ICR0_ENA); +#ifdef PCI_IOV + if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) + taskqueue_enqueue(pf->tq, &pf->vflr_task); +#endif + if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { taskqueue_enqueue(pf->tq, &pf->adminq); return; @@ -1330,8 +1392,12 @@ ixl_msix_adminq(void *arg) mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; } - if (reg & I40E_PFINT_ICR0_VFLR_MASK) +#ifdef PCI_IOV + if (reg & I40E_PFINT_ICR0_VFLR_MASK) { mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; + taskqueue_enqueue(pf->tq, &pf->vflr_task); + } +#endif reg = rd32(hw, I40E_PFINT_DYN_CTL0); reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; @@ -1353,18 +1419,20 @@ static void ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { struct ixl_vsi *vsi = ifp->if_softc; - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; + struct ixl_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; INIT_DEBUGOUT("ixl_media_status: begin"); IXL_PF_LOCK(pf); + hw->phy.get_link_info = TRUE; + pf->link_up = i40e_get_link_status(hw); ixl_update_link_status(pf); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; - if (!vsi->link_up) { + if (!pf->link_up) { IXL_PF_UNLOCK(pf); return; } @@ -1754,15 +1822,14 @@ ixl_update_link_status(struct ixl_pf *pf) struct ifnet *ifp = vsi->ifp; device_t dev = pf->dev; - - if (vsi->link_up){ + if (pf->link_up){ if (vsi->link_active == FALSE) { - i40e_aq_get_link_info(hw, TRUE, NULL, NULL); pf->fc = hw->fc.current_mode; if (bootverbose) { device_printf(dev,"Link is up %d Gbps %s," " Flow 
Control: %s\n", - ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 40:10), + ((pf->link_speed == + I40E_LINK_SPEED_40GB)? 40:10), "Full Duplex", ixl_fc_string[pf->fc]); } vsi->link_active = TRUE; @@ -1771,10 +1838,12 @@ ixl_update_link_status(struct ixl_pf *pf) ** partition is not at least 10GB */ if (hw->func_caps.npar_enable && - (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB || - hw->phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) - device_printf(dev, "The partition detected link" - "speed that is less than 10Gbps\n"); + (hw->phy.link_info.link_speed == + I40E_LINK_SPEED_1GB || + hw->phy.link_info.link_speed == + I40E_LINK_SPEED_100MB)) + device_printf(dev, "The partition detected" + "link speed that is less than 10Gbps\n"); if_link_state_change(ifp, LINK_STATE_UP); } } else { /* Link down */ @@ -1805,7 +1874,10 @@ ixl_stop(struct ixl_pf *pf) mtx_assert(&pf->pf_mtx, MA_OWNED); INIT_DEBUGOUT("ixl_stop: begin\n"); - ixl_disable_intr(vsi); + if (pf->num_vfs == 0) + ixl_disable_intr(vsi); + else + ixl_disable_rings_intr(vsi); ixl_disable_rings(vsi); /* Tell the stack that the interface is no longer active */ @@ -1858,6 +1930,11 @@ ixl_assign_vsi_legacy(struct ixl_pf *pf) taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", device_get_nameunit(dev)); TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf); + +#ifdef PCI_IOV + TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf); +#endif + pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT, taskqueue_thread_enqueue, &pf->tq); taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq", @@ -1906,6 +1983,11 @@ ixl_assign_vsi_msix(struct ixl_pf *pf) pf->admvec = vector; /* Tasklet for Admin Queue */ TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf); + +#ifdef PCI_IOV + TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf); +#endif + pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT, taskqueue_thread_enqueue, &pf->tq); taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq", @@ -2330,6 +2412,10 @@ ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type) if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL); + if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX)) + ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL); + if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX)) + ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL); if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) || phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4) || @@ -2470,17 +2556,32 @@ ixl_setup_interface(device_t dev, struct ixl_vsi *vsi) return (0); } -static bool -ixl_config_link(struct i40e_hw *hw) +/* +** Run when the Admin Queue gets a +** link transition interrupt. +*/ +static void +ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e) { + struct i40e_hw *hw = &pf->hw; + struct i40e_aqc_get_link_status *status = + (struct i40e_aqc_get_link_status *)&e->desc.params.raw; bool check; - i40e_aq_get_link_info(hw, TRUE, NULL, NULL); + hw->phy.get_link_info = TRUE; check = i40e_get_link_status(hw); + pf->link_up = check; #ifdef IXL_DEBUG printf("Link is %s\n", check ? 
"up":"down"); #endif - return (check); + /* Report if Unqualified modules are found */ + if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && + (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && + (!(status->link_info & I40E_AQ_LINK_UP))) + device_printf(pf->dev, "Link failed because " + "an unqualified module was detected\n"); + + return; } /********************************************************************* @@ -2498,7 +2599,7 @@ ixl_switch_config(struct ixl_pf *pf) device_t dev = vsi->dev; struct i40e_aqc_get_switch_config_resp *sw_config; u8 aq_buf[I40E_AQ_LARGE_BUF]; - int ret = I40E_SUCCESS; + int ret; u16 next = 0; memset(&aq_buf, 0, sizeof(aq_buf)); @@ -2506,19 +2607,26 @@ ixl_switch_config(struct ixl_pf *pf) ret = i40e_aq_get_switch_config(hw, sw_config, sizeof(aq_buf), &next, NULL); if (ret) { - device_printf(dev,"aq_get_switch_config failed!!\n"); + device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n", + ret); return (ret); } #ifdef IXL_DEBUG - printf("Switch config: header reported: %d in structure, %d total\n", + device_printf(dev, + "Switch config: header reported: %d in structure, %d total\n", sw_config->header.num_reported, sw_config->header.num_total); - printf("type=%d seid=%d uplink=%d downlink=%d\n", - sw_config->element[0].element_type, - sw_config->element[0].seid, - sw_config->element[0].uplink_seid, - sw_config->element[0].downlink_seid); + for (int i = 0; i < sw_config->header.num_reported; i++) { + device_printf(dev, + "%d: type=%d seid=%d uplink=%d downlink=%d\n", i, + sw_config->element[i].element_type, + sw_config->element[i].seid, + sw_config->element[i].uplink_seid, + sw_config->element[i].downlink_seid); + } #endif /* Simplified due to a single VSI at the moment */ + vsi->uplink_seid = sw_config->element[0].uplink_seid; + vsi->downlink_seid = sw_config->element[0].downlink_seid; vsi->seid = sw_config->element[0].seid; return (ret); } @@ -2533,6 +2641,7 @@ ixl_switch_config(struct ixl_pf *pf) static int ixl_initialize_vsi(struct ixl_vsi *vsi) { + struct ixl_pf *pf = vsi->back; struct ixl_queue *que = vsi->queues; device_t dev = vsi->dev; struct i40e_hw *hw = vsi->hw; @@ -2541,6 +2650,8 @@ ixl_initialize_vsi(struct ixl_vsi *vsi) memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = vsi->seid; + if (pf->veb_seid != 0) + ctxt.uplink_seid = pf->veb_seid; ctxt.pf_num = hw->pf_id; err = i40e_aq_get_vsi_params(hw, &ctxt, NULL); if (err) { @@ -2582,6 +2693,8 @@ ixl_initialize_vsi(struct ixl_vsi *vsi) vsi->hw_filters_add = 0; vsi->hw_filters_del = 0; + ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF); + err = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (err) { device_printf(dev,"update vsi params failed %x!!\n", @@ -2602,7 +2715,7 @@ ixl_initialize_vsi(struct ixl_vsi *vsi) size = que->num_desc * sizeof(struct i40e_tx_desc); memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq)); tctx.new_context = 1; - tctx.base = (txr->dma.pa/128); + tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS); tctx.qlen = que->num_desc; tctx.fc_ena = 0; tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */ @@ -2632,7 +2745,7 @@ ixl_initialize_vsi(struct ixl_vsi *vsi) ixl_init_tx_ring(que); /* Next setup the HMC RX Context */ - if (vsi->max_frame_size <= 2048) + if (vsi->max_frame_size <= MCLBYTES) rxr->mbuf_sz = MCLBYTES; else rxr->mbuf_sz = MJUMPAGESIZE; @@ -2649,7 +2762,7 @@ ixl_initialize_vsi(struct ixl_vsi *vsi) rctx.dtype = 0; rctx.dsize = 1; /* do 32byte descriptors */ rctx.hsplit_0 = 0; /* no HDR split initially */ - rctx.base = (rxr->dma.pa/128); + rctx.base = 
(rxr->dma.pa/IXL_RX_CTX_BASE_UNITS); rctx.qlen = que->num_desc; rctx.tphrdesc_ena = 1; rctx.tphwdesc_ena = 1; @@ -2704,7 +2817,6 @@ ixl_free_vsi(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct ixl_queue *que = vsi->queues; - struct ixl_mac_filter *f; /* Free station queues */ for (int i = 0; i < vsi->num_queues; i++, que++) { @@ -2733,6 +2845,14 @@ ixl_free_vsi(struct ixl_vsi *vsi) free(vsi->queues, M_DEVBUF); /* Free VSI filter list */ + ixl_free_mac_filters(vsi); +} + +static void +ixl_free_mac_filters(struct ixl_vsi *vsi) +{ + struct ixl_mac_filter *f; + while (!SLIST_EMPTY(&vsi->ftl)) { f = SLIST_FIRST(&vsi->ftl); SLIST_REMOVE_HEAD(&vsi->ftl, next); @@ -2764,6 +2884,7 @@ ixl_setup_stations(struct ixl_pf *pf) vsi->hw = &pf->hw; vsi->id = 0; vsi->num_vlans = 0; + vsi->back = pf; /* Get memory for the station queues */ if (!(vsi->queues = @@ -3016,6 +3137,24 @@ ixl_set_queue_tx_itr(struct ixl_queue *que) return; } +#define QUEUE_NAME_LEN 32 + +static void +ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, + struct sysctl_ctx_list *ctx, const char *sysctl_name) +{ + struct sysctl_oid *tree; + struct sysctl_oid_list *child; + struct sysctl_oid_list *vsi_list; + + tree = device_get_sysctl_tree(pf->dev); + child = SYSCTL_CHILDREN(tree); + vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name, + CTLFLAG_RD, NULL, "VSI Number"); + vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); + + ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats); +} static void ixl_add_hw_stats(struct ixl_pf *pf) @@ -3023,18 +3162,19 @@ ixl_add_hw_stats(struct ixl_pf *pf) device_t dev = pf->dev; struct ixl_vsi *vsi = &pf->vsi; struct ixl_queue *queues = vsi->queues; - struct i40e_eth_stats *vsi_stats = &vsi->eth_stats; struct i40e_hw_port_stats *pf_stats = &pf->stats; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); + struct sysctl_oid_list *vsi_list; - struct sysctl_oid *vsi_node, *queue_node; - struct sysctl_oid_list *vsi_list, *queue_list; + struct sysctl_oid *queue_node; + struct sysctl_oid_list *queue_list; struct tx_ring *txr; struct rx_ring *rxr; + char queue_namebuf[QUEUE_NAME_LEN]; /* Driver statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", @@ -3044,23 +3184,18 @@ ixl_add_hw_stats(struct ixl_pf *pf) CTLFLAG_RD, &pf->admin_irq, "Admin Queue IRQ Handled"); - /* VSI statistics */ -#define QUEUE_NAME_LEN 32 - char queue_namebuf[QUEUE_NAME_LEN]; - - // ERJ: Only one vsi now, re-do when >1 VSI enabled - // snprintf(vsi_namebuf, QUEUE_NAME_LEN, "vsi%d", vsi->info.stat_counter_idx); - vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi", - CTLFLAG_RD, NULL, "VSI-specific stats"); - vsi_list = SYSCTL_CHILDREN(vsi_node); + SYSCTL_ADD_INT(ctx, child, OID_AUTO, "vc_debug_level", + CTLFLAG_RW, &pf->vc_debug_lvl, 0, + "PF/VF Virtual Channel debug logging level"); - ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats); + ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf"); + vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node); /* Queue statistics */ for (int q = 0; q < vsi->num_queues; q++) { snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q); - queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf, - CTLFLAG_RD, NULL, "Queue #"); + queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, + OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #"); queue_list = SYSCTL_CHILDREN(queue_node); txr = &(queues[q].txr); @@ -3383,8 +3518,7 @@ static void 
ixl_init_filters(struct ixl_vsi *vsi) { /* Add broadcast address */ - u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - ixl_add_filter(vsi, bc, IXL_VLAN_ANY); + ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY); } /* @@ -3413,6 +3547,13 @@ ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr) return; } +static void +ixl_reconfigure_filters(struct ixl_vsi *vsi) +{ + + ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs); +} + /* ** This routine adds macvlan filters */ @@ -3420,10 +3561,14 @@ static void ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f, *tmp; - device_t dev = vsi->dev; + struct ixl_pf *pf; + device_t dev; DEBUGOUT("ixl_add_filter: begin"); + pf = vsi->back; + dev = pf->dev; + /* Does one already exist */ f = ixl_find_filter(vsi, macaddr, vlan); if (f != NULL) @@ -3451,6 +3596,8 @@ ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan) f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); if (f->vlan != IXL_VLAN_ANY) f->flags |= IXL_FILTER_VLAN; + else + vsi->num_macs++; ixl_add_hw_filters(vsi, f->flags, 1); return; @@ -3467,6 +3614,7 @@ ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan) f->flags |= IXL_FILTER_DEL; ixl_del_hw_filters(vsi, 1); + vsi->num_macs--; /* Check if this is the last vlan removal */ if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) { @@ -3510,9 +3658,15 @@ ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt) { struct i40e_aqc_add_macvlan_element_data *a, *b; struct ixl_mac_filter *f; - struct i40e_hw *hw = vsi->hw; - device_t dev = vsi->dev; - int err, j = 0; + struct ixl_pf *pf; + struct i40e_hw *hw; + device_t dev; + int err, j = 0; + + pf = vsi->back; + dev = pf->dev; + hw = &pf->hw; + IXL_PF_LOCK_ASSERT(pf); a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt, M_DEVBUF, M_NOWAIT | M_ZERO); @@ -3530,9 +3684,14 @@ ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt) if (f->flags == flags) { b = &a[j]; // a pox on fvl long names :) bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN); - b->vlan_tag = - (f->vlan == IXL_VLAN_ANY ? 
0 : f->vlan); - b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; + if (f->vlan == IXL_VLAN_ANY) { + b->vlan_tag = 0; + b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; + } else { + b->vlan_tag = f->vlan; + b->flags = 0; + } + b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; f->flags &= ~IXL_FILTER_ADD; j++; } @@ -3560,13 +3719,18 @@ static void ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt) { struct i40e_aqc_remove_macvlan_element_data *d, *e; - struct i40e_hw *hw = vsi->hw; - device_t dev = vsi->dev; + struct ixl_pf *pf; + struct i40e_hw *hw; + device_t dev; struct ixl_mac_filter *f, *f_temp; int err, j = 0; DEBUGOUT("ixl_del_hw_filters: begin\n"); + pf = vsi->back; + hw = &pf->hw; + dev = pf->dev; + d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt, M_DEVBUF, M_NOWAIT | M_ZERO); if (d == NULL) { @@ -3610,82 +3774,106 @@ ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt) return; } - -static void +static int ixl_enable_rings(struct ixl_vsi *vsi) { - struct i40e_hw *hw = vsi->hw; + struct ixl_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int index, error; u32 reg; + error = 0; for (int i = 0; i < vsi->num_queues; i++) { - i40e_pre_tx_queue_cfg(hw, i, TRUE); + index = vsi->first_queue + i; + i40e_pre_tx_queue_cfg(hw, index, TRUE); - reg = rd32(hw, I40E_QTX_ENA(i)); + reg = rd32(hw, I40E_QTX_ENA(index)); reg |= I40E_QTX_ENA_QENA_REQ_MASK | I40E_QTX_ENA_QENA_STAT_MASK; - wr32(hw, I40E_QTX_ENA(i), reg); + wr32(hw, I40E_QTX_ENA(index), reg); /* Verify the enable took */ for (int j = 0; j < 10; j++) { - reg = rd32(hw, I40E_QTX_ENA(i)); + reg = rd32(hw, I40E_QTX_ENA(index)); if (reg & I40E_QTX_ENA_QENA_STAT_MASK) break; i40e_msec_delay(10); } - if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) - printf("TX queue %d disabled!\n", i); + if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { + device_printf(pf->dev, "TX queue %d disabled!\n", + index); + error = ETIMEDOUT; + } - reg = rd32(hw, I40E_QRX_ENA(i)); + reg = rd32(hw, I40E_QRX_ENA(index)); reg |= I40E_QRX_ENA_QENA_REQ_MASK | I40E_QRX_ENA_QENA_STAT_MASK; - wr32(hw, I40E_QRX_ENA(i), reg); + wr32(hw, I40E_QRX_ENA(index), reg); /* Verify the enable took */ for (int j = 0; j < 10; j++) { - reg = rd32(hw, I40E_QRX_ENA(i)); + reg = rd32(hw, I40E_QRX_ENA(index)); if (reg & I40E_QRX_ENA_QENA_STAT_MASK) break; i40e_msec_delay(10); } - if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) - printf("RX queue %d disabled!\n", i); + if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { + device_printf(pf->dev, "RX queue %d disabled!\n", + index); + error = ETIMEDOUT; + } } + + return (error); } -static void +static int ixl_disable_rings(struct ixl_vsi *vsi) { - struct i40e_hw *hw = vsi->hw; + struct ixl_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int index, error; u32 reg; + error = 0; for (int i = 0; i < vsi->num_queues; i++) { - i40e_pre_tx_queue_cfg(hw, i, FALSE); + index = vsi->first_queue + i; + + i40e_pre_tx_queue_cfg(hw, index, FALSE); i40e_usec_delay(500); - reg = rd32(hw, I40E_QTX_ENA(i)); + reg = rd32(hw, I40E_QTX_ENA(index)); reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; - wr32(hw, I40E_QTX_ENA(i), reg); + wr32(hw, I40E_QTX_ENA(index), reg); /* Verify the disable took */ for (int j = 0; j < 10; j++) { - reg = rd32(hw, I40E_QTX_ENA(i)); + reg = rd32(hw, I40E_QTX_ENA(index)); if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK)) break; i40e_msec_delay(10); } - if (reg & I40E_QTX_ENA_QENA_STAT_MASK) - printf("TX queue %d still enabled!\n", i); + if (reg & I40E_QTX_ENA_QENA_STAT_MASK) { + device_printf(pf->dev, "TX queue %d still enabled!\n", + index); + error = 
ETIMEDOUT; + } - reg = rd32(hw, I40E_QRX_ENA(i)); + reg = rd32(hw, I40E_QRX_ENA(index)); reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; - wr32(hw, I40E_QRX_ENA(i), reg); + wr32(hw, I40E_QRX_ENA(index), reg); /* Verify the disable took */ for (int j = 0; j < 10; j++) { - reg = rd32(hw, I40E_QRX_ENA(i)); + reg = rd32(hw, I40E_QRX_ENA(index)); if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK)) break; i40e_msec_delay(10); } - if (reg & I40E_QRX_ENA_QENA_STAT_MASK) - printf("RX queue %d still enabled!\n", i); + if (reg & I40E_QRX_ENA_QENA_STAT_MASK) { + device_printf(pf->dev, "RX queue %d still enabled!\n", + index); + error = ETIMEDOUT; + } } + + return (error); } /** @@ -3775,16 +3963,23 @@ ixl_enable_intr(struct ixl_vsi *vsi) } static void -ixl_disable_intr(struct ixl_vsi *vsi) +ixl_disable_rings_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; struct ixl_queue *que = vsi->queues; - if (ixl_enable_msix) { + for (int i = 0; i < vsi->num_queues; i++, que++) + ixl_disable_queue(hw, que->me); +} + +static void +ixl_disable_intr(struct ixl_vsi *vsi) +{ + struct i40e_hw *hw = vsi->hw; + + if (ixl_enable_msix) ixl_disable_adminq(hw); - for (int i = 0; i < vsi->num_queues; i++, que++) - ixl_disable_queue(hw, que->me); - } else + else ixl_disable_legacy(hw); } @@ -3859,7 +4054,8 @@ static void ixl_update_stats_counters(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; - struct ixl_vsi *vsi = &pf->vsi; + struct ixl_vsi *vsi = &pf->vsi; + struct ixl_vf *vf; struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw_port_stats *osd = &pf->stats_offsets; @@ -4021,11 +4217,13 @@ ixl_update_stats_counters(struct ixl_pf *pf) /* End hw stats */ /* Update vsi stats */ - ixl_update_eth_stats(vsi); + ixl_update_vsi_stats(vsi); - /* OS statistics */ - // ERJ - these are per-port, update all vsis? - IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes); + for (int i = 0; i < pf->num_vfs; i++) { + vf = &pf->vfs[i]; + if (vf->vf_flags & VF_FLAG_ENABLED) + ixl_update_eth_stats(&pf->vfs[i].vsi); + } } /* @@ -4051,6 +4249,7 @@ ixl_do_adminq(void *context, int pending) return; } + IXL_PF_LOCK(pf); /* clean and process any events */ do { ret = i40e_clean_arq_element(hw, &event, &result); @@ -4059,11 +4258,13 @@ ixl_do_adminq(void *context, int pending) opcode = LE16_TO_CPU(event.desc.opcode); switch (opcode) { case i40e_aqc_opc_get_link_status: - vsi->link_up = ixl_config_link(hw); + ixl_link_event(pf, &event); ixl_update_link_status(pf); break; case i40e_aqc_opc_send_msg_to_pf: - /* process pf/vf communication here */ +#ifdef PCI_IOV + ixl_handle_vf_msg(pf, &event); +#endif break; case i40e_aqc_opc_event_lan_overflow: break; @@ -4081,10 +4282,16 @@ ixl_do_adminq(void *context, int pending) wr32(hw, I40E_PFINT_ICR0_ENA, reg); free(event.msg_buf, M_DEVBUF); - if (pf->msix > 1) - ixl_enable_adminq(&pf->hw); + /* + * If there are still messages to process, reschedule ourselves. + * Otherwise, re-enable our interrupt and go to sleep. 
+ */ + if (result > 0) + taskqueue_enqueue(pf->tq, &pf->adminq); else ixl_enable_intr(vsi); + + IXL_PF_UNLOCK(pf); } static int @@ -4161,8 +4368,6 @@ void ixl_update_eth_stats(struct ixl_vsi *vsi) struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *es; struct i40e_eth_stats *oes; - int i; - uint64_t tx_discards; struct i40e_hw_port_stats *nsd; u16 stat_idx = vsi->info.stat_counter_idx; @@ -4212,9 +4417,27 @@ void ixl_update_eth_stats(struct ixl_vsi *vsi) vsi->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); vsi->stat_offsets_loaded = true; +} + +static void +ixl_update_vsi_stats(struct ixl_vsi *vsi) +{ + struct ixl_pf *pf; + struct ifnet *ifp; + struct i40e_eth_stats *es; + u64 tx_discards; + + struct i40e_hw_port_stats *nsd; + + pf = vsi->back; + ifp = vsi->ifp; + es = &vsi->eth_stats; + nsd = &pf->stats; + + ixl_update_eth_stats(vsi); tx_discards = es->tx_discards + nsd->tx_dropped_link_down; - for (i = 0; i < vsi->num_queues; i++) + for (int i = 0; i < vsi->num_queues; i++) tx_discards += vsi->queues[i].txr.br->br_drops; /* Update ifnet stats */ @@ -4229,6 +4452,9 @@ void ixl_update_eth_stats(struct ixl_vsi *vsi) IXL_SET_IMCASTS(vsi, es->rx_multicast); IXL_SET_OMCASTS(vsi, es->tx_multicast); + IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes + + nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments + + nsd->rx_jabber); IXL_SET_OERRORS(vsi, es->tx_errors); IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); IXL_SET_OQDROPS(vsi, tx_discards); @@ -4440,6 +4666,8 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds) config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; /* Translate into aq cmd link_speed */ + if (speeds & 0x8) + config.link_speed |= I40E_LINK_SPEED_20GB; if (speeds & 0x4) config.link_speed |= I40E_LINK_SPEED_10GB; if (speeds & 0x2) @@ -4475,6 +4703,7 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds) ** 0x1 - advertise 100 Mb ** 0x2 - advertise 1G ** 0x4 - advertise 10G +** 0x8 - advertise 20G ** ** Does not work on 40G devices. 
*/ @@ -4499,11 +4728,36 @@ ixl_set_advertise(SYSCTL_HANDLER_ARGS) error = sysctl_handle_int(oidp, &requested_ls, 0, req); if ((error) || (req->newptr == NULL)) return (error); - if (requested_ls < 1 || requested_ls > 7) { - device_printf(dev, - "Invalid advertised speed; valid modes are 0x1 through 0x7\n"); + /* Check for sane value */ + if (requested_ls < 0x1 || requested_ls > 0xE) { + device_printf(dev, "Invalid advertised speed; " + "valid modes are 0x1 through 0xE\n"); return (EINVAL); } + /* Then check for validity based on adapter type */ + switch (hw->device_id) { + case I40E_DEV_ID_10G_BASE_T: + if (requested_ls & 0x8) { + device_printf(dev, + "20Gbs speed not supported on this device.\n"); + return (EINVAL); + } + break; + case I40E_DEV_ID_20G_KR2: + if (requested_ls & 0x1) { + device_printf(dev, + "100Mbs speed not supported on this device.\n"); + return (EINVAL); + } + break; + default: + if (requested_ls & ~0x6) { + device_printf(dev, + "Only 1/10Gbs speeds are supported on this device.\n"); + return (EINVAL); + } + break; + } /* Exit if no change */ if (pf->advertised_speed == requested_ls) @@ -4581,8 +4835,8 @@ ixl_get_bus_info(struct i40e_hw *hw, device_t dev) if ((hw->bus.width <= i40e_bus_width_pcie_x8) && (hw->bus.speed < i40e_bus_speed_8000)) { device_printf(dev, "PCI-Express bandwidth available" - " for this device\n is not sufficient for" - " normal operation.\n"); + " for this device\n may be insufficient for" + " optimal performance.\n"); device_printf(dev, "For expected performance a x8 " "PCIE Gen3 slot is required.\n"); } @@ -4643,15 +4897,15 @@ ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) { - struct ixl_pf *pf = (struct ixl_pf *)arg1; - struct i40e_hw *hw = &pf->hw; - struct i40e_aq_get_phy_abilities_resp abilities_resp; - char buf[512]; + struct ixl_pf *pf = (struct ixl_pf *)arg1; + struct i40e_hw *hw = &pf->hw; + char buf[512]; + enum i40e_status_code aq_error = 0; - enum i40e_status_code aq_error = 0; + struct i40e_aq_get_phy_abilities_resp abilities; - // TODO: Print out list of qualified modules as well? 
- aq_error = i40e_aq_get_phy_capabilities(hw, TRUE, FALSE, &abilities_resp, NULL); + aq_error = i40e_aq_get_phy_capabilities(hw, + TRUE, FALSE, &abilities, NULL); if (aq_error) { printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error); return (EPERM); @@ -4664,9 +4918,9 @@ ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) "EEE cap : %#06x\n" "EEER reg : %#010x\n" "D3 Lpan : %#04x", - abilities_resp.phy_type, abilities_resp.link_speed, - abilities_resp.abilities, abilities_resp.eee_capability, - abilities_resp.eeer_val, abilities_resp.d3_lpan); + abilities.phy_type, abilities.link_speed, + abilities.abilities, abilities.eee_capability, + abilities.eeer_val, abilities.d3_lpan); return (sysctl_handle_string(oidp, buf, strlen(buf), req)); } @@ -4752,7 +5006,8 @@ ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) IXL_SW_RES_SIZE, NULL); if (error) { - device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n", + device_printf(dev, + "%s: get_switch_resource_alloc() error %d, aq error %d\n", __func__, error, hw->aq.asq_last_status); sbuf_delete(buf); return error; @@ -4856,7 +5111,8 @@ ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) error = i40e_aq_get_switch_config(hw, sw_config, sizeof(aq_buf), &next, NULL); if (error) { - device_printf(dev, "%s: aq_get_switch_config() error %d, aq error %d\n", + device_printf(dev, + "%s: aq_get_switch_config() error %d, aq error %d\n", __func__, error, hw->aq.asq_last_status); sbuf_delete(buf); return error; @@ -4881,11 +5137,14 @@ ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) // "%4d (%8s) | %8s %8s %#8x", sbuf_printf(buf, "%4d", sw_config->element[i].seid); sbuf_cat(buf, " "); - sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, sw_config->element[i].seid, false)); + sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, + sw_config->element[i].seid, false)); sbuf_cat(buf, " | "); - sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].uplink_seid, true)); + sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, + sw_config->element[i].uplink_seid, true)); sbuf_cat(buf, " "); - sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, sw_config->element[i].downlink_seid, false)); + sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf, + sw_config->element[i].downlink_seid, false)); sbuf_cat(buf, " "); sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type); if (i < sw_config->header.num_reported - 1) @@ -4907,67 +5166,1449 @@ ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) return (error); } - -/* -** Dump TX desc given index. -** Doesn't work; don't use. -** TODO: Also needs a queue index input! -**/ -static int -ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS) -{ - struct ixl_pf *pf = (struct ixl_pf *)arg1; - device_t dev = pf->dev; - struct sbuf *buf; - int error = 0; - - u16 desc_idx = 0; - - buf = sbuf_new_for_sysctl(NULL, NULL, 0, req); - if (!buf) { - device_printf(dev, "Could not allocate sbuf for output.\n"); - return (ENOMEM); - } - - /* Read in index */ - error = sysctl_handle_int(oidp, &desc_idx, 0, req); - if (error) - return (error); - if (req->newptr == NULL) - return (EIO); // fix - if (desc_idx > 1024) { // fix - device_printf(dev, - "Invalid descriptor index, needs to be < 1024\n"); // fix - return (EINVAL); - } - - // Don't use this sysctl yet - if (TRUE) - return (ENODEV); - - sbuf_cat(buf, "\n"); - - // set to queue 1? 
- struct ixl_queue *que = pf->vsi.queues; - struct tx_ring *txr = &(que[1].txr); - struct i40e_tx_desc *txd = &txr->base[desc_idx]; - - sbuf_printf(buf, "Que: %d, Desc: %d\n", que->me, desc_idx); - sbuf_printf(buf, "Addr: %#18lx\n", txd->buffer_addr); - sbuf_printf(buf, "Opts: %#18lx\n", txd->cmd_type_offset_bsz); - - error = sbuf_finish(buf); - if (error) { - device_printf(dev, "Error finishing sbuf: %d\n", error); - sbuf_delete(buf); - return error; - } - - error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req); - if (error) - device_printf(dev, "sysctl error: %d\n", error); - sbuf_delete(buf); - return error; -} #endif /* IXL_DEBUG_SYSCTL */ + +#ifdef PCI_IOV +static int +ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf) +{ + struct i40e_hw *hw; + struct ixl_vsi *vsi; + struct i40e_vsi_context vsi_ctx; + int i; + uint16_t first_queue; + enum i40e_status_code code; + + hw = &pf->hw; + vsi = &pf->vsi; + + vsi_ctx.pf_num = hw->pf_id; + vsi_ctx.uplink_seid = pf->veb_seid; + vsi_ctx.connection_type = IXL_VSI_DATA_PORT; + vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num; + vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF; + + bzero(&vsi_ctx.info, sizeof(vsi_ctx.info)); + + vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID); + vsi_ctx.info.switch_id = htole16(0); + + vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID); + vsi_ctx.info.sec_flags = 0; + if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF) + vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; + + vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID); + vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | + I40E_AQ_VSI_PVLAN_EMOD_NOTHING; + + vsi_ctx.info.valid_sections |= + htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID); + vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); + first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES; + for (i = 0; i < IXLV_MAX_QUEUES; i++) + vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i); + for (; i < nitems(vsi_ctx.info.queue_mapping); i++) + vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK); + + vsi_ctx.info.tc_mapping[0] = htole16( + (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | + (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); + + code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL); + if (code != I40E_SUCCESS) + return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); + vf->vsi.seid = vsi_ctx.seid; + vf->vsi.vsi_num = vsi_ctx.vsi_number; + vf->vsi.first_queue = first_queue; + vf->vsi.num_queues = IXLV_MAX_QUEUES; + + code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL); + if (code != I40E_SUCCESS) + return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); + + code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL); + if (code != I40E_SUCCESS) { + device_printf(pf->dev, "Failed to disable BW limit: %d\n", + ixl_adminq_err_to_errno(hw->aq.asq_last_status)); + return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); + } + + memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info)); + return (0); +} + +static int +ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf) +{ + struct i40e_hw *hw; + int error; + + hw = &pf->hw; + + error = ixl_vf_alloc_vsi(pf, vf); + if (error != 0) + return (error); + + vf->vsi.hw_filters_add = 0; + vf->vsi.hw_filters_del = 0; + ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY); + ixl_reconfigure_filters(&vf->vsi); + + return (0); +} + +static void +ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, + uint32_t val) +{ + uint32_t qtable; + int 
index, shift; + + /* + * Two queues are mapped in a single register, so we have to do some + * gymnastics to convert the queue number into a register index and + * shift. + */ + index = qnum / 2; + shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT; + + qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num)); + qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift); + qtable |= val << shift; + wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable); +} + +static void +ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf) +{ + struct i40e_hw *hw; + uint32_t qtable; + int i; + + hw = &pf->hw; + + /* + * Contiguous mappings aren't actually supported by the hardware, + * so we have to use non-contiguous mappings. + */ + wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num), + I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); + + wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num), + I40E_VPLAN_MAPENA_TXRX_ENA_MASK); + + for (i = 0; i < vf->vsi.num_queues; i++) { + qtable = (vf->vsi.first_queue + i) << + I40E_VPLAN_QTABLE_QINDEX_SHIFT; + + wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable); + } + + /* Map queues allocated to VF to its VSI. */ + for (i = 0; i < vf->vsi.num_queues; i++) + ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i); + + /* Set rest of VSI queues as unused. */ + for (; i < IXL_MAX_VSI_QUEUES; i++) + ixl_vf_map_vsi_queue(hw, vf, i, + I40E_VSILAN_QTABLE_QINDEX_0_MASK); + + ixl_flush(hw); +} + +static void +ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi) +{ + struct i40e_hw *hw; + + hw = &pf->hw; + + if (vsi->seid == 0) + return; + + i40e_aq_delete_element(hw, vsi->seid, NULL); +} + +static void +ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg) +{ + + wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); + ixl_flush(hw); +} + +static void +ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg) +{ + + wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | + I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); + ixl_flush(hw); +} + +static void +ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf) +{ + struct i40e_hw *hw; + uint32_t vfint_reg, vpint_reg; + int i; + + hw = &pf->hw; + + ixl_vf_vsi_release(pf, &vf->vsi); + + /* Index 0 has a special register. */ + ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num)); + + for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) { + vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num); + ixl_vf_disable_queue_intr(hw, vfint_reg); + } + + /* Index 0 has a special register. 
*/ + ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num)); + + for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) { + vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num); + ixl_vf_unregister_intr(hw, vpint_reg); + } + + vf->vsi.num_queues = 0; +} + +static int +ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf) +{ + struct i40e_hw *hw; + int i; + uint16_t global_vf_num; + uint32_t ciad; + + hw = &pf->hw; + global_vf_num = hw->func_caps.vf_base_id + vf->vf_num; + + wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS | + (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); + for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) { + ciad = rd32(hw, I40E_PF_PCI_CIAD); + if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0) + return (0); + DELAY(1); + } + + return (ETIMEDOUT); +} + +static void +ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf) +{ + struct i40e_hw *hw; + uint32_t vfrtrig; + + hw = &pf->hw; + + vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num)); + vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; + wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig); + ixl_flush(hw); + + ixl_reinit_vf(pf, vf); +} + +static void +ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf) +{ + struct i40e_hw *hw; + uint32_t vfrstat, vfrtrig; + int i, error; + + hw = &pf->hw; + + error = ixl_flush_pcie(pf, vf); + if (error != 0) + device_printf(pf->dev, + "Timed out waiting for PCIe activity to stop on VF-%d\n", + vf->vf_num); + + for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) { + DELAY(10); + + vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num)); + if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK) + break; + } + + if (i == IXL_VF_RESET_TIMEOUT) + device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num); + + wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED); + + vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num)); + vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; + wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig); + + if (vf->vsi.seid != 0) + ixl_disable_rings(&vf->vsi); + + ixl_vf_release_resources(pf, vf); + ixl_vf_setup_vsi(pf, vf); + ixl_vf_map_queues(pf, vf); + + wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE); + ixl_flush(hw); +} + +static const char * +ixl_vc_opcode_str(uint16_t op) +{ + + switch (op) { + case I40E_VIRTCHNL_OP_VERSION: + return ("VERSION"); + case I40E_VIRTCHNL_OP_RESET_VF: + return ("RESET_VF"); + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + return ("GET_VF_RESOURCES"); + case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: + return ("CONFIG_TX_QUEUE"); + case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: + return ("CONFIG_RX_QUEUE"); + case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: + return ("CONFIG_VSI_QUEUES"); + case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + return ("CONFIG_IRQ_MAP"); + case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + return ("ENABLE_QUEUES"); + case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + return ("DISABLE_QUEUES"); + case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + return ("ADD_ETHER_ADDRESS"); + case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + return ("DEL_ETHER_ADDRESS"); + case I40E_VIRTCHNL_OP_ADD_VLAN: + return ("ADD_VLAN"); + case I40E_VIRTCHNL_OP_DEL_VLAN: + return ("DEL_VLAN"); + case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + return ("CONFIG_PROMISCUOUS_MODE"); + case I40E_VIRTCHNL_OP_GET_STATS: + return ("GET_STATS"); + case I40E_VIRTCHNL_OP_FCOE: + return ("FCOE"); + case I40E_VIRTCHNL_OP_EVENT: + return ("EVENT"); + default: + return ("UNKNOWN"); + } +} + +static int +ixl_vc_opcode_level(uint16_t opcode) +{ + + switch (opcode) { + case I40E_VIRTCHNL_OP_GET_STATS: + return (10); + default: + return 
(5); + } +} + +static void +ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, + enum i40e_status_code status, void *msg, uint16_t len) +{ + struct i40e_hw *hw; + int global_vf_id; + + hw = &pf->hw; + global_vf_id = hw->func_caps.vf_base_id + vf->vf_num; + + I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op), + "Sending msg (op=%s[%d], status=%d) to VF-%d\n", + ixl_vc_opcode_str(op), op, status, vf->vf_num); + + i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL); +} + +static void +ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op) +{ + + ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0); +} + +static void +ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, + enum i40e_status_code status, const char *file, int line) +{ + + I40E_VC_DEBUG(pf, 1, + "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n", + ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line); + ixl_send_vf_msg(pf, vf, op, status, NULL, 0); +} + +static void +ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, + uint16_t msg_size) +{ + struct i40e_virtchnl_version_info reply; + + if (msg_size != sizeof(struct i40e_virtchnl_version_info)) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION, + I40E_ERR_PARAM); + return; + } + + reply.major = I40E_VIRTCHNL_VERSION_MAJOR; + reply.minor = I40E_VIRTCHNL_VERSION_MINOR; + ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply, + sizeof(reply)); +} + +static void +ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, + uint16_t msg_size) +{ + + if (msg_size != 0) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF, + I40E_ERR_PARAM); + return; + } + + ixl_reset_vf(pf, vf); + + /* No response to a reset message. */ +} + +static void +ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, + uint16_t msg_size) +{ + struct i40e_virtchnl_vf_resource reply; + + if (msg_size != 0) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + I40E_ERR_PARAM); + return; + } + + bzero(&reply, sizeof(reply)); + + reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; + + reply.num_vsis = 1; + reply.num_queue_pairs = vf->vsi.num_queues; + reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf; + reply.vsi_res[0].vsi_id = vf->vsi.vsi_num; + reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV; + reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues; + memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN); + + ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + I40E_SUCCESS, &reply, sizeof(reply)); +} + +static int +ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, + struct i40e_virtchnl_txq_info *info) +{ + struct i40e_hw *hw; + struct i40e_hmc_obj_txq txq; + uint16_t global_queue_num, global_vf_num; + enum i40e_status_code status; + uint32_t qtx_ctl; + + hw = &pf->hw; + global_queue_num = vf->vsi.first_queue + info->queue_id; + global_vf_num = hw->func_caps.vf_base_id + vf->vf_num; + bzero(&txq, sizeof(txq)); + + status = i40e_clear_lan_tx_queue_context(hw, global_queue_num); + if (status != I40E_SUCCESS) + return (EINVAL); + + txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS; + + txq.head_wb_ena = info->headwb_enabled; + txq.head_wb_addr = info->dma_headwb_addr; + txq.qlen = info->ring_len; + txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]); + txq.rdylist_act = 0; + + status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq); + if (status != I40E_SUCCESS) + return (EINVAL); + + qtx_ctl = I40E_QTX_CTL_VF_QUEUE | 
+ (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) | + (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT); + wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl); + ixl_flush(hw); + + return (0); +} + +static int +ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, + struct i40e_virtchnl_rxq_info *info) +{ + struct i40e_hw *hw; + struct i40e_hmc_obj_rxq rxq; + uint16_t global_queue_num; + enum i40e_status_code status; + + hw = &pf->hw; + global_queue_num = vf->vsi.first_queue + info->queue_id; + bzero(&rxq, sizeof(rxq)); + + if (info->databuffer_size > IXL_VF_MAX_BUFFER) + return (EINVAL); + + if (info->max_pkt_size > IXL_VF_MAX_FRAME || + info->max_pkt_size < ETHER_MIN_LEN) + return (EINVAL); + + if (info->splithdr_enabled) { + if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER) + return (EINVAL); + + rxq.hsplit_0 = info->rx_split_pos & + (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 | + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP | + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP | + I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP); + rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; + + rxq.dtype = 2; + } + + status = i40e_clear_lan_rx_queue_context(hw, global_queue_num); + if (status != I40E_SUCCESS) + return (EINVAL); + + rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS; + rxq.qlen = info->ring_len; + + rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; + + rxq.dsize = 1; + rxq.crcstrip = 1; + rxq.l2tsel = 1; + + rxq.rxmax = info->max_pkt_size; + rxq.tphrdesc_ena = 1; + rxq.tphwdesc_ena = 1; + rxq.tphdata_ena = 1; + rxq.tphhead_ena = 1; + rxq.lrxqthresh = 2; + rxq.prefena = 1; + + status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq); + if (status != I40E_SUCCESS) + return (EINVAL); + + return (0); +} + +static void +ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, + uint16_t msg_size) +{ + struct i40e_virtchnl_vsi_queue_config_info *info; + struct i40e_virtchnl_queue_pair_info *pair; + int i; + + if (msg_size < sizeof(*info)) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + I40E_ERR_PARAM); + return; + } + + info = msg; + if (info->num_queue_pairs == 0) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + I40E_ERR_PARAM); + return; + } + + if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + I40E_ERR_PARAM); + return; + } + + if (info->vsi_id != vf->vsi.vsi_num) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + I40E_ERR_PARAM); + return; + } + + for (i = 0; i < info->num_queue_pairs; i++) { + pair = &info->qpair[i]; + + if (pair->txq.vsi_id != vf->vsi.vsi_num || + pair->rxq.vsi_id != vf->vsi.vsi_num || + pair->txq.queue_id != pair->rxq.queue_id || + pair->txq.queue_id >= vf->vsi.num_queues) { + + i40e_send_vf_nack(pf, vf, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); + return; + } + + if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) { + i40e_send_vf_nack(pf, vf, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); + return; + } + + if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) { + i40e_send_vf_nack(pf, vf, + I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); + return; + } + } + + ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES); +} + +static void +ixl_vf_set_qctl(struct ixl_pf *pf, + const struct i40e_virtchnl_vector_map *vector, + enum i40e_queue_type cur_type, uint16_t cur_queue, + enum i40e_queue_type *last_type, uint16_t *last_queue) +{ + uint32_t offset, qctl; + uint16_t itr_indx; + + if (cur_type == 
I40E_QUEUE_TYPE_RX) { + offset = I40E_QINT_RQCTL(cur_queue); + itr_indx = vector->rxitr_idx; + } else { + offset = I40E_QINT_TQCTL(cur_queue); + itr_indx = vector->txitr_idx; + } + + qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | + (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | + (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | + I40E_QINT_RQCTL_CAUSE_ENA_MASK | + (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT)); + + wr32(&pf->hw, offset, qctl); + + *last_type = cur_type; + *last_queue = cur_queue; +} + +static void +ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, + const struct i40e_virtchnl_vector_map *vector) +{ + struct i40e_hw *hw; + u_int qindex; + enum i40e_queue_type type, last_type; + uint32_t lnklst_reg; + uint16_t rxq_map, txq_map, cur_queue, last_queue; + + hw = &pf->hw; + + rxq_map = vector->rxq_map; + txq_map = vector->txq_map; + + last_queue = IXL_END_OF_INTR_LNKLST; + last_type = I40E_QUEUE_TYPE_RX; + + /* + * The datasheet says to optimize performance, RX queues and TX queues + * should be interleaved in the interrupt linked list, so we process + * both at once here. + */ + while ((rxq_map != 0) || (txq_map != 0)) { + if (txq_map != 0) { + qindex = ffs(txq_map) - 1; + type = I40E_QUEUE_TYPE_TX; + cur_queue = vf->vsi.first_queue + qindex; + ixl_vf_set_qctl(pf, vector, type, cur_queue, + &last_type, &last_queue); + txq_map &= ~(1 << qindex); + } + + if (rxq_map != 0) { + qindex = ffs(rxq_map) - 1; + type = I40E_QUEUE_TYPE_RX; + cur_queue = vf->vsi.first_queue + qindex; + ixl_vf_set_qctl(pf, vector, type, cur_queue, + &last_type, &last_queue); + rxq_map &= ~(1 << qindex); + } + } + + if (vector->vector_id == 0) + lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num); + else + lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id, + vf->vf_num); + wr32(hw, lnklst_reg, + (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) | + (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)); + + ixl_flush(hw); +} + +static void +ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, + uint16_t msg_size) +{ + struct i40e_virtchnl_irq_map_info *map; + struct i40e_virtchnl_vector_map *vector; + struct i40e_hw *hw; + int i, largest_txq, largest_rxq; + + hw = &pf->hw; + + if (msg_size < sizeof(*map)) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + I40E_ERR_PARAM); + return; + } + + map = msg; + if (map->num_vectors == 0) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + I40E_ERR_PARAM); + return; + } + + if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + I40E_ERR_PARAM); + return; + } + + for (i = 0; i < map->num_vectors; i++) { + vector = &map->vecmap[i]; + + if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) || + vector->vsi_id != vf->vsi.vsi_num) { + i40e_send_vf_nack(pf, vf, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); + return; + } + + if (vector->rxq_map != 0) { + largest_rxq = fls(vector->rxq_map) - 1; + if (largest_rxq >= vf->vsi.num_queues) { + i40e_send_vf_nack(pf, vf, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + I40E_ERR_PARAM); + return; + } + } + + if (vector->txq_map != 0) { + largest_txq = fls(vector->txq_map) - 1; + if (largest_txq >= vf->vsi.num_queues) { + i40e_send_vf_nack(pf, vf, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + I40E_ERR_PARAM); + return; + } + } + + if (vector->rxitr_idx > IXL_MAX_ITR_IDX || + vector->txitr_idx > IXL_MAX_ITR_IDX) { + i40e_send_vf_nack(pf, vf, + I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + 
I40E_ERR_PARAM); + return; + } + + ixl_vf_config_vector(pf, vf, vector); + } + + ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP); +} + +static void +ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, + uint16_t msg_size) +{ + struct i40e_virtchnl_queue_select *select; + int error; + + if (msg_size != sizeof(*select)) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, + I40E_ERR_PARAM); + return; + } + + select = msg; + if (select->vsi_id != vf->vsi.vsi_num || + select->rx_queues == 0 || select->tx_queues == 0) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, + I40E_ERR_PARAM); + return; + } + + error = ixl_enable_rings(&vf->vsi); + if (error) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, + I40E_ERR_TIMEOUT); + return; + } + + ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES); +} + +static void +ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, + void *msg, uint16_t msg_size) +{ + struct i40e_virtchnl_queue_select *select; + int error; + + if (msg_size != sizeof(*select)) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, + I40E_ERR_PARAM); + return; + } + + select = msg; + if (select->vsi_id != vf->vsi.vsi_num || + select->rx_queues == 0 || select->tx_queues == 0) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, + I40E_ERR_PARAM); + return; + } + + error = ixl_disable_rings(&vf->vsi); + if (error) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, + I40E_ERR_TIMEOUT); + return; + } + + ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES); +} + +static boolean_t +ixl_zero_mac(const uint8_t *addr) +{ + uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; + + return (cmp_etheraddr(addr, zero)); +} + +static boolean_t +ixl_bcast_mac(const uint8_t *addr) +{ + + return (cmp_etheraddr(addr, ixl_bcast_addr)); +} + +static int +ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr) +{ + + if (ixl_zero_mac(addr) || ixl_bcast_mac(addr)) + return (EINVAL); + + /* + * If the VF is not allowed to change its MAC address, don't let it + * set a MAC filter for an address that is not a multicast address and + * is not its assigned MAC. 
+ */ + if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) && + !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac))) + return (EPERM); + + return (0); +} + +static void +ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, + uint16_t msg_size) +{ + struct i40e_virtchnl_ether_addr_list *addr_list; + struct i40e_virtchnl_ether_addr *addr; + struct ixl_vsi *vsi; + int i; + size_t expected_size; + + vsi = &vf->vsi; + + if (msg_size < sizeof(*addr_list)) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, + I40E_ERR_PARAM); + return; + } + + addr_list = msg; + expected_size = sizeof(*addr_list) + + addr_list->num_elements * sizeof(*addr); + + if (addr_list->num_elements == 0 || + addr_list->vsi_id != vsi->vsi_num || + msg_size != expected_size) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, + I40E_ERR_PARAM); + return; + } + + for (i = 0; i < addr_list->num_elements; i++) { + if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) { + i40e_send_vf_nack(pf, vf, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM); + return; + } + } + + for (i = 0; i < addr_list->num_elements; i++) { + addr = &addr_list->list[i]; + ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY); + } + + ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS); +} + +static void +ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, + uint16_t msg_size) +{ + struct i40e_virtchnl_ether_addr_list *addr_list; + struct i40e_virtchnl_ether_addr *addr; + size_t expected_size; + int i; + + if (msg_size < sizeof(*addr_list)) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, + I40E_ERR_PARAM); + return; + } + + addr_list = msg; + expected_size = sizeof(*addr_list) + + addr_list->num_elements * sizeof(*addr); + + if (addr_list->num_elements == 0 || + addr_list->vsi_id != vf->vsi.vsi_num || + msg_size != expected_size) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, + I40E_ERR_PARAM); + return; + } + + for (i = 0; i < addr_list->num_elements; i++) { + addr = &addr_list->list[i]; + if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) { + i40e_send_vf_nack(pf, vf, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM); + return; + } + } + + for (i = 0; i < addr_list->num_elements; i++) { + addr = &addr_list->list[i]; + ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY); + } + + ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS); +} + +static enum i40e_status_code +ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf) +{ + struct i40e_vsi_context vsi_ctx; + + vsi_ctx.seid = vf->vsi.seid; + + bzero(&vsi_ctx.info, sizeof(vsi_ctx.info)); + vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID); + vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | + I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; + return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL)); +} + +static void +ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, + uint16_t msg_size) +{ + struct i40e_virtchnl_vlan_filter_list *filter_list; + enum i40e_status_code code; + size_t expected_size; + int i; + + if (msg_size < sizeof(*filter_list)) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, + I40E_ERR_PARAM); + return; + } + + filter_list = msg; + expected_size = sizeof(*filter_list) + + filter_list->num_elements * sizeof(uint16_t); + if (filter_list->num_elements == 0 || + filter_list->vsi_id != vf->vsi.vsi_num || + msg_size != expected_size) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, + I40E_ERR_PARAM); + return; + } + + 
if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, + I40E_ERR_PARAM); + return; + } + + for (i = 0; i < filter_list->num_elements; i++) { + if (filter_list->vlan_id[i] > EVL_VLID_MASK) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, + I40E_ERR_PARAM); + return; + } + } + + code = ixl_vf_enable_vlan_strip(pf, vf); + if (code != I40E_SUCCESS) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN, + I40E_ERR_PARAM); + } + + for (i = 0; i < filter_list->num_elements; i++) + ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]); + + ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN); +} + +static void +ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, + uint16_t msg_size) +{ + struct i40e_virtchnl_vlan_filter_list *filter_list; + int i; + size_t expected_size; + + if (msg_size < sizeof(*filter_list)) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN, + I40E_ERR_PARAM); + return; + } + + filter_list = msg; + expected_size = sizeof(*filter_list) + + filter_list->num_elements * sizeof(uint16_t); + if (filter_list->num_elements == 0 || + filter_list->vsi_id != vf->vsi.vsi_num || + msg_size != expected_size) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN, + I40E_ERR_PARAM); + return; + } + + for (i = 0; i < filter_list->num_elements; i++) { + if (filter_list->vlan_id[i] > EVL_VLID_MASK) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN, + I40E_ERR_PARAM); + return; + } + } + + if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN, + I40E_ERR_PARAM); + return; + } + + for (i = 0; i < filter_list->num_elements; i++) + ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]); + + ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN); +} + +static void +ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, + void *msg, uint16_t msg_size) +{ + struct i40e_virtchnl_promisc_info *info; + enum i40e_status_code code; + + if (msg_size != sizeof(*info)) { + i40e_send_vf_nack(pf, vf, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); + return; + } + + if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) { + i40e_send_vf_nack(pf, vf, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); + return; + } + + info = msg; + if (info->vsi_id != vf->vsi.vsi_num) { + i40e_send_vf_nack(pf, vf, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); + return; + } + + code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id, + info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL); + if (code != I40E_SUCCESS) { + i40e_send_vf_nack(pf, vf, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code); + return; + } + + code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id, + info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL); + if (code != I40E_SUCCESS) { + i40e_send_vf_nack(pf, vf, + I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code); + return; + } + + ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE); +} + +static void +ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, + uint16_t msg_size) +{ + struct i40e_virtchnl_queue_select *queue; + + if (msg_size != sizeof(*queue)) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS, + I40E_ERR_PARAM); + return; + } + + queue = msg; + if (queue->vsi_id != vf->vsi.vsi_num) { + i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS, + I40E_ERR_PARAM); + return; + } + + ixl_update_eth_stats(&vf->vsi); + + ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS, + I40E_SUCCESS, 
&vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats)); +} + +static void +ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event) +{ + struct ixl_vf *vf; + void *msg; + uint16_t vf_num, msg_size; + uint32_t opcode; + + vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id; + opcode = le32toh(event->desc.cookie_high); + + if (vf_num >= pf->num_vfs) { + device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num); + return; + } + + vf = &pf->vfs[vf_num]; + msg = event->msg_buf; + msg_size = event->msg_len; + + I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode), + "Got msg %s(%d) from VF-%d of size %d\n", + ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size); + + switch (opcode) { + case I40E_VIRTCHNL_OP_VERSION: + ixl_vf_version_msg(pf, vf, msg, msg_size); + break; + case I40E_VIRTCHNL_OP_RESET_VF: + ixl_vf_reset_msg(pf, vf, msg, msg_size); + break; + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + ixl_vf_get_resources_msg(pf, vf, msg, msg_size); + break; + case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: + ixl_vf_config_vsi_msg(pf, vf, msg, msg_size); + break; + case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + ixl_vf_config_irq_msg(pf, vf, msg, msg_size); + break; + case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + ixl_vf_enable_queues_msg(pf, vf, msg, msg_size); + break; + case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + ixl_vf_disable_queues_msg(pf, vf, msg, msg_size); + break; + case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + ixl_vf_add_mac_msg(pf, vf, msg, msg_size); + break; + case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + ixl_vf_del_mac_msg(pf, vf, msg, msg_size); + break; + case I40E_VIRTCHNL_OP_ADD_VLAN: + ixl_vf_add_vlan_msg(pf, vf, msg, msg_size); + break; + case I40E_VIRTCHNL_OP_DEL_VLAN: + ixl_vf_del_vlan_msg(pf, vf, msg, msg_size); + break; + case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + ixl_vf_config_promisc_msg(pf, vf, msg, msg_size); + break; + case I40E_VIRTCHNL_OP_GET_STATS: + ixl_vf_get_stats_msg(pf, vf, msg, msg_size); + break; + + /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */ + case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: + case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: + default: + i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED); + break; + } +} + +/* Handle any VFs that have reset themselves via a Function Level Reset(FLR). 
*/ +static void +ixl_handle_vflr(void *arg, int pending) +{ + struct ixl_pf *pf; + struct i40e_hw *hw; + uint16_t global_vf_num; + uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0; + int i; + + pf = arg; + hw = &pf->hw; + + IXL_PF_LOCK(pf); + for (i = 0; i < pf->num_vfs; i++) { + global_vf_num = hw->func_caps.vf_base_id + i; + + vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num); + vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num); + vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index)); + if (vflrstat & vflrstat_mask) { + wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index), + vflrstat_mask); + + ixl_reinit_vf(pf, &pf->vfs[i]); + } + } + + icr0 = rd32(hw, I40E_PFINT_ICR0_ENA); + icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, icr0); + ixl_flush(hw); + + IXL_PF_UNLOCK(pf); +} + +static int +ixl_adminq_err_to_errno(enum i40e_admin_queue_err err) +{ + + switch (err) { + case I40E_AQ_RC_EPERM: + return (EPERM); + case I40E_AQ_RC_ENOENT: + return (ENOENT); + case I40E_AQ_RC_ESRCH: + return (ESRCH); + case I40E_AQ_RC_EINTR: + return (EINTR); + case I40E_AQ_RC_EIO: + return (EIO); + case I40E_AQ_RC_ENXIO: + return (ENXIO); + case I40E_AQ_RC_E2BIG: + return (E2BIG); + case I40E_AQ_RC_EAGAIN: + return (EAGAIN); + case I40E_AQ_RC_ENOMEM: + return (ENOMEM); + case I40E_AQ_RC_EACCES: + return (EACCES); + case I40E_AQ_RC_EFAULT: + return (EFAULT); + case I40E_AQ_RC_EBUSY: + return (EBUSY); + case I40E_AQ_RC_EEXIST: + return (EEXIST); + case I40E_AQ_RC_EINVAL: + return (EINVAL); + case I40E_AQ_RC_ENOTTY: + return (ENOTTY); + case I40E_AQ_RC_ENOSPC: + return (ENOSPC); + case I40E_AQ_RC_ENOSYS: + return (ENOSYS); + case I40E_AQ_RC_ERANGE: + return (ERANGE); + case I40E_AQ_RC_EFLUSHED: + return (EINVAL); /* No exact equivalent in errno.h */ + case I40E_AQ_RC_BAD_ADDR: + return (EFAULT); + case I40E_AQ_RC_EMODE: + return (EPERM); + case I40E_AQ_RC_EFBIG: + return (EFBIG); + default: + return (EINVAL); + } +} + +static int +ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t *params) +{ + struct ixl_pf *pf; + struct i40e_hw *hw; + struct ixl_vsi *pf_vsi; + enum i40e_status_code ret; + int i, error; + + pf = device_get_softc(dev); + hw = &pf->hw; + pf_vsi = &pf->vsi; + + IXL_PF_LOCK(pf); + pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT | + M_ZERO); + + if (pf->vfs == NULL) { + error = ENOMEM; + goto fail; + } + + for (i = 0; i < num_vfs; i++) + sysctl_ctx_init(&pf->vfs[i].ctx); + + ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid, + 1, FALSE, FALSE, &pf->veb_seid, NULL); + if (ret != I40E_SUCCESS) { + error = ixl_adminq_err_to_errno(hw->aq.asq_last_status); + device_printf(dev, "add_veb failed; code=%d error=%d", ret, + error); + goto fail; + } + + ixl_configure_msix(pf); + ixl_enable_adminq(hw); + + pf->num_vfs = num_vfs; + IXL_PF_UNLOCK(pf); + return (0); + +fail: + free(pf->vfs, M_IXL); + pf->vfs = NULL; + IXL_PF_UNLOCK(pf); + return (error); +} + +static void +ixl_uninit_iov(device_t dev) +{ + struct ixl_pf *pf; + struct i40e_hw *hw; + struct ixl_vsi *vsi; + struct ifnet *ifp; + struct ixl_vf *vfs; + int i, num_vfs; + + pf = device_get_softc(dev); + hw = &pf->hw; + vsi = &pf->vsi; + ifp = vsi->ifp; + + IXL_PF_LOCK(pf); + for (i = 0; i < pf->num_vfs; i++) { + if (pf->vfs[i].vsi.seid != 0) + i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL); + } + + if (pf->veb_seid != 0) { + i40e_aq_delete_element(hw, pf->veb_seid, NULL); + pf->veb_seid = 0; + } + + if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) + ixl_disable_intr(vsi); + + 
vfs = pf->vfs; + num_vfs = pf->num_vfs; + + pf->vfs = NULL; + pf->num_vfs = 0; + IXL_PF_UNLOCK(pf); + + /* Do this after the unlock as sysctl_ctx_free might sleep. */ + for (i = 0; i < num_vfs; i++) + sysctl_ctx_free(&vfs[i].ctx); + free(vfs, M_IXL); +} + +static int +ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) +{ + char sysctl_name[QUEUE_NAME_LEN]; + struct ixl_pf *pf; + struct ixl_vf *vf; + const void *mac; + size_t size; + int error; + + pf = device_get_softc(dev); + vf = &pf->vfs[vfnum]; + + IXL_PF_LOCK(pf); + vf->vf_num = vfnum; + + vf->vsi.back = pf; + vf->vf_flags = VF_FLAG_ENABLED; + SLIST_INIT(&vf->vsi.ftl); + + error = ixl_vf_setup_vsi(pf, vf); + if (error != 0) + goto out; + + if (nvlist_exists_binary(params, "mac-addr")) { + mac = nvlist_get_binary(params, "mac-addr", &size); + bcopy(mac, vf->mac, ETHER_ADDR_LEN); + + if (nvlist_get_bool(params, "allow-set-mac")) + vf->vf_flags |= VF_FLAG_SET_MAC_CAP; + } else + /* + * If the administrator has not specified a MAC address then + * we must allow the VF to choose one. + */ + vf->vf_flags |= VF_FLAG_SET_MAC_CAP; + + if (nvlist_get_bool(params, "mac-anti-spoof")) + vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF; + + if (nvlist_get_bool(params, "allow-promisc")) + vf->vf_flags |= VF_FLAG_PROMISC_CAP; + + vf->vf_flags |= VF_FLAG_VLAN_CAP; + + ixl_reset_vf(pf, vf); +out: + IXL_PF_UNLOCK(pf); + if (error == 0) { + snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum); + ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name); + } + + return (error); +} +#endif /* PCI_IOV */ diff --git a/sys/dev/ixl/if_ixlv.c b/sys/dev/ixl/if_ixlv.c index 8cf4a433c6da..a9ee127f75e9 100644 --- a/sys/dev/ixl/if_ixlv.c +++ b/sys/dev/ixl/if_ixlv.c @@ -48,7 +48,7 @@ /********************************************************************* * Driver version *********************************************************************/ -char ixlv_driver_version[] = "1.2.1"; +char ixlv_driver_version[] = "1.2.4"; /********************************************************************* * PCI Device ID Table @@ -398,7 +398,7 @@ ixlv_attach(device_t dev) vsi->id = sc->vsi_res->vsi_id; vsi->back = (void *)sc; - vsi->link_up = TRUE; + sc->link_up = TRUE; /* This allocates the memory and early settings */ if (ixlv_setup_queues(sc) != 0) { @@ -480,7 +480,7 @@ ixlv_detach(device_t dev) /* Make sure VLANS are not using driver */ if (vsi->ifp->if_vlantrunk != NULL) { - device_printf(dev, "Vlan in use, detach first\n"); + if_printf(vsi->ifp, "Vlan in use, detach first\n"); INIT_DBG_DEV(dev, "end"); return (EBUSY); } @@ -893,7 +893,7 @@ ixlv_init_locked(struct ixlv_sc *sc) ixl_init_tx_ring(que); - if (vsi->max_frame_size <= 2048) + if (vsi->max_frame_size <= MCLBYTES) rxr->mbuf_sz = MCLBYTES; else rxr->mbuf_sz = MJUMPAGESIZE; @@ -1383,7 +1383,7 @@ ixlv_assign_msix(struct ixlv_sc *sc) struct tx_ring *txr; int error, rid, vector = 1; #ifdef RSS - cpuset_t cpu_mask; + cpuset_t cpu_mask; #endif for (int i = 0; i < vsi->num_queues; i++, vector++, que++) { @@ -1413,7 +1413,7 @@ ixlv_assign_msix(struct ixlv_sc *sc) #endif bus_bind_intr(dev, que->res, cpu_id); que->msix = vector; - vsi->que_mask |= (u64)(1 << que->msix); + vsi->que_mask |= (u64)(1 << que->msix); TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); TASK_INIT(&que->task, 0, ixlv_handle_que, que); que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT, @@ -1718,12 +1718,12 @@ ixlv_setup_queues(struct ixlv_sc *sc) static void ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) { - struct ixl_vsi *vsi = 
ifp->if_softc; + struct ixl_vsi *vsi = arg; struct ixlv_sc *sc = vsi->back; struct ixlv_vlan_filter *v; - if (ifp->if_softc != arg) /* Not our event */ + if (ifp->if_softc != arg) /* Not our event */ return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ @@ -1755,12 +1755,12 @@ ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) static void ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) { - struct ixl_vsi *vsi = ifp->if_softc; + struct ixl_vsi *vsi = arg; struct ixlv_sc *sc = vsi->back; struct ixlv_vlan_filter *v; - int i = 0; + int i = 0; - if (ifp->if_softc != arg) + if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ @@ -2154,7 +2154,7 @@ ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; - if (!vsi->link_up) { + if (!sc->link_up) { mtx_unlock(&sc->mtx); INIT_DBG_IF(ifp, "end: link not up"); return; @@ -2395,7 +2395,7 @@ ixlv_local_timer(void *arg) } else { /* Check if we've come back from hung */ if ((vsi->active_queues & ((u64)1 << que->me)) == 0) - vsi->active_queues |= ((u64)1 << que->me); + vsi->active_queues |= ((u64)1 << que->me); } if (que->busy >= IXL_MAX_TX_BUSY) { device_printf(dev,"Warning queue %d " @@ -2426,20 +2426,19 @@ ixlv_update_link_status(struct ixlv_sc *sc) { struct ixl_vsi *vsi = &sc->vsi; struct ifnet *ifp = vsi->ifp; - device_t dev = sc->dev; - if (vsi->link_up){ + if (sc->link_up){ if (vsi->link_active == FALSE) { if (bootverbose) - device_printf(dev,"Link is Up, %d Gbps\n", - (vsi->link_speed == I40E_LINK_SPEED_40GB) ? 40:10); + if_printf(ifp,"Link is Up, %d Gbps\n", + (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10); vsi->link_active = TRUE; if_link_state_change(ifp, LINK_STATE_UP); } } else { /* Link down */ if (vsi->link_active == TRUE) { if (bootverbose) - device_printf(dev,"Link is Down\n"); + if_printf(ifp,"Link is Down\n"); if_link_state_change(ifp, LINK_STATE_DOWN); vsi->link_active = FALSE; } @@ -2657,7 +2656,6 @@ static int ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags) { struct ixlv_mac_filter *f; - device_t dev = sc->dev; /* Does one already exist? 
*/ f = ixlv_find_mac_filter(sc, macaddr); @@ -2670,7 +2668,7 @@ ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags) /* If not, get a new empty filter */ f = ixlv_get_mac_filter(sc); if (f == NULL) { - device_printf(dev, "%s: no filters available!!\n", + if_printf(sc->vsi.ifp, "%s: no filters available!!\n", __func__); return (ENOMEM); } @@ -2836,7 +2834,7 @@ ixlv_add_sysctls(struct ixlv_sc *sc) struct ixl_sysctl_info *entry = ctls; while (entry->stat != 0) { - SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name, + SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name, CTLFLAG_RD, entry->stat, entry->description); entry++; @@ -2852,34 +2850,34 @@ ixlv_add_sysctls(struct ixlv_sc *sc) txr = &(queues[q].txr); rxr = &(queues[q].rxr); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed", CTLFLAG_RD, &(queues[q].mbuf_defrag_failed), "m_defrag() failed"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped", CTLFLAG_RD, &(queues[q].dropped_pkts), "Driver dropped packets"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs", CTLFLAG_RD, &(queues[q].irqs), "irqs on this queue"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx", CTLFLAG_RD, &(queues[q].tso), "TSO"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup", CTLFLAG_RD, &(queues[q].tx_dma_setup), "Driver tx dma failure in xmit"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &(txr->no_desc), "Queue No Descriptor Available"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &(txr->total_packets), "Queue Packets Transmitted"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, &(txr->tx_bytes), "Queue Bytes Transmitted"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &(rxr->rx_packets), "Queue Packets Received"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &(rxr->rx_bytes), "Queue Bytes Received"); diff --git a/sys/dev/ixl/ixl.h b/sys/dev/ixl/ixl.h index 30e195f2980e..df8f04f557bd 100644 --- a/sys/dev/ixl/ixl.h +++ b/sys/dev/ixl/ixl.h @@ -90,6 +90,11 @@ #include #include +#ifdef PCI_IOV +#include +#include +#endif + #include "i40e_type.h" #include "i40e_prototype.h" @@ -224,6 +229,10 @@ #define IXL_QUEUE_HUNG 0x80000000 #define IXL_KEYSZ 10 +#define IXL_VF_MAX_BUFFER 0x3F80 +#define IXL_VF_MAX_HDR_BUFFER 0x840 +#define IXL_VF_MAX_FRAME 0x3FFF + /* ERJ: hardware can support ~1.5k filters between all functions */ #define IXL_MAX_FILTERS 256 #define IXL_MAX_TX_BUSY 10 @@ -265,6 +274,35 @@ #define IXL_FLAGS_KEEP_TSO4 (1 << 0) #define IXL_FLAGS_KEEP_TSO6 (1 << 1) +#define IXL_VF_RESET_TIMEOUT 100 + +#define IXL_VSI_DATA_PORT 0x01 + +#define IXLV_MAX_QUEUES 16 +#define IXL_MAX_VSI_QUEUES (2 * (I40E_VSILAN_QTABLE_MAX_INDEX + 1)) + +#define IXL_RX_CTX_BASE_UNITS 128 +#define IXL_TX_CTX_BASE_UNITS 128 + +#define IXL_VPINT_LNKLSTN_REG(hw, vector, vf_num) \ + I40E_VPINT_LNKLSTN(((vector) - 1) + \ + 
(((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num))) + +#define IXL_VFINT_DYN_CTLN_REG(hw, vector, vf_num) \ + I40E_VFINT_DYN_CTLN(((vector) - 1) + \ + (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num))) + +#define IXL_PF_PCI_CIAA_VF_DEVICE_STATUS 0xAA + +#define IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK 0x20 + +#define IXL_GLGEN_VFLRSTAT_INDEX(glb_vf) ((glb_vf) / 32) +#define IXL_GLGEN_VFLRSTAT_MASK(glb_vf) (1 << ((glb_vf) % 32)) + +#define IXL_MAX_ITR_IDX 3 + +#define IXL_END_OF_INTR_LNKLST 0x7FF + #define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx) #define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx) @@ -461,19 +499,22 @@ struct ixl_vsi { struct ifmedia media; u64 que_mask; int id; + u16 vsi_num; u16 msix_base; /* station base MSIX vector */ + u16 first_queue; u16 num_queues; u16 rx_itr_setting; u16 tx_itr_setting; struct ixl_queue *queues; /* head of queues */ bool link_active; u16 seid; + u16 uplink_seid; + u16 downlink_seid; u16 max_frame_size; - u32 link_speed; - bool link_up; /* MAC/VLAN Filter list */ struct ixl_ftl_head ftl; + u16 num_macs; struct i40e_aqc_vsi_properties_data info; @@ -505,6 +546,7 @@ struct ixl_vsi { /* Misc. */ u64 active_queues; u64 flags; + struct sysctl_oid *vsi_node; }; /* @@ -543,7 +585,7 @@ ixl_get_filter(struct ixl_vsi *vsi) ** Compare two ethernet addresses */ static inline bool -cmp_etheraddr(u8 *ea1, u8 *ea2) +cmp_etheraddr(const u8 *ea1, const u8 *ea2) { bool cmp = FALSE; diff --git a/sys/dev/ixl/ixl_pf.h b/sys/dev/ixl/ixl_pf.h index 7c3d3bbdeacd..4f0bddad76a6 100644 --- a/sys/dev/ixl/ixl_pf.h +++ b/sys/dev/ixl/ixl_pf.h @@ -36,6 +36,22 @@ #ifndef _IXL_PF_H_ #define _IXL_PF_H_ +#define VF_FLAG_ENABLED 0x01 +#define VF_FLAG_SET_MAC_CAP 0x02 +#define VF_FLAG_VLAN_CAP 0x04 +#define VF_FLAG_PROMISC_CAP 0x08 +#define VF_FLAG_MAC_ANTI_SPOOF 0x10 + +struct ixl_vf { + struct ixl_vsi vsi; + uint32_t vf_flags; + + uint8_t mac[ETHER_ADDR_LEN]; + uint16_t vf_num; + + struct sysctl_ctx_list ctx; +}; + /* Physical controller structure */ struct ixl_pf { struct i40e_hw hw; @@ -64,15 +80,18 @@ struct ixl_pf { struct task adminq; struct taskqueue *tq; + bool link_up; + u32 link_speed; int advertised_speed; int fc; /* local flow ctrl setting */ /* - ** VSI - Stations: + ** Network interfaces ** These are the traffic class holders, and ** will have a stack interface and queues ** associated with them. - ** NOTE: for now using just one, so embed it. + ** NOTE: The PF has only a single interface, + ** so it is embedded in the PF struct. */ struct ixl_vsi vsi; @@ -84,8 +103,31 @@ struct ixl_pf { struct i40e_hw_port_stats stats; struct i40e_hw_port_stats stats_offsets; bool stat_offsets_loaded; + + struct ixl_vf *vfs; + int num_vfs; + uint16_t veb_seid; + struct task vflr_task; + int vc_debug_lvl; }; +#define IXL_SET_ADVERTISE_HELP \ +"Control link advertise speed:\n" \ +"\tFlags:\n" \ +"\t\t0x1 - advertise 100 Mb\n" \ +"\t\t0x2 - advertise 1G\n" \ +"\t\t0x4 - advertise 10G\n" \ +"\t\t0x8 - advertise 20G\n\n" \ +"\tDoes not work on 40G devices." + +#define I40E_VC_DEBUG(pf, level, ...) 
\ + do { \ + if ((pf)->vc_debug_lvl >= (level)) \ + device_printf((pf)->dev, __VA_ARGS__); \ + } while (0) + +#define i40e_send_vf_nack(pf, vf, op, st) \ + ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__) #define IXL_PF_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->pf_mtx, _name, "IXL PF Lock", MTX_DEF) diff --git a/sys/dev/ixl/ixl_txrx.c b/sys/dev/ixl/ixl_txrx.c index 5942e7c09cf2..43a19490c074 100755 --- a/sys/dev/ixl/ixl_txrx.c +++ b/sys/dev/ixl/ixl_txrx.c @@ -68,7 +68,6 @@ static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *, /* ** Multiqueue Transmit driver -** */ int ixl_mq_start(struct ifnet *ifp, struct mbuf *m) @@ -112,7 +111,7 @@ ixl_mq_start(struct ifnet *ifp, struct mbuf *m) err = drbr_enqueue(ifp, txr->br, m); if (err) - return(err); + return (err); if (IXL_TX_TRYLOCK(txr)) { ixl_mq_start_locked(ifp, txr); IXL_TX_UNLOCK(txr); @@ -488,15 +487,16 @@ ixl_allocate_tx_data(struct ixl_queue *que) void ixl_init_tx_ring(struct ixl_queue *que) { - struct tx_ring *txr = &que->txr; - struct ixl_tx_buf *buf; #ifdef DEV_NETMAP struct netmap_adapter *na = NA(que->vsi->ifp); struct netmap_slot *slot; #endif /* DEV_NETMAP */ + struct tx_ring *txr = &que->txr; + struct ixl_tx_buf *buf; /* Clear the old ring contents */ IXL_TX_LOCK(txr); + #ifdef DEV_NETMAP /* * (under lock): if in netmap mode, do some consistency @@ -1158,7 +1158,7 @@ ixl_init_rx_ring(struct ixl_queue *que) struct ixl_rx_buf *buf; bus_dma_segment_t pseg[1], hseg[1]; int rsize, nsegs, error = 0; -#ifdef DEV_NETMAP +#ifdef DEV_NETMAP struct netmap_adapter *na = NA(que->vsi->ifp); struct netmap_slot *slot; #endif /* DEV_NETMAP */ @@ -1222,7 +1222,6 @@ ixl_init_rx_ring(struct ixl_queue *que) continue; } #endif /* DEV_NETMAP */ - /* ** Don't allocate mbufs if not ** doing header split, its wasteful diff --git a/sys/dev/ixl/ixlv.h b/sys/dev/ixl/ixlv.h index 77a02fa7e879..840e17af71da 100644 --- a/sys/dev/ixl/ixlv.h +++ b/sys/dev/ixl/ixlv.h @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright (c) 2013-2014, Intel Corporation + Copyright (c) 2013-2015, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without @@ -119,6 +119,9 @@ struct ixlv_sc { int msix; int if_flags; + bool link_up; + u32 link_speed; + struct mtx mtx; u32 qbase; diff --git a/sys/dev/ixl/ixlvc.c b/sys/dev/ixl/ixlvc.c index a0142bad7d69..02192f3f00c7 100644 --- a/sys/dev/ixl/ixlvc.c +++ b/sys/dev/ixl/ixlvc.c @@ -362,7 +362,7 @@ ixlv_configure_queues(struct ixlv_sc *sc) struct i40e_virtchnl_vsi_queue_config_info *vqci; struct i40e_virtchnl_queue_pair_info *vqpi; - + pairs = vsi->num_queues; len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); @@ -788,14 +788,11 @@ ixlv_request_stats(struct ixlv_sc *sc) void ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es) { - struct ixl_vsi *vsi; + struct ixl_vsi *vsi = &sc->vsi; uint64_t tx_discards; - int i; - - vsi = &sc->vsi; tx_discards = es->tx_discards; - for (i = 0; i < sc->vsi.num_queues; i++) + for (int i = 0; i < vsi->num_queues; i++) tx_discards += sc->vsi.queues[i].txr.br->br_drops; /* Update ifnet stats */ @@ -816,7 +813,7 @@ ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es) IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); IXL_SET_COLLISIONS(vsi, 0); - sc->vsi.eth_stats = *es; + vsi->eth_stats = *es; } /* @@ -845,9 +842,9 @@ ixlv_vc_completion(struct ixlv_sc *sc, vpe->event_data.link_event.link_status, vpe->event_data.link_event.link_speed); #endif - vsi->link_up = + sc->link_up = vpe->event_data.link_event.link_status; - vsi->link_speed = + sc->link_speed = vpe->event_data.link_event.link_speed; ixlv_update_link_status(sc); break; diff --git a/sys/dev/pci/pci_iov.c b/sys/dev/pci/pci_iov.c index ff5a833c3702..e256a5d126f1 100644 --- a/sys/dev/pci/pci_iov.c +++ b/sys/dev/pci/pci_iov.c @@ -586,7 +586,7 @@ pci_iov_enumerate_vfs(struct pci_devinfo *dinfo, const nvlist_t *config, * VFs. 
*/ if (nvlist_get_bool(iov_config, "passthrough")) - device_set_devclass(vf, "ppt"); + device_set_devclass_fixed(vf, "ppt"); vfinfo = device_get_ivars(vf); diff --git a/sys/dev/usb/controller/dwc_otg_fdt.c b/sys/dev/usb/controller/dwc_otg_fdt.c index a6209ffc0256..73df72e78238 100644 --- a/sys/dev/usb/controller/dwc_otg_fdt.c +++ b/sys/dev/usb/controller/dwc_otg_fdt.c @@ -53,15 +53,11 @@ __FBSDID("$FreeBSD$"); #include #include +#include static device_probe_t dwc_otg_probe; -static device_attach_t dwc_otg_attach; static device_detach_t dwc_otg_detach; -struct dwc_otg_super_softc { - struct dwc_otg_softc sc_otg; /* must be first */ -}; - static int dwc_otg_probe(device_t dev) { @@ -74,13 +70,13 @@ dwc_otg_probe(device_t dev) device_set_desc(dev, "DWC OTG 2.0 integrated USB controller"); - return (0); + return (BUS_PROBE_DEFAULT); } -static int +int dwc_otg_attach(device_t dev) { - struct dwc_otg_super_softc *sc = device_get_softc(dev); + struct dwc_otg_fdt_softc *sc = device_get_softc(dev); char usb_mode[24]; int err; int rid; @@ -161,7 +157,7 @@ dwc_otg_attach(device_t dev) static int dwc_otg_detach(device_t dev) { - struct dwc_otg_super_softc *sc = device_get_softc(dev); + struct dwc_otg_fdt_softc *sc = device_get_softc(dev); device_t bdev; int err; @@ -212,10 +208,10 @@ static device_method_t dwc_otg_methods[] = { DEVMETHOD_END }; -static driver_t dwc_otg_driver = { +driver_t dwc_otg_driver = { .name = "dwcotg", .methods = dwc_otg_methods, - .size = sizeof(struct dwc_otg_super_softc), + .size = sizeof(struct dwc_otg_fdt_softc), }; static devclass_t dwc_otg_devclass; diff --git a/sys/dev/usb/controller/dwc_otg_fdt.h b/sys/dev/usb/controller/dwc_otg_fdt.h new file mode 100644 index 000000000000..9e01118c80d6 --- /dev/null +++ b/sys/dev/usb/controller/dwc_otg_fdt.h @@ -0,0 +1,39 @@ +/*- + * Copyright (c) 2012 Hans Petter Selasky. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#ifndef _DWC_OTG_FDT_H_ +#define _DWC_OTG_FDT_H_ + +struct dwc_otg_fdt_softc { + struct dwc_otg_softc sc_otg; /* must be first */ +}; + +extern driver_t dwc_otg_driver; + +device_attach_t dwc_otg_attach; + +#endif diff --git a/sys/dev/usb/input/uhid.c b/sys/dev/usb/input/uhid.c index ece5e9573309..b0fe3a012845 100644 --- a/sys/dev/usb/input/uhid.c +++ b/sys/dev/usb/input/uhid.c @@ -518,7 +518,9 @@ uhid_open(struct usb_fifo *fifo, int fflags) */ if (fflags & FREAD) { /* reset flags */ + mtx_lock(&sc->sc_mtx); sc->sc_flags &= ~UHID_FLAG_IMMED; + mtx_unlock(&sc->sc_mtx); if (usb_fifo_alloc_buffer(fifo, sc->sc_isize + 1, UHID_FRAME_NUM)) { diff --git a/sys/dev/usb/serial/usb_serial.c b/sys/dev/usb/serial/usb_serial.c index bb641efe1fd5..ae4ed3a4339b 100644 --- a/sys/dev/usb/serial/usb_serial.c +++ b/sys/dev/usb/serial/usb_serial.c @@ -1102,14 +1102,19 @@ ucom_cfg_status_change(struct usb_proc_msg *_task) sc->sc_msr = new_msr; sc->sc_lsr = new_lsr; - /* time pulse counting support */ + /* + * Time pulse counting support. Note that both CTS and DCD are + * active-low signals. The status bit is high to indicate that + * the signal on the line is low, which corresponds to a PPS + * clear event. + */ switch(ucom_pps_mode) { case 1: if ((sc->sc_pps.ppsparam.mode & PPS_CAPTUREBOTH) && (msr_delta & SER_CTS)) { pps_capture(&sc->sc_pps); pps_event(&sc->sc_pps, (sc->sc_msr & SER_CTS) ? - PPS_CAPTUREASSERT : PPS_CAPTURECLEAR); + PPS_CAPTURECLEAR : PPS_CAPTUREASSERT); } break; case 2: @@ -1117,7 +1122,7 @@ ucom_cfg_status_change(struct usb_proc_msg *_task) (msr_delta & SER_DCD)) { pps_capture(&sc->sc_pps); pps_event(&sc->sc_pps, (sc->sc_msr & SER_DCD) ? - PPS_CAPTUREASSERT : PPS_CAPTURECLEAR); + PPS_CAPTURECLEAR : PPS_CAPTUREASSERT); } break; default: diff --git a/sys/dev/vt/hw/ofwfb/ofwfb.c b/sys/dev/vt/hw/ofwfb/ofwfb.c index ad5b664a9d71..acad5d4d86c6 100644 --- a/sys/dev/vt/hw/ofwfb/ofwfb.c +++ b/sys/dev/vt/hw/ofwfb/ofwfb.c @@ -54,6 +54,7 @@ struct ofwfb_softc { phandle_t sc_node; ihandle_t sc_handle; bus_space_tag_t sc_memt; + int iso_palette; }; static vd_probe_t ofwfb_probe; @@ -73,6 +74,12 @@ static const struct vt_driver vt_ofwfb_driver = { .vd_priority = VD_PRIORITY_GENERIC+1, }; +static unsigned char ofw_colors[16] = { + /* See "16-color Text Extension" Open Firmware document, page 4 */ + 0, 4, 2, 6, 1, 5, 3, 7, + 8, 12, 10, 14, 9, 13, 11, 15 +}; + static struct ofwfb_softc ofwfb_conssoftc; VT_DRIVER_DECLARE(vt_ofwfb, vt_ofwfb_driver); @@ -121,6 +128,11 @@ ofwfb_bitblt_bitmap(struct vt_device *vd, const struct vt_window *vw, bgc = sc->fb_cmap[bg]; b = m = 0; + if (((struct ofwfb_softc *)vd->vd_softc)->iso_palette) { + fg = ofw_colors[fg]; + bg = ofw_colors[bg]; + } + line = (sc->fb_stride * y) + x * sc->fb_bpp/8; if (mask == NULL && sc->fb_bpp == 8 && (width % 8 == 0)) { /* Don't try to put off screen pixels */ @@ -255,7 +267,7 @@ static void ofwfb_initialize(struct vt_device *vd) { struct ofwfb_softc *sc = vd->vd_softc; - int i; + int i, err; cell_t retval; uint32_t oldpix; @@ -263,18 +275,24 @@ ofwfb_initialize(struct vt_device *vd) * Set up the color map */ + sc->iso_palette = 0; switch (sc->fb.fb_bpp) { case 8: vt_generate_cons_palette(sc->fb.fb_cmap, COLOR_FORMAT_RGB, 255, 16, 255, 8, 255, 0); for (i = 0; i < 16; i++) { - OF_call_method("color!", sc->sc_handle, 4, 1, + err = OF_call_method("color!", sc->sc_handle, 4, 1, (cell_t)((sc->fb.fb_cmap[i] >> 16) & 0xff), (cell_t)((sc->fb.fb_cmap[i] >> 8) & 0xff), (cell_t)((sc->fb.fb_cmap[i] >> 0) & 0xff), (cell_t)i, 
&retval); + if (err) + break; } + if (i != 16) + sc->iso_palette = 1; + break; case 32: diff --git a/sys/dev/wpi/if_wpi.c b/sys/dev/wpi/if_wpi.c index 87e8b8b08512..fa1948c09b76 100644 --- a/sys/dev/wpi/if_wpi.c +++ b/sys/dev/wpi/if_wpi.c @@ -447,8 +447,6 @@ wpi_attach(device_t dev) ic->ic_cryptocaps = IEEE80211_CRYPTO_AES_CCM; - ic->ic_flags |= IEEE80211_F_DATAPAD; - /* * Read in the eeprom and also setup the channels for * net80211. We don't set the rates as net80211 does this for us @@ -2224,8 +2222,6 @@ wpi_intr(void *arg) static int wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) { - struct ifnet *ifp = sc->sc_ifp; - struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_frame *wh; struct wpi_tx_cmd *cmd; struct wpi_tx_data *data; @@ -2233,16 +2229,22 @@ wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) struct wpi_tx_ring *ring; struct mbuf *m1; bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER]; - int error, i, hdrspace, nsegs, totlen; + int error, i, hdrlen, nsegs, totlen, pad; WPI_LOCK_ASSERT(sc); DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__); wh = mtod(buf->m, struct ieee80211_frame *); - hdrspace = ieee80211_anyhdrspace(ic, wh); + hdrlen = ieee80211_anyhdrsize(wh); totlen = buf->m->m_pkthdr.len; + if (hdrlen & 3) { + /* First segment length must be a multiple of 4. */ + pad = 4 - (hdrlen & 3); + } else + pad = 0; + ring = &sc->txq[buf->ac]; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; @@ -2257,8 +2259,8 @@ wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) memcpy(cmd->data, buf->data, buf->size); /* Save and trim IEEE802.11 header. */ - memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrspace); - m_adj(buf->m, hdrspace); + memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen); + m_adj(buf->m, hdrlen); error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m, segs, &nsegs, BUS_DMA_NOWAIT); @@ -2296,10 +2298,10 @@ wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf) __func__, ring->qid, ring->cur, totlen, nsegs); /* Fill TX descriptor. */ - desc->nsegs = WPI_PAD32(totlen) << 4 | (1 + nsegs); + desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs); /* First DMA segment is used by the TX command. */ desc->segs[0].addr = htole32(data->cmd_paddr); - desc->segs[0].len = htole32(4 + buf->size + hdrspace); + desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad); /* Other DMA segments are for data payload. */ seg = &segs[0]; for (i = 1; i <= nsegs; i++) { @@ -2345,10 +2347,9 @@ wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) uint32_t flags; uint16_t qos; uint8_t tid, type; - int ac, error, rate, ismcast, hdrlen, totlen; + int ac, error, rate, ismcast, totlen; wh = mtod(m, struct ieee80211_frame *); - hdrlen = ieee80211_anyhdrsize(wh); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); @@ -2392,12 +2393,12 @@ wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni) /* 802.11 header may have moved. 
*/ wh = mtod(m, struct ieee80211_frame *); } - totlen = m->m_pkthdr.len - (hdrlen & 3); + totlen = m->m_pkthdr.len; if (ieee80211_radiotap_active_vap(vap)) { struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; - tap->wt_flags = IEEE80211_RADIOTAP_F_DATAPAD; + tap->wt_flags = 0; tap->wt_rate = rate; if (k != NULL) tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; @@ -2514,12 +2515,11 @@ wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni, struct wpi_buf tx_data; uint32_t flags; uint8_t type; - int ac, rate, hdrlen, totlen; + int ac, rate, totlen; wh = mtod(m, struct ieee80211_frame *); - hdrlen = ieee80211_anyhdrsize(wh); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; - totlen = m->m_pkthdr.len - (hdrlen & 3); + totlen = m->m_pkthdr.len; ac = params->ibp_pri & 3; @@ -2541,8 +2541,6 @@ wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni, tap->wt_flags = 0; tap->wt_rate = rate; - if (params->ibp_flags & IEEE80211_BPF_DATAPAD) - tap->wt_flags |= IEEE80211_RADIOTAP_F_DATAPAD; ieee80211_radiotap_tx(vap, m); } @@ -2926,7 +2924,7 @@ wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni) return EINVAL; memset(&node, 0, sizeof node); - IEEE80211_ADDR_COPY(node.macaddr, ni->ni_bssid); + IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); node.id = wn->id; node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ? wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1]; @@ -2993,7 +2991,7 @@ wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni) } memset(&node, 0, sizeof node); - IEEE80211_ADDR_COPY(node.macaddr, ni->ni_bssid); + IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); node.count = 1; error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1); @@ -3404,6 +3402,7 @@ wpi_config(struct wpi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; + struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t flags; int error; @@ -3425,7 +3424,7 @@ wpi_config(struct wpi_softc *sc) /* Configure adapter. */ memset(&sc->rxon, 0, sizeof (struct wpi_rxon)); - IEEE80211_ADDR_COPY(sc->rxon.myaddr, IF_LLADDR(ifp)); + IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr); /* Set default channel. 
*/ sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_curchan); @@ -3559,6 +3558,7 @@ wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_scan_state *ss = ic->ic_scan; + struct ieee80211vap *vap = ss->ss_vap; struct wpi_scan_hdr *hdr; struct wpi_cmd_data *tx; struct wpi_scan_essid *essids; @@ -3645,7 +3645,7 @@ wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c) IEEE80211_FC0_SUBTYPE_PROBE_REQ; wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); - IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp)); + IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); *(uint16_t *)&wh->i_dur[0] = 0; /* filled by h/w */ *(uint16_t *)&wh->i_seq[0] = 0; /* filled by h/w */ diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c index 075139b922c9..0a8b79a4a927 100644 --- a/sys/kern/imgact_elf.c +++ b/sys/kern/imgact_elf.c @@ -33,12 +33,13 @@ __FBSDID("$FreeBSD$"); #include "opt_capsicum.h" #include "opt_compat.h" -#include "opt_core.h" +#include "opt_gzio.h" #include #include #include #include +#include #include #include #include @@ -69,8 +70,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include - #include #include #include @@ -105,11 +104,7 @@ static Elf_Word __elfN(untrans_prot)(vm_prot_t); SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0, ""); -#ifdef COMPRESS_USER_CORES -static int compress_core(gzFile, char *, char *, unsigned int, - struct thread * td); -#endif -#define CORE_BUF_SIZE (16 * 1024) +#define CORE_BUF_SIZE (16 * 1024) int __elfN(fallback_brand) = -1; SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, @@ -1067,11 +1062,23 @@ struct note_info { TAILQ_HEAD(note_info_list, note_info); +/* Coredump output parameters. */ +struct coredump_params { + off_t offset; + struct ucred *active_cred; + struct ucred *file_cred; + struct thread *td; + struct vnode *vp; + struct gzio_stream *gzs; +}; + static void cb_put_phdr(vm_map_entry_t, void *); static void cb_size_segment(vm_map_entry_t, void *); +static int core_write(struct coredump_params *, void *, size_t, off_t, + enum uio_seg); static void each_writable_segment(struct thread *, segment_callback, void *); -static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *, - int, void *, size_t, struct note_info_list *, size_t, gzFile); +static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t, + struct note_info_list *, size_t); static void __elfN(prepare_notes)(struct thread *, struct note_info_list *, size_t *); static void __elfN(puthdr)(struct thread *, void *, size_t, int, size_t); @@ -1095,42 +1102,60 @@ static void note_procstat_rlimit(void *, struct sbuf *, size_t *); static void note_procstat_umask(void *, struct sbuf *, size_t *); static void note_procstat_vmmap(void *, struct sbuf *, size_t *); -#ifdef COMPRESS_USER_CORES -extern int compress_user_cores; +#ifdef GZIO extern int compress_user_cores_gzlevel; -#endif +/* + * Write out a core segment to the compression stream. 
+ */ static int -core_output(struct vnode *vp, void *base, size_t len, off_t offset, - struct ucred *active_cred, struct ucred *file_cred, - struct thread *td, char *core_buf, gzFile gzfile) { - +compress_chunk(struct coredump_params *p, char *base, char *buf, u_int len) +{ + u_int chunk_len; int error; - if (gzfile) { -#ifdef COMPRESS_USER_CORES - error = compress_core(gzfile, base, core_buf, len, td); -#else - panic("shouldn't be here"); -#endif - } else { - error = vn_rdwr_inchunks(UIO_WRITE, vp, base, len, offset, - UIO_USERSPACE, IO_UNIT | IO_DIRECT | IO_RANGELOCKED, - active_cred, file_cred, NULL, td); + + while (len > 0) { + chunk_len = MIN(len, CORE_BUF_SIZE); + copyin(base, buf, chunk_len); + error = gzio_write(p->gzs, buf, chunk_len); + if (error != 0) + break; + base += chunk_len; + len -= chunk_len; } return (error); } -/* Coredump output parameters for sbuf drain routine. */ -struct sbuf_drain_core_params { - off_t offset; - struct ucred *active_cred; - struct ucred *file_cred; - struct thread *td; - struct vnode *vp; -#ifdef COMPRESS_USER_CORES - gzFile gzfile; +static int +core_gz_write(void *base, size_t len, off_t offset, void *arg) +{ + + return (core_write((struct coredump_params *)arg, base, len, offset, + UIO_SYSSPACE)); +} +#endif /* GZIO */ + +static int +core_write(struct coredump_params *p, void *base, size_t len, off_t offset, + enum uio_seg seg) +{ + + return (vn_rdwr_inchunks(UIO_WRITE, p->vp, base, len, offset, + seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED, + p->active_cred, p->file_cred, NULL, p->td)); +} + +static int +core_output(void *base, size_t len, off_t offset, struct coredump_params *p, + void *tmpbuf) +{ + +#ifdef GZIO + if (p->gzs != NULL) + return (compress_chunk(p, base, tmpbuf, len)); #endif -}; + return (core_write(p, base, len, offset, UIO_USERSPACE)); +} /* * Drain into a core file. 
@@ -1138,10 +1163,10 @@ struct sbuf_drain_core_params { static int sbuf_drain_core_output(void *arg, const char *data, int len) { - struct sbuf_drain_core_params *p; + struct coredump_params *p; int error, locked; - p = (struct sbuf_drain_core_params *)arg; + p = (struct coredump_params *)arg; /* * Some kern_proc out routines that print to this sbuf may @@ -1154,16 +1179,13 @@ sbuf_drain_core_output(void *arg, const char *data, int len) locked = PROC_LOCKED(p->td->td_proc); if (locked) PROC_UNLOCK(p->td->td_proc); -#ifdef COMPRESS_USER_CORES - if (p->gzfile != Z_NULL) - error = compress_core(p->gzfile, NULL, __DECONST(char *, data), - len, p->td); +#ifdef GZIO + if (p->gzs != NULL) + error = gzio_write(p->gzs, __DECONST(char *, data), len); else #endif - error = vn_rdwr_inchunks(UIO_WRITE, p->vp, - __DECONST(void *, data), len, p->offset, UIO_SYSSPACE, - IO_UNIT | IO_DIRECT | IO_RANGELOCKED, p->active_cred, - p->file_cred, NULL, p->td); + error = core_write(p, __DECONST(void *, data), len, p->offset, + UIO_SYSSPACE); if (locked) PROC_LOCK(p->td->td_proc); if (error != 0) @@ -1192,42 +1214,16 @@ __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags) int error = 0; struct sseg_closure seginfo; struct note_info_list notelst; + struct coredump_params params; struct note_info *ninfo; - void *hdr; + void *hdr, *tmpbuf; size_t hdrsize, notesz, coresize; + boolean_t compress; - gzFile gzfile = Z_NULL; - char *core_buf = NULL; -#ifdef COMPRESS_USER_CORES - char gzopen_flags[8]; - char *p; - int doing_compress = flags & IMGACT_CORE_COMPRESS; -#endif - + compress = (flags & IMGACT_CORE_COMPRESS) != 0; hdr = NULL; TAILQ_INIT(¬elst); -#ifdef COMPRESS_USER_CORES - if (doing_compress) { - p = gzopen_flags; - *p++ = 'w'; - if (compress_user_cores_gzlevel >= 0 && - compress_user_cores_gzlevel <= 9) - *p++ = '0' + compress_user_cores_gzlevel; - *p = 0; - gzfile = gz_open("", gzopen_flags, vp); - if (gzfile == Z_NULL) { - error = EFAULT; - goto done; - } - core_buf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO); - if (!core_buf) { - error = ENOMEM; - goto done; - } - } -#endif - /* Size the program segments. */ seginfo.count = 0; seginfo.size = 0; @@ -1254,6 +1250,28 @@ __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags) goto done; } + /* Set up core dump parameters. */ + params.offset = 0; + params.active_cred = cred; + params.file_cred = NOCRED; + params.td = td; + params.vp = vp; + params.gzs = NULL; + + tmpbuf = NULL; +#ifdef GZIO + /* Create a compression stream if necessary. */ + if (compress) { + params.gzs = gzio_init(core_gz_write, GZIO_DEFLATE, + CORE_BUF_SIZE, compress_user_cores_gzlevel, ¶ms); + if (params.gzs == NULL) { + error = EFAULT; + goto done; + } + tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO); + } +#endif + /* * Allocate memory for building the header, fill it up, * and write it out following the notes. @@ -1263,8 +1281,8 @@ __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags) error = EINVAL; goto done; } - error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize, - ¬elst, notesz, gzfile); + error = __elfN(corehdr)(¶ms, seginfo.count, hdr, hdrsize, ¬elst, + notesz); /* Write the contents of all of the writable segments. 
*/ if (error == 0) { @@ -1275,13 +1293,17 @@ __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags) php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1; offset = round_page(hdrsize + notesz); for (i = 0; i < seginfo.count; i++) { - error = core_output(vp, (caddr_t)(uintptr_t)php->p_vaddr, - php->p_filesz, offset, cred, NOCRED, curthread, core_buf, gzfile); + error = core_output((caddr_t)(uintptr_t)php->p_vaddr, + php->p_filesz, offset, ¶ms, tmpbuf); if (error != 0) break; offset += php->p_filesz; php++; } +#ifdef GZIO + if (error == 0 && compress) + error = gzio_flush(params.gzs); +#endif } if (error) { log(LOG_WARNING, @@ -1290,11 +1312,11 @@ __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags) } done: -#ifdef COMPRESS_USER_CORES - if (core_buf) - free(core_buf, M_TEMP); - if (gzfile) - gzclose(gzfile); +#ifdef GZIO + if (compress) { + free(tmpbuf, M_TEMP); + gzio_fini(params.gzs); + } #endif while ((ninfo = TAILQ_FIRST(¬elst)) != NULL) { TAILQ_REMOVE(¬elst, ninfo, link); @@ -1419,29 +1441,19 @@ each_writable_segment(td, func, closure) * the page boundary. */ static int -__elfN(corehdr)(struct thread *td, struct vnode *vp, struct ucred *cred, - int numsegs, void *hdr, size_t hdrsize, struct note_info_list *notelst, - size_t notesz, gzFile gzfile) +__elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr, + size_t hdrsize, struct note_info_list *notelst, size_t notesz) { - struct sbuf_drain_core_params params; struct note_info *ninfo; struct sbuf *sb; int error; /* Fill in the header. */ bzero(hdr, hdrsize); - __elfN(puthdr)(td, hdr, hdrsize, numsegs, notesz); + __elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz); - params.offset = 0; - params.active_cred = cred; - params.file_cred = NOCRED; - params.td = td; - params.vp = vp; -#ifdef COMPRESS_USER_CORES - params.gzfile = gzfile; -#endif sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN); - sbuf_set_drain(sb, sbuf_drain_core_output, ¶ms); + sbuf_set_drain(sb, sbuf_drain_core_output, p); sbuf_start_section(sb, NULL); sbuf_bcat(sb, hdr, hdrsize); TAILQ_FOREACH(ninfo, notelst, link) @@ -2108,58 +2120,6 @@ static struct execsw __elfN(execsw) = { }; EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw)); -#ifdef COMPRESS_USER_CORES -/* - * Compress and write out a core segment for a user process. - * - * 'inbuf' is the starting address of a VM segment in the process' address - * space that is to be compressed and written out to the core file. 'dest_buf' - * is a buffer in the kernel's address space. The segment is copied from - * 'inbuf' to 'dest_buf' first before being processed by the compression - * routine gzwrite(). This copying is necessary because the content of the VM - * segment may change between the compression pass and the crc-computation pass - * in gzwrite(). This is because realtime threads may preempt the UNIX kernel. - * - * If inbuf is NULL it is assumed that data is already copied to 'dest_buf'. - */ -static int -compress_core (gzFile file, char *inbuf, char *dest_buf, unsigned int len, - struct thread *td) -{ - int len_compressed; - int error = 0; - unsigned int chunk_len; - - while (len) { - if (inbuf != NULL) { - chunk_len = (len > CORE_BUF_SIZE) ? 
CORE_BUF_SIZE : len; - copyin(inbuf, dest_buf, chunk_len); - inbuf += chunk_len; - } else { - chunk_len = len; - } - len_compressed = gzwrite(file, dest_buf, chunk_len); - - EVENTHANDLER_INVOKE(app_coredump_progress, td, len_compressed); - - if ((unsigned int)len_compressed != chunk_len) { - log(LOG_WARNING, - "compress_core: length mismatch (0x%x returned, " - "0x%x expected)\n", len_compressed, chunk_len); - EVENTHANDLER_INVOKE(app_coredump_error, td, - "compress_core: length mismatch %x -> %x", - chunk_len, len_compressed); - error = EFAULT; - break; - } - len -= chunk_len; - maybe_yield(); - } - - return (error); -} -#endif /* COMPRESS_USER_CORES */ - static vm_prot_t __elfN(trans_prot)(Elf_Word flags) { diff --git a/sys/kern/kern_gzio.c b/sys/kern/kern_gzio.c index 15dc3015ca25..a4974a729c3f 100644 --- a/sys/kern/kern_gzio.c +++ b/sys/kern/kern_gzio.c @@ -1,400 +1,223 @@ -/* - * $Id: kern_gzio.c,v 1.6 2008-10-18 22:54:45 lbazinet Exp $ +/*- + * Copyright (c) 2014 Mark Johnston * - * core_gzip.c -- gzip routines used in compressing user process cores + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. * - * This file is derived from src/lib/libz/gzio.c in FreeBSD. + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. */ -/* gzio.c -- IO on .gz files - * Copyright (C) 1995-1998 Jean-loup Gailly. 
- * For conditions of distribution and use, see copyright notice in zlib.h - * - */ - -/* @(#) $FreeBSD$ */ +#include +__FBSDID("$FreeBSD$"); #include -#include + +#include +#include #include -#include -#include -#include + #include -#include -#include -#include +#define KERN_GZ_HDRLEN 10 /* gzip header length */ +#define KERN_GZ_TRAILERLEN 8 /* gzip trailer length */ +#define KERN_GZ_MAGIC1 0x1f /* first magic byte */ +#define KERN_GZ_MAGIC2 0x8b /* second magic byte */ -#define GZ_HEADER_LEN 10 +MALLOC_DEFINE(M_GZIO, "gzio", "zlib state"); -#ifndef Z_BUFSIZE -# ifdef MAXSEG_64K -# define Z_BUFSIZE 4096 /* minimize memory usage for 16-bit DOS */ -# else -# define Z_BUFSIZE 16384 -# endif -#endif -#ifndef Z_PRINTF_BUFSIZE -# define Z_PRINTF_BUFSIZE 4096 -#endif +struct gzio_stream { + uint8_t * gz_buffer; /* output buffer */ + size_t gz_bufsz; /* total buffer size */ + off_t gz_off; /* offset into the output stream */ + enum gzio_mode gz_mode; /* stream mode */ + uint32_t gz_crc; /* stream CRC32 */ + gzio_cb gz_cb; /* output callback */ + void * gz_arg; /* private callback arg */ + z_stream gz_stream; /* zlib state */ +}; -#define ALLOC(size) malloc(size, M_TEMP, M_WAITOK | M_ZERO) -#define TRYFREE(p) {if (p) free(p, M_TEMP);} +static void * gz_alloc(void *, u_int, u_int); +static void gz_free(void *, void *); +static int gz_write(struct gzio_stream *, void *, u_int, int); -static int gz_magic[2] = {0x1f, 0x8b}; /* gzip magic header */ - -/* gzip flag byte */ -#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */ -#define HEAD_CRC 0x02 /* bit 1 set: header CRC present */ -#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ -#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ -#define COMMENT 0x10 /* bit 4 set: file comment present */ -#define RESERVED 0xE0 /* bits 5..7: reserved */ - -typedef struct gz_stream { - z_stream stream; - int z_err; /* error code for last stream operation */ - int z_eof; /* set if end of input file */ - struct vnode *file; /* vnode pointer of .gz file */ - Byte *inbuf; /* input buffer */ - Byte *outbuf; /* output buffer */ - uLong crc; /* crc32 of uncompressed data */ - char *msg; /* error message */ - char *path; /* path name for debugging only */ - int transparent; /* 1 if input file is not a .gz file */ - char mode; /* 'w' or 'r' */ - long startpos; /* start of compressed data in file (header skipped) */ - off_t outoff; /* current offset in output file */ - int flags; -} gz_stream; - - -local int do_flush OF((gzFile file, int flush)); -local int destroy OF((gz_stream *s)); -local void putU32 OF((gz_stream *file, uint32_t x)); -local void *gz_alloc OF((void *notused, u_int items, u_int size)); -local void gz_free OF((void *notused, void *ptr)); - -/* =========================================================================== - Opens a gzip (.gz) file for reading or writing. The mode parameter - is as in fopen ("rb" or "wb"). The file is given either by file descriptor - or path name (if fd == -1). - gz_open return NULL if the file could not be opened or if there was - insufficient memory to allocate the (de)compression state; errno - can be checked to distinguish the two cases (if errno is zero, the - zlib error is Z_MEM_ERROR). 
-*/ -gzFile gz_open (path, mode, vp) - const char *path; - const char *mode; - struct vnode *vp; +struct gzio_stream * +gzio_init(gzio_cb cb, enum gzio_mode mode, size_t bufsz, int level, void *arg) { - int err; - int level = Z_DEFAULT_COMPRESSION; /* compression level */ - int strategy = Z_DEFAULT_STRATEGY; /* compression strategy */ - const char *p = mode; - gz_stream *s; - char fmode[80]; /* copy of mode, without the compression level */ - char *m = fmode; - ssize_t resid; - int error; - char buf[GZ_HEADER_LEN + 1]; + struct gzio_stream *s; + uint8_t *hdr; + int error; - if (!path || !mode) return Z_NULL; + if (bufsz < KERN_GZ_HDRLEN) + return (NULL); + if (mode != GZIO_DEFLATE) + return (NULL); - s = (gz_stream *)ALLOC(sizeof(gz_stream)); - if (!s) return Z_NULL; + s = gz_alloc(NULL, 1, sizeof(*s)); + s->gz_bufsz = bufsz; + s->gz_buffer = gz_alloc(NULL, 1, s->gz_bufsz); + s->gz_mode = mode; + s->gz_crc = ~0U; + s->gz_cb = cb; + s->gz_arg = arg; - s->stream.zalloc = (alloc_func)gz_alloc; - s->stream.zfree = (free_func)gz_free; - s->stream.opaque = (voidpf)0; - s->stream.next_in = s->inbuf = Z_NULL; - s->stream.next_out = s->outbuf = Z_NULL; - s->stream.avail_in = s->stream.avail_out = 0; - s->file = NULL; - s->z_err = Z_OK; - s->z_eof = 0; - s->crc = 0; - s->msg = NULL; - s->transparent = 0; - s->outoff = 0; - s->flags = 0; + s->gz_stream.zalloc = gz_alloc; + s->gz_stream.zfree = gz_free; + s->gz_stream.opaque = NULL; + s->gz_stream.next_in = Z_NULL; + s->gz_stream.avail_in = 0; - s->path = (char*)ALLOC(strlen(path)+1); - if (s->path == NULL) { - return destroy(s), (gzFile)Z_NULL; - } - strcpy(s->path, path); /* do this early for debugging */ + error = deflateInit2(&s->gz_stream, level, Z_DEFLATED, -MAX_WBITS, + DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY); + if (error != 0) + goto fail; - s->mode = '\0'; - do { - if (*p == 'r') s->mode = 'r'; - if (*p == 'w' || *p == 'a') s->mode = 'w'; - if (*p >= '0' && *p <= '9') { - level = *p - '0'; - } else if (*p == 'f') { - strategy = Z_FILTERED; - } else if (*p == 'h') { - strategy = Z_HUFFMAN_ONLY; - } else { - *m++ = *p; /* copy the mode */ - } - } while (*p++ && m != fmode + sizeof(fmode)); + s->gz_stream.avail_out = s->gz_bufsz; + s->gz_stream.next_out = s->gz_buffer; - if (s->mode != 'w') { - log(LOG_ERR, "gz_open: mode is not w (%c)\n", s->mode); - return destroy(s), (gzFile)Z_NULL; - } - - err = deflateInit2(&(s->stream), level, - Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL, strategy); - /* windowBits is passed < 0 to suppress zlib header */ + /* Write the gzip header to the output buffer. 
*/ + hdr = s->gz_buffer; + memset(hdr, 0, KERN_GZ_HDRLEN); + hdr[0] = KERN_GZ_MAGIC1; + hdr[1] = KERN_GZ_MAGIC2; + hdr[2] = Z_DEFLATED; + hdr[9] = OS_CODE; + s->gz_stream.next_out += KERN_GZ_HDRLEN; + s->gz_stream.avail_out -= KERN_GZ_HDRLEN; - s->stream.next_out = s->outbuf = (Byte*)ALLOC(Z_BUFSIZE); - if (err != Z_OK || s->outbuf == Z_NULL) { - return destroy(s), (gzFile)Z_NULL; - } + return (s); - s->stream.avail_out = Z_BUFSIZE; - s->file = vp; - - /* Write a very simple .gz header: - */ - snprintf(buf, sizeof(buf), "%c%c%c%c%c%c%c%c%c%c", gz_magic[0], - gz_magic[1], Z_DEFLATED, 0 /*flags*/, 0,0,0,0 /*time*/, - 0 /*xflags*/, OS_CODE); - - if ((error = vn_rdwr(UIO_WRITE, s->file, buf, GZ_HEADER_LEN, s->outoff, - UIO_SYSSPACE, IO_UNIT, curproc->p_ucred, - NOCRED, &resid, curthread))) { - s->outoff += GZ_HEADER_LEN - resid; - return destroy(s), (gzFile)Z_NULL; - } - s->outoff += GZ_HEADER_LEN; - s->startpos = 10L; - - return (gzFile)s; +fail: + gz_free(NULL, s->gz_buffer); + gz_free(NULL, s); + return (NULL); } - - /* =========================================================================== - * Cleanup then free the given gz_stream. Return a zlib error code. - Try freeing in the reverse order of allocations. - */ -local int destroy (s) - gz_stream *s; +int +gzio_write(struct gzio_stream *s, void *data, u_int len) { - int err = Z_OK; - if (!s) return Z_STREAM_ERROR; - - TRYFREE(s->msg); - - if (s->stream.state != NULL) { - if (s->mode == 'w') { - err = deflateEnd(&(s->stream)); - } - } - if (s->z_err < 0) err = s->z_err; - - TRYFREE(s->inbuf); - TRYFREE(s->outbuf); - TRYFREE(s->path); - TRYFREE(s); - return err; + return (gz_write(s, data, len, Z_NO_FLUSH)); } - -/* =========================================================================== - Writes the given number of uncompressed bytes into the compressed file. - gzwrite returns the number of bytes actually written (0 in case of error). -*/ -int ZEXPORT gzwrite (file, buf, len) - gzFile file; - const voidp buf; - unsigned len; +int +gzio_flush(struct gzio_stream *s) { - gz_stream *s = (gz_stream*)file; - off_t curoff; - size_t resid; - int error; - if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR; - - s->stream.next_in = (Bytef*)buf; - s->stream.avail_in = len; - - curoff = s->outoff; - while (s->stream.avail_in != 0) { - - if (s->stream.avail_out == 0) { - - s->stream.next_out = s->outbuf; - error = vn_rdwr_inchunks(UIO_WRITE, s->file, s->outbuf, Z_BUFSIZE, - curoff, UIO_SYSSPACE, IO_UNIT, - curproc->p_ucred, NOCRED, &resid, curthread); - if (error) { - log(LOG_ERR, "gzwrite: vn_rdwr return %d\n", error); - curoff += Z_BUFSIZE - resid; - s->z_err = Z_ERRNO; - break; - } - curoff += Z_BUFSIZE; - s->stream.avail_out = Z_BUFSIZE; - } - s->z_err = deflate(&(s->stream), Z_NO_FLUSH); - if (s->z_err != Z_OK) { - log(LOG_ERR, - "gzwrite: deflate returned error %d\n", s->z_err); - break; - } - } - - s->crc = ~crc32_raw(buf, len, ~s->crc); - s->outoff = curoff; - - return (int)(len - s->stream.avail_in); + return (gz_write(s, NULL, 0, Z_FINISH)); } - -/* =========================================================================== - Flushes all pending output into the compressed file. The parameter - flush is as in the deflate() function. 
-*/ -local int do_flush (file, flush) - gzFile file; - int flush; +void +gzio_fini(struct gzio_stream *s) { - uInt len; - int done = 0; - gz_stream *s = (gz_stream*)file; - off_t curoff = s->outoff; - size_t resid; - int error; - if (s == NULL || s->mode != 'w') return Z_STREAM_ERROR; - - if (s->stream.avail_in) { - log(LOG_WARNING, "do_flush: avail_in non-zero on entry\n"); - } - - s->stream.avail_in = 0; /* should be zero already anyway */ - - for (;;) { - len = Z_BUFSIZE - s->stream.avail_out; - - if (len != 0) { - error = vn_rdwr_inchunks(UIO_WRITE, s->file, s->outbuf, len, curoff, - UIO_SYSSPACE, IO_UNIT, curproc->p_ucred, - NOCRED, &resid, curthread); - if (error) { - s->z_err = Z_ERRNO; - s->outoff = curoff + len - resid; - return Z_ERRNO; - } - s->stream.next_out = s->outbuf; - s->stream.avail_out = Z_BUFSIZE; - curoff += len; - } - if (done) break; - s->z_err = deflate(&(s->stream), flush); - - /* Ignore the second of two consecutive flushes: */ - if (len == 0 && s->z_err == Z_BUF_ERROR) s->z_err = Z_OK; - - /* deflate has finished flushing only when it hasn't used up - * all the available space in the output buffer: - */ - done = (s->stream.avail_out != 0 || s->z_err == Z_STREAM_END); - - if (s->z_err != Z_OK && s->z_err != Z_STREAM_END) break; - } - s->outoff = curoff; - - return s->z_err == Z_STREAM_END ? Z_OK : s->z_err; + (void)deflateEnd(&s->gz_stream); + gz_free(NULL, s->gz_buffer); + gz_free(NULL, s); } -int ZEXPORT gzflush (file, flush) - gzFile file; - int flush; -{ - gz_stream *s = (gz_stream*)file; - int err = do_flush (file, flush); - - if (err) return err; - return s->z_err == Z_STREAM_END ? Z_OK : s->z_err; -} - - -/* =========================================================================== - Outputs a long in LSB order to the given file -*/ -local void putU32 (s, x) - gz_stream *s; - uint32_t x; -{ - uint32_t xx; - off_t curoff = s->outoff; - ssize_t resid; - -#if BYTE_ORDER == BIG_ENDIAN - xx = bswap32(x); -#else - xx = x; -#endif - vn_rdwr(UIO_WRITE, s->file, (caddr_t)&xx, sizeof(xx), curoff, - UIO_SYSSPACE, IO_UNIT, curproc->p_ucred, - NOCRED, &resid, curthread); - s->outoff += sizeof(xx) - resid; -} - - -/* =========================================================================== - Flushes all pending output if necessary, closes the compressed file - and deallocates all the (de)compression state. -*/ -int ZEXPORT gzclose (file) - gzFile file; -{ - int err; - gz_stream *s = (gz_stream*)file; - - if (s == NULL) return Z_STREAM_ERROR; - - if (s->mode == 'w') { - err = do_flush (file, Z_FINISH); - if (err != Z_OK) { - log(LOG_ERR, "gzclose: do_flush failed (err %d)\n", err); - return destroy((gz_stream*)file); - } -#if 0 - printf("gzclose: putting crc: %lld total: %lld\n", - (long long)s->crc, (long long)s->stream.total_in); - printf("sizeof uLong = %d\n", (int)sizeof(uLong)); -#endif - putU32 (s, s->crc); - putU32 (s, (uint32_t) s->stream.total_in); - } - return destroy((gz_stream*)file); -} - -/* - * Space allocation and freeing routines for use by zlib routines when called - * from gzip modules. - */ static void * -gz_alloc(void *notused __unused, u_int items, u_int size) +gz_alloc(void *arg __unused, u_int n, u_int sz) { - void *ptr; - MALLOC(ptr, void *, items * size, M_TEMP, M_NOWAIT | M_ZERO); - return ptr; + /* + * Memory for zlib state is allocated using M_NODUMP since it may be + * used to compress a kernel dump, and we don't want zlib to attempt to + * compress its own state. 
+ */ + return (malloc(n * sz, M_GZIO, M_WAITOK | M_ZERO | M_NODUMP)); } - + static void -gz_free(void *opaque __unused, void *ptr) +gz_free(void *arg __unused, void *ptr) { - FREE(ptr, M_TEMP); + + free(ptr, M_GZIO); } +static int +gz_write(struct gzio_stream *s, void *buf, u_int len, int zflag) +{ + uint8_t trailer[KERN_GZ_TRAILERLEN]; + size_t room; + int error, zerror; + + KASSERT(zflag == Z_FINISH || zflag == Z_NO_FLUSH, + ("unexpected flag %d", zflag)); + KASSERT(s->gz_mode == GZIO_DEFLATE, + ("invalid stream mode %d", s->gz_mode)); + + if (len > 0) { + s->gz_stream.avail_in = len; + s->gz_stream.next_in = buf; + s->gz_crc = crc32_raw(buf, len, s->gz_crc); + } else + s->gz_crc ^= ~0U; + + error = 0; + do { + zerror = deflate(&s->gz_stream, zflag); + if (zerror != Z_OK && zerror != Z_STREAM_END) { + error = EIO; + break; + } + + if (s->gz_stream.avail_out == 0 || zerror == Z_STREAM_END) { + /* + * Our output buffer is full or there's nothing left + * to produce, so we're flushing the buffer. + */ + len = s->gz_bufsz - s->gz_stream.avail_out; + if (zerror == Z_STREAM_END) { + /* + * Try to pack as much of the trailer into the + * output buffer as we can. + */ + ((uint32_t *)trailer)[0] = s->gz_crc; + ((uint32_t *)trailer)[1] = + s->gz_stream.total_in; + room = MIN(KERN_GZ_TRAILERLEN, + s->gz_bufsz - len); + memcpy(s->gz_buffer + len, trailer, room); + len += room; + } + + error = s->gz_cb(s->gz_buffer, len, s->gz_off, + s->gz_arg); + if (error != 0) + break; + + s->gz_off += len; + s->gz_stream.next_out = s->gz_buffer; + s->gz_stream.avail_out = s->gz_bufsz; + + /* + * If we couldn't pack the trailer into the output + * buffer, write it out now. + */ + if (zerror == Z_STREAM_END && room < KERN_GZ_TRAILERLEN) + error = s->gz_cb(trailer + room, + KERN_GZ_TRAILERLEN - room, s->gz_off, + s->gz_arg); + } + } while (zerror != Z_STREAM_END && + (zflag == Z_FINISH || s->gz_stream.avail_in > 0)); + + return (error); +} diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c index 57f66b02632d..58d9707029c7 100644 --- a/sys/kern/kern_sig.c +++ b/sys/kern/kern_sig.c @@ -38,8 +38,8 @@ __FBSDID("$FreeBSD$"); #include "opt_compat.h" +#include "opt_gzio.h" #include "opt_ktrace.h" -#include "opt_core.h" #include #include @@ -3075,17 +3075,18 @@ sysctl_debug_num_cores_check (SYSCTL_HANDLER_ARGS) SYSCTL_PROC(_debug, OID_AUTO, ncores, CTLTYPE_INT|CTLFLAG_RW, 0, sizeof(int), sysctl_debug_num_cores_check, "I", ""); -#if defined(COMPRESS_USER_CORES) -int compress_user_cores = 1; -SYSCTL_INT(_kern, OID_AUTO, compress_user_cores, CTLFLAG_RW, +#define GZ_SUFFIX ".gz" + +#ifdef GZIO +static int compress_user_cores = 1; +SYSCTL_INT(_kern, OID_AUTO, compress_user_cores, CTLFLAG_RWTUN, &compress_user_cores, 0, "Compression of user corefiles"); -int compress_user_cores_gzlevel = -1; /* default level */ -SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_gzlevel, CTLFLAG_RW, - &compress_user_cores_gzlevel, -1, "Corefile gzip compression level"); - -#define GZ_SUFFIX ".gz" -#define GZ_SUFFIX_LEN 3 +int compress_user_cores_gzlevel = 6; +SYSCTL_INT(_kern, OID_AUTO, compress_user_cores_gzlevel, CTLFLAG_RWTUN, + &compress_user_cores_gzlevel, 0, "Corefile gzip compression level"); +#else +static int compress_user_cores = 0; #endif static char corefilename[MAXPATHLEN] = {"%N.core"}; @@ -3162,10 +3163,8 @@ corefile_open(const char *comm, uid_t uid, pid_t pid, struct thread *td, } } free(hostname, M_TEMP); -#ifdef COMPRESS_USER_CORES if (compress) sbuf_printf(&sb, GZ_SUFFIX); -#endif if (sbuf_error(&sb) != 0) { log(LOG_ERR, "pid 
%ld (%s), uid (%lu): corename is too " "long\n", (long)pid, comm, (u_long)uid); @@ -3260,18 +3259,12 @@ coredump(struct thread *td) char *name; /* name of corefile */ void *rl_cookie; off_t limit; - int compress; char *data = NULL; char *fullpath, *freepath = NULL; size_t len; static const char comm_name[] = "comm="; static const char core_name[] = "core="; -#ifdef COMPRESS_USER_CORES - compress = compress_user_cores; -#else - compress = 0; -#endif PROC_LOCK_ASSERT(p, MA_OWNED); MPASS((p->p_flag & P_HADTHREADS) == 0 || p->p_singlethread == td); _STOPEVENT(p, S_CORE, 0); @@ -3297,8 +3290,8 @@ coredump(struct thread *td) } PROC_UNLOCK(p); - error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td, compress, - &vp, &name); + error = corefile_open(p->p_comm, cred->cr_uid, p->p_pid, td, + compress_user_cores, &vp, &name); if (error != 0) return (error); @@ -3337,7 +3330,7 @@ coredump(struct thread *td) if (p->p_sysent->sv_coredump != NULL) { error = p->p_sysent->sv_coredump(td, vp, limit, - compress ? IMGACT_CORE_COMPRESS : 0); + compress_user_cores ? IMGACT_CORE_COMPRESS : 0); } else { error = ENOSYS; } diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c index 1c7b21ca7d9e..20a14abef428 100644 --- a/sys/kern/subr_bus.c +++ b/sys/kern/subr_bus.c @@ -2682,6 +2682,25 @@ device_set_devclass(device_t dev, const char *classname) return (error); } +/** + * @brief Set the devclass of a device and mark the devclass fixed. + * @see device_set_devclass() + */ +int +device_set_devclass_fixed(device_t dev, const char *classname) +{ + int error; + + if (classname == NULL) + return (EINVAL); + + error = device_set_devclass(dev, classname); + if (error) + return (error); + dev->flags |= DF_FIXEDCLASS; + return (0); +} + /** * @brief Set the driver of a device * diff --git a/sys/ofed/drivers/net/mlx4/utils.c b/sys/kern/uipc_mbufhash.c similarity index 56% rename from sys/ofed/drivers/net/mlx4/utils.c rename to sys/kern/uipc_mbufhash.c index 2444ec5961c9..75f4f93988f9 100644 --- a/sys/ofed/drivers/net/mlx4/utils.c +++ b/sys/kern/uipc_mbufhash.c @@ -25,58 +25,34 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include -#include +#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include + +#include +#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include #if defined(INET) || defined(INET6) #include #endif + #ifdef INET -#include -#include #include #endif #ifdef INET6 #include -#include -#include #endif #include -#include "utils.h" - -/* XXX this code should be factored out */ -/* XXX copied from if_lagg.c */ - static const void * -mlx4_en_gethdr(struct mbuf *m, u_int off, u_int len, void *buf) +m_ether_tcpip_hash_gethdr(const struct mbuf *m, const u_int off, + const u_int len, void *buf) { + if (m->m_pkthdr.len < (off + len)) { return (NULL); } else if (m->m_len < (off + len)) { @@ -87,22 +63,18 @@ mlx4_en_gethdr(struct mbuf *m, u_int off, u_int len, void *buf) } uint32_t -mlx4_en_hashmbuf(uint32_t flags, struct mbuf *m, uint32_t key) +m_ether_tcpip_hash_init(void) +{ + uint32_t seed; + + seed = arc4random(); + return (fnv_32_buf(&seed, sizeof(seed), FNV1_32_INIT)); +} + +uint32_t +m_ether_tcpip_hash(const uint32_t flags, const struct mbuf *m, + const uint32_t key) { - uint16_t etype; - uint32_t p = key; - int off; - struct ether_header *eh; - const struct ether_vlan_header *vlan; -#ifdef INET - const struct ip *ip; - const uint32_t *ports; - int iphlen; -#endif -#ifdef INET6 - 
const struct ip6_hdr *ip6; - uint32_t flow; -#endif union { #ifdef INET struct ip ip; @@ -113,47 +85,57 @@ mlx4_en_hashmbuf(uint32_t flags, struct mbuf *m, uint32_t key) struct ether_vlan_header vlan; uint32_t port; } buf; + struct ether_header *eh; + const struct ether_vlan_header *vlan; +#ifdef INET + const struct ip *ip; +#endif +#ifdef INET6 + const struct ip6_hdr *ip6; +#endif + uint32_t p; + int off; + uint16_t etype; - + p = key; off = sizeof(*eh); if (m->m_len < off) - goto out; + goto done; eh = mtod(m, struct ether_header *); etype = ntohs(eh->ether_type); - if (flags & MLX4_F_HASHL2) { - p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, p); - p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p); + if (flags & MBUF_HASHFLAG_L2) { + p = fnv_32_buf(&eh->ether_shost, ETHER_ADDR_LEN, p); + p = fnv_32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p); } - /* Special handling for encapsulating VLAN frames */ - if ((m->m_flags & M_VLANTAG) && (flags & MLX4_F_HASHL2)) { - p = hash32_buf(&m->m_pkthdr.ether_vtag, + if ((m->m_flags & M_VLANTAG) && (flags & MBUF_HASHFLAG_L2)) { + p = fnv_32_buf(&m->m_pkthdr.ether_vtag, sizeof(m->m_pkthdr.ether_vtag), p); } else if (etype == ETHERTYPE_VLAN) { - vlan = mlx4_en_gethdr(m, off, sizeof(*vlan), &buf); + vlan = m_ether_tcpip_hash_gethdr(m, off, sizeof(*vlan), &buf); if (vlan == NULL) - goto out; + goto done; - if (flags & MLX4_F_HASHL2) - p = hash32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p); + if (flags & MBUF_HASHFLAG_L2) + p = fnv_32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p); etype = ntohs(vlan->evl_proto); off += sizeof(*vlan) - sizeof(*eh); } - switch (etype) { #ifdef INET case ETHERTYPE_IP: - ip = mlx4_en_gethdr(m, off, sizeof(*ip), &buf); + ip = m_ether_tcpip_hash_gethdr(m, off, sizeof(*ip), &buf); if (ip == NULL) - goto out; - - if (flags & MLX4_F_HASHL3) { - p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p); - p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p); - } - if (!(flags & MLX4_F_HASHL4)) break; - switch (ip->ip_p) { + if (flags & MBUF_HASHFLAG_L3) { + p = fnv_32_buf(&ip->ip_src, sizeof(struct in_addr), p); + p = fnv_32_buf(&ip->ip_dst, sizeof(struct in_addr), p); + } + if (flags & MBUF_HASHFLAG_L4) { + const uint32_t *ports; + int iphlen; + + switch (ip->ip_p) { case IPPROTO_TCP: case IPPROTO_UDP: case IPPROTO_SCTP: @@ -161,29 +143,39 @@ mlx4_en_hashmbuf(uint32_t flags, struct mbuf *m, uint32_t key) if (iphlen < sizeof(*ip)) break; off += iphlen; - ports = mlx4_en_gethdr(m, off, sizeof(*ports), &buf); + ports = m_ether_tcpip_hash_gethdr(m, + off, sizeof(*ports), &buf); if (ports == NULL) break; - p = hash32_buf(ports, sizeof(*ports), p); + p = fnv_32_buf(ports, sizeof(*ports), p); break; + default: + break; + } } break; #endif #ifdef INET6 case ETHERTYPE_IPV6: - if (!(flags & MLX4_F_HASHL3)) - break; - ip6 = mlx4_en_gethdr(m, off, sizeof(*ip6), &buf); + ip6 = m_ether_tcpip_hash_gethdr(m, off, sizeof(*ip6), &buf); if (ip6 == NULL) - goto out; + break; + if (flags & MBUF_HASHFLAG_L3) { + p = fnv_32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p); + p = fnv_32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p); + } + if (flags & MBUF_HASHFLAG_L4) { + uint32_t flow; - p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p); - p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p); - flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK; - p = hash32_buf(&flow, sizeof(flow), p); /* IPv6 flow label */ + /* IPv6 flow label */ + flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK; + p = fnv_32_buf(&flow, sizeof(flow), p); + } break; #endif + default: + 
break; } -out: +done: return (p); } diff --git a/sys/mips/atheros/if_arge.c b/sys/mips/atheros/if_arge.c index 1db0e3289dad..ffbe2d3cc103 100644 --- a/sys/mips/atheros/if_arge.c +++ b/sys/mips/atheros/if_arge.c @@ -304,6 +304,8 @@ arge_reset_mac(struct arge_softc *sc) uint32_t reg; uint32_t reset_reg; + ARGEDEBUG(sc, ARGE_DBG_RESET, "%s called\n", __func__); + /* Step 1. Soft-reset MAC */ ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET); DELAY(20); @@ -649,8 +651,7 @@ arge_attach(device_t dev) } /* - * Get default media & duplex mode, by default its Base100T - * and full duplex + * Get default/hard-coded media & duplex mode. */ if (resource_int_value(device_get_name(dev), device_get_unit(dev), "media", &hint) != 0) @@ -658,8 +659,12 @@ arge_attach(device_t dev) if (hint == 1000) sc->arge_media_type = IFM_1000_T; - else + else if (hint == 100) sc->arge_media_type = IFM_100_TX; + else if (hint == 10) + sc->arge_media_type = IFM_10_T; + else + sc->arge_media_type = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fduplex", &hint) != 0) @@ -847,9 +852,10 @@ arge_attach(device_t dev) } } } + if (sc->arge_miibus == NULL) { /* no PHY, so use hard-coded values */ - ifmedia_init(&sc->arge_ifmedia, 0, + ifmedia_init(&sc->arge_ifmedia, 0, arge_multiphy_mediachange, arge_multiphy_mediastatus); ifmedia_add(&sc->arge_ifmedia, @@ -1071,6 +1077,25 @@ arge_update_link_locked(struct arge_softc *sc) return; } + /* + * If we have a static media type configured, then + * use that. Some PHY configurations (eg QCA955x -> AR8327) + * use a static speed/duplex between the SoC and switch, + * even though the front-facing PHY speed changes. + */ + if (sc->arge_media_type != 0) { + ARGEDEBUG(sc, ARGE_DBG_MII, "%s: fixed; media=%d, duplex=%d\n", + __func__, + sc->arge_media_type, + sc->arge_duplex_mode); + if (mii->mii_media_status & IFM_ACTIVE) { + sc->arge_link_status = 1; + } else { + sc->arge_link_status = 0; + } + arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode); + } + if (mii->mii_media_status & IFM_ACTIVE) { media = IFM_SUBTYPE(mii->mii_media_active); @@ -1095,6 +1120,12 @@ arge_set_pll(struct arge_softc *sc, int media, int duplex) uint32_t fifo_tx, pll; int if_speed; + /* + * XXX Verify - is this valid for all chips? + * QCA955x (and likely some of the earlier chips!) define + * this as nibble mode and byte mode, and those have to do + * with the interface type (MII/SMII versus GMII/RGMII.) + */ ARGEDEBUG(sc, ARGE_DBG_PLL, "set_pll(%04x, %s)\n", media, duplex == IFM_FDX ? "full" : "half"); cfg = ARGE_READ(sc, AR71XX_MAC_CFG2); @@ -1199,6 +1230,9 @@ arge_set_pll(struct arge_softc *sc, int media, int duplex) static void arge_reset_dma(struct arge_softc *sc) { + + ARGEDEBUG(sc, ARGE_DBG_RESET, "%s: called\n", __func__); + ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0); ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0); @@ -1230,8 +1264,6 @@ arge_reset_dma(struct arge_softc *sc) arge_flush_ddr(sc); } - - static void arge_init(void *xsc) { diff --git a/sys/mips/conf/AP135.hints b/sys/mips/conf/AP135.hints index 87074afefae7..b1ea76a7dca0 100644 --- a/sys/mips/conf/AP135.hints +++ b/sys/mips/conf/AP135.hints @@ -83,7 +83,11 @@ hint.arswitch.0.port.6.txpause=1 hint.arswitch.0.port.6.rxpause=1 # arge0 - hooked up to AR8327 GMAC6, RGMII -hint.arge.0.phymask=0x0 +# set at 1000/full to the switch. +# so, lock both sides of this connect up to 1000/full; +# if_arge thus wont change the PLL configuration +# upon a link status change. 
+hint.arge.0.phymask=0x10 hint.arge.0.miimode=3 # RGMII hint.arge.0.media=1000 hint.arge.0.fduplex=1 diff --git a/sys/modules/Makefile b/sys/modules/Makefile index f4348b88098f..96148e4ab0af 100644 --- a/sys/modules/Makefile +++ b/sys/modules/Makefile @@ -353,6 +353,7 @@ SUBDIR= \ ${_virtio} \ vge \ ${_viawd} \ + videomode \ vkbd \ ${_vmm} \ ${_vmware} \ diff --git a/sys/modules/dtb/am335x/Makefile b/sys/modules/dtb/am335x/Makefile new file mode 100644 index 000000000000..d149c95fc545 --- /dev/null +++ b/sys/modules/dtb/am335x/Makefile @@ -0,0 +1,7 @@ +# $FreeBSD$ +# All the dts files for am335x systems we support. +DTS= \ + beaglebone.dts \ + beaglebone-black.dts + +.include diff --git a/sys/modules/mlxen/Makefile b/sys/modules/mlxen/Makefile index 02f777659fcb..02cca400f741 100644 --- a/sys/modules/mlxen/Makefile +++ b/sys/modules/mlxen/Makefile @@ -4,7 +4,7 @@ KMOD = mlxen SRCS = device_if.h bus_if.h pci_if.h vnode_if.h SRCS += en_cq.c en_main.c en_netdev.c en_port.c en_resources.c -SRCS += en_rx.c en_tx.c utils.c +SRCS += en_rx.c en_tx.c SRCS += opt_inet.h opt_inet6.h CFLAGS+= -I${.CURDIR}/../../ofed/drivers/net/mlx4 CFLAGS+= -I${.CURDIR}/../../ofed/include/ diff --git a/sys/net/ieee8023ad_lacp.c b/sys/net/ieee8023ad_lacp.c index 3052e4b526ee..c422c25201a3 100644 --- a/sys/net/ieee8023ad_lacp.c +++ b/sys/net/ieee8023ad_lacp.c @@ -35,7 +35,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include /* hz */ #include /* for net/if.h */ @@ -758,16 +757,13 @@ void lacp_attach(struct lagg_softc *sc) { struct lacp_softc *lsc; - uint32_t seed; lsc = malloc(sizeof(struct lacp_softc), M_DEVBUF, M_WAITOK | M_ZERO); sc->sc_psc = lsc; lsc->lsc_softc = sc; - seed = arc4random(); - lsc->lsc_hashkey = FNV1_32_INIT; - lsc->lsc_hashkey = fnv_32_buf(&seed, sizeof(seed), lsc->lsc_hashkey); + lsc->lsc_hashkey = m_ether_tcpip_hash_init(); lsc->lsc_active_aggregator = NULL; lsc->lsc_strict_mode = 1; LACP_LOCK_INIT(lsc); @@ -843,7 +839,7 @@ lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m) M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) hash = m->m_pkthdr.flowid >> sc->flowid_shift; else - hash = lagg_hashmbuf(sc, m, lsc->lsc_hashkey); + hash = m_ether_tcpip_hash(sc->sc_flags, m, lsc->lsc_hashkey); hash %= pm->pm_count; lp = pm->pm_map[hash]; diff --git a/sys/net/if_lagg.c b/sys/net/if_lagg.c index ec828fa8a949..46c5858069b1 100644 --- a/sys/net/if_lagg.c +++ b/sys/net/if_lagg.c @@ -36,7 +36,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include @@ -131,7 +130,6 @@ static int lagg_media_change(struct ifnet *); static void lagg_media_status(struct ifnet *, struct ifmediareq *); static struct lagg_port *lagg_link_active(struct lagg_softc *, struct lagg_port *); -static const void *lagg_gethdr(struct mbuf *, u_int, u_int, void *); /* Simple round robin */ static void lagg_rr_attach(struct lagg_softc *); @@ -490,7 +488,7 @@ lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params) sc->flowid_shift = V_def_flowid_shift; /* Hash all layers by default */ - sc->sc_flags = LAGG_F_HASHL2|LAGG_F_HASHL3|LAGG_F_HASHL4; + sc->sc_flags = MBUF_HASHFLAG_L2|MBUF_HASHFLAG_L3|MBUF_HASHFLAG_L4; lagg_proto_attach(sc, LAGG_PROTO_DEFAULT); @@ -1349,7 +1347,15 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) LAGG_WUNLOCK(sc); break; case SIOCGLAGGFLAGS: - rf->rf_flags = sc->sc_flags; + rf->rf_flags = 0; + LAGG_RLOCK(sc, &tracker); + if (sc->sc_flags & MBUF_HASHFLAG_L2) + rf->rf_flags |= LAGG_F_HASHL2; + if (sc->sc_flags & MBUF_HASHFLAG_L3) + rf->rf_flags |= 
LAGG_F_HASHL3; + if (sc->sc_flags & MBUF_HASHFLAG_L4) + rf->rf_flags |= LAGG_F_HASHL4; + LAGG_RUNLOCK(sc, &tracker); break; case SIOCSLAGGHASH: error = priv_check(td, PRIV_NET_LAGG); @@ -1360,8 +1366,13 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) break; } LAGG_WLOCK(sc); - sc->sc_flags &= ~LAGG_F_HASHMASK; - sc->sc_flags |= rf->rf_flags & LAGG_F_HASHMASK; + sc->sc_flags = 0; + if (rf->rf_flags & LAGG_F_HASHL2) + sc->sc_flags |= MBUF_HASHFLAG_L2; + if (rf->rf_flags & LAGG_F_HASHL3) + sc->sc_flags |= MBUF_HASHFLAG_L3; + if (rf->rf_flags & LAGG_F_HASHL4) + sc->sc_flags |= MBUF_HASHFLAG_L4; LAGG_WUNLOCK(sc); break; case SIOCGLAGGPORT: @@ -1806,120 +1817,6 @@ lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp) return (rval); } -static const void * -lagg_gethdr(struct mbuf *m, u_int off, u_int len, void *buf) -{ - if (m->m_pkthdr.len < (off + len)) { - return (NULL); - } else if (m->m_len < (off + len)) { - m_copydata(m, off, len, buf); - return (buf); - } - return (mtod(m, char *) + off); -} - -uint32_t -lagg_hashmbuf(struct lagg_softc *sc, struct mbuf *m, uint32_t key) -{ - uint16_t etype; - uint32_t p = key; - int off; - struct ether_header *eh; - const struct ether_vlan_header *vlan; -#ifdef INET - const struct ip *ip; - const uint32_t *ports; - int iphlen; -#endif -#ifdef INET6 - const struct ip6_hdr *ip6; - uint32_t flow; -#endif - union { -#ifdef INET - struct ip ip; -#endif -#ifdef INET6 - struct ip6_hdr ip6; -#endif - struct ether_vlan_header vlan; - uint32_t port; - } buf; - - - off = sizeof(*eh); - if (m->m_len < off) - goto out; - eh = mtod(m, struct ether_header *); - etype = ntohs(eh->ether_type); - if (sc->sc_flags & LAGG_F_HASHL2) { - p = fnv_32_buf(&eh->ether_shost, ETHER_ADDR_LEN, p); - p = fnv_32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p); - } - - /* Special handling for encapsulating VLAN frames */ - if ((m->m_flags & M_VLANTAG) && (sc->sc_flags & LAGG_F_HASHL2)) { - p = fnv_32_buf(&m->m_pkthdr.ether_vtag, - sizeof(m->m_pkthdr.ether_vtag), p); - } else if (etype == ETHERTYPE_VLAN) { - vlan = lagg_gethdr(m, off, sizeof(*vlan), &buf); - if (vlan == NULL) - goto out; - - if (sc->sc_flags & LAGG_F_HASHL2) - p = fnv_32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p); - etype = ntohs(vlan->evl_proto); - off += sizeof(*vlan) - sizeof(*eh); - } - - switch (etype) { -#ifdef INET - case ETHERTYPE_IP: - ip = lagg_gethdr(m, off, sizeof(*ip), &buf); - if (ip == NULL) - goto out; - - if (sc->sc_flags & LAGG_F_HASHL3) { - p = fnv_32_buf(&ip->ip_src, sizeof(struct in_addr), p); - p = fnv_32_buf(&ip->ip_dst, sizeof(struct in_addr), p); - } - if (!(sc->sc_flags & LAGG_F_HASHL4)) - break; - switch (ip->ip_p) { - case IPPROTO_TCP: - case IPPROTO_UDP: - case IPPROTO_SCTP: - iphlen = ip->ip_hl << 2; - if (iphlen < sizeof(*ip)) - break; - off += iphlen; - ports = lagg_gethdr(m, off, sizeof(*ports), &buf); - if (ports == NULL) - break; - p = fnv_32_buf(ports, sizeof(*ports), p); - break; - } - break; -#endif -#ifdef INET6 - case ETHERTYPE_IPV6: - if (!(sc->sc_flags & LAGG_F_HASHL3)) - break; - ip6 = lagg_gethdr(m, off, sizeof(*ip6), &buf); - if (ip6 == NULL) - goto out; - - p = fnv_32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p); - p = fnv_32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p); - flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK; - p = fnv_32_buf(&flow, sizeof(flow), p); /* IPv6 flow label */ - break; -#endif - } -out: - return (p); -} - int lagg_enqueue(struct ifnet *ifp, struct mbuf *m) { @@ -2087,15 +1984,12 @@ lagg_lb_attach(struct lagg_softc *sc) { struct lagg_port 
*lp; struct lagg_lb *lb; - uint32_t seed; lb = malloc(sizeof(struct lagg_lb), M_DEVBUF, M_WAITOK | M_ZERO); sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX; - seed = arc4random(); - lb->lb_key = FNV1_32_INIT; - lb->lb_key = fnv_32_buf(&seed, sizeof(seed), lb->lb_key); + lb->lb_key = m_ether_tcpip_hash_init(); sc->sc_psc = lb; SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) @@ -2160,7 +2054,7 @@ lagg_lb_start(struct lagg_softc *sc, struct mbuf *m) M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) p = m->m_pkthdr.flowid >> sc->flowid_shift; else - p = lagg_hashmbuf(sc, m, lb->lb_key); + p = m_ether_tcpip_hash(sc->sc_flags, m, lb->lb_key); p %= sc->sc_count; lp = lb->lb_ports[p]; diff --git a/sys/net/if_lagg.h b/sys/net/if_lagg.h index 8d6decd3c069..a45fa169939d 100644 --- a/sys/net/if_lagg.h +++ b/sys/net/if_lagg.h @@ -279,7 +279,6 @@ extern struct mbuf *(*lagg_input_p)(struct ifnet *, struct mbuf *); extern void (*lagg_linkstate_p)(struct ifnet *, int ); int lagg_enqueue(struct ifnet *, struct mbuf *); -uint32_t lagg_hashmbuf(struct lagg_softc *, struct mbuf *, uint32_t); SYSCTL_DECL(_net_link_lagg); diff --git a/sys/net/zlib.h b/sys/net/zlib.h index 04941df360b5..16edae12b6f6 100644 --- a/sys/net/zlib.h +++ b/sys/net/zlib.h @@ -1010,13 +1010,6 @@ extern int EXPORT inflateInit2_ OF((z_streamp strm, int windowBits, uLongf *get_crc_table OF((void)); /* can be used by asm versions of crc32() */ -#ifdef _KERNEL -struct vnode; -extern gzFile gz_open OF((const char *path, const char *mode, - struct vnode *vp)); -#endif - - #ifdef __cplusplus } #endif diff --git a/sys/netinet/sctp.h b/sys/netinet/sctp.h index 9b795ede256b..8a033d8187c8 100644 --- a/sys/netinet/sctp.h +++ b/sys/netinet/sctp.h @@ -128,6 +128,7 @@ struct sctp_paramhdr { #define SCTP_RECONFIG_SUPPORTED 0x00000029 #define SCTP_NRSACK_SUPPORTED 0x00000030 #define SCTP_PKTDROP_SUPPORTED 0x00000031 +#define SCTP_MAX_CWND 0x00000032 /* * read-only options diff --git a/sys/netinet/sctp_cc_functions.c b/sys/netinet/sctp_cc_functions.c index e32faf95a0cf..17a897b93d45 100644 --- a/sys/netinet/sctp_cc_functions.c +++ b/sys/netinet/sctp_cc_functions.c @@ -52,6 +52,19 @@ __FBSDID("$FreeBSD$"); #define SHIFT_MPTCP_MULTI_Z 16 #define SHIFT_MPTCP_MULTI 8 +static void +sctp_enforce_cwnd_limit(struct sctp_association *assoc, struct sctp_nets *net) +{ + if ((assoc->max_cwnd > 0) && + (net->cwnd > assoc->max_cwnd) && + (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) { + net->cwnd = assoc->max_cwnd; + if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) { + net->cwnd = net->mtu - sizeof(struct sctphdr); + } + } +} + static void sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net) { @@ -80,6 +93,7 @@ sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net) net->cwnd = net->mtu - sizeof(struct sctphdr); } } + sctp_enforce_cwnd_limit(assoc, net); net->ssthresh = assoc->peers_rwnd; SDT_PROBE(sctp, cwnd, net, init, stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net, @@ -178,6 +192,7 @@ sctp_cwnd_update_after_fr(struct sctp_tcb *stcb, } } net->cwnd = net->ssthresh; + sctp_enforce_cwnd_limit(asoc, net); SDT_PROBE(sctp, cwnd, net, fr, stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net, old_cwnd, net->cwnd); @@ -426,6 +441,7 @@ cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint6 if ((net->cc_mod.rtcc.vol_reduce) && (inst_ind != SCTP_INST_GAINING)) { net->cwnd += net->mtu; + sctp_enforce_cwnd_limit(&stcb->asoc, net); net->cc_mod.rtcc.vol_reduce--; } 
net->cc_mod.rtcc.last_step_state = 2; @@ -457,6 +473,7 @@ cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint6 if ((net->cc_mod.rtcc.vol_reduce) && (inst_ind != SCTP_INST_GAINING)) { net->cwnd += net->mtu; + sctp_enforce_cwnd_limit(&stcb->asoc, net); net->cc_mod.rtcc.vol_reduce--; } net->cc_mod.rtcc.last_step_state = 3; @@ -488,6 +505,7 @@ cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint6 if ((net->cc_mod.rtcc.vol_reduce) && (inst_ind != SCTP_INST_GAINING)) { net->cwnd += net->mtu; + sctp_enforce_cwnd_limit(&stcb->asoc, net); net->cc_mod.rtcc.vol_reduce--; } net->cc_mod.rtcc.last_step_state = 4; @@ -882,6 +900,7 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, break; } net->cwnd += incr; + sctp_enforce_cwnd_limit(asoc, net); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS); @@ -948,6 +967,7 @@ sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb, break; } net->cwnd += incr; + sctp_enforce_cwnd_limit(asoc, net); SDT_PROBE(sctp, cwnd, net, ack, stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), @@ -1227,6 +1247,7 @@ sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb, /* We always have 1 MTU */ net->cwnd = net->mtu; } + sctp_enforce_cwnd_limit(&stcb->asoc, net); if (net->cwnd - old_cwnd != 0) { /* log only changes */ SDT_PROBE(sctp, cwnd, net, pd, @@ -1251,6 +1272,7 @@ sctp_cwnd_update_after_output(struct sctp_tcb *stcb, net->ssthresh = net->cwnd; if (burst_limit) { net->cwnd = (net->flight_size + (burst_limit * net->mtu)); + sctp_enforce_cwnd_limit(&stcb->asoc, net); SDT_PROBE(sctp, cwnd, net, bl, stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), @@ -1589,6 +1611,7 @@ static void sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net) { int cur_val, i, indx, incr; + int old_cwnd = net->cwnd; cur_val = net->cwnd >> 10; indx = SCTP_HS_TABLE_SIZE - 1; @@ -1597,14 +1620,8 @@ sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net) /* normal mode */ if (net->net_ack > net->mtu) { net->cwnd += net->mtu; - if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { - sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS); - } } else { net->cwnd += net->net_ack; - if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { - sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS); - } } } else { for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) { @@ -1616,9 +1633,10 @@ sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net) net->last_hs_used = indx; incr = ((sctp_cwnd_adjust[indx].increase) << 10); net->cwnd += incr; - if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { - sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS); - } + } + sctp_enforce_cwnd_limit(&stcb->asoc, net); + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { + sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SS); } } @@ -1657,6 +1675,7 @@ sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net) net->last_hs_used = indx; } } + sctp_enforce_cwnd_limit(&stcb->asoc, net); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR); } @@ -1788,9 +1807,7 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb, if (net->cwnd <= net->ssthresh) { /* We are in slow start */ if (net->flight_size + net->net_ack >= 
net->cwnd) { - sctp_hs_cwnd_increase(stcb, net); - } else { if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { sctp_log_cwnd(stcb, net, net->net_ack, @@ -1804,6 +1821,7 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb, (net->partial_bytes_acked >= net->cwnd)) { net->partial_bytes_acked -= net->cwnd; net->cwnd += net->mtu; + sctp_enforce_cwnd_limit(asoc, net); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_CA); @@ -2042,6 +2060,7 @@ htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net) SCTP_CWND_LOG_FROM_SS); } } + sctp_enforce_cwnd_limit(&stcb->asoc, net); } else { if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { sctp_log_cwnd(stcb, net, net->net_ack, @@ -2063,6 +2082,7 @@ htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net) */ net->cwnd += net->mtu; net->partial_bytes_acked = 0; + sctp_enforce_cwnd_limit(&stcb->asoc, net); htcp_alpha_update(&net->cc_mod.htcp_ca); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { sctp_log_cwnd(stcb, net, net->mtu, @@ -2109,6 +2129,7 @@ sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net) */ net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND)); net->ssthresh = stcb->asoc.peers_rwnd; + sctp_enforce_cwnd_limit(&stcb->asoc, net); htcp_init(net); if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) { @@ -2212,6 +2233,7 @@ sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb, htcp_reset(&net->cc_mod.htcp_ca); net->ssthresh = htcp_recalc_ssthresh(net); net->cwnd = net->ssthresh; + sctp_enforce_cwnd_limit(asoc, net); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR); @@ -2291,6 +2313,7 @@ sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, net->RTO <<= 1; } net->cwnd = net->ssthresh; + sctp_enforce_cwnd_limit(&stcb->asoc, net); if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT); } diff --git a/sys/netinet/sctp_input.c b/sys/netinet/sctp_input.c index 44932e039126..4fab9866486d 100644 --- a/sys/netinet/sctp_input.c +++ b/sys/netinet/sctp_input.c @@ -2763,6 +2763,7 @@ sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features; inp->sctp_socket = so; inp->sctp_frag_point = (*inp_p)->sctp_frag_point; + inp->max_cwnd = (*inp_p)->max_cwnd; inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off; inp->ecn_supported = (*inp_p)->ecn_supported; inp->prsctp_supported = (*inp_p)->prsctp_supported; diff --git a/sys/netinet/sctp_os_bsd.h b/sys/netinet/sctp_os_bsd.h index 2468c8af009b..140f34194de9 100644 --- a/sys/netinet/sctp_os_bsd.h +++ b/sys/netinet/sctp_os_bsd.h @@ -389,8 +389,11 @@ typedef struct callout sctp_os_timer_t; #define SCTP_CLEAR_SO_NBIO(so) ((so)->so_state &= ~SS_NBIO) /* get the socket type */ #define SCTP_SO_TYPE(so) ((so)->so_type) -/* Use a macro for renaming sb_cc to sb_ccc */ -#define sb_cc sb_ccc +/* Use a macro for renaming sb_cc to sb_acc. + * Initially sb_ccc was used, but this broke select() when used + * with SCTP sockets. 
+ */ +#define sb_cc sb_acc /* reserve sb space for a socket */ #define SCTP_SORESERVE(so, send, recv) soreserve(so, send, recv) /* wakeup a socket */ diff --git a/sys/netinet/sctp_pcb.c b/sys/netinet/sctp_pcb.c index 701a0c23f007..592cf2263c5e 100644 --- a/sys/netinet/sctp_pcb.c +++ b/sys/netinet/sctp_pcb.c @@ -2474,6 +2474,7 @@ sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) inp->sctp_associd_counter = 1; inp->partial_delivery_point = SCTP_SB_LIMIT_RCV(so) >> SCTP_PARTIAL_DELIVERY_SHIFT; inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; + inp->max_cwnd = 0; inp->sctp_cmt_on_off = SCTP_BASE_SYSCTL(sctp_cmt_on_off); inp->ecn_supported = (uint8_t) SCTP_BASE_SYSCTL(sctp_ecn_enable); inp->prsctp_supported = (uint8_t) SCTP_BASE_SYSCTL(sctp_pr_enable); diff --git a/sys/netinet/sctp_pcb.h b/sys/netinet/sctp_pcb.h index c9183b1d8207..963b89fbcac0 100644 --- a/sys/netinet/sctp_pcb.h +++ b/sys/netinet/sctp_pcb.h @@ -404,6 +404,7 @@ struct sctp_inpcb { uint32_t sctp_frag_point; uint32_t partial_delivery_point; uint32_t sctp_context; + uint32_t max_cwnd; uint8_t local_strreset_support; uint32_t sctp_cmt_on_off; uint8_t ecn_supported; diff --git a/sys/netinet/sctp_peeloff.c b/sys/netinet/sctp_peeloff.c index cf41d29f71cd..b7024765a7c3 100644 --- a/sys/netinet/sctp_peeloff.c +++ b/sys/netinet/sctp_peeloff.c @@ -127,6 +127,7 @@ sctp_do_peeloff(struct socket *head, struct socket *so, sctp_assoc_t assoc_id) n_inp->pktdrop_supported = inp->pktdrop_supported; n_inp->partial_delivery_point = inp->partial_delivery_point; n_inp->sctp_context = inp->sctp_context; + n_inp->max_cwnd = inp->max_cwnd; n_inp->local_strreset_support = inp->local_strreset_support; n_inp->inp_starting_point_for_iterator = NULL; /* copy in the authentication parameters from the original endpoint */ diff --git a/sys/netinet/sctp_structs.h b/sys/netinet/sctp_structs.h index a8d3a97d1f4c..b1df17dbf3bf 100644 --- a/sys/netinet/sctp_structs.h +++ b/sys/netinet/sctp_structs.h @@ -1199,6 +1199,7 @@ struct sctp_association { uint8_t sctp_cmt_pf; uint8_t use_precise_time; uint64_t sctp_features; + uint32_t max_cwnd; uint16_t port; /* remote UDP encapsulation port */ /* * The mapping array is used to track out of order sequences above diff --git a/sys/netinet/sctp_timer.c b/sys/netinet/sctp_timer.c index d7a3f0c405b0..d8422b901747 100644 --- a/sys/netinet/sctp_timer.c +++ b/sys/netinet/sctp_timer.c @@ -152,7 +152,7 @@ sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct mbuf *op_err; op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, - "Association error couter exceeded"); + "Association error counter exceeded"); inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1; sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); return (1); diff --git a/sys/netinet/sctp_usrreq.c b/sys/netinet/sctp_usrreq.c index 6e406548e1ff..e745cc447499 100644 --- a/sys/netinet/sctp_usrreq.c +++ b/sys/netinet/sctp_usrreq.c @@ -3694,6 +3694,33 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize, } break; } + case SCTP_MAX_CWND: + { + struct sctp_assoc_value *av; + + SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize); + SCTP_FIND_STCB(inp, stcb, av->assoc_id); + + if (stcb) { + av->assoc_value = stcb->asoc.max_cwnd; + SCTP_TCB_UNLOCK(stcb); + } else { + if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || + (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || + (av->assoc_id == SCTP_FUTURE_ASSOC)) { + SCTP_INP_RLOCK(inp); + av->assoc_value = inp->max_cwnd; + SCTP_INP_RUNLOCK(inp); + } 
else { + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); + error = EINVAL; + } + } + if (error == 0) { + *optsize = sizeof(struct sctp_assoc_value); + } + break; + } default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); error = ENOPROTOOPT; @@ -6162,14 +6189,16 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, } if (stcb != NULL) { if (net != NULL) { + net->failure_threshold = thlds->spt_pathmaxrxt; + net->pf_threshold = thlds->spt_pathpfthld; if (net->dest_state & SCTP_ADDR_PF) { - if ((net->failure_threshold > thlds->spt_pathmaxrxt) || - (net->failure_threshold <= thlds->spt_pathpfthld)) { + if ((net->error_count > net->failure_threshold) || + (net->error_count <= net->pf_threshold)) { net->dest_state &= ~SCTP_ADDR_PF; } } else { - if ((net->failure_threshold > thlds->spt_pathpfthld) && - (net->failure_threshold <= thlds->spt_pathmaxrxt)) { + if ((net->error_count > net->pf_threshold) && + (net->error_count <= net->failure_threshold)) { net->dest_state |= SCTP_ADDR_PF; sctp_send_hb(stcb, net, SCTP_SO_LOCKED); sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_TIMER + SCTP_LOC_3); @@ -6177,28 +6206,28 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, } } if (net->dest_state & SCTP_ADDR_REACHABLE) { - if (net->failure_threshold > thlds->spt_pathmaxrxt) { + if (net->error_count > net->failure_threshold) { net->dest_state &= ~SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED); } } else { - if (net->failure_threshold <= thlds->spt_pathmaxrxt) { + if (net->error_count <= net->failure_threshold) { net->dest_state |= SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED); } } - net->failure_threshold = thlds->spt_pathmaxrxt; - net->pf_threshold = thlds->spt_pathpfthld; } else { TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { + net->failure_threshold = thlds->spt_pathmaxrxt; + net->pf_threshold = thlds->spt_pathpfthld; if (net->dest_state & SCTP_ADDR_PF) { - if ((net->failure_threshold > thlds->spt_pathmaxrxt) || - (net->failure_threshold <= thlds->spt_pathpfthld)) { + if ((net->error_count > net->failure_threshold) || + (net->error_count <= net->pf_threshold)) { net->dest_state &= ~SCTP_ADDR_PF; } } else { - if ((net->failure_threshold > thlds->spt_pathpfthld) && - (net->failure_threshold <= thlds->spt_pathmaxrxt)) { + if ((net->error_count > net->pf_threshold) && + (net->error_count <= net->failure_threshold)) { net->dest_state |= SCTP_ADDR_PF; sctp_send_hb(stcb, net, SCTP_SO_LOCKED); sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_TIMER + SCTP_LOC_3); @@ -6206,22 +6235,21 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, } } if (net->dest_state & SCTP_ADDR_REACHABLE) { - if (net->failure_threshold > thlds->spt_pathmaxrxt) { + if (net->error_count > net->failure_threshold) { net->dest_state &= ~SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED); } } else { - if (net->failure_threshold <= thlds->spt_pathmaxrxt) { + if (net->error_count <= net->failure_threshold) { net->dest_state |= SCTP_ADDR_REACHABLE; sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED); } } - net->failure_threshold = thlds->spt_pathmaxrxt; - net->pf_threshold = thlds->spt_pathpfthld; } stcb->asoc.def_net_failure = thlds->spt_pathmaxrxt; stcb->asoc.def_net_pf_threshold = 
thlds->spt_pathpfthld; } + SCTP_TCB_UNLOCK(stcb); } else { if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || @@ -6572,6 +6600,42 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize, } break; } + case SCTP_MAX_CWND: + { + struct sctp_assoc_value *av; + struct sctp_nets *net; + + SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize); + SCTP_FIND_STCB(inp, stcb, av->assoc_id); + + if (stcb) { + stcb->asoc.max_cwnd = av->assoc_value; + if (stcb->asoc.max_cwnd > 0) { + TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { + if ((net->cwnd > stcb->asoc.max_cwnd) && + (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) { + net->cwnd = stcb->asoc.max_cwnd; + if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) { + net->cwnd = net->mtu - sizeof(struct sctphdr); + } + } + } + } + SCTP_TCB_UNLOCK(stcb); + } else { + if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || + (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || + (av->assoc_id == SCTP_FUTURE_ASSOC)) { + SCTP_INP_WLOCK(inp); + inp->max_cwnd = av->assoc_value; + SCTP_INP_WUNLOCK(inp); + } else { + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); + error = EINVAL; + } + } + break; + } default: SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT); error = ENOPROTOOPT; diff --git a/sys/netinet/sctputil.c b/sys/netinet/sctputil.c index 57c203251d86..987928283879 100644 --- a/sys/netinet/sctputil.c +++ b/sys/netinet/sctputil.c @@ -936,6 +936,7 @@ sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, asoc->sctp_frag_point = inp->sctp_frag_point; asoc->sctp_features = inp->sctp_features; asoc->default_dscp = inp->sctp_ep.default_dscp; + asoc->max_cwnd = inp->max_cwnd; #ifdef INET6 if (inp->sctp_ep.default_flowlabel) { asoc->default_flowlabel = inp->sctp_ep.default_flowlabel; @@ -2744,7 +2745,11 @@ sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, static void sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, - struct sockaddr *sa, uint32_t error) + struct sockaddr *sa, uint32_t error, int so_locked +#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) + SCTP_UNUSED +#endif +) { struct mbuf *m_notify; struct sctp_paddr_change *spc; @@ -2827,7 +2832,7 @@ sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state, control, &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, - SCTP_SO_NOT_LOCKED); + so_locked); } @@ -3591,7 +3596,7 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, net = (struct sctp_nets *)data; sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, - (struct sockaddr *)&net->ro._l_addr, error); + (struct sockaddr *)&net->ro._l_addr, error, so_locked); break; } case SCTP_NOTIFY_INTERFACE_UP: @@ -3600,7 +3605,7 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, net = (struct sctp_nets *)data; sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, - (struct sockaddr *)&net->ro._l_addr, error); + (struct sockaddr *)&net->ro._l_addr, error, so_locked); break; } case SCTP_NOTIFY_INTERFACE_CONFIRMED: @@ -3609,7 +3614,7 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, net = (struct sctp_nets *)data; sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, - (struct sockaddr *)&net->ro._l_addr, error); + (struct sockaddr *)&net->ro._l_addr, error, so_locked); break; } case SCTP_NOTIFY_SPECIAL_SP_FAIL: @@ -3680,15 +3685,15 @@ sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, break; case SCTP_NOTIFY_ASCONF_ADD_IP: 
sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, - error); + error, so_locked); break; case SCTP_NOTIFY_ASCONF_DELETE_IP: sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, - error); + error, so_locked); break; case SCTP_NOTIFY_ASCONF_SET_PRIMARY: sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, - error); + error, so_locked); break; case SCTP_NOTIFY_PEER_SHUTDOWN: sctp_notify_shutdown_event(stcb); diff --git a/sys/netinet/tcp_usrreq.c b/sys/netinet/tcp_usrreq.c index dee1c33186ab..4ea39e300cff 100644 --- a/sys/netinet/tcp_usrreq.c +++ b/sys/netinet/tcp_usrreq.c @@ -476,8 +476,12 @@ tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td) inp = sotoinpcb(so); KASSERT(inp != NULL, ("tcp_usr_connect: inp == NULL")); INP_WLOCK(inp); - if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { - error = EINVAL; + if (inp->inp_flags & INP_TIMEWAIT) { + error = EADDRINUSE; + goto out; + } + if (inp->inp_flags & INP_DROPPED) { + error = ECONNREFUSED; goto out; } tp = intotcpcb(inp); @@ -523,8 +527,12 @@ tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td) inp = sotoinpcb(so); KASSERT(inp != NULL, ("tcp6_usr_connect: inp == NULL")); INP_WLOCK(inp); - if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) { - error = EINVAL; + if (inp->inp_flags & INP_TIMEWAIT) { + error = EADDRINUSE; + goto out; + } + if (inp->inp_flags & INP_DROPPED) { + error = ECONNREFUSED; goto out; } tp = intotcpcb(inp); diff --git a/sys/ofed/drivers/net/mlx4/en_netdev.c b/sys/ofed/drivers/net/mlx4/en_netdev.c index a0a310d62546..68ce12574b67 100644 --- a/sys/ofed/drivers/net/mlx4/en_netdev.c +++ b/sys/ofed/drivers/net/mlx4/en_netdev.c @@ -1916,19 +1916,22 @@ static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data) error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu); break; case SIOCSIFFLAGS: - mutex_lock(&mdev->state_lock); if (dev->if_flags & IFF_UP) { - if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0) + if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0) { + mutex_lock(&mdev->state_lock); mlx4_en_start_port(dev); - else + mutex_unlock(&mdev->state_lock); + } else { mlx4_en_set_rx_mode(dev); + } } else { + mutex_lock(&mdev->state_lock); if (dev->if_drv_flags & IFF_DRV_RUNNING) { mlx4_en_stop_port(dev); - if_link_state_change(dev, LINK_STATE_DOWN); + if_link_state_change(dev, LINK_STATE_DOWN); } + mutex_unlock(&mdev->state_lock); } - mutex_unlock(&mdev->state_lock); break; case SIOCADDMULTI: case SIOCDELMULTI: diff --git a/sys/ofed/drivers/net/mlx4/en_tx.c b/sys/ofed/drivers/net/mlx4/en_tx.c index 115dd0bebc11..2d7e8a8c3530 100644 --- a/sys/ofed/drivers/net/mlx4/en_tx.c +++ b/sys/ofed/drivers/net/mlx4/en_tx.c @@ -49,7 +49,6 @@ #include #include "mlx4_en.h" -#include "utils.h" enum { MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ @@ -699,10 +698,10 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct mbuf *mb, tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f; } -static unsigned long hashrandom; +static uint32_t hashrandom; static void hashrandom_init(void *arg) { - hashrandom = random(); + hashrandom = m_ether_tcpip_hash_init(); } SYSINIT(hashrandom_init, SI_SUB_KLD, SI_ORDER_SECOND, &hashrandom_init, NULL); @@ -724,7 +723,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb) if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) queue_index = mb->m_pkthdr.flowid; else - queue_index = mlx4_en_hashmbuf(MLX4_F_HASHL3 | MLX4_F_HASHL4, mb, hashrandom); + queue_index = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 | MBUF_HASHFLAG_L4, mb, hashrandom); 
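+		/*
+		 * Note: m_ether_tcpip_hash() (declared in sys/sys/mbuf.h later in
+		 * this patch) replaces the driver-private mlx4_en_hashmbuf(); with
+		 * MBUF_HASHFLAG_L3 | MBUF_HASHFLAG_L4 it hashes the IP addresses
+		 * and TCP/UDP ports using the SYSINIT-initialized key "hashrandom",
+		 * and the result is folded onto one of the per-priority TX rings by
+		 * the return statement below.
+		 */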
return ((queue_index % rings_p_up) + (up * rings_p_up)); } diff --git a/sys/ofed/drivers/net/mlx4/utils.h b/sys/ofed/drivers/net/mlx4/utils.h deleted file mode 100644 index 51a654ccb1cc..000000000000 --- a/sys/ofed/drivers/net/mlx4/utils.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2014 Mellanox Technologies Ltd. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef _MLX4_UTILS_H_ -#define _MLX4_UTILS_H_ - -/* Lagg flags */ -#define MLX4_F_HASHL2 0x00000001 /* hash layer 2 */ -#define MLX4_F_HASHL3 0x00000002 /* hash layer 3 */ -#define MLX4_F_HASHL4 0x00000004 /* hash layer 4 */ -#define MLX4_F_HASHMASK 0x00000007 - -uint32_t mlx4_en_hashmbuf(uint32_t flags, struct mbuf *m, uint32_t key); - -#endif /* _MLX4_UTILS_H_ */ diff --git a/sys/powerpc/powermac/platform_powermac.c b/sys/powerpc/powermac/platform_powermac.c index 3e1bf7a6d126..f6e9d9932c52 100644 --- a/sys/powerpc/powermac/platform_powermac.c +++ b/sys/powerpc/powermac/platform_powermac.c @@ -126,6 +126,8 @@ powermac_mem_regions(platform_t plat, struct mem_region *phys, int *physsz, int physacells = 1; memory = OF_finddevice("/memory"); + if (memory == -1) + memory = OF_finddevice("/memory@0"); /* "reg" has variable #address-cells, but #size-cells is always 1 */ OF_getprop(OF_parent(memory), "#address-cells", &physacells, @@ -154,23 +156,32 @@ powermac_mem_regions(platform_t plat, struct mem_region *phys, int *physsz, /* "available" always has #address-cells = 1 */ propsize = OF_getprop(memory, "available", memoryprop, sizeof(memoryprop)); - propsize /= sizeof(cell_t); - for (i = 0, j = 0; i < propsize; i += 2, j++) { - avail[j].mr_start = memoryprop[i]; - avail[j].mr_size = memoryprop[i + 1]; - } + if (propsize <= 0) { + for (i = 0; i < *physsz; i++) { + avail[i].mr_start = phys[i].mr_start; + avail[i].mr_size = phys[i].mr_size; + } + + *availsz = *physsz; + } else { + propsize /= sizeof(cell_t); + for (i = 0, j = 0; i < propsize; i += 2, j++) { + avail[j].mr_start = memoryprop[i]; + avail[j].mr_size = memoryprop[i + 1]; + } #ifdef __powerpc64__ - /* Add in regions above 4 GB to the available list */ - for (i = 0; i < *physsz; i++) { - if (phys[i].mr_start > BUS_SPACE_MAXADDR_32BIT) { - avail[j].mr_start = phys[i].mr_start; - 
avail[j].mr_size = phys[i].mr_size; - j++; + /* Add in regions above 4 GB to the available list */ + for (i = 0; i < *physsz; i++) { + if (phys[i].mr_start > BUS_SPACE_MAXADDR_32BIT) { + avail[j].mr_start = phys[i].mr_start; + avail[j].mr_size = phys[i].mr_size; + j++; + } } - } #endif - *availsz = j; + *availsz = j; + } } static int diff --git a/sys/powerpc/powerpc/syncicache.c b/sys/powerpc/powerpc/syncicache.c index 906bc3bb54af..9346c902cd61 100644 --- a/sys/powerpc/powerpc/syncicache.c +++ b/sys/powerpc/powerpc/syncicache.c @@ -31,10 +31,8 @@ * $NetBSD: syncicache.c,v 1.2 1999/05/05 12:36:40 tsubai Exp $ */ -#ifndef lint -static const char rcsid[] = - "$FreeBSD$"; -#endif /* not lint */ +#include +__FBSDID("$FreeBSD$"); #include #if defined(_KERNEL) || defined(_STANDALONE) diff --git a/sys/sys/bus.h b/sys/sys/bus.h index d6dc53585eba..8b1f1736a17f 100644 --- a/sys/sys/bus.h +++ b/sys/sys/bus.h @@ -522,6 +522,7 @@ void device_quiet(device_t dev); void device_set_desc(device_t dev, const char* desc); void device_set_desc_copy(device_t dev, const char* desc); int device_set_devclass(device_t dev, const char *classname); +int device_set_devclass_fixed(device_t dev, const char *classname); int device_set_driver(device_t dev, driver_t *driver); void device_set_flags(device_t dev, u_int32_t flags); void device_set_softc(device_t dev, void *softc); diff --git a/sys/sys/gpio.h b/sys/sys/gpio.h index b1f3ba8f8890..c8724cbe6e89 100644 --- a/sys/sys/gpio.h +++ b/sys/sys/gpio.h @@ -93,5 +93,6 @@ struct gpio_req { #define GPIOGET _IOWR('G', 3, struct gpio_req) #define GPIOSET _IOW('G', 4, struct gpio_req) #define GPIOTOGGLE _IOWR('G', 5, struct gpio_req) +#define GPIOSETNAME _IOW('G', 6, struct gpio_pin) #endif /* __GPIO_H__ */ diff --git a/sys/sys/gzio.h b/sys/sys/gzio.h new file mode 100644 index 000000000000..c61c2818f2e9 --- /dev/null +++ b/sys/sys/gzio.h @@ -0,0 +1,49 @@ +/*- + * Copyright (c) 2014 Mark Johnston + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#ifndef _SYS__GZIO_H_ +#define _SYS__GZIO_H_ + +#ifdef _KERNEL + +enum gzio_mode { + GZIO_DEFLATE, +}; + +typedef int (*gzio_cb)(void *, size_t, off_t, void *); + +struct gzio_stream; + +struct gzio_stream *gzio_init(gzio_cb cb, enum gzio_mode, size_t, int, void *); +int gzio_write(struct gzio_stream *, void *, u_int); +int gzio_flush(struct gzio_stream *); +void gzio_fini(struct gzio_stream *); + +#endif /* _KERNEL */ + +#endif /* _SYS__GZIO_H_ */ diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h index 4b4a67776d2c..6830c5918b30 100644 --- a/sys/sys/mbuf.h +++ b/sys/sys/mbuf.h @@ -1190,6 +1190,15 @@ rt_m_getfib(struct mbuf *m) ((_m)->m_pkthdr.fibnum) = (_fib); \ } while (0) +/* flags passed as first argument for "m_ether_tcpip_hash()" */ +#define MBUF_HASHFLAG_L2 (1 << 2) +#define MBUF_HASHFLAG_L3 (1 << 3) +#define MBUF_HASHFLAG_L4 (1 << 4) + +/* mbuf hashing helper routines */ +uint32_t m_ether_tcpip_hash_init(void); +uint32_t m_ether_tcpip_hash(const uint32_t, const struct mbuf *, const uint32_t); + #ifdef MBUF_PROFILING void m_profile(struct mbuf *m); #define M_PROFILE(m) m_profile(m) diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c index 23980e818126..585a7494060c 100644 --- a/sys/vm/vm_reserv.c +++ b/sys/vm/vm_reserv.c @@ -428,7 +428,7 @@ vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages, msucc = TAILQ_FIRST(&object->memq); if (msucc != NULL) { KASSERT(msucc->pindex > pindex, - ("vm_reserv_alloc_page: pindex already allocated")); + ("vm_reserv_alloc_contig: pindex already allocated")); rv = vm_reserv_from_page(msucc); if (rv->object == object && vm_reserv_has_pindex(rv, pindex)) goto found; diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c index 3d67333e51a5..f5bee64662f1 100644 --- a/sys/vm/vnode_pager.c +++ b/sys/vm/vnode_pager.c @@ -727,7 +727,7 @@ vnode_pager_local_getpages0(struct vnode *vp, vm_page_t *m, int bytecount, /* * The requested page has valid blocks. Invalid part can only * exist at the end of file, and the page is made fully valid - * by zeroing in vm_pager_getpages(). Free non-requested + * by zeroing in vm_pager_get_pages(). Free non-requested * pages, since no i/o is done to read its content. 
*/ if (mreq->valid != 0) { diff --git a/tools/regression/usr.bin/env/regress-env.rgdata b/tools/regression/usr.bin/env/regress-env.rgdata index e060eac6fef8..90c296e5c604 100644 --- a/tools/regression/usr.bin/env/regress-env.rgdata +++ b/tools/regression/usr.bin/env/regress-env.rgdata @@ -382,3 +382,36 @@ gblenv=OUTSIDEVAR=OutsideValue setenv:D=D_ThisisAlongstring_D1 stdout:A_ThisisAlongstring_A1 B_ThisisAlongstring_B1 C_ThisisAlongstring_C1 D_ThisisAlongstring_D1 ScriptName: [%-script.pathname-%] [run] + +[test] + sb_args:sh + script:[%-testpgm.pathname-%] -S '\c' >/dev/null +[run] +[test] + sb_args:sh + script:[%-testpgm.pathname-%] -S'\c' >/dev/null +[run] +[test] + sb_args:sh + script:[%-testpgm.pathname-%] -u foo -S '\c' >/dev/null +[run] +[test] + sb_args:sh + script:[%-testpgm.pathname-%] -u foo -S'\c' >/dev/null +[run] +[test] + sb_args:sh + script:[%-testpgm.pathname-%] -S '-u bar \c' >/dev/null +[run] +[test] + sb_args:sh + script:[%-testpgm.pathname-%] -S'-u bar \c' >/dev/null +[run] +[test] + sb_args:sh + script:[%-testpgm.pathname-%] -u foo -S '-u bar \c' >/dev/null +[run] +[test] + sb_args:sh + script:[%-testpgm.pathname-%] -u foo -S'-u bar \c' >/dev/null +[run] diff --git a/tools/regression/usr.bin/env/regress-sb.rb b/tools/regression/usr.bin/env/regress-sb.rb index f6333aa67c6e..b43ddd3b6aa4 100644 --- a/tools/regression/usr.bin/env/regress-sb.rb +++ b/tools/regression/usr.bin/env/regress-sb.rb @@ -346,6 +346,7 @@ class RGTestOptions # "just anything" that matches the general pattern. There are # no blanks in the recognized values, but I use an x-tended # regexp and then add blanks to make it more readable. + optval.gsub!(/\[%- testpgm\.pathname -%\]/x, $testpgm) optval.gsub!(/\[%- testpgm\.basename -%\]/x, File.basename($testpgm)) optval.gsub!(/\[%- script\.pathname -%\]/x, $scriptfile) diff --git a/usr.bin/ctlstat/ctlstat.8 b/usr.bin/ctlstat/ctlstat.8 index 22da2bfd2a5a..8351d9a372b7 100644 --- a/usr.bin/ctlstat/ctlstat.8 +++ b/usr.bin/ctlstat/ctlstat.8 @@ -34,7 +34,7 @@ .\" $Id: //depot/users/kenm/FreeBSD-test2/usr.bin/ctlstat/ctlstat.8#2 $ .\" $FreeBSD$ .\" -.Dd March 6, 2013 +.Dd March 8, 2015 .Dt CTLSTAT 8 .Os .Sh NAME @@ -69,7 +69,7 @@ and a combined total column that also includes non I/O operations. .It Fl c Ar count Display statistics this many times. .It Fl C -Disable display of CPU statistics. +Disable CPU statistics display. .It Fl d Display DMA operation time (latency) instead of overall I/O time (latency). .It Fl D diff --git a/usr.bin/env/envopts.c b/usr.bin/env/envopts.c index f8214305cc60..57400622912b 100644 --- a/usr.bin/env/envopts.c +++ b/usr.bin/env/envopts.c @@ -372,9 +372,9 @@ split_spaces(const char *str, int *origind, int *origc, char ***origv) *nextarg = NULL; /* Update optind/argc/argv in the calling routine */ - *origind = 1; - *origc += addcount; + *origc += addcount - *origind + 1; *origv = newargv; + *origind = 1; } /** diff --git a/usr.bin/m4/misc.c b/usr.bin/m4/misc.c index 19594486e958..30fb69094844 100644 --- a/usr.bin/m4/misc.c +++ b/usr.bin/m4/misc.c @@ -64,6 +64,7 @@ unsigned char *bbase[MAXINP]; /* the base for each ilevel */ unsigned char *bp; /* first available character */ unsigned char *endpbb; /* end of push-back buffer */ +static void *reallocarray(void *, size_t, size_t); /* * find the index of second str in the first str. @@ -352,23 +353,6 @@ xrealloc(void *old, size_t n, const char *fmt, ...) 
return p; } -/* - * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX - * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW - */ -#define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4)) - -static void * -reallocarray(void *optr, size_t nmemb, size_t size) -{ - if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && - nmemb > 0 && SIZE_MAX / nmemb < size) { - errno = ENOMEM; - return NULL; - } - return realloc(optr, size * nmemb); -} - void * xreallocarray(void *old, size_t s1, size_t s2, const char *fmt, ...) { @@ -483,3 +467,37 @@ dump_buffer(FILE *f, size_t m) for (s = bp; s-buf > (long)m;) fputc(*--s, f); } + +/* $OpenBSD: reallocarray.c,v 1.2 2014/12/08 03:45:00 bcook Exp $ */ +/* + * Copyright (c) 2008 Otto Moerbeek + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX + * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW + */ +#define MUL_NO_OVERFLOW ((size_t)1 << (sizeof(size_t) * 4)) + +void * +reallocarray(void *optr, size_t nmemb, size_t size) +{ + if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) && + nmemb > 0 && SIZE_MAX / nmemb < size) { + errno = ENOMEM; + return NULL; + } + return realloc(optr, size * nmemb); +} diff --git a/usr.bin/procstat/Makefile b/usr.bin/procstat/Makefile index 1744fe083214..211ce1a1431b 100644 --- a/usr.bin/procstat/Makefile +++ b/usr.bin/procstat/Makefile @@ -8,6 +8,7 @@ SRCS= procstat.c \ procstat_basic.c \ procstat_bin.c \ procstat_cred.c \ + procstat_cs.c \ procstat_files.c \ procstat_kstack.c \ procstat_rlimit.c \ diff --git a/usr.bin/procstat/procstat.1 b/usr.bin/procstat/procstat.1 index f33c746bc736..3ef39ebc0c2b 100644 --- a/usr.bin/procstat/procstat.1 +++ b/usr.bin/procstat/procstat.1 @@ -25,7 +25,7 @@ .\" .\" $FreeBSD$ .\" -.Dd May 16, 2014 +.Dd March 10, 2015 .Dt PROCSTAT 1 .Os .Sh NAME @@ -35,7 +35,7 @@ .Nm .Op Fl CHhn .Op Fl w Ar interval -.Op Fl b | c | e | f | i | j | k | l | r | s | t | v | x +.Op Fl b | c | e | f | i | j | k | l | r | s | S | t | v | x .Op Fl a | Ar pid | Ar core ... .Sh DESCRIPTION The @@ -75,6 +75,8 @@ Display resource limits for the process. Display resource usage information for the process. .It Fl s Display security credential information for the process. +.It Fl S +Display the cpuset information for the thread. .It Fl t Display thread information for the process. .It Fl v @@ -108,9 +110,16 @@ flag may be used to request per-thread statistics rather than per-process statistics for some options. For those options, the second field in the table will list the thread ID to which the row of information corresponds. +The +.Fl H +flag is implied for the +.Fl S +mode. .Pp -Some information, such as VM and file descriptor information, is available +Information for VM, file descriptor, and cpuset options is available only to the owner of a process or the superuser. 
+A cpuset value displayed as -1 means that the information is either invalid +or not available. .Ss Binary Information Display the process ID, command, and path to the process binary: .Pp diff --git a/usr.bin/procstat/procstat.c b/usr.bin/procstat/procstat.c index de0237f387ce..48a2135180a2 100644 --- a/usr.bin/procstat/procstat.c +++ b/usr.bin/procstat/procstat.c @@ -40,7 +40,7 @@ #include "procstat.h" static int aflag, bflag, cflag, eflag, fflag, iflag, jflag, kflag, lflag, rflag; -static int sflag, tflag, vflag, xflag; +static int sflag, tflag, vflag, xflag, Sflag; int hflag, nflag, Cflag, Hflag; static void @@ -50,7 +50,7 @@ usage(void) fprintf(stderr, "usage: procstat [-CHhn] [-M core] [-N system] " "[-w interval] \n"); fprintf(stderr, " [-b | -c | -e | -f | -i | -j | -k | " - "-l | -r | -s | -t | -v | -x]\n"); + "-l | -r | -s | -S | -t | -v | -x]\n"); fprintf(stderr, " [-a | pid | core ...]\n"); exit(EX_USAGE); } @@ -85,6 +85,8 @@ procstat(struct procstat *prstat, struct kinfo_proc *kipp) procstat_vm(prstat, kipp); else if (xflag) procstat_auxv(prstat, kipp); + else if (Sflag) + procstat_cs(prstat, kipp); else procstat_basic(kipp); } @@ -128,7 +130,7 @@ main(int argc, char *argv[]) interval = 0; memf = nlistf = NULL; - while ((ch = getopt(argc, argv, "CHN:M:abcefijklhrstvw:x")) != -1) { + while ((ch = getopt(argc, argv, "CHN:M:abcefijklhrsStvw:x")) != -1) { switch (ch) { case 'C': Cflag++; @@ -144,6 +146,9 @@ main(int argc, char *argv[]) case 'N': nlistf = optarg; break; + case 'S': + Sflag++; + break; case 'a': aflag++; break; @@ -228,7 +233,7 @@ main(int argc, char *argv[]) /* We require that either 0 or 1 mode flags be set. */ tmp = bflag + cflag + eflag + fflag + iflag + jflag + (kflag ? 1 : 0) + - lflag + rflag + sflag + tflag + vflag + xflag; + lflag + rflag + sflag + tflag + vflag + xflag + Sflag; if (!(tmp == 0 || tmp == 1)) usage(); diff --git a/usr.bin/procstat/procstat.h b/usr.bin/procstat/procstat.h index 98282b3ac1e0..50795219111f 100644 --- a/usr.bin/procstat/procstat.h +++ b/usr.bin/procstat/procstat.h @@ -39,6 +39,7 @@ void procstat_auxv(struct procstat *prstat, struct kinfo_proc *kipp); void procstat_basic(struct kinfo_proc *kipp); void procstat_bin(struct procstat *prstat, struct kinfo_proc *kipp); void procstat_cred(struct procstat *prstat, struct kinfo_proc *kipp); +void procstat_cs(struct procstat *prstat, struct kinfo_proc *kipp); void procstat_env(struct procstat *prstat, struct kinfo_proc *kipp); void procstat_files(struct procstat *prstat, struct kinfo_proc *kipp); void procstat_kstack(struct procstat *prstat, struct kinfo_proc *kipp, diff --git a/usr.bin/procstat/procstat_cs.c b/usr.bin/procstat/procstat_cs.c new file mode 100644 index 000000000000..8ccf1ea82227 --- /dev/null +++ b/usr.bin/procstat/procstat_cs.c @@ -0,0 +1,108 @@ +/*- + * Copyright (c) 2007 Robert N. M. Watson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "procstat.h" + +void +procstat_cs(struct procstat *procstat, struct kinfo_proc *kipp) +{ + cpusetid_t cs; + cpuset_t mask; + struct kinfo_proc *kip; + unsigned int count, i; + int once, twice, lastcpu, cpu; + + if (!hflag) + printf("%5s %6s %-16s %-16s %2s %4s %-7s\n", "PID", + "TID", "COMM", "TDNAME", "CPU", "CSID", "CPU MASK"); + + kip = procstat_getprocs(procstat, KERN_PROC_PID | KERN_PROC_INC_THREAD, + kipp->ki_pid, &count); + if (kip == NULL) + return; + kinfo_proc_sort(kip, count); + for (i = 0; i < count; i++) { + kipp = &kip[i]; + printf("%5d ", kipp->ki_pid); + printf("%6d ", kipp->ki_tid); + printf("%-16s ", strlen(kipp->ki_comm) ? + kipp->ki_comm : "-"); + printf("%-16s ", (strlen(kipp->ki_tdname) && + (strcmp(kipp->ki_comm, kipp->ki_tdname) != 0)) ? + kipp->ki_tdname : "-"); + if (kipp->ki_oncpu != 255) + printf("%3d ", kipp->ki_oncpu); + else if (kipp->ki_lastcpu != 255) + printf("%3d ", kipp->ki_lastcpu); + else + printf("%3s ", "-"); + if (cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_TID, + kipp->ki_tid, &cs) != 0) { + cs = CPUSET_INVALID; + } + printf("%4d ", cs); + if ((cs != CPUSET_INVALID) && + (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, + kipp->ki_tid, sizeof(mask), &mask) == 0)) { + lastcpu = -1; + once = 0; + twice = 0; + for (cpu = 0; cpu < CPU_SETSIZE; cpu++) { + if (CPU_ISSET(cpu, &mask)) { + if (once == 0) { + printf("%d", cpu); + once = 1; + } else if (cpu == lastcpu + 1) { + twice = 1; + } else if (twice == 1) { + printf("-%d,%d", lastcpu, cpu); + twice = 0; + } else + printf(",%d", cpu); + lastcpu = cpu; + } + } + if (once && twice) + printf("-%d", lastcpu); + } + printf("\n"); + } + procstat_freeprocs(procstat, kip); +} diff --git a/usr.bin/touch/touch.1 b/usr.bin/touch/touch.1 index 6201b28dbd15..a3cc2a74e59d 100644 --- a/usr.bin/touch/touch.1 +++ b/usr.bin/touch/touch.1 @@ -31,7 +31,7 @@ .\" @(#)touch.1 8.3 (Berkeley) 4/28/95 .\" $FreeBSD$ .\" -.Dd June 10, 2012 +.Dd March 8, 2015 .Dt TOUCH 1 .Os .Sh NAME @@ -238,7 +238,7 @@ If the letter pair is in the range 39 to 99, the year is set to 1939 to 1999, otherwise, the year is set in the 21st century. 
.Sh SEE ALSO -.Xr utimes 2 +.Xr utimensat 2 .Sh STANDARDS The .Nm diff --git a/usr.bin/w/w.c b/usr.bin/w/w.c index 0889c4c1b43f..f21b9767feab 100644 --- a/usr.bin/w/w.c +++ b/usr.bin/w/w.c @@ -554,5 +554,6 @@ usage(int wcmd) xo_error("usage: w [-dhin] [-M core] [-N system] [user ...]\n"); else xo_error("usage: uptime\n"); + xo_finish(); exit(1); } diff --git a/usr.sbin/autofs/automount.c b/usr.sbin/autofs/automount.c index 3c59704b5c58..8a7c91a59b21 100644 --- a/usr.sbin/autofs/automount.c +++ b/usr.sbin/autofs/automount.c @@ -141,8 +141,8 @@ mount_autofs(const char *from, const char *fspath, const char *options, } static void -mount_if_not_already(const struct node *n, const char *map, - const struct statfs *mntbuf, int nitems) +mount_if_not_already(const struct node *n, const char *map, const char *options, + const char *prefix, const struct statfs *mntbuf, int nitems) { const struct statfs *sb; char *mountpoint; @@ -175,7 +175,7 @@ mount_if_not_already(const struct node *n, const char *map, mountpoint); } - mount_autofs(from, mountpoint, n->n_options, n->n_key); + mount_autofs(from, mountpoint, options, prefix); free(from); free(mountpoint); } @@ -184,7 +184,7 @@ static void mount_unmount(struct node *root) { struct statfs *mntbuf; - struct node *n, *n2, *n3; + struct node *n, *n2; int i, nitems; nitems = getmntinfo(&mntbuf, MNT_WAIT); @@ -216,15 +216,14 @@ mount_unmount(struct node *root) TAILQ_FOREACH(n, &root->n_children, n_next) { if (!node_is_direct_map(n)) { - mount_if_not_already(n, n->n_map, mntbuf, nitems); + mount_if_not_already(n, n->n_map, n->n_options, + n->n_key, mntbuf, nitems); continue; } TAILQ_FOREACH(n2, &n->n_children, n_next) { - TAILQ_FOREACH(n3, &n2->n_children, n_next) { - mount_if_not_already(n3, n->n_map, - mntbuf, nitems); - } + mount_if_not_already(n2, n->n_map, n->n_options, + "/", mntbuf, nitems); } } } @@ -349,8 +348,7 @@ main_automount(int argc, char **argv) if (options == NULL) { options = checked_strdup(optarg); } else { - options = - separated_concat(options, optarg, ','); + options = concat(options, ',', optarg); } break; case 'u': @@ -388,8 +386,7 @@ main_automount(int argc, char **argv) if (show_maps) { if (options != NULL) { - root->n_options = separated_concat(options, - root->n_options, ','); + root->n_options = concat(options, ',', root->n_options); } if (show_maps > 1) { node_expand_indirect_maps(root); diff --git a/usr.sbin/autofs/automountd.8 b/usr.sbin/autofs/automountd.8 index 31fc8f20c700..175633b5f6d8 100644 --- a/usr.sbin/autofs/automountd.8 +++ b/usr.sbin/autofs/automountd.8 @@ -27,7 +27,7 @@ .\" .\" $FreeBSD$ .\" -.Dd April 20, 2014 +.Dd March 10, 2015 .Dt AUTOMOUNTD 8 .Os .Sh NAME @@ -78,7 +78,7 @@ The default is 30. Debug mode: increase verbosity and do not daemonize. .It Fl o Ar options Specify mount options. -Options specified here ill be overridden by options entered in maps or +Options specified here will be overridden by options entered in maps or .Xr auto_master 5 . .It Fl v Increase verbosity. 
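For context on the autofs changes that follow: the old separated_concat(s1, s2, sep) helper is renamed to concat(s1, sep, s2) and taught to skip a leading separator in s2, so that joining "/" with "/foo" yields "/foo" rather than "//foo". Below is a minimal, self-contained sketch of that behavior; it mirrors the implementation in the usr.sbin/autofs/common.c hunk further down, except that err(3) stands in for the daemon's log_err().

#include <assert.h>
#include <err.h>
#include <stdio.h>
#include <string.h>

/*
 * Join s1 and s2 with 'separator', avoiding doubled or stray separators:
 * skip a leading separator in s2, and do not insert one when either string
 * is empty or s1 already ends with the separator.
 */
static char *
concat(const char *s1, char separator, const char *s2)
{
	char *result;
	int ret;

	assert(s1 != NULL);
	assert(s2 != NULL);

	if (s2[0] == separator)
		s2++;

	if (s1[0] == '\0' || s2[0] == '\0' || s1[strlen(s1) - 1] == separator)
		ret = asprintf(&result, "%s%s", s1, s2);
	else
		ret = asprintf(&result, "%s%c%s", s1, separator, s2);
	if (ret < 0)
		err(1, "asprintf");

	return (result);
}

int
main(void)
{
	/* "/net" + "/foo" -> "/net/foo"; "retrycnt" '=' "1" -> "retrycnt=1". */
	printf("%s\n", concat("/net", '/', "/foo"));
	printf("%s\n", concat("retrycnt", '=', "1"));
	return (0);
}

Callers throughout automount.c and automountd.c then build option strings incrementally, e.g. options = concat(options, ',', "automounted").
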
diff --git a/usr.sbin/autofs/automountd.c b/usr.sbin/autofs/automountd.c index ff60a997288a..168e8b31a428 100644 --- a/usr.sbin/autofs/automountd.c +++ b/usr.sbin/autofs/automountd.c @@ -202,7 +202,7 @@ handle_request(const struct autofs_daemon_request *adr, char *cmdline_options, parent = root; } else { parent = node_new_map(root, checked_strdup(adr->adr_prefix), - checked_strdup(adr->adr_options), checked_strdup(map), + NULL, checked_strdup(map), checked_strdup("[kernel request]"), lineno); } @@ -231,20 +231,19 @@ handle_request(const struct autofs_daemon_request *adr, char *cmdline_options, "failing mount", map, adr->adr_path); } + options = node_options(node); + options = concat(adr->adr_options, ',', options); + + /* + * Prepend options passed via automountd(8) command line. + */ + if (cmdline_options != NULL) + options = concat(cmdline_options, ',', options); + if (node->n_location == NULL) { log_debugx("found node defined at %s:%d; not a mountpoint", node->n_config_file, node->n_config_line); - options = node_options(node); - - /* - * Prepend options passed via automountd(8) command line. - */ - if (cmdline_options != NULL) { - options = - separated_concat(cmdline_options, options, ','); - } - nobrowse = pick_option("nobrowse", &options); if (nobrowse != NULL && adr->adr_key[0] == '\0') { log_debugx("skipping map %s due to \"nobrowse\" " @@ -268,8 +267,7 @@ handle_request(const struct autofs_daemon_request *adr, char *cmdline_options, * We still need to create the single subdirectory * user is trying to access. */ - tmp = separated_concat(adr->adr_path, - adr->adr_key, '/'); + tmp = concat(adr->adr_path, '/', adr->adr_key); node = node_find(root, tmp); if (node != NULL) create_subtree(node, false); @@ -295,18 +293,10 @@ handle_request(const struct autofs_daemon_request *adr, char *cmdline_options, "failing mount", adr->adr_path); } - options = node_options(node); - - /* - * Prepend options passed via automountd(8) command line. - */ - if (cmdline_options != NULL) - options = separated_concat(cmdline_options, options, ','); - /* * Append "automounted". */ - options = separated_concat(options, "automounted", ','); + options = concat(options, ',', "automounted"); /* * Remove "nobrowse", mount(8) doesn't understand it. @@ -334,11 +324,10 @@ handle_request(const struct autofs_daemon_request *adr, char *cmdline_options, if (retrycnt == NULL) { log_debugx("retrycnt not specified in options; " "defaulting to 1"); - options = separated_concat(options, - separated_concat("retrycnt", "1", '='), ','); + options = concat(options, ',', "retrycnt=1"); } else { - options = separated_concat(options, - separated_concat("retrycnt", retrycnt, '='), ','); + options = concat(options, ',', + concat("retrycnt", '=', retrycnt)); } } @@ -465,8 +454,7 @@ main_automountd(int argc, char **argv) if (options == NULL) { options = checked_strdup(optarg); } else { - options = - separated_concat(options, optarg, ','); + options = concat(options, ',', optarg); } break; case 'v': diff --git a/usr.sbin/autofs/common.c b/usr.sbin/autofs/common.c index 8ace69d5833a..d725eab2d028 100644 --- a/usr.sbin/autofs/common.c +++ b/usr.sbin/autofs/common.c @@ -85,50 +85,11 @@ checked_strdup(const char *s) return (c); } -/* - * Take two pointers to strings, concatenate the contents with "/" in the - * middle, make the first pointer point to the result, the second pointer - * to NULL, and free the old strings. - * - * Concatenate pathnames, basically. 
- */ -static void -concat(char **p1, char **p2) -{ - int ret; - char *path; - - assert(p1 != NULL); - assert(p2 != NULL); - - if (*p1 == NULL) - *p1 = checked_strdup(""); - - if (*p2 == NULL) - *p2 = checked_strdup(""); - - ret = asprintf(&path, "%s/%s", *p1, *p2); - if (ret < 0) - log_err(1, "asprintf"); - - /* - * XXX - */ - //free(*p1); - //free(*p2); - - *p1 = path; - *p2 = NULL; -} - /* * Concatenate two strings, inserting separator between them, unless not needed. - * - * This function is very convenient to use when you do not care about freeing - * memory - which is okay here, because we are a short running process. */ char * -separated_concat(const char *s1, const char *s2, char separator) +concat(const char *s1, char separator, const char *s2) { char *result; int ret; @@ -136,8 +97,14 @@ separated_concat(const char *s1, const char *s2, char separator) assert(s1 != NULL); assert(s2 != NULL); - if (s1[0] == '\0' || s2[0] == '\0' || - s1[strlen(s1) - 1] == separator || s2[0] == separator) { + /* + * If s2 starts with separator - skip it; otherwise concatenating + * "/" and "/foo" would end up returning "//foo". + */ + if (s2[0] == separator) + s2++; + + if (s1[0] == '\0' || s2[0] == '\0' || s1[strlen(s1) - 1] == separator) { ret = asprintf(&result, "%s%s", s1, s2); } else { ret = asprintf(&result, "%s%c%s", s1, separator, s2); @@ -145,7 +112,7 @@ separated_concat(const char *s1, const char *s2, char separator) if (ret < 0) log_err(1, "asprintf"); - //log_debugx("separated_concat: got %s and %s, returning %s", s1, s2, result); + //log_debugx("%s: got %s and %s, returning %s", __func__, s1, s2, result); return (result); } @@ -153,7 +120,7 @@ separated_concat(const char *s1, const char *s2, char separator) void create_directory(const char *path) { - char *component, *copy, *tofree, *partial; + char *component, *copy, *tofree, *partial, *tmp; int error; assert(path[0] == '/'); @@ -163,12 +130,14 @@ create_directory(const char *path) */ copy = tofree = checked_strdup(path + 1); - partial = NULL; + partial = checked_strdup(""); for (;;) { component = strsep(©, "/"); if (component == NULL) break; - concat(&partial, &component); + tmp = concat(partial, '/', component); + free(partial); + partial = tmp; //log_debugx("creating \"%s\"", partial); error = mkdir(partial, 0755); if (error != 0 && errno != EEXIST) { @@ -480,6 +449,18 @@ node_expand_defined(struct node *n) return (cumulated_error); } +static bool +node_is_direct_key(const struct node *n) +{ + + if (n->n_parent != NULL && n->n_parent->n_parent == NULL && + strcmp(n->n_key, "/-") == 0) { + return (true); + } + + return (false); +} + bool node_is_direct_map(const struct node *n) { @@ -491,11 +472,7 @@ node_is_direct_map(const struct node *n) n = n->n_parent; } - assert(n->n_key != NULL); - if (strcmp(n->n_key, "/-") != 0) - return (false); - - return (true); + return (node_is_direct_key(n)); } bool @@ -561,7 +538,6 @@ static char * node_path_x(const struct node *n, char *x) { char *path; - size_t len; if (n->n_parent == NULL) return (x); @@ -570,24 +546,13 @@ node_path_x(const struct node *n, char *x) * Return "/-" for direct maps only if we were asked for path * to the "/-" node itself, not to any of its subnodes. */ - if (n->n_parent->n_parent == NULL && - strcmp(n->n_key, "/-") == 0 && - x[0] != '\0') { + if (node_is_direct_key(n) && x[0] != '\0') return (x); - } assert(n->n_key[0] != '\0'); - path = separated_concat(n->n_key, x, '/'); + path = concat(n->n_key, '/', x); free(x); - /* - * Strip trailing slash. 
- */ - len = strlen(path); - assert(len > 0); - if (path[len - 1] == '/') - path[len - 1] = '\0'; - return (node_path_x(n->n_parent, path)); } @@ -598,8 +563,19 @@ node_path_x(const struct node *n, char *x) char * node_path(const struct node *n) { + char *path; + size_t len; - return (node_path_x(n, checked_strdup(""))); + path = node_path_x(n, checked_strdup("")); + + /* + * Strip trailing slash, unless the whole path is "/". + */ + len = strlen(path); + if (len > 1 && path[len - 1] == '/') + path[len - 1] = '\0'; + + return (path); } static char * @@ -607,9 +583,11 @@ node_options_x(const struct node *n, char *x) { char *options; - options = separated_concat(x, n->n_options, ','); - if (n->n_parent == NULL) - return (options); + if (n == NULL) + return (x); + + options = concat(x, ',', n->n_options); + free(x); return (node_options_x(n->n_parent, options)); } @@ -683,23 +661,25 @@ node_find_x(struct node *node, const char *path) char *tmp; size_t tmplen; - //log_debugx("looking up %s in %s", path, node->n_key); + //log_debugx("looking up %s in %s", path, node_path(node)); - tmp = node_path(node); - tmplen = strlen(tmp); - if (strncmp(tmp, path, tmplen) != 0) { + if (!node_is_direct_key(node)) { + tmp = node_path(node); + tmplen = strlen(tmp); + if (strncmp(tmp, path, tmplen) != 0) { + free(tmp); + return (NULL); + } + if (path[tmplen] != '/' && path[tmplen] != '\0') { + /* + * If we have two map entries like 'foo' and 'foobar', make + * sure the search for 'foobar' won't match 'foo' instead. + */ + free(tmp); + return (NULL); + } free(tmp); - return (NULL); } - if (path[tmplen] != '/' && path[tmplen] != '\0') { - /* - * If we have two map entries like 'foo' and 'foobar', make - * sure the search for 'foobar' won't match 'foo' instead. - */ - free(tmp); - return (NULL); - } - free(tmp); TAILQ_FOREACH(child, &node->n_children, n_next) { found = node_find_x(child, path); @@ -707,6 +687,9 @@ node_find_x(struct node *node, const char *path) return (found); } + if (node->n_parent == NULL || node_is_direct_key(node)) + return (NULL); + return (node); } @@ -715,9 +698,12 @@ node_find(struct node *root, const char *path) { struct node *node; + assert(root->n_parent == NULL); + node = node_find_x(root, path); - if (node == root) - return (NULL); + if (node != NULL) + assert(node != root); + return (node); } diff --git a/usr.sbin/autofs/common.h b/usr.sbin/autofs/common.h index 16a8d7395ae9..686a89d0bb9f 100644 --- a/usr.sbin/autofs/common.h +++ b/usr.sbin/autofs/common.h @@ -70,7 +70,7 @@ void log_warnx(const char *, ...) __printflike(1, 2); void log_debugx(const char *, ...) __printf0like(1, 2); char *checked_strdup(const char *); -char *separated_concat(const char *s1, const char *s2, char separator); +char *concat(const char *s1, char separator, const char *s2); void create_directory(const char *path); struct node *node_new_root(void); diff --git a/usr.sbin/autofs/popen.c b/usr.sbin/autofs/popen.c index 6cd964daa61f..e11488082442 100644 --- a/usr.sbin/autofs/popen.c +++ b/usr.sbin/autofs/popen.c @@ -104,7 +104,7 @@ auto_popen(const char *argv0, ...) 
if (arg == NULL) break; - command = separated_concat(command, arg, ' '); + command = concat(command, ' ', arg); } va_end(ap); diff --git a/usr.sbin/gpioctl/gpioctl.8 b/usr.sbin/gpioctl/gpioctl.8 index a0bf6536f8f3..dc2a5541fb0e 100644 --- a/usr.sbin/gpioctl/gpioctl.8 +++ b/usr.sbin/gpioctl/gpioctl.8 @@ -27,7 +27,7 @@ .\" .\" $FreeBSD$ .\" -.Dd November 7, 2013 +.Dd March 8, 2015 .Dt GPIOCTL 1 .Os .Sh NAME @@ -35,20 +35,25 @@ .Nd GPIO control utility .Sh SYNOPSIS .Nm -.Cm -l .Op Fl f Ar ctldev +.Cm -l .Op Fl v .Nm -.Cm -t .Op Fl f Ar ctldev +.Cm -t .Ar pin .Nm -.Cm -c .Op Fl f Ar ctldev +.Cm -c .Ar pin .Ar flag .Op flag ... .Nm +.Op Fl f Ar ctldev +.Cm -n +.Ar pin +.Ar pin-name +.Nm .Op Cm -f Ar ctldev .Ar pin .Ar [0|1] @@ -60,7 +65,8 @@ utility could be used to manage GPIO pins from userland and list available pins. The options are as follows: .Bl -tag -width ".Fl f Ar ctldev" .It Fl c Ar pin Ar flag Op flag ... -Configure pin by setting provided flags. The following flags are currently defined: +Configure pin by setting provided flags. +The following flags are currently defined: .Bl -tag -offset indent -width ".Cm PULSE" .It Cm IN Input pin @@ -87,6 +93,8 @@ If not specified, defaults to .Pa /dev/gpioc0 .It Fl l list available pins +.It Fl n Ar pin Ar pin-name +set the name used to describe the pin .It Fl t Ar pin toggle value of provided pin number .It Fl v diff --git a/usr.sbin/gpioctl/gpioctl.c b/usr.sbin/gpioctl/gpioctl.c index 37f84fa16b90..38e53e7f2fee 100644 --- a/usr.sbin/gpioctl/gpioctl.c +++ b/usr.sbin/gpioctl/gpioctl.c @@ -68,6 +68,7 @@ usage(void) fprintf(stderr, "\tgpioctl [-f ctldev] -l [-v]\n"); fprintf(stderr, "\tgpioctl [-f ctldev] -t pin\n"); fprintf(stderr, "\tgpioctl [-f ctldev] -c pin flag ...\n"); + fprintf(stderr, "\tgpioctl [-f ctldev] -n pin pin-name\n"); fprintf(stderr, "\tgpioctl [-f ctldev] pin [0|1]\n"); exit(1); } @@ -182,11 +183,11 @@ main(int argc, char **argv) char *ctlfile = NULL; int pinn, pinv, ch; int flags, flag, ok; - int config, toggle, verbose, list; + int config, list, name, toggle, verbose; - config = toggle = verbose = list = pinn = 0; + config = toggle = verbose = list = name = pinn = 0; - while ((ch = getopt(argc, argv, "c:f:lt:v")) != -1) { + while ((ch = getopt(argc, argv, "c:f:ln:t:v")) != -1) { switch (ch) { case 'c': config = 1; @@ -200,6 +201,12 @@ main(int argc, char **argv) case 'l': list = 1; break; + case 'n': + name = 1; + pinn = str2int(optarg, &ok); + if (!ok) + fail("Invalid pin number: %s\n", optarg); + break; case 't': toggle = 1; pinn = str2int(optarg, &ok); @@ -225,6 +232,19 @@ main(int argc, char **argv) exit(1); } + /* Set the pin name. */ + if (name) { + if (argc == 0) { + usage(); + exit(1); + } + if (gpio_pin_set_name(handle, pinn, argv[0]) < 0) { + perror("gpio_pin_set_name"); + exit(1); + } + exit(0); + } + if (list) { dump_pins(handle, verbose); gpio_close(handle);
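
To round out the gpioctl(8) changes above: the new -n flag relies on libgpio's gpio_pin_set_name(), which in turn issues the GPIOSETNAME ioctl added to sys/sys/gpio.h earlier in this patch. A minimal sketch of the same operation from a standalone program follows; gpio_open(3) opening unit 0 is an assumption on my part, since only gpio_pin_set_name() and gpio_close() appear in this patch.

#include <sys/types.h>
#include <err.h>
#include <libgpio.h>

int
main(void)
{
	gpio_handle_t handle;

	/* Open /dev/gpioc0; gpio_open(3) is assumed here, as in gpioctl(8). */
	handle = gpio_open(0);
	if (handle == GPIO_INVALID_HANDLE)
		err(1, "gpio_open");

	/* Label pin 16 "led0" via the new GPIOSETNAME ioctl. */
	if (gpio_pin_set_name(handle, 16, "led0") < 0)
		err(1, "gpio_pin_set_name");

	gpio_close(handle);
	return (0);
}

The equivalent command-line form, per the synopsis added to the man page, would be: gpioctl -f /dev/gpioc0 -n 16 led0.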