commit 6fbcdeb6f1 (parent: c4ee38f8e8)

Step 2.4: Stop using 'struct mbuf_ext_pgs' in drivers.

Reviewed by:	gallatin, hselasky
Differential Revision:	https://reviews.freebsd.org/D24598

Notes:
	svn2git: 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=360576
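
Every hunk below applies the same mechanical substitution: the cached
'struct mbuf_ext_pgs *ext_pgs' local (and its 'ext_pgs = &m->m_ext_pgs'
assignment) is deleted, and the fields are read directly from the mbuf's
embedded m_ext_pgs member. A schematic before/after sketch of the pattern
follows; the helper names are invented for illustration and the mbuf types
come from sys/mbuf.h, so it is not compilable on its own:

/* Before: drivers cached a pointer to the embedded struct. */
static u_int
example_hdr_len_old(struct mbuf *m)
{
        struct mbuf_ext_pgs *ext_pgs;

        MBUF_EXT_PGS_ASSERT(m);
        ext_pgs = &m->m_ext_pgs;        /* alias this commit removes */
        return (ext_pgs->hdr_len);
}

/* After: one less local, same field access. */
static u_int
example_hdr_len_new(struct mbuf *m)
{
        MBUF_EXT_PGS_ASSERT(m);
        return (m->m_ext_pgs.hdr_len);
}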
@@ -900,12 +900,10 @@ ktls_base_wr_size(struct tlspcb *tlsp)
 static u_int
 ktls_tcp_payload_length(struct tlspcb *tlsp, struct mbuf *m_tls)
 {
-        struct mbuf_ext_pgs *ext_pgs;
         struct tls_record_layer *hdr;
         u_int plen, mlen;
 
         MBUF_EXT_PGS_ASSERT(m_tls);
-        ext_pgs = &m_tls->m_ext_pgs;
         hdr = (void *)m_tls->m_epg_hdr;
         plen = ntohs(hdr->tls_length);
 
@@ -924,8 +922,8 @@ ktls_tcp_payload_length(struct tlspcb *tlsp, struct mbuf *m_tls)
          * trim the length to avoid sending any of the trailer.  There
          * is no way to send a partial trailer currently.
          */
-        if (mlen > TLS_HEADER_LENGTH + plen - ext_pgs->trail_len)
-                mlen = TLS_HEADER_LENGTH + plen - ext_pgs->trail_len;
+        if (mlen > TLS_HEADER_LENGTH + plen - m_tls->m_ext_pgs.trail_len)
+                mlen = TLS_HEADER_LENGTH + plen - m_tls->m_ext_pgs.trail_len;
 
 
         /*
@@ -953,7 +951,6 @@ ktls_tcp_payload_length(struct tlspcb *tlsp, struct mbuf *m_tls)
 static u_int
 ktls_payload_offset(struct tlspcb *tlsp, struct mbuf *m_tls)
 {
-        struct mbuf_ext_pgs *ext_pgs;
         struct tls_record_layer *hdr;
         u_int offset, plen;
 #ifdef INVARIANTS
@@ -961,14 +958,13 @@ ktls_payload_offset(struct tlspcb *tlsp, struct mbuf *m_tls)
 #endif
 
         MBUF_EXT_PGS_ASSERT(m_tls);
-        ext_pgs = &m_tls->m_ext_pgs;
         hdr = (void *)m_tls->m_epg_hdr;
         plen = ntohs(hdr->tls_length);
 #ifdef INVARIANTS
         mlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
         MPASS(mlen < TLS_HEADER_LENGTH + plen);
 #endif
-        if (mtod(m_tls, vm_offset_t) <= ext_pgs->hdr_len)
+        if (mtod(m_tls, vm_offset_t) <= m_tls->m_ext_pgs.hdr_len)
                 return (0);
         if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
                 /*
@@ -979,8 +975,8 @@ ktls_payload_offset(struct tlspcb *tlsp, struct mbuf *m_tls)
                  * the offset at the last byte of the record payload
                  * to send the last cipher block.
                  */
-                offset = min(mtod(m_tls, vm_offset_t) - ext_pgs->hdr_len,
-                    (plen - TLS_HEADER_LENGTH - ext_pgs->trail_len) - 1);
+                offset = min(mtod(m_tls, vm_offset_t) - m_tls->m_ext_pgs.hdr_len,
+                    (plen - TLS_HEADER_LENGTH - m_tls->m_ext_pgs.trail_len) - 1);
                 return (rounddown(offset, AES_BLOCK_LEN));
         }
         return (0);
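
In ktls_payload_offset() above, a partial AES-GCM record must resume on an
AES block boundary, so the driver clamps the requested offset to the last
payload byte and rounds it down to a 16-byte multiple. A self-contained
userland sketch of that arithmetic; the macros mirror their sys/param.h and
driver-header definitions, and the sample lengths are invented:

#include <stdio.h>

#define TLS_HEADER_LENGTH       5       /* 5-byte TLS record header */
#define AES_BLOCK_LEN           16
#define rounddown(x, y) (((x) / (y)) * (y))     /* as in sys/sys/param.h */
#define min(a, b)       ((a) < (b) ? (a) : (b))

int
main(void)
{
        unsigned plen = 1045;           /* hypothetical ntohs(hdr->tls_length) */
        unsigned hdr_len = 5, trail_len = 16;   /* hypothetical record framing */
        unsigned mtod_off = 500;        /* mbuf resumes 500 bytes into the record */

        unsigned offset = min(mtod_off - hdr_len,
            (plen - TLS_HEADER_LENGTH - trail_len) - 1);
        /* min(495, 1023) = 495, which rounds down to block boundary 480. */
        printf("offset %u -> %u\n", offset, rounddown(offset, AES_BLOCK_LEN));
        return (0);
}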
@@ -1003,19 +999,17 @@ static int
 ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
     int *nsegsp)
 {
-        struct mbuf_ext_pgs *ext_pgs;
         struct tls_record_layer *hdr;
         u_int imm_len, offset, plen, wr_len, tlen;
 
         MBUF_EXT_PGS_ASSERT(m_tls);
-        ext_pgs = &m_tls->m_ext_pgs;
 
         /*
          * Determine the size of the TLS record payload to send
          * excluding header and trailer.
          */
         tlen = ktls_tcp_payload_length(tlsp, m_tls);
-        if (tlen <= ext_pgs->hdr_len) {
+        if (tlen <= m_tls->m_ext_pgs.hdr_len) {
                 /*
                  * For requests that only want to send the TLS header,
                  * send a tunnelled packet as immediate data.
@@ -1041,7 +1035,7 @@ ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
         }
 
         hdr = (void *)m_tls->m_epg_hdr;
-        plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - ext_pgs->trail_len;
+        plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_ext_pgs.trail_len;
         if (tlen < plen) {
                 plen = tlen;
                 offset = ktls_payload_offset(tlsp, m_tls);
@@ -1058,14 +1052,14 @@ ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
          */
         imm_len = 0;
         if (offset == 0)
-                imm_len += ext_pgs->hdr_len;
+                imm_len += m_tls->m_ext_pgs.hdr_len;
         if (plen == tlen)
                 imm_len += AES_BLOCK_LEN;
         wr_len += roundup2(imm_len, 16);
 
         /* TLS record payload via DSGL. */
-        *nsegsp = sglist_count_mbuf_epg(m_tls, ext_pgs->hdr_len + offset,
-            plen - (ext_pgs->hdr_len + offset));
+        *nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_ext_pgs.hdr_len + offset,
+            plen - (m_tls->m_ext_pgs.hdr_len + offset));
         wr_len += ktls_sgl_size(*nsegsp);
 
         wr_len = roundup2(wr_len, 16);
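
ktls_wr_len() sizes the work request from two pieces: immediate data (the
TLS header when sending from offset 0, plus an AES block of IV when only
part of the record is sent, which the code detects via plen == tlen after
the clamp above) and a DSGL for the payload, with the immediate portion
padded to 16 bytes by roundup2(). A small standalone check of that sizing
step; roundup2 mirrors sys/param.h, and the 13-byte header is only an
example value (a TLS 1.2 AES-GCM header plus explicit IV):

#include <stdio.h>

#define AES_BLOCK_LEN   16
/* roundup2() as in sys/sys/param.h; y must be a power of 2. */
#define roundup2(x, y)  (((x) + ((y) - 1)) & (~((y) - 1)))

int
main(void)
{
        unsigned imm_len = 0;
        unsigned hdr_len = 13;  /* example TLS 1.2 AES-GCM header + explicit IV */
        unsigned offset = 0;    /* sending from the start of the record */
        int short_record = 1;   /* partial record: an IV rides along as immediate data */

        if (offset == 0)
                imm_len += hdr_len;
        if (short_record)
                imm_len += AES_BLOCK_LEN;
        /* 13 + 16 = 29 bytes of immediate data, padded to 32. */
        printf("imm_len %u -> %u\n", imm_len, roundup2(imm_len, 16));
        return (0);
}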
@@ -1466,7 +1460,6 @@ ktls_write_tunnel_packet(struct sge_txq *txq, void *dst, struct mbuf *m,
         struct ip *ip, newip;
         struct ip6_hdr *ip6, newip6;
         struct tcphdr *tcp, newtcp;
-        struct mbuf_ext_pgs *ext_pgs;
         caddr_t out;
 
         TXQ_LOCK_ASSERT_OWNED(txq);
@@ -1474,7 +1467,6 @@ ktls_write_tunnel_packet(struct sge_txq *txq, void *dst, struct mbuf *m,
 
         /* Locate the template TLS header. */
         MBUF_EXT_PGS_ASSERT(m_tls);
-        ext_pgs = &m_tls->m_ext_pgs;
 
         /* This should always be the last TLS record in a chain. */
         MPASS(m_tls->m_next == NULL);
@@ -1577,7 +1569,6 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
         struct ulptx_idata *idata;
         struct cpl_tx_sec_pdu *sec_pdu;
         struct cpl_tx_data *tx_data;
-        struct mbuf_ext_pgs *ext_pgs;
         struct tls_record_layer *hdr;
         char *iv, *out;
         u_int aad_start, aad_stop;
@@ -1603,20 +1594,19 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
 
         /* Locate the TLS header. */
         MBUF_EXT_PGS_ASSERT(m_tls);
-        ext_pgs = &m_tls->m_ext_pgs;
         hdr = (void *)m_tls->m_epg_hdr;
-        plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - ext_pgs->trail_len;
+        plen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) - m_tls->m_ext_pgs.trail_len;
 
         /* Determine how much of the TLS record to send. */
         tlen = ktls_tcp_payload_length(tlsp, m_tls);
-        if (tlen <= ext_pgs->hdr_len) {
+        if (tlen <= m_tls->m_ext_pgs.hdr_len) {
                 /*
                  * For requests that only want to send the TLS header,
                  * send a tunnelled packet as immediate data.
                  */
 #ifdef VERBOSE_TRACES
                 CTR3(KTR_CXGBE, "%s: tid %d header-only TLS record %u",
-                    __func__, tlsp->tid, (u_int)ext_pgs->seqno);
+                    __func__, tlsp->tid, (u_int)m_tls->m_ext_pgs.seqno);
 #endif
                 return (ktls_write_tunnel_packet(txq, dst, m, m_tls, available,
                     tcp_seqno, pidx));
@@ -1626,7 +1616,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
                 offset = ktls_payload_offset(tlsp, m_tls);
 #ifdef VERBOSE_TRACES
                 CTR4(KTR_CXGBE, "%s: tid %d short TLS record %u with offset %u",
-                    __func__, tlsp->tid, (u_int)ext_pgs->seqno, offset);
+                    __func__, tlsp->tid, (u_int)m_tls->m_ext_pgs.seqno, offset);
 #endif
                 if (m_tls->m_next == NULL && (tcp->th_flags & TH_FIN) != 0) {
                         txq->kern_tls_fin_short++;
@@ -1681,10 +1671,10 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
          */
         tx_max_offset = mtod(m_tls, vm_offset_t);
         if (tx_max_offset > TLS_HEADER_LENGTH + ntohs(hdr->tls_length) -
-            ext_pgs->trail_len) {
+            m_tls->m_ext_pgs.trail_len) {
                 /* Always send the full trailer. */
                 tx_max_offset = TLS_HEADER_LENGTH + ntohs(hdr->tls_length) -
-                    ext_pgs->trail_len;
+                    m_tls->m_ext_pgs.trail_len;
         }
         if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC &&
             tx_max_offset > TLS_HEADER_LENGTH) {
@@ -1799,15 +1789,15 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
 
         /* Recalculate 'nsegs' if cached value is not available. */
         if (nsegs == 0)
-                nsegs = sglist_count_mbuf_epg(m_tls, ext_pgs->hdr_len +
-                    offset, plen - (ext_pgs->hdr_len + offset));
+                nsegs = sglist_count_mbuf_epg(m_tls, m_tls->m_ext_pgs.hdr_len +
+                    offset, plen - (m_tls->m_ext_pgs.hdr_len + offset));
 
         /* Calculate the size of the TLS work request. */
         twr_len = ktls_base_wr_size(tlsp);
 
         imm_len = 0;
         if (offset == 0)
-                imm_len += ext_pgs->hdr_len;
+                imm_len += m_tls->m_ext_pgs.hdr_len;
         if (plen == tlen)
                 imm_len += AES_BLOCK_LEN;
         twr_len += roundup2(imm_len, 16);
@@ -1923,13 +1913,13 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
                 cipher_stop = 0;
 
                 sec_pdu->pldlen = htobe32(16 + plen -
-                    (ext_pgs->hdr_len + offset));
+                    (m_tls->m_ext_pgs.hdr_len + offset));
 
                 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
                 sec_pdu->seqno_numivs = tlsp->scmd0_short.seqno_numivs;
                 sec_pdu->ivgen_hdrlen = htobe32(
                     tlsp->scmd0_short.ivgen_hdrlen |
-                    V_SCMD_HDR_LEN(offset == 0 ? ext_pgs->hdr_len : 0));
+                    V_SCMD_HDR_LEN(offset == 0 ? m_tls->m_ext_pgs.hdr_len : 0));
 
                 txq->kern_tls_short++;
         } else {
@@ -1942,7 +1932,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
                 aad_start = 1;
                 aad_stop = TLS_HEADER_LENGTH;
                 iv_offset = TLS_HEADER_LENGTH + 1;
-                cipher_start = ext_pgs->hdr_len + 1;
+                cipher_start = m_tls->m_ext_pgs.hdr_len + 1;
                 if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
                         cipher_stop = 0;
                         auth_start = cipher_start;
@@ -1981,7 +1971,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
             V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
             V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
 
-        sec_pdu->scmd1 = htobe64(ext_pgs->seqno);
+        sec_pdu->scmd1 = htobe64(m_tls->m_ext_pgs.seqno);
 
         /* Key context */
         out = (void *)(sec_pdu + 1);
@@ -2021,8 +2011,8 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
                 tx_data->rsvd = htobe32(tcp_seqno);
         } else {
                 tx_data->len = htobe32(V_TX_DATA_MSS(mss) |
-                    V_TX_LENGTH(tlen - (ext_pgs->hdr_len + offset)));
-                tx_data->rsvd = htobe32(tcp_seqno + ext_pgs->hdr_len + offset);
+                    V_TX_LENGTH(tlen - (m_tls->m_ext_pgs.hdr_len + offset)));
+                tx_data->rsvd = htobe32(tcp_seqno + m_tls->m_ext_pgs.hdr_len + offset);
         }
         tx_data->flags = htobe32(F_TX_BYPASS);
         if (last_wr && tcp->th_flags & TH_PUSH)
@@ -2031,8 +2021,8 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
         /* Populate the TLS header */
         out = (void *)(tx_data + 1);
         if (offset == 0) {
-                memcpy(out, m_tls->m_epg_hdr, ext_pgs->hdr_len);
-                out += ext_pgs->hdr_len;
+                memcpy(out, m_tls->m_epg_hdr, m_tls->m_ext_pgs.hdr_len);
+                out += m_tls->m_ext_pgs.hdr_len;
         }
 
         /* AES IV for a short record. */
@@ -2067,8 +2057,8 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
 
         /* SGL for record payload */
         sglist_reset(txq->gl);
-        if (sglist_append_mbuf_epg(txq->gl, m_tls, ext_pgs->hdr_len + offset,
-            plen - (ext_pgs->hdr_len + offset)) != 0) {
+        if (sglist_append_mbuf_epg(txq->gl, m_tls, m_tls->m_ext_pgs.hdr_len + offset,
+            plen - (m_tls->m_ext_pgs.hdr_len + offset)) != 0) {
 #ifdef INVARIANTS
                 panic("%s: failed to append sglist", __func__);
 #endif
@@ -2090,7 +2080,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
                 txq->kern_tls_waste += mtod(m_tls, vm_offset_t);
         else
                 txq->kern_tls_waste += mtod(m_tls, vm_offset_t) -
-                    (ext_pgs->hdr_len + offset);
+                    (m_tls->m_ext_pgs.hdr_len + offset);
         }
 
         txsd = &txq->sdesc[pidx];
@@ -1924,19 +1924,17 @@ aiotx_free_job(struct kaiocb *job)
 static void
 aiotx_free_pgs(struct mbuf *m)
 {
-        struct mbuf_ext_pgs *ext_pgs;
         struct kaiocb *job;
         vm_page_t pg;
 
         MBUF_EXT_PGS_ASSERT(m);
-        ext_pgs = &m->m_ext_pgs;
         job = m->m_ext.ext_arg1;
 #ifdef VERBOSE_TRACES
         CTR3(KTR_CXGBE, "%s: completed %d bytes for tid %d", __func__,
             m->m_len, jobtotid(job));
 #endif
 
-        for (int i = 0; i < ext_pgs->npgs; i++) {
+        for (int i = 0; i < m->m_ext_pgs.npgs; i++) {
                 pg = PHYS_TO_VM_PAGE(m->m_epg_pa[i]);
                 vm_page_unwire(pg, PQ_ACTIVE);
         }
@@ -1954,7 +1952,6 @@ alloc_aiotx_mbuf(struct kaiocb *job, int len)
         struct vmspace *vm;
         vm_page_t pgs[MBUF_PEXT_MAX_PGS];
         struct mbuf *m, *top, *last;
-        struct mbuf_ext_pgs *ext_pgs;
         vm_map_t map;
         vm_offset_t start;
         int i, mlen, npages, pgoff;
@@ -1992,16 +1989,15 @@ alloc_aiotx_mbuf(struct kaiocb *job, int len)
                         break;
                 }
 
-                ext_pgs = &m->m_ext_pgs;
-                ext_pgs->first_pg_off = pgoff;
-                ext_pgs->npgs = npages;
+                m->m_ext_pgs.first_pg_off = pgoff;
+                m->m_ext_pgs.npgs = npages;
                 if (npages == 1) {
                         KASSERT(mlen + pgoff <= PAGE_SIZE,
                             ("%s: single page is too large (off %d len %d)",
                             __func__, pgoff, mlen));
-                        ext_pgs->last_pg_len = mlen;
+                        m->m_ext_pgs.last_pg_len = mlen;
                 } else {
-                        ext_pgs->last_pg_len = mlen - (PAGE_SIZE - pgoff) -
+                        m->m_ext_pgs.last_pg_len = mlen - (PAGE_SIZE - pgoff) -
                             (npages - 2) * PAGE_SIZE;
                 }
                 for (i = 0; i < npages; i++)
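
The last_pg_len computation in alloc_aiotx_mbuf() splits an unaligned
buffer across pages: the first page contributes PAGE_SIZE - pgoff bytes,
each interior page a full PAGE_SIZE, and the remainder lands on the last
page (the npages == 1 branch is the degenerate case). A standalone check
of that bookkeeping; 4 KB pages are assumed and the offset and length are
invented:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE       4096    /* assumed page size */

int
main(void)
{
        int pgoff = 1000;       /* hypothetical offset into the first page */
        int mlen = 10000;       /* hypothetical buffer length in bytes */
        /* Pages touched: offset plus length, rounded up to whole pages. */
        int npages = (pgoff + mlen + PAGE_SIZE - 1) / PAGE_SIZE;
        int last_pg_len = mlen - (PAGE_SIZE - pgoff) -
            (npages - 2) * PAGE_SIZE;

        assert(npages == 3);
        /* 10000 - 3096 - 4096 = 2808 bytes on the last page. */
        printf("npages %d, last_pg_len %d\n", npages, last_pg_len);
        return (0);
}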
@@ -680,13 +680,11 @@ sbtls_recover_record(struct mbuf *mb, int wait, uint32_t tcp_old, uint32_t *ptcp
 static int
 mlx5e_sq_tls_populate(struct mbuf *mb, uint64_t *pseq)
 {
-        struct mbuf_ext_pgs *ext_pgs;
 
         for (; mb != NULL; mb = mb->m_next) {
                 if (!(mb->m_flags & M_NOMAP))
                         continue;
-                ext_pgs = &mb->m_ext_pgs;
-                *pseq = ext_pgs->seqno;
+                *pseq = mb->m_ext_pgs.seqno;
                 return (1);
         }
         return (0);