
Ensure that ERROR chunks are always padded by implementing this
in the routine which queues an ERROR chunk, instead of relying
on the callers to do so. Since one caller missed this, this actually
fixes a bug.

MFC after:	1 week
Michael Tuexen 2015-09-11 13:54:33 +00:00
parent 3236151ea8
commit e629b9fc56
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=287669
4 changed files with 38 additions and 26 deletions
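
The fix centralizes the RFC 4960 rule that every chunk must be padded out
to a multiple of 4 bytes. Below is a minimal userland sketch of the pattern
the patch adopts: walk the buffer chain once to get the total length and the
last buffer, then pad in place at the single choke point. struct buf and
queue_op_err() are illustrative stand-ins for the kernel's mbuf chains and
sctp_queue_op_err(), not FreeBSD code.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct buf {
	struct buf *next;
	size_t len;
	uint8_t data[128];	/* assume room for up to 3 pad bytes */
};

static int
queue_op_err(struct buf *chain)
{
	struct buf *b, *last = NULL;
	uint32_t chunk_length = 0;
	uint16_t padding_length;

	/* One pass: total length and last buffer, as in the real patch. */
	for (b = chain; b != NULL; b = b->next) {
		chunk_length += b->len;
		if (b->next == NULL)
			last = b;
	}
	padding_length = chunk_length % 4;
	if (padding_length != 0)
		padding_length = 4 - padding_length;
	if (padding_length != 0) {
		if (last == NULL ||
		    last->len + padding_length > sizeof(last->data))
			return (-1);	/* caller frees the chain */
		memset(last->data + last->len, 0, padding_length);
		last->len += padding_length;
	}
	/* ... enqueue the now-padded chunk ... */
	return (0);
}

Doing the padding once in the queueing routine means a caller cannot forget
it, which is exactly the bug the commit message mentions.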

View File

@@ -66,6 +66,8 @@ __FBSDID("$FreeBSD$");
  */
 #define SCTP_LARGEST_INIT_ACCEPTED (65535 - 2048)
+/* Largest length of a chunk */
+#define SCTP_MAX_CHUNK_LENGTH 0xffff
 /* Number of addresses where we just skip the counting */
 #define SCTP_COUNT_LIMIT 40
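
The new SCTP_MAX_CHUNK_LENGTH bound of 0xffff follows from the wire format:
the length field of the common chunk header is only 16 bits wide. For
reference, the RFC 4960 (Section 3.2) layout; FreeBSD defines an equivalent
struct sctp_chunkhdr, and the field names match the hdr->chunk_* assignments
in the last hunk below.

#include <stdint.h>

/* Common header carried by every SCTP chunk (RFC 4960, Section 3.2). */
struct sctp_chunkhdr {
	uint8_t  chunk_type;
	uint8_t  chunk_flags;
	uint16_t chunk_length;	/* 16 bits, so at most 0xffff bytes */
};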

View File

@@ -2513,11 +2513,7 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
 	SCTP_BUF_LEN(merr) = sizeof(*phd);
 	SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
 	if (SCTP_BUF_NEXT(merr)) {
-		if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL) == NULL) {
-			sctp_m_freem(merr);
-		} else {
-			sctp_queue_op_err(stcb, merr);
-		}
+		sctp_queue_op_err(stcb, merr);
 	} else {
 		sctp_m_freem(merr);
 	}

View File

@@ -5602,16 +5602,12 @@ __attribute__((noinline))
 	SCTP_BUF_LEN(mm) = sizeof(*phd);
 	SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, len, M_NOWAIT);
 	if (SCTP_BUF_NEXT(mm)) {
-		if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(mm), SCTP_SIZE32(len) - len, NULL) == NULL) {
-			sctp_m_freem(mm);
-		} else {
 #ifdef SCTP_MBUF_LOGGING
-			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
-				sctp_log_mbc(SCTP_BUF_NEXT(mm), SCTP_MBUF_ICOPY);
-			}
-#endif
-			sctp_queue_op_err(stcb, mm);
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+			sctp_log_mbc(SCTP_BUF_NEXT(mm), SCTP_MBUF_ICOPY);
 		}
+#endif
+		sctp_queue_op_err(stcb, mm);
 	} else {
 		sctp_m_freem(mm);
 	}

View File

@@ -8850,9 +8850,37 @@ sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
 	 */
 	struct sctp_chunkhdr *hdr;
 	struct sctp_tmit_chunk *chk;
-	struct mbuf *mat;
+	struct mbuf *mat, *last_mbuf;
+	uint32_t chunk_length;
+	uint16_t padding_length;
 
 	SCTP_TCB_LOCK_ASSERT(stcb);
+	SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
+	if (op_err == NULL) {
+		return;
+	}
+	last_mbuf = NULL;
+	chunk_length = 0;
+	for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
+		chunk_length += SCTP_BUF_LEN(mat);
+		if (SCTP_BUF_NEXT(mat) == NULL) {
+			last_mbuf = mat;
+		}
+	}
+	if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
+		sctp_m_freem(op_err);
+		return;
+	}
+	padding_length = chunk_length % 4;
+	if (padding_length != 0) {
+		padding_length = 4 - padding_length;
+	}
+	if (padding_length != 0) {
+		if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
+			sctp_m_freem(op_err);
+			return;
+		}
+	}
 	sctp_alloc_a_chunk(stcb, chk);
 	if (chk == NULL) {
 		/* no memory */
@@ -8860,15 +8888,7 @@ sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
 		return;
 	}
 	chk->copy_by_ref = 0;
-	SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
-	if (op_err == NULL) {
-		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
-		return;
-	}
-	chk->send_size = 0;
-	for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
-		chk->send_size += SCTP_BUF_LEN(mat);
-	}
+	chk->send_size = (uint16_t) chunk_length;
 	chk->sent = SCTP_DATAGRAM_UNSENT;
 	chk->snd_count = 0;
 	chk->asoc = &stcb->asoc;
@@ -8878,9 +8898,7 @@ sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
 	hdr->chunk_type = SCTP_OPERATION_ERROR;
 	hdr->chunk_flags = 0;
 	hdr->chunk_length = htons(chk->send_size);
-	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
-	    chk,
-	    sctp_next);
+	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
 	chk->asoc->ctrl_queue_cnt++;
 }
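
As a quick sanity check of the padding arithmetic added above, here is a
small standalone program; chunk_padding() is a hypothetical helper that
mirrors the computation now done in sctp_queue_op_err().

#include <assert.h>
#include <stdint.h>

/* Mirrors the padding computation added to sctp_queue_op_err(). */
static uint16_t
chunk_padding(uint32_t chunk_length)
{
	uint16_t padding_length;

	padding_length = chunk_length % 4;
	if (padding_length != 0) {
		padding_length = 4 - padding_length;
	}
	return (padding_length);
}

int
main(void)
{
	assert(chunk_padding(4) == 0);	/* already 4-byte aligned */
	assert(chunk_padding(5) == 3);
	assert(chunk_padding(6) == 2);
	assert(chunk_padding(7) == 1);
	return (0);
}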