
Re-implement LQM, this time according to the rfc.

PR:		11293
MFC after:	4 weeks
Brian Somers 2004-06-30 12:24:56 +00:00
parent 6e7a069c00
commit a57095e7f7
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=131327
10 changed files with 208 additions and 67 deletions

View File

@@ -85,14 +85,14 @@ acf_LayerPull(struct bundle *b, struct link *l, struct mbuf *bp, u_short *proto)
/* We expect the packet not to be compressed */
bp = mbuf_Read(bp, cp, 2);
if (cp[0] != HDLC_ADDR) {
p->hdlc.lqm.SaveInErrors++;
p->hdlc.lqm.ifInErrors++;
p->hdlc.stats.badaddr++;
log_Printf(LogDEBUG, "acf_LayerPull: addr 0x%02x\n", cp[0]);
m_freem(bp);
return NULL;
}
if (cp[1] != HDLC_UI) {
p->hdlc.lqm.SaveInErrors++;
p->hdlc.lqm.ifInErrors++;
p->hdlc.stats.badcommand++;
log_Printf(LogDEBUG, "acf_LayerPull: control 0x%02x\n", cp[1]);
m_freem(bp);

View File

@@ -106,6 +106,7 @@ async_LayerPush(struct bundle *bundle, struct link *l, struct mbuf *bp,
struct physical *p = link2physical(l);
u_char *cp, *sp, *ep;
struct mbuf *wp;
size_t oldcnt;
int cnt;
if (!p || m_length(bp) > HDLCSIZE) {
@@ -113,6 +114,8 @@ async_LayerPush(struct bundle *bundle, struct link *l, struct mbuf *bp,
return NULL;
}
oldcnt = m_length(bp);
cp = p->async.xbuff;
ep = cp + HDLCSIZE - 10;
wp = bp;
@@ -134,6 +137,7 @@ async_LayerPush(struct bundle *bundle, struct link *l, struct mbuf *bp,
m_freem(bp);
bp = m_get(cnt, MB_ASYNCOUT);
memcpy(MBUF_CTOP(bp), p->async.xbuff, cnt);
bp->priv = cnt - oldcnt;
log_DumpBp(LogASYNC, "Write", bp);
return bp;

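The new oldcnt/priv bookkeeping above exists because RFC 1989 forbids counting async escape octets in LQR statistics: async_LayerPush() now stores the encoding overhead (encoded length minus original length) in the mbuf's priv field so the LQR layer can subtract it again later. A minimal sketch of where that overhead comes from, using a hypothetical accm_NeedsEscape() predicate in place of ppp's real ACCM handling:

/* Sketch only: each octet that needs escaping grows the encoded frame by
 * exactly one octet (0x7d followed by the octet XORed with 0x20), so the
 * overhead is just the number of escapable octets in the frame. */
#include <stddef.h>

static int
accm_NeedsEscape(unsigned char c)            /* hypothetical predicate */
{
  return c == 0x7e || c == 0x7d || c < 0x20; /* flag, escape, default ACCM */
}

size_t
async_EscapeOverhead(const unsigned char *frame, size_t len)
{
  size_t extra = 0;

  while (len--)
    if (accm_NeedsEscape(*frame++))
      extra++;

  return extra;     /* the sort of value async_LayerPush() puts in bp->priv */
}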
View File

@@ -318,22 +318,26 @@ hdlc_LayerPull(struct bundle *b, struct link *l, struct mbuf *bp,
log_DumpBp(LogHDLC, "hdlc_LayerPull:", bp);
fcs = hdlc_Fcs(MBUF_CTOP(bp), bp->m_len);
bp = m_pullup(bp);
len = m_length(bp);
fcs = hdlc_Fcs(MBUF_CTOP(bp), len);
log_Printf(LogDEBUG, "%s: hdlc_LayerPull: fcs = %04x (%s)\n",
p->link.name, fcs, (fcs == GOODFCS) ? "good" : "BAD!");
p->hdlc.lqm.ifInOctets += len + 1; /* plus 1 flag octet! */
if (fcs != GOODFCS) {
p->hdlc.lqm.SaveInErrors++;
p->hdlc.lqm.ifInErrors++;
p->hdlc.stats.badfcs++;
m_freem(bp);
return NULL;
}
p->hdlc.lqm.SaveInOctets += bp->m_len + 1;
p->hdlc.lqm.SaveInPackets++;
/* Either done here or by the sync layer */
p->hdlc.lqm.lqr.InGoodOctets += len + 1; /* plus 1 flag octet! */
p->hdlc.lqm.ifInUniPackets++;
len = m_length(bp);
if (len < 4) { /* rfc1662 section 4.3 */
m_freem(bp);
bp = NULL;

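Reading the interleaved old and new lines above: ifInOctets is now bumped for every received frame (plus one flag octet) before the FCS check, while lqr.InGoodOctets and ifInUniPackets only advance for frames that pass it; a bad FCS bumps ifInErrors instead. A condensed sketch of that rule, with field names following the struct hdlc definition in the next file (not a verbatim copy of hdlc.c):

/* Condensed illustration of the inbound RFC 1989 accounting. */
void
lqm_CountInboundFrame(struct hdlc *hdlc, size_t len, int fcs_ok)
{
  hdlc->lqm.ifInOctets += len + 1;         /* every frame, plus 1 flag octet */

  if (!fcs_ok) {
    hdlc->lqm.ifInErrors++;                /* counted, then dropped */
    return;
  }

  hdlc->lqm.lqr.InGoodOctets += len + 1;   /* good frames only */
  hdlc->lqm.ifInUniPackets++;
}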
View File

@@ -73,16 +73,21 @@ struct hdlc {
struct pppTimer timer; /* When to send */
int method; /* bit-mask for LQM_* from lqr.h */
u_int32_t OutPackets; /* Packets sent by me */
u_int32_t OutOctets; /* Octets sent by me */
u_int32_t SaveInPackets; /* Packets received from peer */
u_int32_t SaveInDiscards; /* Discards */
u_int32_t SaveInErrors; /* Errors */
u_int32_t SaveInOctets; /* Octets received from peer */
u_int32_t ifOutUniPackets; /* Packets sent by me */
u_int32_t ifOutOctets; /* Octets sent by me */
u_int32_t ifInUniPackets; /* Packets received from peer */
u_int32_t ifInDiscards; /* Discards */
u_int32_t ifInErrors; /* Errors */
u_int32_t ifInOctets; /* Octets received from peer (unused) */
struct {
u_int32_t InGoodOctets; /* Good octets received from peer */
u_int32_t OutLQRs; /* LQRs sent by me */
u_int32_t SaveInLQRs; /* LQRs received from peer */
u_int32_t InLQRs; /* LQRs received from peer */
struct lqrsavedata Save; /* Our last LQR */
struct lqrsavedata prevSave; /* Our last-but-one LQR (analysis) */
struct lqrdata peer; /* Last LQR from peer */
int peer_timeout; /* peers max lqr timeout */
int resent; /* Resent last packet `resent' times */

View File

@@ -147,6 +147,34 @@ link_QueueBytes(struct link *l)
return bytes;
}
void
link_PendingLowPriorityData(struct link *l, size_t *pkts, size_t *octets)
{
struct mqueue *queue, *highest;
struct mbuf *m;
size_t len;
/*
* This is all rfc1989 stuff... because our LQR packet is going to bypass
* everything that's not in the highest priority queue, we must be able to
* subtract that data from our outgoing packet/octet counts. However, our
* data has already been async-encoded by this point, and the async escape
* octets MUSTn't be a part of the LQR-reported payload :( So, we have
* the async layer record how much it's padded the packet in the mbuf's
* priv field, and when we calculate our outgoing LQR values we subtract
* this value for each packet from the octet count sent.
*/
highest = LINK_HIGHQ(l);
*pkts = *octets = 0;
for (queue = l->Queue; queue < highest; queue++) {
len = queue->len;
*pkts += len;
for (m = queue->top; len--; m = m->m_nextpkt)
*octets += m_length(m) - m->priv;
}
}
struct mbuf *
link_Dequeue(struct link *l)
{
@@ -232,6 +260,7 @@ link_PushPacket(struct link *l, struct mbuf *bp, struct bundle *b, int pri,
if(pri < 0 || pri >= LINK_QUEUES(l))
pri = 0;
bp->priv = 0; /* Adjusted by the async layer ! */
for (layer = l->nlayers; layer && bp; layer--)
if (l->layer[layer - 1]->push != NULL)
bp = (*l->layer[layer - 1]->push)(b, l, bp, pri, &proto);
@@ -360,7 +389,7 @@ Despatch(struct bundle *bundle, struct link *l, struct mbuf *bp, u_short proto)
bp = m_pullup(proto_Prepend(bp, proto, 0, 0));
lcp_SendProtoRej(&l->lcp, MBUF_CTOP(bp), bp->m_len);
if (p) {
p->hdlc.lqm.SaveInDiscards++;
p->hdlc.lqm.ifInDiscards++;
p->hdlc.stats.unknownproto++;
}
m_freem(bp);

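A worked example of the adjustment link_PendingLowPriorityData() makes possible (figures invented for illustration): suppose ifOutUniPackets is 120 and ifOutOctets is 10,450 when an LQR is generated, and two packets totalling 300 pre-escape octets are still waiting in lower-priority queues. Those packets were counted when they were pushed but will hit the wire after the LQR, so lqr_LayerPush() (further down) reports 120 - 2 = 118 packets and 10,450 - 300 - 2 = 10,148 octets, the final 2 being the one flag octet already counted for each pending packet. The m_length(m) - m->priv term in the loop is what keeps async escape octets out of the 300.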
View File

@@ -65,6 +65,7 @@ extern void link_SequenceQueue(struct link *);
extern void link_DeleteQueue(struct link *);
extern size_t link_QueueLen(struct link *);
extern size_t link_QueueBytes(struct link *);
extern void link_PendingLowPriorityData(struct link *, size_t *, size_t *);
extern struct mbuf *link_Dequeue(struct link *);
extern void link_PushPacket(struct link *, struct mbuf *, struct bundle *,

View File

@@ -146,6 +146,14 @@ SendLqrData(struct lcp *lcp)
bp = m_get(sizeof(struct lqrdata) + extra, MB_LQROUT);
bp->m_len -= extra;
bp->m_offset += extra;
/*
* Send on the highest priority queue. We send garbage - the real data
* is written by lqr_LayerPush() where we know how to fill in all the
* fields. Note, lqr_LayerPush() ``knows'' that we're pushing onto the
* highest priority queue, and factors out packet & octet values from
* other queues!
*/
link_PushPacket(lcp->fsm.link, bp, lcp->fsm.bundle,
LINK_QUEUES(lcp->fsm.link) - 1, PROTO_LQR);
}
@@ -202,8 +210,6 @@ lqr_Input(struct bundle *bundle, struct link *l, struct mbuf *bp)
return NULL;
}
p->hdlc.lqm.lqr.SaveInLQRs++;
len = m_length(bp);
if (len != sizeof(struct lqrdata))
log_Printf(LogWARN, "lqr_Input: Got packet size %d, expecting %ld !\n",
@@ -213,7 +219,6 @@ lqr_Input(struct bundle *bundle, struct link *l, struct mbuf *bp)
lcp_SendProtoRej(lcp, MBUF_CTOP(bp), bp->m_len);
} else {
struct lqrdata *lqr;
u_int32_t lastLQR;
bp = m_pullup(bp);
lqr = (struct lqrdata *)MBUF_CTOP(bp);
@@ -222,27 +227,32 @@ lqr_Input(struct bundle *bundle, struct link *l, struct mbuf *bp)
" expecting 0x%08lx\n",
(u_long)ntohl(lqr->MagicNumber), (u_long)lcp->his_magic);
else {
/*
* Remember our PeerInLQRs, then convert byte order and save
*/
lastLQR = p->hdlc.lqm.lqr.peer.PeerInLQRs;
struct lqrdata lastlqr;
memcpy(&lastlqr, &p->hdlc.lqm.lqr.peer, sizeof lastlqr);
lqr_ChangeOrder(lqr, &p->hdlc.lqm.lqr.peer);
lqr_Dump(l->name, "Input", &p->hdlc.lqm.lqr.peer);
/* we have received an LQR from peer */
/* we have received an LQR from our peer */
p->hdlc.lqm.lqr.resent = 0;
/* Snapshot our state when the LQR packet was received */
memcpy(&p->hdlc.lqm.lqr.prevSave, &p->hdlc.lqm.lqr.Save,
sizeof p->hdlc.lqm.lqr.prevSave);
p->hdlc.lqm.lqr.Save.InLQRs = ++p->hdlc.lqm.lqr.InLQRs;
p->hdlc.lqm.lqr.Save.InPackets = p->hdlc.lqm.ifInUniPackets;
p->hdlc.lqm.lqr.Save.InDiscards = p->hdlc.lqm.ifInDiscards;
p->hdlc.lqm.lqr.Save.InErrors = p->hdlc.lqm.ifInErrors;
p->hdlc.lqm.lqr.Save.InOctets = p->hdlc.lqm.lqr.InGoodOctets;
lqr_Analyse(&p->hdlc, &lastlqr, &p->hdlc.lqm.lqr.peer);
/*
* Generate an LQR response if we're not running an LQR timer OR
* two successive LQR's PeerInLQRs are the same OR we're not going to
* send our next one before the peers max timeout.
* two successive LQR's PeerInLQRs are the same.
*/
if (p->hdlc.lqm.timer.load == 0 ||
!(p->hdlc.lqm.method & LQM_LQR) ||
(lastLQR && lastLQR == p->hdlc.lqm.lqr.peer.PeerInLQRs) ||
(p->hdlc.lqm.lqr.peer_timeout &&
p->hdlc.lqm.timer.rest * 100 / SECTICKS >
p->hdlc.lqm.lqr.peer_timeout))
if (p->hdlc.lqm.timer.load == 0 || !(p->hdlc.lqm.method & LQM_LQR) ||
(lastlqr.PeerInLQRs &&
lastlqr.PeerInLQRs == p->hdlc.lqm.lqr.peer.PeerInLQRs))
SendLqrData(lcp);
}
}
@@ -355,12 +365,57 @@ lqr_Dump(const char *link, const char *message, const struct lqrdata *lqr)
}
}
void
lqr_Analyse(const struct hdlc *hdlc, const struct lqrdata *oldlqr,
const struct lqrdata *newlqr)
{
u_int32_t LQRs, transitLQRs, pkts, octets, disc, err;
if (!newlqr->PeerInLQRs) /* No analysis possible yet! */
return;
log_Printf(LogLQM, "Analysis:\n");
LQRs = (newlqr->LastOutLQRs - oldlqr->LastOutLQRs) -
(newlqr->PeerInLQRs - oldlqr->PeerInLQRs);
transitLQRs = hdlc->lqm.lqr.OutLQRs - newlqr->LastOutLQRs;
pkts = (newlqr->LastOutPackets - oldlqr->LastOutPackets) -
(newlqr->PeerInPackets - oldlqr->PeerInPackets);
octets = (newlqr->LastOutOctets - oldlqr->LastOutOctets) -
(newlqr->PeerInOctets - oldlqr->PeerInOctets);
log_Printf(LogLQM, " Outbound lossage: %d LQR%s (%d en route), %d packet%s,"
" %d octet%s\n", (int)LQRs, LQRs == 1 ? "" : "s", (int)transitLQRs,
(int)pkts, pkts == 1 ? "" : "s",
(int)octets, octets == 1 ? "" : "s");
pkts = (newlqr->PeerOutPackets - oldlqr->PeerOutPackets) -
(hdlc->lqm.lqr.Save.InPackets - hdlc->lqm.lqr.prevSave.InPackets);
octets = (newlqr->PeerOutOctets - oldlqr->PeerOutOctets) -
(hdlc->lqm.lqr.Save.InOctets - hdlc->lqm.lqr.prevSave.InOctets);
log_Printf(LogLQM, " Inbound lossage: %d packet%s, %d octet%s\n",
(int)pkts, pkts == 1 ? "" : "s",
(int)octets, octets == 1 ? "" : "s");
disc = newlqr->PeerInDiscards - oldlqr->PeerInDiscards;
err = newlqr->PeerInErrors - oldlqr->PeerInErrors;
if (disc && err)
log_Printf(LogLQM, " Likely due to both peer congestion"
" and physical errors\n");
else if (disc)
log_Printf(LogLQM, " Likely due to peer congestion\n");
else if (err)
log_Printf(LogLQM, " Likely due to physical errors\n");
else if (pkts)
log_Printf(LogLQM, " Likely due to transport "
"congestion\n");
}
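Restating the arithmetic in lqr_Analyse() above as counter deltas between the last two LQRs received from the peer (new and old), with OutLQRs, Save and prevSave being our own records:

  outbound lost LQRs    = (new.LastOutLQRs    - old.LastOutLQRs)
                        - (new.PeerInLQRs     - old.PeerInLQRs)
  LQRs still en route   =  OutLQRs - new.LastOutLQRs
  outbound lost packets = (new.LastOutPackets - old.LastOutPackets)
                        - (new.PeerInPackets  - old.PeerInPackets)
  outbound lost octets  = (new.LastOutOctets  - old.LastOutOctets)
                        - (new.PeerInOctets   - old.PeerInOctets)
  inbound lost packets  = (new.PeerOutPackets - old.PeerOutPackets)
                        - (Save.InPackets     - prevSave.InPackets)
  inbound lost octets   = (new.PeerOutOctets  - old.PeerOutOctets)
                        - (Save.InOctets      - prevSave.InOctets)

In each case the difference between what was reported sent over the interval and what was reported received is the lossage; the PeerInDiscards and PeerInErrors deltas then hint at whether peer congestion or physical errors are the likelier cause.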
static struct mbuf *
lqr_LayerPush(struct bundle *b, struct link *l, struct mbuf *bp,
int pri, u_short *proto)
{
struct physical *p = link2physical(l);
int len;
int len, layer, extra_async_bytes;
if (!p) {
/* Oops - can't happen :-] */
@@ -368,7 +423,10 @@ lqr_LayerPush(struct bundle *b, struct link *l, struct mbuf *bp,
return NULL;
}
/*
bp = m_pullup(bp);
len = m_length(bp);
/*-
* From rfc1989:
*
* All octets which are included in the FCS calculation MUST be counted,
@@ -377,50 +435,72 @@ lqr_LayerPush(struct bundle *b, struct link *l, struct mbuf *bp,
* MUST be counted. All other octets (such as additional flag
* sequences, and escape bits or octets) MUST NOT be counted.
*
* As we're stacked before the HDLC layer (otherwise HDLC wouldn't be
* As we're stacked higher than the HDLC layer (otherwise HDLC wouldn't be
* able to calculate the FCS), we must not forget about these additional
* bytes when we're asynchronous.
*
* We're also expecting to be stacked *before* the proto and acf layers.
* If we were after these, it makes alignment more of a pain, and we
* don't do LQR without these layers.
* We're also expecting to be stacked *before* the likes of the proto and
* acf layers (to avoid alignment issues), so deal with this too.
*/
bp = m_pullup(bp);
len = m_length(bp);
if (!physical_IsSync(p))
p->hdlc.lqm.OutOctets += hdlc_WrapperOctets(&l->lcp, *proto);
p->hdlc.lqm.OutOctets += acf_WrapperOctets(&l->lcp, *proto) +
proto_WrapperOctets(&l->lcp, *proto) + len + 1;
p->hdlc.lqm.OutPackets++;
extra_async_bytes = 0;
p->hdlc.lqm.ifOutUniPackets++;
p->hdlc.lqm.ifOutOctets += len + 1; /* plus 1 flag octet! */
for (layer = 0; layer < l->nlayers; layer++)
switch (l->layer[layer]->type) {
case LAYER_ACF:
p->hdlc.lqm.ifOutOctets += acf_WrapperOctets(&l->lcp, *proto);
break;
case LAYER_ASYNC:
/* Not included - see rfc1989 */
break;
case LAYER_HDLC:
p->hdlc.lqm.ifOutOctets += hdlc_WrapperOctets(&l->lcp, *proto);
break;
case LAYER_LQR:
layer = l->nlayers;
break;
case LAYER_PROTO:
p->hdlc.lqm.ifOutOctets += proto_WrapperOctets(&l->lcp, *proto);
break;
case LAYER_SYNC:
/* Nothing to add on */
break;
default:
log_Printf(LogWARN, "Oops, don't know how to do octets for %s layer\n",
l->layer[layer]->name);
break;
}
if (*proto == PROTO_LQR) {
/* Overwrite the entire packet (created in SendLqrData()) */
struct lqrdata lqr;
size_t pending_pkts, pending_octets;
p->hdlc.lqm.lqr.OutLQRs++;
/*
* We need to compensate for the fact that we're pushing our data
* onto the highest priority queue by factoring out packet & octet
* values from other queues!
*/
link_PendingLowPriorityData(l, &pending_pkts, &pending_octets);
memset(&lqr, '\0', sizeof lqr);
lqr.MagicNumber = p->link.lcp.want_magic;
lqr.LastOutLQRs = p->hdlc.lqm.lqr.peer.PeerOutLQRs;
lqr.LastOutPackets = p->hdlc.lqm.lqr.peer.PeerOutPackets;
lqr.LastOutOctets = p->hdlc.lqm.lqr.peer.PeerOutOctets;
lqr.PeerInLQRs = p->hdlc.lqm.lqr.SaveInLQRs;
lqr.PeerInPackets = p->hdlc.lqm.SaveInPackets;
lqr.PeerInDiscards = p->hdlc.lqm.SaveInDiscards;
lqr.PeerInErrors = p->hdlc.lqm.SaveInErrors;
lqr.PeerInOctets = p->hdlc.lqm.SaveInOctets;
lqr.PeerOutPackets = p->hdlc.lqm.OutPackets;
lqr.PeerOutOctets = p->hdlc.lqm.OutOctets;
if (p->hdlc.lqm.lqr.peer.LastOutLQRs == p->hdlc.lqm.lqr.OutLQRs) {
/*
* only increment if it's the first time or we've got a reply
* from the last one
*/
lqr.PeerOutLQRs = ++p->hdlc.lqm.lqr.OutLQRs;
lqr_Dump(l->name, "Output", &lqr);
} else {
lqr.PeerOutLQRs = p->hdlc.lqm.lqr.OutLQRs;
lqr_Dump(l->name, "Output (again)", &lqr);
}
lqr.PeerInLQRs = p->hdlc.lqm.lqr.Save.InLQRs;
lqr.PeerInPackets = p->hdlc.lqm.lqr.Save.InPackets;
lqr.PeerInDiscards = p->hdlc.lqm.lqr.Save.InDiscards;
lqr.PeerInErrors = p->hdlc.lqm.lqr.Save.InErrors;
lqr.PeerInOctets = p->hdlc.lqm.lqr.Save.InOctets;
lqr.PeerOutLQRs = p->hdlc.lqm.lqr.OutLQRs;
lqr.PeerOutPackets = p->hdlc.lqm.ifOutUniPackets - pending_pkts;
/* Don't forget our ``flag'' octets.... */
lqr.PeerOutOctets = p->hdlc.lqm.ifOutOctets - pending_octets - pending_pkts;
lqr_Dump(l->name, "Output", &lqr);
lqr_ChangeOrder(&lqr, (struct lqrdata *)MBUF_CTOP(bp));
}
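As a worked example of the outbound rule quoted from rfc1989 above (figures invented; assuming neither address-and-control-field nor protocol-field compression and a 16-bit FCS): a 1500-octet payload handed to lqr_LayerPush() adds 1500 + 1 (flag) + 2 (protocol field) + 2 (address/control) + 2 (FCS, via the HDLC wrapper) = 1507 octets to ifOutOctets, regardless of how many escape octets or extra flag sequences the async layer emits afterwards; that is exactly why the LAYER_ASYNC and LAYER_SYNC cases add nothing in the switch above.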
@@ -431,9 +511,11 @@ static struct mbuf *
lqr_LayerPull(struct bundle *b, struct link *l, struct mbuf *bp, u_short *proto)
{
/*
* We mark the packet as ours but don't do anything 'till it's dispatched
* to lqr_Input()
* This is the ``Rx'' process from rfc1989, although a part of it is
* actually performed by sync_LayerPull() & hdlc_LayerPull() so that
* our octet counts are correct.
*/
if (*proto == PROTO_LQR)
m_settype(bp, MB_LQRIN);
return bp;

View File

@@ -46,6 +46,14 @@ struct lqrdata {
u_int32_t PeerOutOctets; /* Peers OutOctets (hdlc.h) */
};
struct lqrsavedata { /* Saved on receipt of an LQR */
u_int32_t InLQRs; /* From ifInLQRs */
u_int32_t InPackets; /* From ifInPackets */
u_int32_t InDiscards; /* From ifInDiscards */
u_int32_t InErrors; /* From ifInErrors */
u_int32_t InOctets; /* From InGoodOctets ! */
};
/*
* We support LQR and ECHO as LQM method
*/
@@ -56,10 +64,13 @@ struct mbuf;
struct physical;
struct lcp;
struct fsm;
struct hdlc;
struct link;
struct bundle;
extern void lqr_Dump(const char *, const char *, const struct lqrdata *);
extern void lqr_Analyse(const struct hdlc *, const struct lqrdata *,
const struct lqrdata *);
extern void lqr_ChangeOrder(struct lqrdata *, struct lqrdata *);
extern void lqr_Start(struct lcp *);
extern void lqr_reStart(struct lcp *);

View File

@@ -35,6 +35,7 @@ struct mbuf {
short m_type; /* MB_* below */
struct mbuf *m_next; /* link to next mbuf */
struct mbuf *m_nextpkt; /* link to next packet */
unsigned long priv; /* private data - holds HDLC escape count */
/* buffer space is malloc()d directly after the header */
};

View File

@@ -54,6 +54,7 @@ sync_LayerPush(struct bundle *bundle, struct link *l, struct mbuf *bp,
{
log_DumpBp(LogSYNC, "Write", bp);
m_settype(bp, MB_SYNCOUT);
bp->priv = 0;
return bp;
}
@@ -62,6 +63,7 @@ sync_LayerPull(struct bundle *b, struct link *l, struct mbuf *bp,
u_short *proto)
{
struct physical *p = link2physical(l);
int len;
if (!p)
log_Printf(LogERROR, "Can't Pull a sync packet from a logical link\n");
@@ -69,8 +71,10 @@ sync_LayerPull(struct bundle *b, struct link *l, struct mbuf *bp,
log_DumpBp(LogSYNC, "Read", bp);
/* Either done here or by the HDLC layer */
p->hdlc.lqm.SaveInOctets += m_length(bp) + 1;
p->hdlc.lqm.SaveInPackets++;
len = m_length(bp);
p->hdlc.lqm.ifInOctets += len + 1; /* plus 1 flag octet! */
p->hdlc.lqm.lqr.InGoodOctets += len + 1; /* plus 1 flag octet! */
p->hdlc.lqm.ifInUniPackets++;
m_settype(bp, MB_SYNCIN);
}