
Minor fixups + add locking.

o change timeout to MPSAFE callout
o make debug printfs conditional on DUMMYNET_DEBUG and runtime controllable
  by net.inet.ip.dummynet.debug
o make boot-time printf dependent on bootverbose

Sponsored by:	FreeBSD Foundation
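In outline, the changes follow the stock callout(9), mutex(9) and sysctl(9) patterns. The fragment below is a condensed sketch of what the diff adds (identifiers match the real code, but it is not the literal patch; includes and the dummynet tick handler itself are omitted):

	static struct callout dn_timeout;
	static struct mtx dummynet_mtx;
	static int dummynet_debug = 0;

	/* debug printfs: compiled in under DUMMYNET_DEBUG, switched at
	 * runtime through the net.inet.ip.dummynet.debug sysctl */
	SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW,
	    &dummynet_debug, 0, "control debugging printfs");
	#define DPRINTF(X)	if (dummynet_debug) printf X

	static void
	ip_dn_init(void)
	{
		/* recursion is needed: a packet dispatched from dummynet via
		 * ip_input() may re-enter through ip_output() */
		mtx_init(&dummynet_mtx, "dummynet", NULL, MTX_DEF | MTX_RECURSE);
		/* MPSAFE periodic tick replaces timeout()/untimeout() */
		callout_init(&dn_timeout, CALLOUT_MPSAFE);
		callout_reset(&dn_timeout, 1, dummynet, NULL);
	}

	static void
	ip_dn_destroy(void)
	{
		callout_stop(&dn_timeout);
		DUMMYNET_LOCK_DESTROY();
	}

The splimp()/splx() pairs in the old code disappear in favor of DUMMYNET_LOCK()/DUMMYNET_UNLOCK() around the same critical sections.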
Sam Leffler 2003-09-17 00:54:04 +00:00
parent e636fe824a
commit 91176902bc
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=120140

@@ -27,8 +27,7 @@
* $FreeBSD$
*/
#define DEB(x)
#define DDB(x) x
#define DUMMYNET_DEBUG
/*
* This module implements IP dummynet, a bandwidth limiter/delay emulator
@@ -123,7 +122,7 @@ static void ready_event(struct dn_flow_queue *q);
static struct dn_pipe *all_pipes = NULL ; /* list of all pipes */
static struct dn_flow_set *all_flow_sets = NULL ;/* list of all flow_sets */
static struct callout_handle dn_timeout;
static struct callout dn_timeout;
#ifdef SYSCTL_NODE
SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet,
@@ -153,6 +152,31 @@ SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
CTLFLAG_RD, &red_max_pkt_size, 0, "RED Max packet size");
#endif
#define DUMMYNET_DEBUG
#ifdef DUMMYNET_DEBUG
int dummynet_debug = 0;
#ifdef SYSCTL_NODE
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW, &dummynet_debug,
0, "control debugging printfs");
#endif
#define DPRINTF(X) if (dummynet_debug) printf X
#else
#define DPRINTF(X)
#endif
static struct mtx dummynet_mtx;
/*
* NB: Recursion is needed to deal with re-entry via ICMP. That is,
* a packet may be dispatched via ip_input from dummynet_io and
* re-enter through ip_output. Yech.
*/
#define DUMMYNET_LOCK_INIT() \
mtx_init(&dummynet_mtx, "dummynet", NULL, MTX_DEF | MTX_RECURSE)
#define DUMMYNET_LOCK_DESTROY() mtx_destroy(&dummynet_mtx)
#define DUMMYNET_LOCK() mtx_lock(&dummynet_mtx)
#define DUMMYNET_UNLOCK() mtx_unlock(&dummynet_mtx)
#define DUMMYNET_LOCK_ASSERT() mtx_assert(&dummynet_mtx, MA_OWNED)
static int config_pipe(struct dn_pipe *p);
static int ip_dn_ctl(struct sockopt *sopt);
@@ -201,14 +225,14 @@ heap_init(struct dn_heap *h, int new_size)
struct dn_heap_entry *p;
if (h->size >= new_size ) {
printf("dummynet: heap_init, Bogus call, have %d want %d\n",
printf("dummynet: %s, Bogus call, have %d want %d\n", __func__,
h->size, new_size);
return 0 ;
}
new_size = (new_size + HEAP_INCREMENT ) & ~HEAP_INCREMENT ;
p = malloc(new_size * sizeof(*p), M_DUMMYNET, M_NOWAIT);
if (p == NULL) {
printf("dummynet: heap_init, resize %d failed\n", new_size );
printf("dummynet: %s, resize %d failed\n", __func__, new_size );
return 1 ; /* error */
}
if (h->size > 0) {
@@ -434,6 +458,7 @@ transmit_event(struct dn_pipe *pipe)
case DN_TO_BDG_FWD :
if (!BDG_LOADED) {
/* somebody unloaded the bridge module. Drop pkt */
/* XXX rate limit */
printf("dummynet: dropping bridged packet trapped in pipe\n");
m_freem(pkt->dn_m);
break;
@@ -525,6 +550,8 @@ ready_event(struct dn_flow_queue *q)
struct dn_pipe *p = q->fs->pipe ;
int p_was_empty ;
DUMMYNET_LOCK_ASSERT();
if (p == NULL) {
printf("dummynet: ready_event- pipe is gone\n");
return ;
@@ -589,14 +616,16 @@ ready_event_wfq(struct dn_pipe *p)
struct dn_heap *sch = &(p->scheduler_heap);
struct dn_heap *neh = &(p->not_eligible_heap) ;
DUMMYNET_LOCK_ASSERT();
if (p->if_name[0] == 0) /* tx clock is simulated */
p->numbytes += ( curr_time - p->sched_time ) * p->bandwidth;
else { /* tx clock is for real, the ifq must be empty or this is a NOP */
if (p->ifp && p->ifp->if_snd.ifq_head != NULL)
return ;
else {
DEB(printf("dummynet: pipe %d ready from %s --\n",
p->pipe_nr, p->if_name);)
DPRINTF(("dummynet: pipe %d ready from %s --\n",
p->pipe_nr, p->if_name));
}
}
@@ -705,7 +734,6 @@ dummynet(void * __unused unused)
{
void *p ; /* generic parameter to handler */
struct dn_heap *h ;
int s ;
struct dn_heap *heaps[3];
int i;
struct dn_pipe *pe ;
@@ -713,14 +741,15 @@ dummynet(void * __unused unused)
heaps[0] = &ready_heap ; /* fixed-rate queues */
heaps[1] = &wfq_ready_heap ; /* wfq queues */
heaps[2] = &extract_heap ; /* delay line */
s = splimp(); /* see note on top, splnet() is not enough */
DUMMYNET_LOCK();
curr_time++ ;
for (i=0; i < 3 ; i++) {
h = heaps[i];
while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time) ) {
DDB(if (h->p[0].key > curr_time)
if (h->p[0].key > curr_time)
printf("dummynet: warning, heap %d is %d ticks late\n",
i, (int)(curr_time - h->p[0].key));)
i, (int)(curr_time - h->p[0].key));
p = h->p[0].object ; /* store a copy before heap_extract */
heap_extract(h, NULL); /* need to extract before processing */
if (i == 0)
@@ -746,8 +775,9 @@ dummynet(void * __unused unused)
q->S = q->F + 1 ; /* mark timestamp as invalid */
pe->sum -= q->fs->weight ;
}
splx(s);
dn_timeout = timeout(dummynet, NULL, 1);
DUMMYNET_UNLOCK();
callout_reset(&dn_timeout, 1, dummynet, NULL);
}
/*
@@ -758,6 +788,7 @@ if_tx_rdy(struct ifnet *ifp)
{
struct dn_pipe *p;
DUMMYNET_LOCK();
for (p = all_pipes; p ; p = p->next )
if (p->ifp == ifp)
break ;
@@ -767,16 +798,18 @@ if_tx_rdy(struct ifnet *ifp)
for (p = all_pipes; p ; p = p->next )
if (!strcmp(p->if_name, buf) ) {
p->ifp = ifp ;
DEB(printf("dummynet: ++ tx rdy from %s (now found)\n", buf);)
DPRINTF(("dummynet: ++ tx rdy from %s (now found)\n", buf));
break ;
}
}
if (p != NULL) {
DEB(printf("dummynet: ++ tx rdy from %s%d - qlen %d\n", ifp->if_name,
ifp->if_unit, ifp->if_snd.ifq_len);)
DPRINTF(("dummynet: ++ tx rdy from %s%d - qlen %d\n", ifp->if_name,
ifp->if_unit, ifp->if_snd.ifq_len));
p->numbytes = 0 ; /* mark ready for I/O */
ready_event_wfq(p);
}
DUMMYNET_UNLOCK();
return 0;
}
@@ -937,7 +970,7 @@ red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
/* queue in bytes or packets ? */
u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;
DEB(printf("\ndummynet: %d q: %2u ", (int) curr_time, q_size);)
DPRINTF(("\ndummynet: %d q: %2u ", (int) curr_time, q_size));
/* average queue size estimation */
if (q_size != 0) {
@@ -963,7 +996,7 @@ red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
}
}
DEB(printf("dummynet: avg: %u ", SCALE_VAL(q->avg));)
DPRINTF(("dummynet: avg: %u ", SCALE_VAL(q->avg)));
/* should i drop ? */
@@ -982,7 +1015,7 @@ red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
p_b = SCALE_MUL((int64_t) fs->c_3, (int64_t) q->avg) - fs->c_4;
} else {
q->count = -1;
DEB(printf("dummynet: - drop"););
DPRINTF(("dummynet: - drop"));
return 1 ;
}
} else if (q->avg > fs->min_th) {
@@ -1004,7 +1037,7 @@ red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
*/
if (SCALE_MUL(p_b, SCALE((int64_t) q->count)) > q->random) {
q->count = 0;
DEB(printf("dummynet: - red drop");)
DPRINTF(("dummynet: - red drop"));
/* after a drop we calculate a new random value */
q->random = random() & 0xffff;
return 1; /* drop */
@@ -1086,7 +1119,6 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
struct dn_pipe *pipe ;
u_int64_t len = m->m_pkthdr.len ;
struct dn_flow_queue *q = NULL ;
int s = splimp();
int is_pipe;
#if IPFW2
ipfw_insn *cmd = fwa->rule->cmd + fwa->rule->act_ofs;
@@ -1100,6 +1132,7 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
pipe_nr &= 0xffff ;
DUMMYNET_LOCK();
/*
* This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule.
*/
@@ -1240,21 +1273,21 @@ dummynet_io(struct mbuf *m, int pipe_nr, int dir, struct ip_fw_args *fwa)
if (pipe->numbytes >= 0) { /* pipe is idle */
if (pipe->scheduler_heap.elements != 1)
printf("dummynet: OUCH! pipe should have been idle!\n");
DEB(printf("dummynet: waking up pipe %d at %d\n",
pipe->pipe_nr, (int)(q->F >> MY_M)); )
DPRINTF(("dummynet: waking up pipe %d at %d\n",
pipe->pipe_nr, (int)(q->F >> MY_M)));
pipe->sched_time = curr_time ;
ready_event_wfq(pipe);
}
}
}
done:
splx(s);
DUMMYNET_UNLOCK();
return 0;
dropit:
splx(s);
if (q)
q->drops++ ;
DUMMYNET_UNLOCK();
m_freem(m);
return ( (fs && (fs->flags_fs & DN_NOERROR)) ? 0 : ENOBUFS);
}
@@ -1283,6 +1316,8 @@ purge_flow_set(struct dn_flow_set *fs, int all)
struct dn_flow_queue *q, *qn ;
int i ;
DUMMYNET_LOCK_ASSERT();
for (i = 0 ; i <= fs->rq_size ; i++ ) {
for (q = fs->rq[i] ; q ; q = qn ) {
for (pkt = q->head ; pkt ; )
@@ -1334,10 +1369,8 @@ dummynet_flush()
{
struct dn_pipe *curr_p, *p ;
struct dn_flow_set *fs, *curr_fs;
int s ;
s = splimp() ;
DUMMYNET_LOCK();
/* remove all references to pipes ...*/
flush_pipe_ptrs(NULL);
/* prevent future matches... */
@@ -1349,7 +1382,8 @@ dummynet_flush()
heap_free(&ready_heap);
heap_free(&wfq_ready_heap);
heap_free(&extract_heap);
splx(s) ;
DUMMYNET_UNLOCK();
/*
* Now purge all queued pkts and delete all pipes
*/
@@ -1393,6 +1427,7 @@ dn_rule_delete(void *r)
struct dn_pkt *pkt ;
struct dn_flow_set *fs ;
DUMMYNET_LOCK();
/*
* If the rule references a queue (dn_flow_set), then scan
* the flow set, otherwise scan pipes. Should do either, but doing
@@ -1407,6 +1442,7 @@ dn_rule_delete(void *r)
if (pkt->rule == r)
pkt->rule = ip_fw_default_rule ;
}
DUMMYNET_UNLOCK();
}
/*
@@ -1516,7 +1552,7 @@ set_fs_parms(struct dn_flow_set *x, struct dn_flow_set *src)
static int
config_pipe(struct dn_pipe *p)
{
int i, r, s;
int i, r;
struct dn_flow_set *pfs = &(p->fs);
struct dn_flow_queue *q;
@@ -1534,6 +1570,8 @@ config_pipe(struct dn_pipe *p)
return EINVAL ;
if (p->pipe_nr != 0) { /* this is a pipe */
struct dn_pipe *x, *a, *b;
DUMMYNET_LOCK();
/* locate pipe */
for (a = NULL , b = all_pipes ; b && b->pipe_nr < p->pipe_nr ;
a = b , b = b->next) ;
@@ -1552,15 +1590,12 @@ config_pipe(struct dn_pipe *p)
x->idle_heap.offset=OFFSET_OF(struct dn_flow_queue, heap_pos);
} else {
x = b;
s = splimp();
/* Flush accumulated credit for all queues */
for (i = 0; i <= x->fs.rq_size; i++)
for (q = x->fs.rq[i]; q; q = q->next)
q->numbytes = 0;
splx(s);
}
s = splimp();
x->bandwidth = p->bandwidth ;
x->numbytes = 0; /* just in case... */
bcopy(p->if_name, x->if_name, sizeof(p->if_name) );
@@ -1572,8 +1607,8 @@ config_pipe(struct dn_pipe *p)
if ( x->fs.rq == NULL ) { /* a new pipe */
r = alloc_hash(&(x->fs), pfs) ;
if (r) {
DUMMYNET_UNLOCK();
free(x, M_DUMMYNET);
splx(s);
return r ;
}
x->next = b ;
@@ -1582,10 +1617,11 @@ config_pipe(struct dn_pipe *p)
else
a->next = x ;
}
splx(s);
DUMMYNET_UNLOCK();
} else { /* config queue */
struct dn_flow_set *x, *a, *b ;
DUMMYNET_LOCK();
/* locate flow_set */
for (a=NULL, b=all_flow_sets ; b && b->fs_nr < pfs->fs_nr ;
a = b , b = b->next) ;
@@ -1595,6 +1631,7 @@ config_pipe(struct dn_pipe *p)
return EINVAL ;
x = malloc(sizeof(struct dn_flow_set), M_DUMMYNET, M_NOWAIT|M_ZERO);
if (x == NULL) {
DUMMYNET_UNLOCK();
printf("dummynet: no memory for new flow_set\n");
return ENOSPC;
}
@@ -1611,14 +1648,13 @@ config_pipe(struct dn_pipe *p)
return EINVAL ;
x = b;
}
s = splimp();
set_fs_parms(x, pfs);
if ( x->rq == NULL ) { /* a new flow_set */
r = alloc_hash(x, pfs) ;
if (r) {
DUMMYNET_UNLOCK();
free(x, M_DUMMYNET);
splx(s);
return r ;
}
x->next = b;
@@ -1627,7 +1663,7 @@ config_pipe(struct dn_pipe *p)
else
a->next = x;
}
splx(s);
DUMMYNET_UNLOCK();
}
return 0 ;
}
@@ -1680,6 +1716,8 @@ dummynet_drain()
struct dn_pipe *p;
struct dn_pkt *pkt;
DUMMYNET_LOCK_ASSERT();
heap_free(&ready_heap);
heap_free(&wfq_ready_heap);
heap_free(&extract_heap);
@@ -1701,8 +1739,6 @@ dummynet_drain()
static int
delete_pipe(struct dn_pipe *p)
{
int s ;
if (p->pipe_nr == 0 && p->fs.fs_nr == 0)
return EINVAL ;
if (p->pipe_nr != 0 && p->fs.fs_nr != 0)
@@ -1711,13 +1747,14 @@ delete_pipe(struct dn_pipe *p)
struct dn_pipe *a, *b;
struct dn_flow_set *fs;
DUMMYNET_LOCK();
/* locate pipe */
for (a = NULL , b = all_pipes ; b && b->pipe_nr < p->pipe_nr ;
a = b , b = b->next) ;
if (b == NULL || (b->pipe_nr != p->pipe_nr) )
if (b == NULL || (b->pipe_nr != p->pipe_nr) ) {
DUMMYNET_UNLOCK();
return EINVAL ; /* not found */
s = splimp() ;
}
/* unlink from list of pipes */
if (a == NULL)
@@ -1740,18 +1777,21 @@ delete_pipe(struct dn_pipe *p)
/* remove reference to here from extract_heap and wfq_ready_heap */
pipe_remove_from_heap(&extract_heap, b);
pipe_remove_from_heap(&wfq_ready_heap, b);
splx(s);
DUMMYNET_UNLOCK();
free(b, M_DUMMYNET);
} else { /* this is a WF2Q queue (dn_flow_set) */
struct dn_flow_set *a, *b;
DUMMYNET_LOCK();
/* locate set */
for (a = NULL, b = all_flow_sets ; b && b->fs_nr < p->fs.fs_nr ;
a = b , b = b->next) ;
if (b == NULL || (b->fs_nr != p->fs.fs_nr) )
if (b == NULL || (b->fs_nr != p->fs.fs_nr) ) {
DUMMYNET_UNLOCK();
return EINVAL ; /* not found */
}
s = splimp() ;
if (a == NULL)
all_flow_sets = b->next ;
else
@@ -1769,7 +1809,7 @@ delete_pipe(struct dn_pipe *p)
#endif
}
purge_flow_set(b, 1);
splx(s);
DUMMYNET_UNLOCK();
}
return 0 ;
}
@@ -1783,6 +1823,8 @@ dn_copy_set(struct dn_flow_set *set, char *bp)
int i, copied = 0 ;
struct dn_flow_queue *q, *qp = (struct dn_flow_queue *)bp;
DUMMYNET_LOCK_ASSERT();
for (i = 0 ; i <= set->rq_size ; i++)
for (q = set->rq[i] ; q ; q = q->next, qp++ ) {
if (q->hash_slot != i)
@@ -1811,9 +1853,10 @@ dummynet_get(struct sockopt *sopt)
size_t size ;
struct dn_flow_set *set ;
struct dn_pipe *p ;
int s, error=0 ;
int error=0 ;
s = splimp();
/* XXX lock held too long */
DUMMYNET_LOCK();
/*
* compute size of data structures: list of pipes and flow_sets.
*/
@@ -1825,7 +1868,7 @@ dummynet_get(struct sockopt *sopt)
set->rq_elements * sizeof(struct dn_flow_queue);
buf = malloc(size, M_TEMP, M_NOWAIT);
if (buf == 0) {
splx(s);
DUMMYNET_UNLOCK();
return ENOBUFS ;
}
for (p = all_pipes, bp = buf ; p ; p = p->next ) {
@@ -1864,7 +1907,8 @@ dummynet_get(struct sockopt *sopt)
bp += sizeof( *set ) ;
bp = dn_copy_set( set, bp );
}
splx(s);
DUMMYNET_UNLOCK();
error = sooptcopyout(sopt, buf, size);
free(buf, M_TEMP);
return error ;
@@ -1927,7 +1971,11 @@ ip_dn_ctl(struct sockopt *sopt)
static void
ip_dn_init(void)
{
printf("DUMMYNET initialized (011031)\n");
if (bootverbose)
printf("DUMMYNET initialized (011031)\n");
DUMMYNET_LOCK_INIT();
all_pipes = NULL ;
all_flow_sets = NULL ;
ready_heap.size = ready_heap.elements = 0 ;
@@ -1938,27 +1986,40 @@ ip_dn_init(void)
extract_heap.size = extract_heap.elements = 0 ;
extract_heap.offset = 0 ;
ip_dn_ctl_ptr = ip_dn_ctl;
ip_dn_io_ptr = dummynet_io;
ip_dn_ruledel_ptr = dn_rule_delete;
bzero(&dn_timeout, sizeof(struct callout_handle));
dn_timeout = timeout(dummynet, NULL, 1);
callout_init(&dn_timeout, CALLOUT_MPSAFE);
callout_reset(&dn_timeout, 1, dummynet, NULL);
}
#ifdef KLD_MODULE
static void
ip_dn_destroy(void)
{
ip_dn_ctl_ptr = NULL;
ip_dn_io_ptr = NULL;
ip_dn_ruledel_ptr = NULL;
callout_stop(&dn_timeout);
dummynet_flush();
DUMMYNET_LOCK_DESTROY();
}
#endif /* KLD_MODULE */
static int
dummynet_modevent(module_t mod, int type, void *data)
{
int s;
switch (type) {
case MOD_LOAD:
s = splimp();
if (DUMMYNET_LOADED) {
splx(s);
printf("DUMMYNET already loaded\n");
return EEXIST ;
}
ip_dn_init();
splx(s);
break;
case MOD_UNLOAD:
@@ -1966,13 +2027,7 @@ dummynet_modevent(module_t mod, int type, void *data)
printf("dummynet statically compiled, cannot unload\n");
return EINVAL ;
#else
s = splimp();
untimeout(dummynet, NULL, dn_timeout);
dummynet_flush();
ip_dn_ctl_ptr = NULL;
ip_dn_io_ptr = NULL;
ip_dn_ruledel_ptr = NULL;
splx(s);
ip_dn_destroy();
#endif
break ;
default: