freebsd/sys/kern/uipc_mbuf2.c

/* $KAME: uipc_mbuf2.c,v 1.31 2001/11/28 11:08:53 itojun Exp $ */
/* $NetBSD: uipc_mbuf.c,v 1.40 1999/04/01 00:23:25 thorpej Exp $ */
/*-
* Copyright (C) 1999 WIDE Project.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the project nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*-
* Copyright (c) 1982, 1986, 1988, 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)uipc_mbuf.c 8.4 (Berkeley) 2/14/95
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*#define PULLDOWN_DEBUG*/

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>

#include <security/mac/mac_framework.h>

static MALLOC_DEFINE(M_PACKET_TAGS, MBUF_TAG_MEM_NAME,
    "packet-attached information");

/* can't call it m_dup(), as freebsd[34] uses m_dup() with different arg */
static struct mbuf *m_dup1(struct mbuf *, int, int, int);
/*
* ensure that [off, off + len) is contiguous on the mbuf chain "m".
* the packet chain before "off" is kept untouched.
* if offp == NULL, the target will start at <retval, 0> on the resulting chain.
* if offp != NULL, the target will start at <retval, *offp> on the resulting chain.
*
* on error return (NULL return value), the original "m" will be freed.
*
* XXX: M_TRAILINGSPACE/M_LEADINGSPACE are only permitted on a writable ext_buf.
*/
struct mbuf *
m_pulldown(struct mbuf *m, int off, int len, int *offp)
{
struct mbuf *n, *o;
int hlen, tlen, olen;
int writable;

/* check for invalid arguments. */
if (m == NULL)
panic("m == NULL in m_pulldown()");
if (len > MCLBYTES) {
m_freem(m);
return NULL; /* impossible */
}
#ifdef PULLDOWN_DEBUG
{
struct mbuf *t;
printf("before:");
for (t = m; t; t = t->m_next)
printf(" %d", t->m_len);
printf("\n");
}
#endif
n = m;
while (n != NULL && off > 0) {
if (n->m_len > off)
break;
off -= n->m_len;
n = n->m_next;
}
/* be sure to point at a non-empty mbuf */
while (n != NULL && n->m_len == 0)
n = n->m_next;
if (!n) {
m_freem(m);
return NULL; /* mbuf chain too short */
}
/*
* XXX: This code is flawed because it considers a "writable" mbuf
* data region to require all of the following:
* (i) mbuf _has_ to have M_EXT set; if it is just a regular
* mbuf, it is still not considered "writable."
* (ii) since mbuf has M_EXT, the ext_type _has_ to be
* EXT_CLUSTER. Anything else makes it non-writable.
* (iii) M_WRITABLE() must evaluate true.
* Ideally, the requirement should only be (iii).
*
* If we're writable, we're sure we're writable, because the ref. count
* cannot increase from 1, as that would require possession of mbuf
* n by someone else (which is impossible). However, if we're _not_
* writable, we may eventually become writable (if the ref. count drops
* to 1), but we'll fail to notice it unless we re-evaluate
* M_WRITABLE(). For now, we only evaluate once at the beginning and
* live with this.
*/
/*
* XXX: This is dumb. If we have external storage of any type other
* than EXT_CLUSTER, then we're never "writable," according to this
* code, no matter what M_WRITABLE() would say.
*/
writable = 0;
if ((n->m_flags & M_EXT) == 0 ||
(n->m_ext.ext_type == EXT_CLUSTER && M_WRITABLE(n)))
writable = 1;
/*
* the target data is on <n, off>.
* if we have enough data in the mbuf "n", we're done.
*/
if ((off == 0 || offp) && len <= n->m_len - off && writable)
goto ok;
/*
* when len <= n->m_len - off and off != 0, it is a special case.
* the len bytes from <n, off> sit in a single mbuf, but the caller does
* not like the starting position (off).
* chop the current mbuf into two pieces and set off to 0.
*/
if (len <= n->m_len - off) {
o = m_dup1(n, off, n->m_len - off, M_DONTWAIT);
if (o == NULL) {
m_freem(m);
return NULL; /* ENOBUFS */
}
n->m_len = off;
o->m_next = n->m_next;
n->m_next = o;
n = n->m_next;
off = 0;
goto ok;
}
/*
* we need to take hlen from <n, off> and tlen from <n->m_next, 0>,
* and construct a contiguous mbuf with m_len == len.
* note that hlen + tlen == len, and tlen > 0.
*/
hlen = n->m_len - off;
tlen = len - hlen;
/*
* ensure that we have enough trailing data on the mbuf chain.
* if not, we can do nothing about the chain.
*/
olen = 0;
for (o = n->m_next; o != NULL; o = o->m_next)
olen += o->m_len;
if (hlen + olen < len) {
m_freem(m);
return NULL; /* mbuf chain too short */
}
/*
* easy cases first.
* we need to use m_copydata() to get data from <n->m_next, 0>.
*/
if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen
&& writable) {
m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
n->m_len += tlen;
m_adj(n->m_next, tlen);
goto ok;
}
if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen
&& writable) {
n->m_next->m_data -= hlen;
n->m_next->m_len += hlen;
bcopy(mtod(n, caddr_t) + off, mtod(n->m_next, caddr_t), hlen);
n->m_len -= hlen;
n = n->m_next;
off = 0;
goto ok;
}
/*
* now we need to do it the hard way. don't use m_copy as there's no
* room on either end.
*/
if (len > MLEN)
o = m_getcl(M_DONTWAIT, m->m_type, 0);
else
o = m_get(M_DONTWAIT, m->m_type);
if (!o) {
m_freem(m);
return NULL; /* ENOBUFS */
}
/* get hlen from <n, off> into <o, 0> */
o->m_len = hlen;
bcopy(mtod(n, caddr_t) + off, mtod(o, caddr_t), hlen);
n->m_len -= hlen;
/* get tlen from <n->m_next, 0> into <o, hlen> */
m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len);
o->m_len += tlen;
m_adj(n->m_next, tlen);
o->m_next = n->m_next;
n->m_next = o;
n = o;
off = 0;
ok:
#ifdef PULLDOWN_DEBUG
{
struct mbuf *t;
printf("after:");
for (t = m; t; t = t->m_next)
printf("%c%d", t == n ? '*' : ' ', t->m_len);
printf(" (off=%d)\n", off);
}
#endif
if (offp)
*offp = off;
return n;
}
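
/*
 * Usage sketch (illustrative only, not part of this file): a protocol
 * input routine typically uses m_pulldown() to obtain a contiguous view
 * of a header that may be split across mbufs.  The structure and
 * function names below are hypothetical.
 */
#ifdef PULLDOWN_EXAMPLE
struct example_hdr {
	u_int8_t	eh_type;
	u_int8_t	eh_len;
	u_int16_t	eh_data;
};

static struct example_hdr *
example_pullup(struct mbuf **mp, int off)
{
	int newoff;

	/* make sizeof(struct example_hdr) bytes at "off" contiguous */
	*mp = m_pulldown(*mp, off, sizeof(struct example_hdr), &newoff);
	if (*mp == NULL)
		return (NULL);	/* the chain has already been freed */
	return ((struct example_hdr *)(mtod(*mp, caddr_t) + newoff));
}
#endif /* PULLDOWN_EXAMPLE */
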
static struct mbuf *
m_dup1(struct mbuf *m, int off, int len, int wait)
{
struct mbuf *n;
int copyhdr;
if (len > MCLBYTES)
return NULL;
if (off == 0 && (m->m_flags & M_PKTHDR) != 0)
copyhdr = 1;
else
copyhdr = 0;
if (len >= MINCLSIZE) {
if (copyhdr == 1)
n = m_getcl(wait, m->m_type, M_PKTHDR);
else
n = m_getcl(wait, m->m_type, 0);
} else {
if (copyhdr == 1)
n = m_gethdr(wait, m->m_type);
else
n = m_get(wait, m->m_type);
}
if (!n)
return NULL; /* ENOBUFS */
if (copyhdr && !m_dup_pkthdr(n, m, wait)) {
m_free(n);
return NULL;
}
m_copydata(m, off, len, mtod(n, caddr_t));
n->m_len = len;
return n;
}

/* Free a packet tag. */
void
m_tag_free_default(struct m_tag *t)
{
#ifdef MAC
if (t->m_tag_id == PACKET_TAG_MACLABEL)
mac_mbuf_tag_destroy(t);
#endif
free(t, M_PACKET_TAGS);
}

/* Get a packet tag structure along with specified data following. */
struct m_tag *
m_tag_alloc(u_int32_t cookie, int type, int len, int wait)
{
struct m_tag *t;
MBUF_CHECKSLEEP(wait);
if (len < 0)
return NULL;
t = malloc(len + sizeof(struct m_tag), M_PACKET_TAGS, wait);
if (t == NULL)
return NULL;
m_tag_setup(t, cookie, type, len);
t->m_tag_free = m_tag_free_default;
return t;
}
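
/*
 * Usage sketch (illustrative only, not part of this file): allocate a
 * tag with four bytes of trailing storage, fill it, and attach it to a
 * packet header mbuf.  The type value 12345 is hypothetical;
 * MTAG_ABI_COMPAT is the FreeBSD-internal cookie.  Returns 1 on
 * success, 0 on failure.
 */
#ifdef M_TAG_EXAMPLE
static int
example_tag_attach(struct mbuf *m, u_int32_t value)
{
	struct m_tag *t;

	t = m_tag_alloc(MTAG_ABI_COMPAT, 12345, sizeof(value), M_NOWAIT);
	if (t == NULL)
		return (0);	/* allocation failed */
	bcopy(&value, t + 1, sizeof(value));	/* data follows the tag */
	m_tag_prepend(m, t);
	return (1);
}
#endif /* M_TAG_EXAMPLE */
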
/* Unlink and free a packet tag. */
void
m_tag_delete(struct mbuf *m, struct m_tag *t)
{
KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", m, t));
m_tag_unlink(m, t);
m_tag_free(t);
}

/* Unlink and free a packet tag chain, starting from given tag. */
void
m_tag_delete_chain(struct mbuf *m, struct m_tag *t)
{
struct m_tag *p, *q;
KASSERT(m, ("m_tag_delete_chain: null mbuf"));
if (t != NULL)
p = t;
else
p = SLIST_FIRST(&m->m_pkthdr.tags);
if (p == NULL)
return;
while ((q = SLIST_NEXT(p, m_tag_link)) != NULL)
m_tag_delete(m, q);
m_tag_delete(m, p);
}
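
/*
 * Usage sketch (illustrative only): passing t == NULL deletes every
 * tag on the packet; this is the usual way to clear a header's tag
 * list before freeing or reusing it.
 */
#ifdef M_TAG_EXAMPLE
static void
example_tag_clear(struct mbuf *m)
{
	m_tag_delete_chain(m, NULL);	/* drop all tags on "m" */
}
#endif /* M_TAG_EXAMPLE */
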
/*
* Strip off all tags that would normally vanish when
* passing through a network interface. Only persistent
* tags will exist after this; these are expected to remain
* so long as the mbuf chain exists, regardless of the
* path the mbufs take.
*/
void
m_tag_delete_nonpersistent(struct mbuf *m)
{
struct m_tag *p, *q;
SLIST_FOREACH_SAFE(p, &m->m_pkthdr.tags, m_tag_link, q)
if ((p->m_tag_id & MTAG_PERSISTENT) == 0)
m_tag_delete(m, p);
}
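
/*
 * Illustrative note: persistence is encoded in the tag type itself, so
 * a tag allocated with MTAG_PERSISTENT set in its type survives the
 * sweep above.  The type value 12345 is hypothetical.
 */
#ifdef M_TAG_EXAMPLE
static struct m_tag *
example_persistent_tag(int wait)
{
	/* not removed by m_tag_delete_nonpersistent() */
	return (m_tag_alloc(MTAG_ABI_COMPAT, 12345 | MTAG_PERSISTENT,
	    0, wait));
}
#endif /* M_TAG_EXAMPLE */
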
/* Find a tag, starting from a given position. */
struct m_tag *
m_tag_locate(struct mbuf *m, u_int32_t cookie, int type, struct m_tag *t)
{
struct m_tag *p;
KASSERT(m, ("m_tag_locate: null mbuf"));
if (t == NULL)
p = SLIST_FIRST(&m->m_pkthdr.tags);
else
p = SLIST_NEXT(t, m_tag_link);
while (p != NULL) {
if (p->m_tag_cookie == cookie && p->m_tag_id == type)
return p;
p = SLIST_NEXT(p, m_tag_link);
}
return NULL;
}
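
/*
 * Usage sketch (illustrative only): count every tag matching a given
 * cookie/type pair by feeding each match back in as the new starting
 * position.
 */
#ifdef M_TAG_EXAMPLE
static int
example_tag_count(struct mbuf *m, u_int32_t cookie, int type)
{
	struct m_tag *t;
	int count;

	count = 0;
	for (t = m_tag_locate(m, cookie, type, NULL); t != NULL;
	    t = m_tag_locate(m, cookie, type, t))
		count++;
	return (count);
}
#endif /* M_TAG_EXAMPLE */
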
/* Copy a single tag. */
struct m_tag *
m_tag_copy(struct m_tag *t, int how)
{
struct m_tag *p;
MBUF_CHECKSLEEP(how);
KASSERT(t, ("m_tag_copy: null tag"));
p = m_tag_alloc(t->m_tag_cookie, t->m_tag_id, t->m_tag_len, how);
if (p == NULL)
return (NULL);
#ifdef MAC
/*
* XXXMAC: we should probably pass off the initialization and copying
* here. Can we hide the fact that PACKET_TAG_MACLABEL is special from
* the mbuf code?
*/
if (t->m_tag_id == PACKET_TAG_MACLABEL) {
if (mac_mbuf_tag_init(p, how) != 0) {
m_tag_free(p);
return (NULL);
}
mac_mbuf_tag_copy(t, p);
} else
#endif
bcopy(t + 1, p + 1, t->m_tag_len); /* Copy the data */
return p;
}

/*
* Copy a tag chain from one mbuf to another. The destination mbuf (to)
* loses any attached tags even if the operation fails. This should not
* be a problem, as m_tag_copy_chain() is typically called with a
* newly-allocated destination mbuf.
*/
int
m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
{
struct m_tag *p, *t, *tprev = NULL;
MBUF_CHECKSLEEP(how);
KASSERT(to && from,
("m_tag_copy_chain: null argument, to %p from %p", to, from));
m_tag_delete_chain(to, NULL);
SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
t = m_tag_copy(p, how);
if (t == NULL) {
m_tag_delete_chain(to, NULL);
return 0;
}
if (tprev == NULL)
SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
else
SLIST_INSERT_AFTER(tprev, t, m_tag_link);
tprev = t;
}
return 1;
}
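
/*
 * Usage sketch (illustrative only): duplicating a packet header's tags
 * by hand.  Callers normally get this behaviour through m_dup_pkthdr(),
 * which performs the same tag copy internally; note the 1-on-success,
 * 0-on-failure convention.
 */
#ifdef M_TAG_EXAMPLE
static int
example_copy_hdr_tags(struct mbuf *to, struct mbuf *from, int how)
{
	if (!m_tag_copy_chain(to, from, how)) {
		/* "to" has lost whatever tags it had; caller cleans up */
		return (0);
	}
	return (1);
}
#endif /* M_TAG_EXAMPLE */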