
pf: Remove support for 'scrub fragment crop|drop-ovl'

The crop/drop-ovl fragment scrub modes are not very useful and are likely to
confuse users into making poor choices.
They also account for a fairly large amount of complex code, so remove the
support altogether.

Rulesets that contain 'scrub fragment crop|drop-ovl' in their pf configuration
will be implicitly converted to 'scrub fragment reassemble'.
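
For example, a ruleset containing either of these (hypothetical) lines:

    scrub in all fragment crop
    scrub in all fragment drop-ovl

now behaves exactly as if it read:

    scrub in all fragment reassemble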

Reviewed by:	gnn, eri
Relnotes:	yes
Differential Revision:	https://reviews.freebsd.org/D3466
Kristof Provost 2015-08-27 21:27:47 +00:00
parent f6295033c1
commit 64b3b4d611
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=287222
6 changed files with 40 additions and 518 deletions

UPDATING

@@ -31,6 +31,11 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 11.x IS SLOW:
disable the most expensive debugging functionality run
"ln -s 'abort:false,junk:false' /etc/malloc.conf".)
20150827:
pf no longer supports 'scrub fragment crop' or 'scrub fragment drop-ovl'.
These configurations are now automatically interpreted as
'scrub fragment reassemble'.
20150817:
Kernel-loadable modules for the random(4) device are back. To use
them, the kernel must have

sbin/pfctl/parse.y

@@ -1197,8 +1197,8 @@ scrub_opt : NODF {
;
fragcache : FRAGMENT REASSEMBLE { $$ = 0; /* default */ }
| FRAGMENT FRAGCROP { $$ = PFRULE_FRAGCROP; }
| FRAGMENT FRAGDROP { $$ = PFRULE_FRAGDROP; }
| FRAGMENT FRAGCROP { $$ = 0; }
| FRAGMENT FRAGDROP { $$ = 0; }
;
antispoof : ANTISPOOF logquick antispoof_ifspc af antispoof_opts {

sbin/pfctl/pfctl_parser.c

@@ -990,12 +990,7 @@ print_rule(struct pf_rule *r, const char *anchor_call, int verbose, int numeric)
if (r->rule_flag & PFRULE_REASSEMBLE_TCP)
printf(" reassemble tcp");
if (r->rule_flag & PFRULE_FRAGDROP)
printf(" fragment drop-ovl");
else if (r->rule_flag & PFRULE_FRAGCROP)
printf(" fragment crop");
else
printf(" fragment reassemble");
printf(" fragment reassemble");
}
if (r->label[0])
printf(" label \"%s\"", r->label);

share/man/man5/pf.conf.5

@@ -666,33 +666,6 @@ packet, and only the completed packet is passed on to the filter.
The advantage is that filter rules have to deal only with complete
packets, and can ignore fragments.
The drawback of caching fragments is the additional memory cost.
But the full reassembly method is the only method that currently works
with NAT.
This is the default behavior of a
.Ar scrub
rule if no fragmentation modifier is supplied.
.It Ar fragment crop
The default fragment reassembly method is expensive, hence the option
to crop is provided.
In this case,
.Xr pf 4
will track the fragments and cache a small range descriptor.
Duplicate fragments are dropped and overlaps are cropped.
Thus data will only occur once on the wire with ambiguities resolving to
the first occurrence.
Unlike the
.Ar fragment reassemble
modifier, fragments are not buffered, they are passed as soon as they
are received.
The
.Ar fragment crop
reassembly mechanism does not yet work with NAT.
.It Ar fragment drop-ovl
This option is similar to the
.Ar fragment crop
modifier except that all overlapping or duplicate fragments will be
dropped, and all further corresponding fragments will be
dropped as well.
.It Ar reassemble tcp
Statefully normalizes TCP connections.
.Ar scrub reassemble tcp
@@ -2987,8 +2960,7 @@ state-opt = ( "max" number | "no-sync" | timeout | "sloppy" | "pflow" |
"overload" "\*(Lt" string "\*(Gt" [ "flush" ] |
"if-bound" | "floating" )
fragmentation = [ "fragment reassemble" | "fragment crop" |
"fragment drop-ovl" ]
fragmentation = [ "fragment reassemble" ]
timeout-list = timeout [ [ "," ] timeout-list ]
timeout = ( "tcp.first" | "tcp.opening" | "tcp.established" |

sys/net/pfvar.h

@@ -598,8 +598,6 @@ struct pf_rule {
/* scrub flags */
#define PFRULE_NODF 0x0100
#define PFRULE_FRAGCROP 0x0200 /* non-buffering frag cache */
#define PFRULE_FRAGDROP 0x0400 /* drop funny fragments */
#define PFRULE_RANDOMID 0x0800
#define PFRULE_REASSEMBLE_TCP 0x1000
#define PFRULE_SET_TOS 0x2000
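
With PFRULE_FRAGCROP and PFRULE_FRAGDROP retired, the bit values 0x0200 and
0x0400 become unused holes in the scrub-flag space. A quick standalone sketch
of how the surviving flags are tested (constants copied from the header above;
the rule_flag value is invented):

    #include <stdio.h>
    #include <stdint.h>

    #define PFRULE_NODF             0x0100
    #define PFRULE_RANDOMID         0x0800
    #define PFRULE_REASSEMBLE_TCP   0x1000
    #define PFRULE_SET_TOS          0x2000

    int
    main(void)
    {
            uint32_t rule_flag = PFRULE_NODF | PFRULE_REASSEMBLE_TCP;

            if (rule_flag & PFRULE_NODF)
                    printf("clearing DF on matching packets\n");
            if (rule_flag & PFRULE_REASSEMBLE_TCP)
                    printf("statefully normalizing TCP\n");
            return (0);
    }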

sys/netpfil/pf/pf_norm.c

@@ -88,12 +88,6 @@ struct pf_fragment {
RB_ENTRY(pf_fragment) fr_entry;
TAILQ_ENTRY(pf_fragment) frag_next;
uint8_t fr_flags; /* status flags */
#define PFFRAG_SEENLAST 0x0001 /* Seen the last fragment for this */
#define PFFRAG_NOBUFFER 0x0002 /* Non-buffering fragment cache */
#define PFFRAG_DROP 0x0004 /* Drop all fragments */
#define BUFFER_FRAGMENTS(fr) (!((fr)->fr_flags & PFFRAG_NOBUFFER))
uint16_t fr_max; /* fragment data max */
uint32_t fr_timeout;
uint16_t fr_maxlen; /* maximum length of single fragment */
TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
@@ -123,13 +117,9 @@ TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
static VNET_DEFINE(struct pf_fragqueue, pf_fragqueue);
#define V_pf_fragqueue VNET(pf_fragqueue)
static VNET_DEFINE(struct pf_cachequeue, pf_cachequeue);
#define V_pf_cachequeue VNET(pf_cachequeue)
RB_HEAD(pf_frag_tree, pf_fragment);
static VNET_DEFINE(struct pf_frag_tree, pf_frag_tree);
#define V_pf_frag_tree VNET(pf_frag_tree)
static VNET_DEFINE(struct pf_frag_tree, pf_cache_tree);
#define V_pf_cache_tree VNET(pf_cache_tree)
static int pf_frag_compare(struct pf_fragment *,
struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
@@ -150,8 +140,6 @@ static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static void pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
static int pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
static struct mbuf *pf_fragcache(struct mbuf **, struct ip*,
struct pf_fragment **, int, int, int *);
#endif /* INET */
#ifdef INET6
static int pf_reassemble6(struct mbuf **, struct ip6_hdr *,
@@ -197,7 +185,6 @@ pf_normalize_init(void)
uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");
TAILQ_INIT(&V_pf_fragqueue);
TAILQ_INIT(&V_pf_cachequeue);
}
void
@@ -236,8 +223,6 @@ pf_purge_expired_fragments(void)
PF_FRAG_LOCK();
while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
KASSERT((BUFFER_FRAGMENTS(frag)),
("BUFFER_FRAGMENTS(frag) == 0: %s", __FUNCTION__));
if (frag->fr_timeout > expire)
break;
@@ -245,19 +230,6 @@ pf_purge_expired_fragments(void)
pf_free_fragment(frag);
}
while ((frag = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue)) != NULL) {
KASSERT((!BUFFER_FRAGMENTS(frag)),
("BUFFER_FRAGMENTS(frag) != 0: %s", __FUNCTION__));
if (frag->fr_timeout > expire)
break;
DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
pf_free_fragment(frag);
KASSERT((TAILQ_EMPTY(&V_pf_cachequeue) ||
TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue) != frag),
("!(TAILQ_EMPTY() || TAILQ_LAST() == farg): %s",
__FUNCTION__));
}
PF_FRAG_UNLOCK();
}
@@ -267,7 +239,7 @@ pf_purge_expired_fragments(void)
static void
pf_flush_fragments(void)
{
struct pf_fragment *frag, *cache;
struct pf_fragment *frag;
int goal;
PF_FRAG_ASSERT();
@@ -278,10 +250,7 @@ pf_flush_fragments(void)
frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
if (frag)
pf_free_fragment(frag);
cache = TAILQ_LAST(&V_pf_cachequeue, pf_cachequeue);
if (cache)
pf_free_fragment(cache);
if (frag == NULL && cache == NULL)
else
break;
}
}
@@ -295,27 +264,12 @@ pf_free_fragment(struct pf_fragment *frag)
PF_FRAG_ASSERT();
/* Free all fragments */
if (BUFFER_FRAGMENTS(frag)) {
for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
frent = TAILQ_FIRST(&frag->fr_queue)) {
TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
frent = TAILQ_FIRST(&frag->fr_queue)) {
TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
m_freem(frent->fe_m);
uma_zfree(V_pf_frent_z, frent);
}
} else {
for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
frent = TAILQ_FIRST(&frag->fr_queue)) {
TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);
KASSERT((TAILQ_EMPTY(&frag->fr_queue) ||
TAILQ_FIRST(&frag->fr_queue)->fe_off >
frent->fe_len),
("! (TAILQ_EMPTY() || TAILQ_FIRST()->fe_off >"
" frent->fe_len): %s", __func__));
uma_zfree(V_pf_frent_z, frent);
}
m_freem(frent->fe_m);
uma_zfree(V_pf_frent_z, frent);
}
pf_remove_fragment(frag);
@@ -332,13 +286,8 @@ pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
if (frag != NULL) {
/* XXX Are we sure we want to update the timeout? */
frag->fr_timeout = time_uptime;
if (BUFFER_FRAGMENTS(frag)) {
TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
} else {
TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
TAILQ_INSERT_HEAD(&V_pf_cachequeue, frag, frag_next);
}
TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
}
return (frag);
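
With the cache queue gone, pf_find_fragment now unconditionally moves a hit to
the head of V_pf_fragqueue, keeping the queue ordered by recency so that
pf_purge_expired_fragments and pf_flush_fragments can take victims from the
tail. A standalone sketch of that move-to-front pattern with sys/queue.h
(illustrative names, not pf's):

    #include <sys/queue.h>
    #include <stdio.h>

    struct frag {
            int id;
            TAILQ_ENTRY(frag) link;
    };
    TAILQ_HEAD(fragq, frag);

    int
    main(void)
    {
            struct fragq q = TAILQ_HEAD_INITIALIZER(q);
            struct frag a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 }, *f;

            TAILQ_INSERT_HEAD(&q, &a, link);
            TAILQ_INSERT_HEAD(&q, &b, link);
            TAILQ_INSERT_HEAD(&q, &c, link);        /* order: 3 2 1 */

            /* A lookup hit on 1 moves it to the head, as above. */
            TAILQ_REMOVE(&q, &a, link);
            TAILQ_INSERT_HEAD(&q, &a, link);        /* order: 1 3 2 */

            TAILQ_FOREACH(f, &q, link)
                    printf("%d ", f->id);
            printf("\n");                           /* prints: 1 3 2 */
            return (0);
    }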
@@ -351,15 +300,9 @@ pf_remove_fragment(struct pf_fragment *frag)
PF_FRAG_ASSERT();
if (BUFFER_FRAGMENTS(frag)) {
RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
uma_zfree(V_pf_frag_z, frag);
} else {
RB_REMOVE(pf_frag_tree, &V_pf_cache_tree, frag);
TAILQ_REMOVE(&V_pf_cachequeue, frag, frag_next);
uma_zfree(V_pf_frag_z, frag);
}
RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
uma_zfree(V_pf_frag_z, frag);
}
static struct pf_frent *
@@ -431,7 +374,6 @@ pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
}
*(struct pf_fragment_cmp *)frag = *key;
frag->fr_flags = 0;
frag->fr_timeout = time_second;
frag->fr_maxlen = frent->fe_len;
TAILQ_INIT(&frag->fr_queue);
@@ -782,312 +724,6 @@ pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
}
#endif /* INET6 */
#ifdef INET
static struct mbuf *
pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
int drop, int *nomem)
{
struct mbuf *m = *m0;
struct pf_frent *frp, *fra, *cur = NULL;
int ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
u_int16_t off = ntohs(h->ip_off) << 3;
u_int16_t max = ip_len + off;
int hosed = 0;
PF_FRAG_ASSERT();
KASSERT((*frag == NULL || !BUFFER_FRAGMENTS(*frag)),
("!(*frag == NULL || !BUFFER_FRAGMENTS(*frag)): %s", __FUNCTION__));
/* Create a new range queue for this packet */
if (*frag == NULL) {
*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
if (*frag == NULL) {
pf_flush_fragments();
*frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
if (*frag == NULL)
goto no_mem;
}
/* Get an entry for the queue */
cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
if (cur == NULL) {
uma_zfree(V_pf_frag_z, *frag);
*frag = NULL;
goto no_mem;
}
(*frag)->fr_flags = PFFRAG_NOBUFFER;
(*frag)->fr_max = 0;
(*frag)->fr_src.v4 = h->ip_src;
(*frag)->fr_dst.v4 = h->ip_dst;
(*frag)->fr_af = AF_INET;
(*frag)->fr_proto = h->ip_p;
(*frag)->fr_id = h->ip_id;
(*frag)->fr_timeout = time_uptime;
cur->fe_off = off;
cur->fe_len = max; /* TODO: fe_len = max - off ? */
TAILQ_INIT(&(*frag)->fr_queue);
TAILQ_INSERT_HEAD(&(*frag)->fr_queue, cur, fr_next);
RB_INSERT(pf_frag_tree, &V_pf_cache_tree, *frag);
TAILQ_INSERT_HEAD(&V_pf_cachequeue, *frag, frag_next);
DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));
goto pass;
}
/*
* Find a fragment after the current one:
* - off contains the real shifted offset.
*/
frp = NULL;
TAILQ_FOREACH(fra, &(*frag)->fr_queue, fr_next) {
if (fra->fe_off > off)
break;
frp = fra;
}
KASSERT((frp != NULL || fra != NULL),
("!(frp != NULL || fra != NULL): %s", __FUNCTION__));
if (frp != NULL) {
int precut;
precut = frp->fe_len - off;
if (precut >= ip_len) {
/* Fragment is entirely a duplicate */
DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
h->ip_id, frp->fe_off, frp->fe_len, off, max));
goto drop_fragment;
}
if (precut == 0) {
/* They are adjacent. Fixup cache entry */
DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
h->ip_id, frp->fe_off, frp->fe_len, off, max));
frp->fe_len = max;
} else if (precut > 0) {
/* The first part of this payload overlaps with a
* fragment that has already been passed.
* Need to trim off the first part of the payload.
* But to do so easily, we need to create another
* mbuf to throw the original header into.
*/
DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
h->ip_id, precut, frp->fe_off, frp->fe_len, off,
max));
off += precut;
max -= precut;
/* Update the previous frag to encompass this one */
frp->fe_len = max;
if (!drop) {
/* XXX Optimization opportunity
* This is a very heavy way to trim the payload.
* we could do it much faster by diddling mbuf
* internals but that would be even less legible
* than this mbuf magic. For my next trick,
* I'll pull a rabbit out of my laptop.
*/
*m0 = m_dup(m, M_NOWAIT);
if (*m0 == NULL)
goto no_mem;
/* From KAME Project : We have missed this! */
m_adj(*m0, (h->ip_hl << 2) -
(*m0)->m_pkthdr.len);
KASSERT(((*m0)->m_next == NULL),
("(*m0)->m_next != NULL: %s",
__FUNCTION__));
m_adj(m, precut + (h->ip_hl << 2));
m_cat(*m0, m);
m = *m0;
if (m->m_flags & M_PKTHDR) {
int plen = 0;
struct mbuf *t;
for (t = m; t; t = t->m_next)
plen += t->m_len;
m->m_pkthdr.len = plen;
}
h = mtod(m, struct ip *);
KASSERT(((int)m->m_len ==
ntohs(h->ip_len) - precut),
("m->m_len != ntohs(h->ip_len) - precut: %s",
__FUNCTION__));
h->ip_off = htons(ntohs(h->ip_off) +
(precut >> 3));
h->ip_len = htons(ntohs(h->ip_len) - precut);
} else {
hosed++;
}
} else {
/* There is a gap between fragments */
DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
h->ip_id, -precut, frp->fe_off, frp->fe_len, off,
max));
cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
if (cur == NULL)
goto no_mem;
cur->fe_off = off;
cur->fe_len = max;
TAILQ_INSERT_AFTER(&(*frag)->fr_queue, frp, cur, fr_next);
}
}
if (fra != NULL) {
int aftercut;
int merge = 0;
aftercut = max - fra->fe_off;
if (aftercut == 0) {
/* Adjacent fragments */
DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
h->ip_id, off, max, fra->fe_off, fra->fe_len));
fra->fe_off = off;
merge = 1;
} else if (aftercut > 0) {
/* Need to chop off the tail of this fragment */
DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
h->ip_id, aftercut, off, max, fra->fe_off,
fra->fe_len));
fra->fe_off = off;
max -= aftercut;
merge = 1;
if (!drop) {
m_adj(m, -aftercut);
if (m->m_flags & M_PKTHDR) {
int plen = 0;
struct mbuf *t;
for (t = m; t; t = t->m_next)
plen += t->m_len;
m->m_pkthdr.len = plen;
}
h = mtod(m, struct ip *);
KASSERT(((int)m->m_len == ntohs(h->ip_len) - aftercut),
("m->m_len != ntohs(h->ip_len) - aftercut: %s",
__FUNCTION__));
h->ip_len = htons(ntohs(h->ip_len) - aftercut);
} else {
hosed++;
}
} else if (frp == NULL) {
/* There is a gap between fragments */
DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
h->ip_id, -aftercut, off, max, fra->fe_off,
fra->fe_len));
cur = uma_zalloc(V_pf_frent_z, M_NOWAIT);
if (cur == NULL)
goto no_mem;
cur->fe_off = off;
cur->fe_len = max;
TAILQ_INSERT_HEAD(&(*frag)->fr_queue, cur, fr_next);
}
/* Need to glue together two separate fragment descriptors */
if (merge) {
if (cur && fra->fe_off <= cur->fe_len) {
/* Need to merge in a previous 'cur' */
DPFPRINTF(("fragcache[%d]: adjacent(merge "
"%d-%d) %d-%d (%d-%d)\n",
h->ip_id, cur->fe_off, cur->fe_len, off,
max, fra->fe_off, fra->fe_len));
fra->fe_off = cur->fe_off;
TAILQ_REMOVE(&(*frag)->fr_queue, cur, fr_next);
uma_zfree(V_pf_frent_z, cur);
cur = NULL;
} else if (frp && fra->fe_off <= frp->fe_len) {
/* Need to merge in a modified 'frp' */
KASSERT((cur == NULL), ("cur != NULL: %s",
__FUNCTION__));
DPFPRINTF(("fragcache[%d]: adjacent(merge "
"%d-%d) %d-%d (%d-%d)\n",
h->ip_id, frp->fe_off, frp->fe_len, off,
max, fra->fe_off, fra->fe_len));
fra->fe_off = frp->fe_off;
TAILQ_REMOVE(&(*frag)->fr_queue, frp, fr_next);
uma_zfree(V_pf_frent_z, frp);
frp = NULL;
}
}
}
if (hosed) {
/*
* We must keep tracking the overall fragment even when
* we're going to drop it anyway so that we know when to
* free the overall descriptor. Thus we drop the frag late.
*/
goto drop_fragment;
}
pass:
/* Update maximum data size */
if ((*frag)->fr_max < max)
(*frag)->fr_max = max;
/* This is the last segment */
if (!mff)
(*frag)->fr_flags |= PFFRAG_SEENLAST;
/* Check if we are completely reassembled */
if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
TAILQ_FIRST(&(*frag)->fr_queue)->fe_off == 0 &&
TAILQ_FIRST(&(*frag)->fr_queue)->fe_len == (*frag)->fr_max) {
/* Remove from fragment queue */
DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
(*frag)->fr_max));
pf_free_fragment(*frag);
*frag = NULL;
}
return (m);
no_mem:
*nomem = 1;
/* Still need to pay attention to !IP_MF */
if (!mff && *frag != NULL)
(*frag)->fr_flags |= PFFRAG_SEENLAST;
m_freem(m);
return (NULL);
drop_fragment:
/* Still need to pay attention to !IP_MF */
if (!mff && *frag != NULL)
(*frag)->fr_flags |= PFFRAG_SEENLAST;
if (drop) {
/* This fragment has been deemed bad. Don't reass */
if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
h->ip_id));
(*frag)->fr_flags |= PFFRAG_DROP;
}
m_freem(m);
return (NULL);
}
#endif /* INET */
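
For reference, the precut/aftercut arithmetic at the heart of the removed
pf_fragcache() is easier to follow in isolation. A minimal sketch with
invented, end-exclusive byte offsets:

    #include <stdio.h>

    int
    main(void)
    {
            int frp_len = 1480;             /* previous cached entry ends here */
            int fra_off = 2960;             /* next cached entry starts here */
            int off = 1400, max = 3000;     /* incoming fragment */

            int precut = frp_len - off;     /* 80: head overlaps previous */
            int aftercut = max - fra_off;   /* 40: tail overlaps next */

            if (precut > 0)
                    off += precut;          /* keep the first occurrence */
            if (aftercut > 0)
                    max -= aftercut;        /* chop off the tail */

            printf("cropped to %d-%d\n", off, max); /* 1480-2960 */
            return (0);
    }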
#ifdef INET6
int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag)
@@ -1169,8 +805,6 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
{
struct mbuf *m = *m0;
struct pf_rule *r;
struct pf_fragment *frag = NULL;
struct pf_fragment_cmp key;
struct ip *h = mtod(m, struct ip *);
int mff = (ntohs(h->ip_off) & IP_MF);
int hlen = h->ip_hl << 2;
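
The locals above are decoded straight from the IP header. A standalone sketch
of the same decoding using the standard netinet definitions (header values
invented for illustration):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>
    #include <netinet/ip.h>         /* IP_MF, IP_OFFMASK */

    int
    main(void)
    {
            uint16_t ip_off = htons(IP_MF | (1480 >> 3));   /* wire value */
            uint16_t ip_len = htons(1500);
            int hlen = 5 << 2;      /* ip_hl = 5 -> 20-byte header */

            int mff = (ntohs(ip_off) & IP_MF) != 0;
            int fragoff = (ntohs(ip_off) & IP_OFFMASK) << 3;
            int data_len = ntohs(ip_len) - hlen;
            int max = fragoff + data_len;

            printf("hlen %d mff %d fragment range %d-%d\n",
                hlen, mff, fragoff, max);   /* hlen 20 mff 1 ... 1480-2960 */
            return (0);
    }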
@@ -1217,11 +851,15 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
}
/* Check for illegal packets */
if (hlen < (int)sizeof(struct ip))
if (hlen < (int)sizeof(struct ip)) {
REASON_SET(reason, PFRES_NORM);
goto drop;
}
if (hlen > ntohs(h->ip_len))
if (hlen > ntohs(h->ip_len)) {
REASON_SET(reason, PFRES_NORM);
goto drop;
}
/* Clear IP_DF if the rule uses the no-df option */
if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
@@ -1260,82 +898,21 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
}
max = fragoff + ip_len;
if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
/* Fully buffer all of the fragments
* Might return a completely reassembled mbuf, or NULL */
PF_FRAG_LOCK();
DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
verdict = pf_reassemble(m0, h, dir, reason);
PF_FRAG_UNLOCK();
/* Fully buffer all of the fragments */
PF_FRAG_LOCK();
if (verdict != PF_PASS)
return (PF_DROP);
pf_ip2key(h, dir, &key);
frag = pf_find_fragment(&key, &V_pf_frag_tree);
m = *m0;
if (m == NULL)
return (PF_DROP);
/* Check if we saw the last fragment already */
if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
max > frag->fr_max)
goto bad;
/* Might return a completely reassembled mbuf, or NULL */
DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
verdict = pf_reassemble(m0, h, dir, reason);
PF_FRAG_UNLOCK();
if (verdict != PF_PASS)
return (PF_DROP);
m = *m0;
if (m == NULL)
return (PF_DROP);
h = mtod(m, struct ip *);
} else {
/* non-buffering fragment cache (drops or masks overlaps) */
int nomem = 0;
if (dir == PF_OUT && pd->pf_mtag &&
pd->pf_mtag->flags & PF_TAG_FRAGCACHE) {
/*
* Already passed the fragment cache in the
* input direction. If we continued, it would
* appear to be a dup and would be dropped.
*/
goto fragment_pass;
}
PF_FRAG_LOCK();
pf_ip2key(h, dir, &key);
frag = pf_find_fragment(&key, &V_pf_cache_tree);
/* Check if we saw the last fragment already */
if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
max > frag->fr_max) {
if (r->rule_flag & PFRULE_FRAGDROP)
frag->fr_flags |= PFFRAG_DROP;
goto bad;
}
*m0 = m = pf_fragcache(m0, h, &frag, mff,
(r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
PF_FRAG_UNLOCK();
if (m == NULL) {
if (nomem)
goto no_mem;
goto drop;
}
if (dir == PF_IN) {
/* Use mtag from copied and trimmed mbuf chain. */
pd->pf_mtag = pf_get_mtag(m);
if (pd->pf_mtag == NULL) {
m_freem(m);
*m0 = NULL;
goto no_mem;
}
pd->pf_mtag->flags |= PF_TAG_FRAGCACHE;
}
if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
goto drop;
goto fragment_pass;
}
h = mtod(m, struct ip *);
no_fragment:
/* At this point, only IP_DF is allowed in ip_off */
@@ -1346,39 +923,14 @@ pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
}
/* not missing a return here */
fragment_pass:
pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);
if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0)
pd->flags |= PFDESC_IP_REAS;
return (PF_PASS);
no_mem:
REASON_SET(reason, PFRES_MEMORY);
if (r != NULL && r->log)
PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
1);
return (PF_DROP);
drop:
REASON_SET(reason, PFRES_NORM);
if (r != NULL && r->log)
PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
1);
return (PF_DROP);
bad:
DPFPRINTF(("dropping bad fragment\n"));
/* Free associated fragments */
if (frag != NULL) {
pf_free_fragment(frag);
PF_FRAG_UNLOCK();
}
REASON_SET(reason, PFRES_FRAG);
drop:
if (r != NULL && r->log)
PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
1);
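
Finally, the surviving code path still calls pf_cksum_fixup() to repair ip_sum
whenever ip_off is rewritten (see the no_fragment hunk above). Its body is not
part of this diff, so the following is only a sketch of the usual RFC 1624
style incremental one's-complement update such a helper performs:

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t
    cksum_fixup(uint16_t cksum, uint16_t old, uint16_t new)
    {
            uint32_t l = cksum + old - new;

            l = (l >> 16) + (l & 0xffff);   /* fold carry/borrow */
            return (l & 0xffff);
    }

    int
    main(void)
    {
            /* Clearing IP_DF (0x4000) in ip_off: old 0x4000, new 0x0000. */
            uint16_t sum = cksum_fixup(0xb1e6, 0x4000, 0x0000);

            printf("updated checksum: 0x%04x\n", sum);
            return (0);
    }

Only the 16-bit word that changed is fed in, so the full header never needs to
be re-summed.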