freebsd/sys/netinet6/ip6_input.c


/*-
* Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the project nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $KAME: ip6_input.c,v 1.259 2002/01/21 04:58:09 jinmei Exp $
*/
/*-
* Copyright (c) 1982, 1986, 1988, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ip_input.c 8.2 (Berkeley) 1/4/94
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipfw.h"
#include "opt_ipsec.h"
#include "opt_route.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/pfil.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/ip_var.h>
#include <netinet/in_systm.h>
#include <net/if_llatbl.h>
#ifdef INET
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#endif /* INET */
#include <netinet/ip6.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/in_pcb.h>
#include <netinet/icmp6.h>
#include <netinet6/scope6_var.h>
#include <netinet6/in6_ifattach.h>
#include <netinet6/nd6.h>
#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netinet6/ip6_ipsec.h>
#include <netipsec/ipsec6.h>
#endif /* IPSEC */
#include <netinet6/ip6protosw.h>
extern struct domain inet6domain;
u_char ip6_protox[IPPROTO_MAX];
VNET_DEFINE(struct in6_ifaddrhead, in6_ifaddrhead);
VNET_DEFINE(struct in6_ifaddrlisthead *, in6_ifaddrhashtbl);
VNET_DEFINE(u_long, in6_ifaddrhmask);
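/*
* netisr handler for IPv6. ip6_input() is registered with this handler at
* the end of ip6_init(); NETISR_POLICY_FLOW lets the netisr framework use
* driver-supplied mbuf flow IDs (when present) for work placement.
*/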
static struct netisr_handler ip6_nh = {
.nh_name = "ip6",
.nh_handler = ip6_input,
.nh_proto = NETISR_IPV6,
.nh_policy = NETISR_POLICY_FLOW,
};
VNET_DECLARE(struct callout, in6_tmpaddrtimer_ch);
#define V_in6_tmpaddrtimer_ch VNET(in6_tmpaddrtimer_ch)
VNET_DEFINE(struct pfil_head, inet6_pfil_hook);
VNET_PCPUSTAT_DEFINE(struct ip6stat, ip6stat);
VNET_PCPUSTAT_SYSINIT(ip6stat);
#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(ip6stat);
#endif /* VIMAGE */
struct rwlock in6_ifaddr_lock;
RW_SYSINIT(in6_ifaddr_lock, &in6_ifaddr_lock, "in6_ifaddr_lock");
static void ip6_init2(void *);
static struct ip6aux *ip6_setdstifaddr(struct mbuf *, struct in6_ifaddr *);
static struct ip6aux *ip6_addaux(struct mbuf *);
static struct ip6aux *ip6_findaux(struct mbuf *m);
static void ip6_delaux (struct mbuf *);
static int ip6_hopopts_input(u_int32_t *, u_int32_t *, struct mbuf **, int *);
#ifdef PULLDOWN_TEST
static struct mbuf *ip6_pullexthdr(struct mbuf *, size_t, int);
#endif
/*
* IP6 initialization: fill in IP6 protocol switch table.
* All protocols not implemented in the kernel go to the raw IP6 protocol handler.
*/
void
ip6_init(void)
{
struct ip6protosw *pr;
int i;
TUNABLE_INT_FETCH("net.inet6.ip6.auto_linklocal",
&V_ip6_auto_linklocal);
TUNABLE_INT_FETCH("net.inet6.ip6.accept_rtadv", &V_ip6_accept_rtadv);
TUNABLE_INT_FETCH("net.inet6.ip6.no_radr", &V_ip6_no_radr);
TAILQ_INIT(&V_in6_ifaddrhead);
V_in6_ifaddrhashtbl = hashinit(IN6ADDR_NHASH, M_IFADDR,
&V_in6_ifaddrhmask);
/* Initialize packet filter hooks. */
V_inet6_pfil_hook.ph_type = PFIL_TYPE_AF;
V_inet6_pfil_hook.ph_af = AF_INET6;
if ((i = pfil_head_register(&V_inet6_pfil_hook)) != 0)
printf("%s: WARNING: unable to register pfil hook, "
"error %d\n", __func__, i);
scope6_init();
addrsel_policy_init();
nd6_init();
frag6_init();
V_ip6_desync_factor = arc4random() % MAX_TEMP_DESYNC_FACTOR;
/* Skip global initialization stuff for non-default instances. */
if (!IS_DEFAULT_VNET(curvnet))
return;
#ifdef DIAGNOSTIC
if (sizeof(struct protosw) != sizeof(struct ip6protosw))
panic("sizeof(protosw) != sizeof(ip6protosw)");
#endif
pr = (struct ip6protosw *)pffindproto(PF_INET6, IPPROTO_RAW, SOCK_RAW);
if (pr == NULL)
panic("ip6_init");
/* Initialize the entire ip6_protox[] array to IPPROTO_RAW. */
for (i = 0; i < IPPROTO_MAX; i++)
ip6_protox[i] = pr - inet6sw;
/*
* Cycle through IP protocols and put them into the appropriate place
* in ip6_protox[].
*/
for (pr = (struct ip6protosw *)inet6domain.dom_protosw;
pr < (struct ip6protosw *)inet6domain.dom_protoswNPROTOSW; pr++)
if (pr->pr_domain->dom_family == PF_INET6 &&
pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
/* Be careful to only index valid IP protocols. */
if (pr->pr_protocol < IPPROTO_MAX)
ip6_protox[pr->pr_protocol] = pr - inet6sw;
}
netisr_register(&ip6_nh);
}
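/*
* At this point every entry of ip6_protox[] is an index into inet6sw[]:
* either the slot of a specific upper-layer protocol, or that of raw IPv6
* for anything unhandled. The extension-header loop at the bottom of
* ip6_input() dispatches through this table.
*/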
/*
* The protocol to be inserted into ip6_protox[] must already be registered
* in inet6sw[], either statically or through pf_proto_register().
*/
int
ip6proto_register(short ip6proto)
{
struct ip6protosw *pr;
/* Sanity checks. */
if (ip6proto <= 0 || ip6proto >= IPPROTO_MAX)
return (EPROTONOSUPPORT);
/*
* The protocol slot must not be occupied by another protocol
* already. An index pointing to IPPROTO_RAW is unused.
*/
pr = (struct ip6protosw *)pffindproto(PF_INET6, IPPROTO_RAW, SOCK_RAW);
if (pr == NULL)
return (EPFNOSUPPORT);
if (ip6_protox[ip6proto] != pr - inet6sw) /* IPPROTO_RAW */
return (EEXIST);
/*
* Find the protocol position in inet6sw[] and set the index.
*/
for (pr = (struct ip6protosw *)inet6domain.dom_protosw;
pr < (struct ip6protosw *)inet6domain.dom_protoswNPROTOSW; pr++) {
if (pr->pr_domain->dom_family == PF_INET6 &&
pr->pr_protocol && pr->pr_protocol == ip6proto) {
ip6_protox[pr->pr_protocol] = pr - inet6sw;
return (0);
}
}
return (EPROTONOSUPPORT);
}
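/*
* Illustrative use only: a module whose protocol switch entry has been
* added to inet6sw[] (e.g. via pf_proto_register()) could claim its
* dispatch slot with something like
*
* error = ip6proto_register(IPPROTO_FOO);
*
* where IPPROTO_FOO stands in for the module's protocol number, and
* would call ip6proto_unregister() again on unload.
*/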
int
ip6proto_unregister(short ip6proto)
{
struct ip6protosw *pr;
/* Sanity checks. */
if (ip6proto <= 0 || ip6proto >= IPPROTO_MAX)
return (EPROTONOSUPPORT);
/* Check if the protocol was indeed registered. */
pr = (struct ip6protosw *)pffindproto(PF_INET6, IPPROTO_RAW, SOCK_RAW);
if (pr == NULL)
return (EPFNOSUPPORT);
if (ip6_protox[ip6proto] == pr - inet6sw) /* IPPROTO_RAW */
return (ENOENT);
/* Reset the protocol slot to IPPROTO_RAW. */
ip6_protox[ip6proto] = pr - inet6sw;
return (0);
}
#ifdef VIMAGE
void
ip6_destroy()
{
int i;
if ((i = pfil_head_unregister(&V_inet6_pfil_hook)) != 0)
printf("%s: WARNING: unable to unregister pfil hook, "
"error %d\n", __func__, i);
hashdestroy(V_in6_ifaddrhashtbl, M_IFADDR, V_in6_ifaddrhmask);
nd6_destroy();
callout_drain(&V_in6_tmpaddrtimer_ch);
}
#endif
static int
ip6_init2_vnet(const void *unused __unused)
{
/* nd6_timer_init */
callout_init(&V_nd6_timer_ch, 0);
callout_reset(&V_nd6_timer_ch, hz, nd6_timer, curvnet);
/* timer for regeneration of temporary addresses' randomized ID */
callout_init(&V_in6_tmpaddrtimer_ch, 0);
callout_reset(&V_in6_tmpaddrtimer_ch,
(V_ip6_temp_preferred_lifetime - V_ip6_desync_factor -
V_ip6_temp_regen_advance) * hz,
in6_tmpaddrtimer, curvnet);
return (0);
}
static void
ip6_init2(void *dummy)
{
ip6_init2_vnet(NULL);
}
/* cheat */
/* This must be after route_init(), which is now SI_ORDER_THIRD */
SYSINIT(netinet6init2, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE, ip6_init2, NULL);
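/*
* Process the Hop-by-Hop options header at the front of the packet for
* ip6_input(). Returns 0 on success, with *plen, *rtalert, *off, *nxt and
* *ours updated as appropriate; returns 1 if the packet was discarded, in
* which case the mbuf has already been freed (or consumed by icmp6_error())
* and must not be touched by the caller.
*/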
static int
ip6_input_hbh(struct mbuf *m, uint32_t *plen, uint32_t *rtalert, int *off,
int *nxt, int *ours)
{
struct ip6_hdr *ip6;
struct ip6_hbh *hbh;
if (ip6_hopopts_input(plen, rtalert, &m, off)) {
#if 0 /*touches NULL pointer*/
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
#endif
goto out; /* m has already been freed */
}
/* adjust pointer */
ip6 = mtod(m, struct ip6_hdr *);
/*
* if the payload length field is 0 and the next header field
* indicates Hop-by-Hop Options header, then a Jumbo Payload
* option MUST be included.
*/
if (ip6->ip6_plen == 0 && *plen == 0) {
/*
* Note that if a valid jumbo payload option is
* contained, ip6_hopopts_input() must set a valid
* (non-zero) payload length to the variable plen.
*/
IP6STAT_INC(ip6s_badoptions);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_HEADER,
(caddr_t)&ip6->ip6_plen - (caddr_t)ip6);
goto out;
}
#ifndef PULLDOWN_TEST
/* ip6_hopopts_input() ensures that mbuf is contiguous */
hbh = (struct ip6_hbh *)(ip6 + 1);
#else
IP6_EXTHDR_GET(hbh, struct ip6_hbh *, m, sizeof(struct ip6_hdr),
sizeof(struct ip6_hbh));
if (hbh == NULL) {
IP6STAT_INC(ip6s_tooshort);
goto out;
}
#endif
*nxt = hbh->ip6h_nxt;
/*
* If we are acting as a router and the packet contains a
* router alert option, see if we know the option value.
* Currently, we only support the option value for MLD, in which
* case we should pass the packet to the multicast routing
* daemon.
*/
if (*rtalert != ~0) {
switch (*rtalert) {
case IP6OPT_RTALERT_MLD:
if (V_ip6_forwarding)
*ours = 1;
break;
default:
/*
* RFC2711 requires unrecognized values must be
* silently ignored.
*/
break;
}
}
return (0);
out:
return (1);
}
void
ip6_input(struct mbuf *m)
{
struct ip6_hdr *ip6;
int off = sizeof(struct ip6_hdr), nest;
u_int32_t plen;
u_int32_t rtalert = ~0;
int nxt, ours = 0;
struct ifnet *deliverifp = NULL, *ifp = NULL;
struct in6_addr odst;
struct route_in6 rin6;
int srcrt = 0;
struct llentry *lle = NULL;
struct sockaddr_in6 dst6, *dst;
bzero(&rin6, sizeof(struct route_in6));
#ifdef IPSEC
/*
* should the inner packet be considered authentic?
* see comment in ah4_input().
* NB: m cannot be NULL when passed to the input routine
*/
m->m_flags &= ~M_AUTHIPHDR;
m->m_flags &= ~M_AUTHIPDGM;
#endif /* IPSEC */
/*
* make sure we don't have stale onion-peeling (nested tunnel) information in m_tag.
*/
ip6_delaux(m);
if (m->m_flags & M_FASTFWD_OURS) {
/*
* Firewall changed destination to local.
*/
m->m_flags &= ~M_FASTFWD_OURS;
ours = 1;
deliverifp = m->m_pkthdr.rcvif;
ip6 = mtod(m, struct ip6_hdr *);
goto hbhcheck;
}
/*
* mbuf statistics
*/
if (m->m_flags & M_EXT) {
if (m->m_next)
IP6STAT_INC(ip6s_mext2m);
else
IP6STAT_INC(ip6s_mext1);
} else {
if (m->m_next) {
if (m->m_flags & M_LOOP) {
IP6STAT_INC(ip6s_m2m[V_loif->if_index]);
} else if (m->m_pkthdr.rcvif->if_index < IP6S_M2MMAX)
IP6STAT_INC(
ip6s_m2m[m->m_pkthdr.rcvif->if_index]);
else
IP6STAT_INC(ip6s_m2m[0]);
} else
IP6STAT_INC(ip6s_m1);
}
/* drop the packet if IPv6 operation is disabled on the IF */
if ((ND_IFINFO(m->m_pkthdr.rcvif)->flags & ND6_IFF_IFDISABLED)) {
m_freem(m);
return;
}
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_receive);
IP6STAT_INC(ip6s_total);
#ifndef PULLDOWN_TEST
/*
* L2 bridge code and some other code can return an mbuf chain
* that does not conform to the KAME requirement. Too bad.
* XXX: fails to join if interface MTU > MCLBYTES. jumbogram?
*/
if (m && m->m_next != NULL && m->m_pkthdr.len < MCLBYTES) {
struct mbuf *n;
if (m->m_pkthdr.len > MHLEN)
n = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
else
n = m_gethdr(M_NOWAIT, MT_DATA);
if (n == NULL) {
m_freem(m);
return; /* ENOBUFS */
}
m_move_pkthdr(n, m);
m_copydata(m, 0, n->m_pkthdr.len, mtod(n, caddr_t));
n->m_len = n->m_pkthdr.len;
m_freem(m);
m = n;
}
IP6_EXTHDR_CHECK(m, 0, sizeof(struct ip6_hdr), /* nothing */);
#endif
if (m->m_len < sizeof(struct ip6_hdr)) {
struct ifnet *inifp;
inifp = m->m_pkthdr.rcvif;
if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
IP6STAT_INC(ip6s_toosmall);
in6_ifstat_inc(inifp, ifs6_in_hdrerr);
return;
}
}
ip6 = mtod(m, struct ip6_hdr *);
if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
IP6STAT_INC(ip6s_badvers);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
goto bad;
}
IP6STAT_INC(ip6s_nxthist[ip6->ip6_nxt]);
IP_PROBE(receive, NULL, NULL, ip6, m->m_pkthdr.rcvif, NULL, ip6);
/*
* Check against address spoofing/corruption.
*/
if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_src) ||
IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_dst)) {
/*
* XXX: "badscope" is not very suitable for a multicast source.
*/
IP6STAT_INC(ip6s_badscope);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
goto bad;
}
if (IN6_IS_ADDR_MC_INTFACELOCAL(&ip6->ip6_dst) &&
!(m->m_flags & M_LOOP)) {
/*
* In this case, the packet should come from the loopback
* interface. However, we cannot just check the if_flags,
* because ip6_mloopback() passes the "actual" interface
* as the outgoing/incoming interface.
*/
IP6STAT_INC(ip6s_badscope);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
goto bad;
}
if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) &&
IPV6_ADDR_MC_SCOPE(&ip6->ip6_dst) == 0) {
/*
* RFC4291 2.7:
* Nodes must not originate a packet to a multicast address
* whose scop field contains the reserved value 0; if such
* a packet is received, it must be silently dropped.
*/
IP6STAT_INC(ip6s_badscope);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
goto bad;
}
#ifdef ALTQ
if (altq_input != NULL && (*altq_input)(m, AF_INET6) == 0) {
/* packet is dropped by traffic conditioner */
return;
}
#endif
/*
* The following check is not documented in specs. A malicious
* party may be able to use IPv4 mapped addr to confuse tcp/udp stack
* and bypass security checks (act as if it was from 127.0.0.1 by using
* IPv6 src ::ffff:127.0.0.1). Be cautious.
*
* This check chokes if we are in an SIIT cloud. As none of the BSDs
* support IPv4-less kernel compilation, we cannot support an SIIT
* environment at all. So it makes more sense for us to reject any
* malicious packets in a non-SIIT environment than to try to provide
* partial support for an SIIT environment.
*/
if (IN6_IS_ADDR_V4MAPPED(&ip6->ip6_src) ||
IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst)) {
IP6STAT_INC(ip6s_badscope);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
goto bad;
}
#if 0
/*
* Reject packets with IPv4 compatible addresses (auto tunnel).
*
* The code forbids auto tunnel relay case in RFC1933 (the check is
* stronger than RFC1933). We may want to re-enable it if mech-xx
* is revised to forbid relaying case.
*/
if (IN6_IS_ADDR_V4COMPAT(&ip6->ip6_src) ||
IN6_IS_ADDR_V4COMPAT(&ip6->ip6_dst)) {
IP6STAT_INC(ip6s_badscope);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
goto bad;
}
#endif
#ifdef IPSEC
/*
* Bypass packet filtering for packets previously handled by IPsec.
*/
if (ip6_ipsec_filtertunnel(m))
goto passin;
#endif /* IPSEC */
/*
* Run through list of hooks for input packets.
*
* NB: Beware of the destination address changing
* (e.g. by NAT rewriting). When this happens,
* tell ip6_forward to do the right thing.
*/
odst = ip6->ip6_dst;
/* Jump over all PFIL processing if hooks are not active. */
if (!PFIL_HOOKED(&V_inet6_pfil_hook))
goto passin;
if (pfil_run_hooks(&V_inet6_pfil_hook, &m,
m->m_pkthdr.rcvif, PFIL_IN, NULL))
return;
if (m == NULL) /* consumed by filter */
return;
ip6 = mtod(m, struct ip6_hdr *);
srcrt = !IN6_ARE_ADDR_EQUAL(&odst, &ip6->ip6_dst);
if (m->m_flags & M_FASTFWD_OURS) {
m->m_flags &= ~M_FASTFWD_OURS;
ours = 1;
deliverifp = m->m_pkthdr.rcvif;
goto hbhcheck;
}
if ((m->m_flags & M_IP6_NEXTHOP) &&
m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL) {
/*
* Directly ship the packet on. This allows forwarding
* packets originally destined to us to some other directly
* connected host.
*/
ip6_forward(m, 1);
goto out;
}
passin:
/*
* Disambiguate address scope zones (if there is ambiguity).
* We first make sure that the original source or destination address
* is not in our internal form for scoped addresses. Such addresses
* are not necessarily invalid spec-wise, but we cannot accept them due
* to the usage conflict.
* in6_setscope() then also checks and rejects the cases where src or
* dst are the loopback address and the receiving interface
* is not loopback.
*/
if (in6_clearscope(&ip6->ip6_src) || in6_clearscope(&ip6->ip6_dst)) {
IP6STAT_INC(ip6s_badscope); /* XXX */
goto bad;
}
if (in6_setscope(&ip6->ip6_src, m->m_pkthdr.rcvif, NULL) ||
in6_setscope(&ip6->ip6_dst, m->m_pkthdr.rcvif, NULL)) {
IP6STAT_INC(ip6s_badscope);
goto bad;
}
/*
* Multicast check. Assume packet is for us to avoid
* prematurely taking locks.
*/
if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
ours = 1;
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_mcast);
deliverifp = m->m_pkthdr.rcvif;
goto hbhcheck;
}
/*
* Unicast check
*/
bzero(&dst6, sizeof(dst6));
dst6.sin6_family = AF_INET6;
dst6.sin6_len = sizeof(struct sockaddr_in6);
dst6.sin6_addr = ip6->ip6_dst;
ifp = m->m_pkthdr.rcvif;
IF_AFDATA_RLOCK(ifp);
lle = lla_lookup(LLTABLE6(ifp), 0,
(struct sockaddr *)&dst6);
IF_AFDATA_RUNLOCK(ifp);
if ((lle != NULL) && (lle->la_flags & LLE_IFADDR)) {
struct ifaddr *ifa;
struct in6_ifaddr *ia6;
int bad;
bad = 1;
IF_ADDR_RLOCK(ifp);
TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
if (ifa->ifa_addr->sa_family != dst6.sin6_family)
continue;
if (sa_equal(&dst6, ifa->ifa_addr))
break;
}
KASSERT(ifa != NULL, ("%s: ifa not found for lle %p",
__func__, lle));
ia6 = (struct in6_ifaddr *)ifa;
if (!(ia6->ia6_flags & IN6_IFF_NOTREADY)) {
/* Count the packet in the ip address stats */
counter_u64_add(ia6->ia_ifa.ifa_ipackets, 1);
counter_u64_add(ia6->ia_ifa.ifa_ibytes,
m->m_pkthdr.len);
/*
* record address information into m_tag.
*/
(void)ip6_setdstifaddr(m, ia6);
bad = 0;
} else {
char ip6bufs[INET6_ADDRSTRLEN];
char ip6bufd[INET6_ADDRSTRLEN];
/* address is not ready, so discard the packet. */
nd6log((LOG_INFO,
"ip6_input: packet to an unready address %s->%s\n",
ip6_sprintf(ip6bufs, &ip6->ip6_src),
ip6_sprintf(ip6bufd, &ip6->ip6_dst)));
}
IF_ADDR_RUNLOCK(ifp);
LLE_RUNLOCK(lle);
if (bad)
goto bad;
else {
ours = 1;
deliverifp = ifp;
goto hbhcheck;
}
}
if (lle != NULL)
LLE_RUNLOCK(lle);
dst = &rin6.ro_dst;
dst->sin6_len = sizeof(struct sockaddr_in6);
dst->sin6_family = AF_INET6;
dst->sin6_addr = ip6->ip6_dst;
rin6.ro_rt = in6_rtalloc1((struct sockaddr *)dst, 0, 0, M_GETFIB(m));
if (rin6.ro_rt)
RT_UNLOCK(rin6.ro_rt);
#define rt6_key(r) ((struct sockaddr_in6 *)((r)->rt_nodes->rn_key))
/*
* Accept the packet if the forwarding interface to the destination
* according to the routing table is the loopback interface,
* unless the associated route has a gateway.
* Note that this approach causes us to accept a packet if there is a
* route to the loopback interface for the destination of the packet.
* But we think it can even be useful in some situations, e.g. when using
* a special daemon which wants to intercept the packet.
*
* XXX: some OSes automatically make a cloned route for the destination
* of an outgoing packet. If the outgoing interface of the packet
* is a loopback one, the kernel would consider the packet to be
* accepted, even if we have no such address assigned on the interface.
* We check the cloned flag of the route entry to reject such cases,
* assuming that route entries for our own addresses are not made by
* cloning (it should be true because in6_addloop explicitly installs
* the host route). However, we might have to do an explicit check
* even though it would be less efficient. Or, should we rather install a
* reject route for such a case?
*/
if (rin6.ro_rt &&
(rin6.ro_rt->rt_flags &
(RTF_HOST|RTF_GATEWAY)) == RTF_HOST &&
#ifdef RTF_WASCLONED
!(rin6.ro_rt->rt_flags & RTF_WASCLONED) &&
#endif
#ifdef RTF_CLONED
!(rin6.ro_rt->rt_flags & RTF_CLONED) &&
#endif
#if 0
/*
* The check below is redundant since the comparison of
* the destination and the key of the rtentry has
* already been done when looking up the routing table.
*/
IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
&rt6_key(rin6.ro_rt)->sin6_addr)
#endif
rin6.ro_rt->rt_ifp->if_type == IFT_LOOP) {
int free_ia6 = 0;
struct in6_ifaddr *ia6;
/*
* found the loopback route to the interface address
*/
if (rin6.ro_rt->rt_gateway->sa_family == AF_LINK) {
struct sockaddr_in6 dest6;
bzero(&dest6, sizeof(dest6));
dest6.sin6_family = AF_INET6;
dest6.sin6_len = sizeof(dest6);
dest6.sin6_addr = ip6->ip6_dst;
ia6 = (struct in6_ifaddr *)
ifa_ifwithaddr((struct sockaddr *)&dest6);
if (ia6 == NULL)
goto bad;
free_ia6 = 1;
}
else
ia6 = (struct in6_ifaddr *)rin6.ro_rt->rt_ifa;
/*
* record address information into m_tag.
*/
(void)ip6_setdstifaddr(m, ia6);
/*
* packets to a tentative, duplicated, or somehow invalid
* address must not be accepted.
*/
if (!(ia6->ia6_flags & IN6_IFF_NOTREADY)) {
/* this address is ready */
ours = 1;
deliverifp = ia6->ia_ifp; /* correct? */
/* Count the packet in the ip address stats */
counter_u64_add(ia6->ia_ifa.ifa_ipackets, 1);
counter_u64_add(ia6->ia_ifa.ifa_ibytes,
m->m_pkthdr.len);
if (free_ia6)
ifa_free(&ia6->ia_ifa);
goto hbhcheck;
} else {
char ip6bufs[INET6_ADDRSTRLEN];
char ip6bufd[INET6_ADDRSTRLEN];
/* address is not ready, so discard the packet. */
nd6log((LOG_INFO,
"ip6_input: packet to an unready address %s->%s\n",
ip6_sprintf(ip6bufs, &ip6->ip6_src),
ip6_sprintf(ip6bufd, &ip6->ip6_dst)));
if (free_ia6)
ifa_free(&ia6->ia_ifa);
goto bad;
}
}
/*
* FAITH (Firewall Aided Internet Translator)
*/
if (V_ip6_keepfaith) {
if (rin6.ro_rt && rin6.ro_rt->rt_ifp &&
rin6.ro_rt->rt_ifp->if_type == IFT_FAITH) {
/* XXX do we need more sanity checks? */
ours = 1;
deliverifp = rin6.ro_rt->rt_ifp; /* faith */
goto hbhcheck;
}
}
/*
* Now there is no reason to process the packet if it's not our own
* and we're not a router.
*/
if (!V_ip6_forwarding) {
IP6STAT_INC(ip6s_cantforward);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
goto bad;
}
hbhcheck:
/*
* record address information into m_tag, if we don't have one yet.
* note that we are unable to record it if the address is not listed
* as our interface address (e.g. multicast addresses, addresses
* within FAITH prefixes and such).
*/
if (deliverifp) {
struct in6_ifaddr *ia6;
if ((ia6 = ip6_getdstifaddr(m)) != NULL) {
ifa_free(&ia6->ia_ifa);
} else {
ia6 = in6_ifawithifp(deliverifp, &ip6->ip6_dst);
if (ia6) {
if (!ip6_setdstifaddr(m, ia6)) {
/*
* XXX maybe we should drop the packet here,
* as we could not provide enough information
* to the upper layers.
*/
}
ifa_free(&ia6->ia_ifa);
}
}
}
/*
* Process the Hop-by-Hop options header if one is present.
* m may be modified in ip6_hopopts_input().
* If a JumboPayload option is included, plen will also be modified.
*/
plen = (u_int32_t)ntohs(ip6->ip6_plen);
if (ip6->ip6_nxt == IPPROTO_HOPOPTS) {
int error;
error = ip6_input_hbh(m, &plen, &rtalert, &off, &nxt, &ours);
if (error != 0)
goto out;
} else
nxt = ip6->ip6_nxt;
/*
* Check that the amount of data in the buffers
* is at least as much as the IPv6 header would have us expect.
* Trim mbufs if longer than we expect.
* Drop packet if shorter than we expect.
*/
if (m->m_pkthdr.len - sizeof(struct ip6_hdr) < plen) {
IP6STAT_INC(ip6s_tooshort);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated);
goto bad;
}
if (m->m_pkthdr.len > sizeof(struct ip6_hdr) + plen) {
if (m->m_len == m->m_pkthdr.len) {
m->m_len = sizeof(struct ip6_hdr) + plen;
m->m_pkthdr.len = sizeof(struct ip6_hdr) + plen;
} else
m_adj(m, sizeof(struct ip6_hdr) + plen - m->m_pkthdr.len);
}
/*
* Forward if desirable.
*/
if (V_ip6_mrouter &&
IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
/*
* If we are acting as a multicast router, all
* incoming multicast packets are passed to the
* kernel-level multicast forwarding function.
* The packet is returned (relatively) intact; if
* ip6_mforward() returns a non-zero value, the packet
* must be discarded, else it may be accepted below.
*
* XXX TODO: Check hlim and multicast scope here to avoid
* unnecessarily calling into ip6_mforward().
*/
if (ip6_mforward &&
ip6_mforward(ip6, m->m_pkthdr.rcvif, m)) {
IP6STAT_INC(ip6s_cantforward);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
goto bad;
}
} else if (!ours) {
ip6_forward(m, srcrt);
goto out;
}
ip6 = mtod(m, struct ip6_hdr *);
/*
* Malicious party may be able to use IPv4 mapped addr to confuse
* tcp/udp stack and bypass security checks (act as if it was from
* 127.0.0.1 by using IPv6 src ::ffff:127.0.0.1). Be cautious.
*
* For SIIT end node behavior, you may want to disable the check.
* However, you will become vulnerable to attacks using IPv4 mapped
* source.
*/
if (IN6_IS_ADDR_V4MAPPED(&ip6->ip6_src) ||
IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst)) {
IP6STAT_INC(ip6s_badscope);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
goto bad;
}
/*
* Tell launch routine the next header
*/
IP6STAT_INC(ip6s_delivered);
in6_ifstat_inc(deliverifp, ifs6_in_deliver);
nest = 0;
while (nxt != IPPROTO_DONE) {
if (V_ip6_hdrnestlimit && (++nest > V_ip6_hdrnestlimit)) {
IP6STAT_INC(ip6s_toomanyhdr);
goto bad;
}
/*
* protection against faulty packets - there should be
* more sanity checks in header chain processing.
*/
if (m->m_pkthdr.len < off) {
IP6STAT_INC(ip6s_tooshort);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated);
goto bad;
}
#ifdef IPSEC
/*
* enforce IPsec policy checking if we are seeing the last header.
* note that we do not visit this for protocols with pcb-layer
* code - like udp/tcp/raw ip.
*/
if (ip6_ipsec_input(m, nxt))
goto bad;
#endif /* IPSEC */
/*
* Use mbuf flags to propagate Router Alert option to
* ICMPv6 layer, as hop-by-hop options have been stripped.
*/
if (nxt == IPPROTO_ICMPV6 && rtalert != ~0)
m->m_flags |= M_RTALERT_MLD;
nxt = (*inet6sw[ip6_protox[nxt]].pr_input)(&m, &off, nxt);
}
goto out;
bad:
m_freem(m);
out:
if (rin6.ro_rt)
RTFREE(rin6.ro_rt);
}
/*
* Set/grab the in6_ifaddr corresponding to the IPv6 destination address.
* XXX backward compatibility wrapper
*
* XXXRW: We should bump the refcount on ia6 before sticking it in the m_tag,
* and then bump it when the tag is copied, and release it when the tag is
* freed. Unfortunately, m_tags don't support deep copies (yet), so instead
* we just bump the ia refcount when we receive it. This should be fixed.
*/
static struct ip6aux *
ip6_setdstifaddr(struct mbuf *m, struct in6_ifaddr *ia6)
{
struct ip6aux *ip6a;
ip6a = ip6_addaux(m);
if (ip6a)
ip6a->ip6a_dstia6 = ia6;
return ip6a; /* NULL if failed to set */
}
struct in6_ifaddr *
ip6_getdstifaddr(struct mbuf *m)
{
struct ip6aux *ip6a;
struct in6_ifaddr *ia;
ip6a = ip6_findaux(m);
if (ip6a) {
ia = ip6a->ip6a_dstia6;
ifa_ref(&ia->ia_ifa);
return ia;
} else
return NULL;
}
/*
* Hop-by-Hop options header processing. If a valid jumbo payload option is
* included, the real payload length will be stored in plenp.
*
* rtalertp - XXX: should be stored in a smarter way
*/
static int
ip6_hopopts_input(u_int32_t *plenp, u_int32_t *rtalertp,
struct mbuf **mp, int *offp)
{
struct mbuf *m = *mp;
int off = *offp, hbhlen;
struct ip6_hbh *hbh;
/* validation of the length of the header */
#ifndef PULLDOWN_TEST
IP6_EXTHDR_CHECK(m, off, sizeof(*hbh), -1);
hbh = (struct ip6_hbh *)(mtod(m, caddr_t) + off);
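/*
* ip6h_len is expressed in 8-octet units, not counting the first 8
* octets of the header (RFC 2460), so the full header length in bytes
* is (ip6h_len + 1) * 8; e.g. ip6h_len == 0 denotes an 8-byte header.
*/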
hbhlen = (hbh->ip6h_len + 1) << 3;
IP6_EXTHDR_CHECK(m, off, hbhlen, -1);
hbh = (struct ip6_hbh *)(mtod(m, caddr_t) + off);
#else
IP6_EXTHDR_GET(hbh, struct ip6_hbh *, m,
sizeof(struct ip6_hdr), sizeof(struct ip6_hbh));
if (hbh == NULL) {
IP6STAT_INC(ip6s_tooshort);
return -1;
}
hbhlen = (hbh->ip6h_len + 1) << 3;
IP6_EXTHDR_GET(hbh, struct ip6_hbh *, m, sizeof(struct ip6_hdr),
hbhlen);
if (hbh == NULL) {
IP6STAT_INC(ip6s_tooshort);
return -1;
}
#endif
off += hbhlen;
hbhlen -= sizeof(struct ip6_hbh);
if (ip6_process_hopopts(m, (u_int8_t *)hbh + sizeof(struct ip6_hbh),
hbhlen, rtalertp, plenp) < 0)
return (-1);
*offp = off;
*mp = m;
return (0);
}
/*
* Search header for all Hop-by-hop options and process each option.
* This function is separate from ip6_hopopts_input() in order to
* handle the case where the sending node itself processes its hop-by-hop
* options header. In such a case, the function is called from ip6_output().
*
* The function assumes that the hbh header is located right after the IPv6
* header (RFC2460 p7), that opthead points into the data content of m, and
* that the region from opthead to opthead + hbhlen is contiguous in memory.
*/
int
ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen,
u_int32_t *rtalertp, u_int32_t *plenp)
{
struct ip6_hdr *ip6;
int optlen = 0;
u_int8_t *opt = opthead;
u_int16_t rtalert_val;
u_int32_t jumboplen;
const int erroff = sizeof(struct ip6_hdr) + sizeof(struct ip6_hbh);
for (; hbhlen > 0; hbhlen -= optlen, opt += optlen) {
switch (*opt) {
case IP6OPT_PAD1:
optlen = 1;
break;
case IP6OPT_PADN:
if (hbhlen < IP6OPT_MINLEN) {
IP6STAT_INC(ip6s_toosmall);
goto bad;
}
optlen = *(opt + 1) + 2;
break;
case IP6OPT_ROUTER_ALERT:
/* XXX may need check for alignment */
if (hbhlen < IP6OPT_RTALERT_LEN) {
IP6STAT_INC(ip6s_toosmall);
goto bad;
}
if (*(opt + 1) != IP6OPT_RTALERT_LEN - 2) {
/* XXX stat */
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_HEADER,
erroff + opt + 1 - opthead);
return (-1);
}
optlen = IP6OPT_RTALERT_LEN;
bcopy((caddr_t)(opt + 2), (caddr_t)&rtalert_val, 2);
*rtalertp = ntohs(rtalert_val);
break;
case IP6OPT_JUMBO:
/* XXX may need check for alignment */
if (hbhlen < IP6OPT_JUMBO_LEN) {
IP6STAT_INC(ip6s_toosmall);
goto bad;
}
if (*(opt + 1) != IP6OPT_JUMBO_LEN - 2) {
/* XXX stat */
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_HEADER,
erroff + opt + 1 - opthead);
return (-1);
}
optlen = IP6OPT_JUMBO_LEN;
/*
* IPv6 packets that have a non-zero payload length
* must not contain a jumbo payload option.
*/
ip6 = mtod(m, struct ip6_hdr *);
if (ip6->ip6_plen) {
IP6STAT_INC(ip6s_badoptions);
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_HEADER,
erroff + opt - opthead);
return (-1);
}
/*
* We may see jumbolen in unaligned location, so
* we'd need to perform bcopy().
*/
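/*
* htonl() and ntohl() perform the same byte swap, so applying htonl()
* here converts the 32-bit wire-format length into host byte order.
*/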
bcopy(opt + 2, &jumboplen, sizeof(jumboplen));
jumboplen = (u_int32_t)htonl(jumboplen);
#if 1
/*
* if there are multiple jumbo payload options,
* *plenp will be non-zero and the packet will be
* rejected.
* the behavior may need some debate in ipngwg -
* multiple options do not make sense; however,
* there is no explicit mention in the specification.
*/
if (*plenp != 0) {
IP6STAT_INC(ip6s_badoptions);
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_HEADER,
erroff + opt + 2 - opthead);
return (-1);
}
#endif
/*
* jumbo payload length must be larger than 65535.
*/
if (jumboplen <= IPV6_MAXPACKET) {
IP6STAT_INC(ip6s_badoptions);
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_HEADER,
erroff + opt + 2 - opthead);
return (-1);
}
*plenp = jumboplen;
break;
default: /* unknown option */
if (hbhlen < IP6OPT_MINLEN) {
IP6STAT_INC(ip6s_toosmall);
goto bad;
}
optlen = ip6_unknown_opt(opt, m,
erroff + opt - opthead);
if (optlen == -1)
return (-1);
optlen += 2;
break;
}
}
return (0);
bad:
m_freem(m);
return (-1);
}
/*
* Unknown option processing.
* The third argument `off' is the offset from the IPv6 header to the option,
* which is necessary to return an ICMPv6 error when the IPv6 header and the
* option header are not contiguous.
*/
int
ip6_unknown_opt(u_int8_t *optp, struct mbuf *m, int off)
{
struct ip6_hdr *ip6;
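/*
* The two high-order bits of the option type encode the action to take
* for an unrecognized option (RFC 2460, section 4.2):
* 00 - skip over the option and continue processing,
* 01 - silently discard the packet,
* 10 - discard and send an ICMPv6 Parameter Problem message,
* 11 - discard and send the ICMPv6 error only if the destination
*      was not a multicast address.
* IP6OPT_TYPE() extracts these two bits.
*/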
switch (IP6OPT_TYPE(*optp)) {
case IP6OPT_TYPE_SKIP: /* ignore the option */
return ((int)*(optp + 1));
case IP6OPT_TYPE_DISCARD: /* silently discard */
m_freem(m);
return (-1);
case IP6OPT_TYPE_FORCEICMP: /* send ICMP even if multicasted */
IP6STAT_INC(ip6s_badoptions);
icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_OPTION, off);
return (-1);
case IP6OPT_TYPE_ICMP: /* send ICMP if not multicasted */
IP6STAT_INC(ip6s_badoptions);
ip6 = mtod(m, struct ip6_hdr *);
if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
(m->m_flags & (M_BCAST|M_MCAST)))
m_freem(m);
else
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_OPTION, off);
return (-1);
}
m_freem(m); /* XXX: NOTREACHED */
return (-1);
}
/*
* Create the "control" list for this pcb.
* These functions will not modify the mbuf chain at all.
*
* With KAME mbuf chain restriction:
* The routine will be called from upper layer handlers like tcp6_input().
* Thus the routine assumes that the caller (tcp6_input) has already
* called IP6_EXTHDR_CHECK() and all the extension headers are located in the
* very first mbuf on the mbuf chain.
*
* ip6_savecontrol_v4 will handle those options that are possible to be
* set on a v4-mapped socket.
* ip6_savecontrol will directly call ip6_savecontrol_v4 to handle those
* options and handle the v6-only ones itself.
*/
struct mbuf **
ip6_savecontrol_v4(struct inpcb *inp, struct mbuf *m, struct mbuf **mp,
int *v4only)
{
struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
#ifdef SO_TIMESTAMP
if ((inp->inp_socket->so_options & SO_TIMESTAMP) != 0) {
struct timeval tv;
microtime(&tv);
*mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
SCM_TIMESTAMP, SOL_SOCKET);
if (*mp)
mp = &(*mp)->m_next;
}
#endif
#define IS2292(inp, x, y) (((inp)->inp_flags & IN6P_RFC2292) ? (x) : (y))
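/*
* IS2292() picks the RFC 2292 or the RFC 3542 flavour of an ancillary
* data type, depending on whether the socket requested the older
* RFC 2292 semantics (IN6P_RFC2292), e.g. IPV6_2292PKTINFO versus
* IPV6_PKTINFO below.
*/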
/* RFC 2292 sec. 5 */
if ((inp->inp_flags & IN6P_PKTINFO) != 0) {
struct in6_pktinfo pi6;
if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
#ifdef INET
struct ip *ip;
ip = mtod(m, struct ip *);
pi6.ipi6_addr.s6_addr32[0] = 0;
pi6.ipi6_addr.s6_addr32[1] = 0;
pi6.ipi6_addr.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
pi6.ipi6_addr.s6_addr32[3] = ip->ip_dst.s_addr;
#else
/* We won't hit this code */
bzero(&pi6.ipi6_addr, sizeof(struct in6_addr));
#endif
} else {
bcopy(&ip6->ip6_dst, &pi6.ipi6_addr, sizeof(struct in6_addr));
in6_clearscope(&pi6.ipi6_addr); /* XXX */
}
pi6.ipi6_ifindex =
(m && m->m_pkthdr.rcvif) ? m->m_pkthdr.rcvif->if_index : 0;
*mp = sbcreatecontrol((caddr_t) &pi6,
sizeof(struct in6_pktinfo),
IS2292(inp, IPV6_2292PKTINFO, IPV6_PKTINFO), IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
}
if ((inp->inp_flags & IN6P_HOPLIMIT) != 0) {
int hlim;
if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
#ifdef INET
struct ip *ip;
ip = mtod(m, struct ip *);
hlim = ip->ip_ttl;
#else
/* We won't hit this code */
hlim = 0;
#endif
} else {
hlim = ip6->ip6_hlim & 0xff;
}
*mp = sbcreatecontrol((caddr_t) &hlim, sizeof(int),
IS2292(inp, IPV6_2292HOPLIMIT, IPV6_HOPLIMIT),
IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
}
if ((inp->inp_flags & IN6P_TCLASS) != 0) {
int tclass;
if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
#ifdef INET
struct ip *ip;
ip = mtod(m, struct ip *);
tclass = ip->ip_tos;
#else
/* We won't hit this code */
tclass = 0;
#endif
} else {
u_int32_t flowinfo;
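/*
* ip6_flow carries, from the most significant bit down, the 4-bit
* version, the 8-bit traffic class and the 20-bit flow label.
* IPV6_FLOWINFO_MASK strips the version bits, so after the byte swap,
* shifting right by 20 leaves the traffic class in the low 8 bits.
*/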
flowinfo = (u_int32_t)ntohl(ip6->ip6_flow & IPV6_FLOWINFO_MASK);
flowinfo >>= 20;
tclass = flowinfo & 0xff;
}
*mp = sbcreatecontrol((caddr_t) &tclass, sizeof(int),
IPV6_TCLASS, IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
}
if (v4only != NULL) {
if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
*v4only = 1;
} else {
*v4only = 0;
}
}
return (mp);
}
void
ip6_savecontrol(struct inpcb *in6p, struct mbuf *m, struct mbuf **mp)
{
struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
int v4only = 0;
mp = ip6_savecontrol_v4(in6p, m, mp, &v4only);
if (v4only)
return;
/*
* IPV6_HOPOPTS socket option. Recall that we required super-user
* privilege for the option (see ip6_ctloutput), but it might be too
* strict, since there might be some hop-by-hop options which could
* safely be returned to a normal user.
* See also RFC 2292 section 6 (or RFC 3542 section 8).
*/
if ((in6p->inp_flags & IN6P_HOPOPTS) != 0) {
/*
* Check if a hop-by-hop options header is contained in the
* received packet, and if so, store the options as ancillary
* data. Note that a hop-by-hop options header must be
* just after the IPv6 header, which is assured through the
* IPv6 input processing.
*/
if (ip6->ip6_nxt == IPPROTO_HOPOPTS) {
struct ip6_hbh *hbh;
int hbhlen = 0;
#ifdef PULLDOWN_TEST
struct mbuf *ext;
#endif
#ifndef PULLDOWN_TEST
hbh = (struct ip6_hbh *)(ip6 + 1);
hbhlen = (hbh->ip6h_len + 1) << 3;
#else
ext = ip6_pullexthdr(m, sizeof(struct ip6_hdr),
ip6->ip6_nxt);
if (ext == NULL) {
IP6STAT_INC(ip6s_tooshort);
return;
}
hbh = mtod(ext, struct ip6_hbh *);
hbhlen = (hbh->ip6h_len + 1) << 3;
if (hbhlen != ext->m_len) {
m_freem(ext);
IP6STAT_INC(ip6s_tooshort);
return;
}
#endif
/*
* XXX: We copy the whole header even if a
* jumbo payload option is included, the option which
* is to be removed before returning according to
* RFC2292.
* Note: this constraint is removed in RFC3542
*/
*mp = sbcreatecontrol((caddr_t)hbh, hbhlen,
IS2292(in6p, IPV6_2292HOPOPTS, IPV6_HOPOPTS),
IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
#ifdef PULLDOWN_TEST
m_freem(ext);
#endif
}
}
if ((in6p->inp_flags & (IN6P_RTHDR | IN6P_DSTOPTS)) != 0) {
int nxt = ip6->ip6_nxt, off = sizeof(struct ip6_hdr);
/*
* Search the header chain for destination options headers or
* routing header(s), and store each header found as ancillary
* data.
* Note that the headers are stored as ancillary data in the same
* order in which they appear in the packet.
*/
while (1) { /* is explicit loop prevention necessary? */
struct ip6_ext *ip6e = NULL;
int elen;
#ifdef PULLDOWN_TEST
struct mbuf *ext = NULL;
#endif
/*
* if it is not an extension header, don't try to
* pull it from the chain.
*/
switch (nxt) {
case IPPROTO_DSTOPTS:
case IPPROTO_ROUTING:
case IPPROTO_HOPOPTS:
case IPPROTO_AH: /* is it possible? */
break;
default:
goto loopend;
}
#ifndef PULLDOWN_TEST
if (off + sizeof(*ip6e) > m->m_len)
goto loopend;
ip6e = (struct ip6_ext *)(mtod(m, caddr_t) + off);
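/*
* Extension header lengths are encoded differently: AH expresses
* ip6e_len in 4-octet units minus 2 (RFC 4302), while the other
* extension headers use 8-octet units not counting the first 8
* octets (RFC 2460), hence the two formulas below.
*/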
if (nxt == IPPROTO_AH)
elen = (ip6e->ip6e_len + 2) << 2;
else
elen = (ip6e->ip6e_len + 1) << 3;
if (off + elen > m->m_len)
goto loopend;
#else
ext = ip6_pullexthdr(m, off, nxt);
if (ext == NULL) {
IP6STAT_INC(ip6s_tooshort);
return;
}
ip6e = mtod(ext, struct ip6_ext *);
if (nxt == IPPROTO_AH)
elen = (ip6e->ip6e_len + 2) << 2;
else
elen = (ip6e->ip6e_len + 1) << 3;
if (elen != ext->m_len) {
m_freem(ext);
IP6STAT_INC(ip6s_tooshort);
return;
}
#endif
switch (nxt) {
case IPPROTO_DSTOPTS:
if (!(in6p->inp_flags & IN6P_DSTOPTS))
break;
*mp = sbcreatecontrol((caddr_t)ip6e, elen,
IS2292(in6p,
IPV6_2292DSTOPTS, IPV6_DSTOPTS),
IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
break;
case IPPROTO_ROUTING:
if (!(in6p->inp_flags & IN6P_RTHDR))
break;
*mp = sbcreatecontrol((caddr_t)ip6e, elen,
IS2292(in6p, IPV6_2292RTHDR, IPV6_RTHDR),
IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
break;
case IPPROTO_HOPOPTS:
case IPPROTO_AH: /* is it possible? */
break;
default:
/*
* Other cases have been filtered out above, so this
* case should never be reached; the code is supplied
* defensively, in case nxt was overwritten or some
* other unexpected value shows up.
*/
#ifdef PULLDOWN_TEST
m_freem(ext);
#endif
goto loopend;
}
/* proceed with the next header. */
off += elen;
nxt = ip6e->ip6e_nxt;
ip6e = NULL;
#ifdef PULLDOWN_TEST
m_freem(ext);
ext = NULL;
#endif
}
loopend:
;
}
}
#undef IS2292
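/*
* Notify a socket of an updated path MTU: build a struct ip6_mtuinfo
* carrying the destination address and the new MTU, append it to the
* socket's receive buffer as IPV6_PATHMTU ancillary data, and wake up
* any reader.
*/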
void
ip6_notify_pmtu(struct inpcb *in6p, struct sockaddr_in6 *dst, u_int32_t *mtu)
{
struct socket *so;
struct mbuf *m_mtu;
struct ip6_mtuinfo mtuctl;
so = in6p->inp_socket;
if (mtu == NULL)
return;
#ifdef DIAGNOSTIC
if (so == NULL) /* I believe this is impossible */
panic("ip6_notify_pmtu: socket is NULL");
#endif
bzero(&mtuctl, sizeof(mtuctl)); /* zero-clear for safety */
mtuctl.ip6m_mtu = *mtu;
mtuctl.ip6m_addr = *dst;
if (sa6_recoverscope(&mtuctl.ip6m_addr))
return;
if ((m_mtu = sbcreatecontrol((caddr_t)&mtuctl, sizeof(mtuctl),
IPV6_PATHMTU, IPPROTO_IPV6)) == NULL)
return;
if (sbappendaddr(&so->so_rcv, (struct sockaddr *)dst, NULL, m_mtu)
== 0) {
m_freem(m_mtu);
/* XXX: should count statistics */
} else
sorwakeup(so);
return;
}
#ifdef PULLDOWN_TEST
/*
* pull single extension header from mbuf chain. returns single mbuf that
* contains the result, or NULL on error.
*/
static struct mbuf *
ip6_pullexthdr(struct mbuf *m, size_t off, int nxt)
{
struct ip6_ext ip6e;
size_t elen;
struct mbuf *n;
#ifdef DIAGNOSTIC
switch (nxt) {
case IPPROTO_DSTOPTS:
case IPPROTO_ROUTING:
case IPPROTO_HOPOPTS:
case IPPROTO_AH: /* is it possible? */
break;
default:
printf("ip6_pullexthdr: invalid nxt=%d\n", nxt);
}
#endif
m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
if (nxt == IPPROTO_AH)
elen = (ip6e.ip6e_len + 2) << 2;
else
elen = (ip6e.ip6e_len + 1) << 3;
if (elen > MLEN)
n = m_getcl(M_NOWAIT, MT_DATA, 0);
else
n = m_get(M_NOWAIT, MT_DATA);
if (n == NULL)
return NULL;
m_copydata(m, off, elen, mtod(n, caddr_t));
n->m_len = elen;
return n;
}
#endif
/*
* Get pointer to the previous header followed by the header
* currently processed.
* XXX: This function assumes that
* M includes all headers,
* the next header field and the header length field of each header
* are valid, and
* the sum of the header lengths equals OFF.
* Because of these assumptions, this function must be called very
* carefully. Moreover, it will not be used in the near future when
* we develop a `neater' mechanism to process extension headers.
*/
char *
ip6_get_prevhdr(struct mbuf *m, int off)
{
struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
if (off == sizeof(struct ip6_hdr))
return (&ip6->ip6_nxt);
else {
int len, nxt;
struct ip6_ext *ip6e = NULL;
nxt = ip6->ip6_nxt;
len = sizeof(struct ip6_hdr);
while (len < off) {
ip6e = (struct ip6_ext *)(mtod(m, caddr_t) + len);
switch (nxt) {
case IPPROTO_FRAGMENT:
len += sizeof(struct ip6_frag);
break;
case IPPROTO_AH:
len += (ip6e->ip6e_len + 2) << 2;
break;
default:
len += (ip6e->ip6e_len + 1) << 3;
break;
}
nxt = ip6e->ip6e_nxt;
}
if (ip6e)
return (&ip6e->ip6e_nxt);
else
return NULL;
}
}
/*
* Get the offset of the next header: return the offset just past the header
* of type 'proto' that starts at 'off', and store that next header's
* protocol number in *nxtp; return -1 if the chain cannot be parsed any
* further.  m will be retained.
*/
int
ip6_nexthdr(struct mbuf *m, int off, int proto, int *nxtp)
{
struct ip6_hdr ip6;
struct ip6_ext ip6e;
struct ip6_frag fh;
/* just in case */
if (m == NULL)
panic("ip6_nexthdr: m == NULL");
if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len < off)
return -1;
switch (proto) {
case IPPROTO_IPV6:
if (m->m_pkthdr.len < off + sizeof(ip6))
return -1;
m_copydata(m, off, sizeof(ip6), (caddr_t)&ip6);
if (nxtp)
*nxtp = ip6.ip6_nxt;
off += sizeof(ip6);
return off;
case IPPROTO_FRAGMENT:
/*
* Terminate parsing if this is not the first fragment;
* it does not make sense to parse through it.
*/
if (m->m_pkthdr.len < off + sizeof(fh))
return -1;
m_copydata(m, off, sizeof(fh), (caddr_t)&fh);
/* IP6F_OFF_MASK = 0xfff8(BigEndian), 0xf8ff(LittleEndian) */
if (fh.ip6f_offlg & IP6F_OFF_MASK)
return -1;
if (nxtp)
*nxtp = fh.ip6f_nxt;
off += sizeof(struct ip6_frag);
return off;
case IPPROTO_AH:
if (m->m_pkthdr.len < off + sizeof(ip6e))
return -1;
m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
if (nxtp)
*nxtp = ip6e.ip6e_nxt;
off += (ip6e.ip6e_len + 2) << 2;
return off;
case IPPROTO_HOPOPTS:
case IPPROTO_ROUTING:
case IPPROTO_DSTOPTS:
if (m->m_pkthdr.len < off + sizeof(ip6e))
return -1;
m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
if (nxtp)
*nxtp = ip6e.ip6e_nxt;
off += (ip6e.ip6e_len + 1) << 3;
return off;
case IPPROTO_NONE:
case IPPROTO_ESP:
case IPPROTO_IPCOMP:
/* give up */
return -1;
default:
return -1;
}
return -1;
}
/*
* get offset for the last header in the chain. m will be kept untainted.
*/
int
ip6_lasthdr(struct mbuf *m, int off, int proto, int *nxtp)
{
int newoff;
int nxt;
if (!nxtp) {
nxt = -1;
nxtp = &nxt;
}
while (1) {
newoff = ip6_nexthdr(m, off, proto, nxtp);
if (newoff < 0)
return off;
else if (newoff < off)
return -1; /* invalid */
else if (newoff == off)
return newoff;
off = newoff;
proto = *nxtp;
}
}
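/*
* Per-packet IPv6 auxiliary data is carried in a PACKET_TAG_IPV6_INPUT
* mbuf tag; the struct ip6aux lives immediately after the tag header,
* which is why the routines below return (struct ip6aux *)(mtag + 1).
*/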
static struct ip6aux *
ip6_addaux(struct mbuf *m)
{
struct m_tag *mtag;
mtag = m_tag_find(m, PACKET_TAG_IPV6_INPUT, NULL);
if (!mtag) {
mtag = m_tag_get(PACKET_TAG_IPV6_INPUT, sizeof(struct ip6aux),
M_NOWAIT);
if (mtag) {
m_tag_prepend(m, mtag);
bzero(mtag + 1, sizeof(struct ip6aux));
}
}
return mtag ? (struct ip6aux *)(mtag + 1) : NULL;
}
static struct ip6aux *
ip6_findaux(struct mbuf *m)
{
struct m_tag *mtag;
mtag = m_tag_find(m, PACKET_TAG_IPV6_INPUT, NULL);
return mtag ? (struct ip6aux *)(mtag + 1) : NULL;
}
static void
ip6_delaux(struct mbuf *m)
{
struct m_tag *mtag;
mtag = m_tag_find(m, PACKET_TAG_IPV6_INPUT, NULL);
if (mtag)
m_tag_delete(m, mtag);
}
/*
* System control for IP6
*/
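/*
* Map PRC_* control codes to errno values, indexed by PRC code.
* A zero entry means the event is not reported to the user as an error;
* this table is typically consulted by the transport ctlinput routines
* when processing ICMPv6 messages.
*/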
u_char inet6ctlerrmap[PRC_NCMDS] = {
0, 0, 0, 0,
0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH,
EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED,
EMSGSIZE, EHOSTUNREACH, 0, 0,
0, 0, 0, 0,
ENOPROTOOPT
};