Bug 729511: Import usrsctp library rev 8279 rs=biesi
authorRandell Jesup <rjesup@jesup.org>
Wed, 03 Oct 2012 03:22:43 -0400
changeset 115888 9fb243800b26203a8548abc5e7905c183dcd2355
parent 115887 86bf1e4e8de779f4b8829e8efe4f20392b65ff6f
child 115889 d85136cf566a0bf4d90ab636e35a8c9f1477fb24
push id239
push userakeybl@mozilla.com
push dateThu, 03 Jan 2013 21:54:43 +0000
treeherdermozilla-release@3a7b66445659 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersbiesi
bugs729511
milestone18.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 729511: Import usrsctp library rev 8279 rs=biesi
netwerk/sctp/src/netinet/sctp.h
netwerk/sctp/src/netinet/sctp_asconf.c
netwerk/sctp/src/netinet/sctp_asconf.h
netwerk/sctp/src/netinet/sctp_auth.c
netwerk/sctp/src/netinet/sctp_auth.h
netwerk/sctp/src/netinet/sctp_bsd_addr.c
netwerk/sctp/src/netinet/sctp_bsd_addr.h
netwerk/sctp/src/netinet/sctp_callout.c
netwerk/sctp/src/netinet/sctp_callout.h
netwerk/sctp/src/netinet/sctp_cc_functions.c
netwerk/sctp/src/netinet/sctp_constants.h
netwerk/sctp/src/netinet/sctp_crc32.c
netwerk/sctp/src/netinet/sctp_crc32.h
netwerk/sctp/src/netinet/sctp_dtrace_declare.h
netwerk/sctp/src/netinet/sctp_dtrace_define.h
netwerk/sctp/src/netinet/sctp_hashdriver.c
netwerk/sctp/src/netinet/sctp_hashdriver.h
netwerk/sctp/src/netinet/sctp_header.h
netwerk/sctp/src/netinet/sctp_indata.c
netwerk/sctp/src/netinet/sctp_indata.h
netwerk/sctp/src/netinet/sctp_input.c
netwerk/sctp/src/netinet/sctp_input.h
netwerk/sctp/src/netinet/sctp_lock_userspace.h
netwerk/sctp/src/netinet/sctp_os.h
netwerk/sctp/src/netinet/sctp_os_userspace.h
netwerk/sctp/src/netinet/sctp_output.c
netwerk/sctp/src/netinet/sctp_output.h
netwerk/sctp/src/netinet/sctp_pcb.c
netwerk/sctp/src/netinet/sctp_pcb.h
netwerk/sctp/src/netinet/sctp_peeloff.c
netwerk/sctp/src/netinet/sctp_peeloff.h
netwerk/sctp/src/netinet/sctp_process_lock.h
netwerk/sctp/src/netinet/sctp_sha1.c
netwerk/sctp/src/netinet/sctp_sha1.h
netwerk/sctp/src/netinet/sctp_ss_functions.c
netwerk/sctp/src/netinet/sctp_structs.h
netwerk/sctp/src/netinet/sctp_sysctl.c
netwerk/sctp/src/netinet/sctp_sysctl.h
netwerk/sctp/src/netinet/sctp_timer.c
netwerk/sctp/src/netinet/sctp_timer.h
netwerk/sctp/src/netinet/sctp_uio.h
netwerk/sctp/src/netinet/sctp_userspace.c
netwerk/sctp/src/netinet/sctp_usrreq.c
netwerk/sctp/src/netinet/sctp_var.h
netwerk/sctp/src/netinet/sctputil.c
netwerk/sctp/src/netinet/sctputil.h
netwerk/sctp/src/netinet6/sctp6_usrreq.c
netwerk/sctp/src/netinet6/sctp6_var.h
netwerk/sctp/src/user_atomic.h
netwerk/sctp/src/user_environment.c
netwerk/sctp/src/user_environment.h
netwerk/sctp/src/user_inpcb.h
netwerk/sctp/src/user_ip6_var.h
netwerk/sctp/src/user_ip_icmp.h
netwerk/sctp/src/user_malloc.h
netwerk/sctp/src/user_mbuf.c
netwerk/sctp/src/user_mbuf.h
netwerk/sctp/src/user_queue.h
netwerk/sctp/src/user_recv_thread.c
netwerk/sctp/src/user_recv_thread.h
netwerk/sctp/src/user_route.h
netwerk/sctp/src/user_sctp_timer_iterate.c
netwerk/sctp/src/user_socket.c
netwerk/sctp/src/user_socketvar.h
netwerk/sctp/src/user_uma.h
netwerk/sctp/src/usrsctp.h
new file mode 100755
--- /dev/null
+++ b/netwerk/sctp/src/netinet/sctp.h
@@ -0,0 +1,628 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp.h 235990 2012-05-25 11:14:08Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_H_
+#define _NETINET_SCTP_H_
+
+#if (defined(__APPLE__) || defined(__Userspace_os_Linux) || defined(__Userspace_os_Darwin))
+#include <stdint.h>
+#endif
+
+#include <sys/types.h>
+
+
+#if !defined(__Userspace_os_Windows)
+#define SCTP_PACKED __attribute__((packed))
+#else
+#pragma pack (push, 1)
+#define SCTP_PACKED
+#endif
+
+/*
+ * SCTP protocol - RFC2960.
+ */
+struct sctphdr {
+	uint16_t src_port;	/* source port */
+	uint16_t dest_port;	/* destination port */
+	uint32_t v_tag;		/* verification tag of packet */
+	uint32_t checksum;	/* CRC32c checksum (RFC 3309/4960; was Adler-32 in RFC 2960) */
+	/* chunks follow... */
+} SCTP_PACKED;
+
+/*
+ * SCTP Chunks
+ */
+struct sctp_chunkhdr {
+	uint8_t chunk_type;	/* chunk type (SCTP_DATA, SCTP_INITIATION, ...) */
+	uint8_t chunk_flags;	/* chunk flags (type-specific) */
+	uint16_t chunk_length;	/* chunk length in bytes, including this header (RFC 4960, 3.2) */
+	/* optional params follow */
+} SCTP_PACKED;
+
+/*
+ * SCTP chunk parameters
+ */
+struct sctp_paramhdr {
+	uint16_t param_type;	/* parameter type */
+	uint16_t param_length;	/* parameter length, including this 4-byte header (RFC 4960, 3.2.1) */
+} SCTP_PACKED;
+
+/*
+ * user socket options: socket API defined
+ */
+/*
+ * read-write options
+ */
+#define SCTP_RTOINFO			0x00000001
+#define SCTP_ASSOCINFO			0x00000002
+#define SCTP_INITMSG			0x00000003
+#define SCTP_NODELAY			0x00000004
+#define SCTP_AUTOCLOSE			0x00000005
+#define SCTP_SET_PEER_PRIMARY_ADDR	0x00000006
+#define SCTP_PRIMARY_ADDR		0x00000007
+#define SCTP_ADAPTATION_LAYER		0x00000008
+/* same as above */
+#define SCTP_ADAPTION_LAYER		0x00000008
+#define SCTP_DISABLE_FRAGMENTS		0x00000009
+#define SCTP_PEER_ADDR_PARAMS 		0x0000000a
+#define SCTP_DEFAULT_SEND_PARAM		0x0000000b
+/* ancillary data/notification interest options */
+#define SCTP_EVENTS			0x0000000c /* deprecated */
+/* Without this applied we will give V4 and V6 addresses on a V6 socket */
+#define SCTP_I_WANT_MAPPED_V4_ADDR	0x0000000d
+#define SCTP_MAXSEG 			0x0000000e
+#define SCTP_DELAYED_SACK               0x0000000f
+#define SCTP_FRAGMENT_INTERLEAVE        0x00000010
+#define SCTP_PARTIAL_DELIVERY_POINT     0x00000011
+/* authentication support */
+#define SCTP_AUTH_CHUNK 		0x00000012
+#define SCTP_AUTH_KEY 			0x00000013
+#define SCTP_HMAC_IDENT 		0x00000014
+#define SCTP_AUTH_ACTIVE_KEY 		0x00000015
+#define SCTP_AUTH_DELETE_KEY 		0x00000016
+#define SCTP_USE_EXT_RCVINFO		0x00000017
+#define SCTP_AUTO_ASCONF		0x00000018 /* rw */
+#define SCTP_MAXBURST			0x00000019 /* rw */
+#define SCTP_MAX_BURST			0x00000019 /* rw */
+/* assoc level context */
+#define SCTP_CONTEXT                    0x0000001a /* rw */
+/* explicit EOR signalling */
+#define SCTP_EXPLICIT_EOR               0x0000001b
+#define SCTP_REUSE_PORT                 0x0000001c /* rw */
+#define SCTP_AUTH_DEACTIVATE_KEY	0x0000001d
+#define SCTP_EVENT                      0x0000001e
+#define SCTP_RECVRCVINFO                0x0000001f
+#define SCTP_RECVNXTINFO                0x00000020
+#define SCTP_DEFAULT_SNDINFO            0x00000021
+#define SCTP_DEFAULT_PRINFO             0x00000022
+#define SCTP_PEER_ADDR_THLDS            0x00000023
+#define SCTP_REMOTE_UDP_ENCAPS_PORT     0x00000024
+
+/*
+ * read-only options
+ */
+#define SCTP_STATUS			0x00000100
+#define SCTP_GET_PEER_ADDR_INFO		0x00000101
+/* authentication support */
+#define SCTP_PEER_AUTH_CHUNKS 		0x00000102
+#define SCTP_LOCAL_AUTH_CHUNKS 		0x00000103
+#define SCTP_GET_ASSOC_NUMBER           0x00000104 /* ro */
+#define SCTP_GET_ASSOC_ID_LIST          0x00000105 /* ro */
+#define SCTP_TIMEOUTS                   0x00000106
+
+/*
+ * user socket options: BSD implementation specific
+ */
+/*
+ * Blocking I/O is enabled on any TCP type socket by default. For the UDP
+ * model if this is turned on then the socket buffer is shared for send
+ * resources amongst all associations.  The default for the UDP model is that
+ * is SS_NBIO is set.  Which means all associations have a separate send
+ * limit BUT they will NOT ever BLOCK instead you will get an error back
+ * EAGAIN if you try to send too much. If you want the blocking semantics you
+ * set this option at the cost of sharing one socket send buffer size amongst
+ * all associations. Peeled off sockets turn this option off and block. But
+ * since both TCP and peeled off sockets have only one assoc per socket this
+ * is fine. It probably does NOT make sense to set this on SS_NBIO on a TCP
+ * model OR peeled off UDP model, but we do allow you to do so. You just use
+ * the normal syscall to toggle SS_NBIO the way you want.
+ *
+ * Blocking I/O is controlled by the SS_NBIO flag on the socket state so_state
+ * field.
+ */
+
+#define SCTP_ENABLE_STREAM_RESET	0x00000900 /* struct sctp_assoc_value */
+#define SCTP_RESET_STREAMS		0x00000901 /* struct sctp_reset_streams */
+#define SCTP_RESET_ASSOC		0x00000902 /* sctp_assoc_t */
+#define SCTP_ADD_STREAMS		0x00000903 /* struct sctp_add_streams */
+
+/* For enable stream reset */
+#define SCTP_ENABLE_RESET_STREAM_REQ 	0x00000001
+#define SCTP_ENABLE_RESET_ASSOC_REQ 	0x00000002
+#define SCTP_ENABLE_CHANGE_ASSOC_REQ 	0x00000004
+#define SCTP_ENABLE_VALUE_MASK		0x00000007
+/* For reset streams */
+#define SCTP_STREAM_RESET_INCOMING	0x00000001
+#define SCTP_STREAM_RESET_OUTGOING	0x00000002
+
+
+/* here on down are more implementation specific */
+#define SCTP_SET_DEBUG_LEVEL		0x00001005
+#define SCTP_CLR_STAT_LOG               0x00001007
+/* CMT ON/OFF socket option */
+#define SCTP_CMT_ON_OFF                 0x00001200
+#define SCTP_CMT_USE_DAC                0x00001201
+/* JRS - Pluggable Congestion Control Socket option */
+#define SCTP_PLUGGABLE_CC               0x00001202
+/* RS - Pluggable Stream Scheduling Socket option */
+#define SCTP_PLUGGABLE_SS		0x00001203
+#define SCTP_SS_VALUE			0x00001204
+#define SCTP_CC_OPTION			0x00001205 /* Options for CC modules */
+/* read only */
+#define SCTP_GET_SNDBUF_USE		0x00001101
+#define SCTP_GET_STAT_LOG		0x00001103
+#define SCTP_PCB_STATUS			0x00001104
+#define SCTP_GET_NONCE_VALUES           0x00001105
+
+
+/* Special hook for dynamically setting primary for all assoc's,
+ * this is a write only option that requires root privilege.
+ */
+#define SCTP_SET_DYNAMIC_PRIMARY        0x00002001
+
+/* VRF (virtual router feature) and multi-VRF support
+ * options. VRF's provide splits within a router
+ * that give the views of multiple routers. A
+ * standard host, without VRF support, is just
+ * a single VRF. If VRF's are supported then
+ * the transport must be VRF aware. This means
+ * that every socket call coming in must be directed
+ * within the endpoint to one of the VRF's it belongs
+ * to. The endpoint, before binding, may select
+ * the "default" VRF it is in by using a set socket
+ * option with SCTP_VRF_ID. This will also
+ * get propagated to the default VRF. Once the
+ * endpoint binds an address then it CANNOT add
+ * additional VRF's to become a Multi-VRF endpoint.
+ *
+ * Before BINDING additional VRF's can be added with
+ * the SCTP_ADD_VRF_ID call or deleted with
+ * SCTP_DEL_VRF_ID.
+ *
+ * Associations are ALWAYS contained inside a single
+ * VRF. They cannot reside in two (or more) VRF's. Incoming
+ * packets, assuming the router is VRF aware, can always
+ * tell us what VRF they arrived on. A host not supporting
+ * any VRF's will find that the packets always arrived on the
+ * single VRF that the host has.
+ *
+ */
+
+#define SCTP_VRF_ID			0x00003001
+#define SCTP_ADD_VRF_ID			0x00003002
+#define SCTP_GET_VRF_IDS		0x00003003
+#define SCTP_GET_ASOC_VRF               0x00003004
+#define SCTP_DEL_VRF_ID                 0x00003005
+
+/*
+ * If you enable packet logging you can get
+ * a poor mans ethereal output in binary
+ * form. Note this is a compile option to
+ * the kernel,  SCTP_PACKET_LOGGING, and
+ * without it in your kernel you
+ * will get an EOPNOTSUPP
+ */
+#define SCTP_GET_PACKET_LOG             0x00004001
+
+/*
+ * hidden implementation specific options these are NOT user visible (should
+ * move out of sctp.h)
+ */
+/* sctp_bindx() flags as hidden socket options */
+#define SCTP_BINDX_ADD_ADDR		0x00008001
+#define SCTP_BINDX_REM_ADDR		0x00008002
+/* Hidden socket option that gets the addresses */
+#define SCTP_GET_PEER_ADDRESSES		0x00008003
+#define SCTP_GET_LOCAL_ADDRESSES	0x00008004
+/* return the total count in bytes needed to hold all local addresses bound */
+#define SCTP_GET_LOCAL_ADDR_SIZE	0x00008005
+/* Return the total count in bytes needed to hold the remote address */
+#define SCTP_GET_REMOTE_ADDR_SIZE	0x00008006
+/* hidden option for connectx */
+#define SCTP_CONNECT_X			0x00008007
+/* hidden option for connectx_delayed, part of sendx */
+#define SCTP_CONNECT_X_DELAYED		0x00008008
+#define SCTP_CONNECT_X_COMPLETE         0x00008009
+/* hidden socket option based sctp_peeloff */
+#define SCTP_PEELOFF                    0x0000800a
+/* the real worker for sctp_getaddrlen() */
+#define SCTP_GET_ADDR_LEN               0x0000800b
+#if defined(__APPLE__)
+/* temporary workaround for Apple listen() issue, no args used */
+#define SCTP_LISTEN_FIX			0x0000800c
+#endif
+#if defined(__Windows__)
+/* workaround for Cygwin on Windows: returns the SOCKET handle */
+#define SCTP_GET_HANDLE			0x0000800d
+#endif
+/* Debug things that need to be purged */
+#define SCTP_SET_INITIAL_DBG_SEQ	0x00009f00
+
+/* JRS - Supported congestion control modules for pluggable
+ * congestion control
+ */
+/* Standard TCP Congestion Control */
+#define SCTP_CC_RFC2581		0x00000000
+/* High Speed TCP Congestion Control (Floyd) */
+#define SCTP_CC_HSTCP		0x00000001
+/* HTCP Congestion Control */
+#define SCTP_CC_HTCP		0x00000002
+/* RTCC Congestion Control - RFC2581 plus */
+#define SCTP_CC_RTCC            0x00000003
+
+#define SCTP_CC_OPT_RTCC_SETMODE	0x00002000
+#define SCTP_CC_OPT_USE_DCCC_ECN	0x00002001
+#define SCTP_CC_OPT_STEADY_STEP         0x00002002
+
+#define SCTP_CMT_OFF            0
+#define SCTP_CMT_BASE           1
+#define SCTP_CMT_RPV1           2
+#define SCTP_CMT_RPV2           3
+#define SCTP_CMT_MPTCP          4
+#define SCTP_CMT_MAX            SCTP_CMT_MPTCP
+
+/* RS - Supported stream scheduling modules for pluggable
+ * stream scheduling
+ */
+/* Default simple round-robin */
+#define SCTP_SS_DEFAULT			0x00000000
+/* Real round-robin */
+#define SCTP_SS_ROUND_ROBIN		0x00000001
+/* Real round-robin per packet */
+#define SCTP_SS_ROUND_ROBIN_PACKET	0x00000002
+/* Priority */
+#define SCTP_SS_PRIORITY		0x00000003
+/* Fair Bandwidth (constant below is spelled "BANDWITH" upstream; keep as-is for API compatibility) */
+#define SCTP_SS_FAIR_BANDWITH		0x00000004
+/* First-come, first-serve */
+#define SCTP_SS_FIRST_COME		0x00000005
+
+
+/* fragment interleave constants
+ * setting must be one of these or
+ * EINVAL returned.
+ */
+#define SCTP_FRAG_LEVEL_0    0x00000000
+#define SCTP_FRAG_LEVEL_1    0x00000001
+#define SCTP_FRAG_LEVEL_2    0x00000002
+
+/*
+ * user state values
+ */
+#define SCTP_CLOSED			0x0000
+#define SCTP_BOUND			0x1000
+#define SCTP_LISTEN			0x2000
+#define SCTP_COOKIE_WAIT		0x0002
+#define SCTP_COOKIE_ECHOED		0x0004
+#define SCTP_ESTABLISHED		0x0008
+#define SCTP_SHUTDOWN_SENT		0x0010
+#define SCTP_SHUTDOWN_RECEIVED		0x0020
+#define SCTP_SHUTDOWN_ACK_SENT		0x0040
+#define SCTP_SHUTDOWN_PENDING		0x0080
+
+/*
+ * SCTP operational error codes (user visible)
+ */
+#define SCTP_CAUSE_NO_ERROR		0x0000
+#define SCTP_CAUSE_INVALID_STREAM	0x0001
+#define SCTP_CAUSE_MISSING_PARAM	0x0002
+#define SCTP_CAUSE_STALE_COOKIE		0x0003
+#define SCTP_CAUSE_OUT_OF_RESC		0x0004
+#define SCTP_CAUSE_UNRESOLVABLE_ADDR	0x0005
+#define SCTP_CAUSE_UNRECOG_CHUNK	0x0006
+#define SCTP_CAUSE_INVALID_PARAM	0x0007
+#define SCTP_CAUSE_UNRECOG_PARAM	0x0008
+#define SCTP_CAUSE_NO_USER_DATA		0x0009
+#define SCTP_CAUSE_COOKIE_IN_SHUTDOWN	0x000a
+#define SCTP_CAUSE_RESTART_W_NEWADDR	0x000b
+#define SCTP_CAUSE_USER_INITIATED_ABT	0x000c
+#define SCTP_CAUSE_PROTOCOL_VIOLATION	0x000d
+
+/* Error causes from RFC5061 */
+#define SCTP_CAUSE_DELETING_LAST_ADDR	0x00a0
+#define SCTP_CAUSE_RESOURCE_SHORTAGE	0x00a1
+#define SCTP_CAUSE_DELETING_SRC_ADDR	0x00a2
+#define SCTP_CAUSE_ILLEGAL_ASCONF_ACK	0x00a3
+#define SCTP_CAUSE_REQUEST_REFUSED	0x00a4
+
+/* Error causes from nat-draft */
+#define SCTP_CAUSE_NAT_COLLIDING_STATE  0x00b0
+#define SCTP_CAUSE_NAT_MISSING_STATE    0x00b1
+
+/* Error causes from RFC4895 */
+#define SCTP_CAUSE_UNSUPPORTED_HMACID	0x0105
+
+/*
+ * error cause parameters (user visible)
+ */
+struct sctp_error_cause {
+	uint16_t code;		/* cause code, one of the SCTP_CAUSE_* values above */
+	uint16_t length;	/* length of this cause in bytes, including this header */
+	/* optional cause-specific info may follow */
+} SCTP_PACKED;
+
+/* Invalid Stream Identifier cause (RFC 4960, 3.3.10.1). */
+struct sctp_error_invalid_stream {
+	struct sctp_error_cause cause;	/* code=SCTP_ERROR_INVALID_STREAM */
+	uint16_t stream_id;	/* stream id of the DATA in error */
+	uint16_t reserved;	/* reserved; RFC 4960 sets this to 0 */
+} SCTP_PACKED;
+
+/* Missing Mandatory Parameter cause (RFC 4960, 3.3.10.2). */
+struct sctp_error_missing_param {
+	struct sctp_error_cause cause;	/* code=SCTP_ERROR_MISSING_PARAM */
+	uint32_t num_missing_params;	/* number of missing parameters */
+	/* uint16_t param_type's follow */
+} SCTP_PACKED;
+
+/* Stale Cookie Error cause (RFC 4960, 3.3.10.3). */
+struct sctp_error_stale_cookie {
+	struct sctp_error_cause cause;	/* code=SCTP_ERROR_STALE_COOKIE */
+	uint32_t stale_time;	/* time in usec of staleness */
+} SCTP_PACKED;
+
+/* Out of Resource cause (RFC 4960, 3.3.10.4); carries no cause-specific data. */
+struct sctp_error_out_of_resource {
+	struct sctp_error_cause cause;	/* code=SCTP_ERROR_OUT_OF_RESOURCES */
+} SCTP_PACKED;
+
+/* Unresolvable Address cause (RFC 4960, 3.3.10.5); the offending
+ * address parameter follows the cause header on the wire. */
+struct sctp_error_unresolv_addr {
+	struct sctp_error_cause cause;	/* code=SCTP_ERROR_UNRESOLVABLE_ADDR */
+
+} SCTP_PACKED;
+
+/* Unrecognized Chunk Type cause (RFC 4960, 3.3.10.6). */
+struct sctp_error_unrecognized_chunk {
+	struct sctp_error_cause cause;	/* code=SCTP_ERROR_UNRECOG_CHUNK */
+	struct sctp_chunkhdr ch;/* header from chunk in error */
+} SCTP_PACKED;
+
+/*
+ * Main SCTP chunk types we place these here so natd and f/w's in user land
+ * can find them.
+ */
+/************0x00 series ***********/
+#define SCTP_DATA		0x00
+#define SCTP_INITIATION		0x01
+#define SCTP_INITIATION_ACK	0x02
+#define SCTP_SELECTIVE_ACK	0x03
+#define SCTP_HEARTBEAT_REQUEST	0x04
+#define SCTP_HEARTBEAT_ACK	0x05
+#define SCTP_ABORT_ASSOCIATION	0x06
+#define SCTP_SHUTDOWN		0x07
+#define SCTP_SHUTDOWN_ACK	0x08
+#define SCTP_OPERATION_ERROR	0x09
+#define SCTP_COOKIE_ECHO	0x0a
+#define SCTP_COOKIE_ACK		0x0b
+#define SCTP_ECN_ECHO		0x0c
+#define SCTP_ECN_CWR		0x0d
+#define SCTP_SHUTDOWN_COMPLETE	0x0e
+/* RFC4895 */
+#define SCTP_AUTHENTICATION     0x0f
+/* EY nr_sack chunk id*/
+#define SCTP_NR_SELECTIVE_ACK 0x10
+/************0x40 series ***********/
+/************0x80 series ***********/
+/* RFC5061 */
+#define	SCTP_ASCONF_ACK		0x80
+/* draft-ietf-stewart-pktdrpsctp */
+#define SCTP_PACKET_DROPPED	0x81
+/* draft-ietf-stewart-strreset-xxx */
+#define SCTP_STREAM_RESET       0x82
+
+/* RFC4820                         */
+#define SCTP_PAD_CHUNK          0x84
+/************0xc0 series ***********/
+/* RFC3758 */
+#define SCTP_FORWARD_CUM_TSN	0xc0
+/* RFC5061 */
+#define SCTP_ASCONF		0xc1
+
+
+/* ABORT and SHUTDOWN COMPLETE FLAG */
+#define SCTP_HAD_NO_TCB		0x01
+
+/* Packet dropped flags */
+#define SCTP_FROM_MIDDLE_BOX	SCTP_HAD_NO_TCB
+#define SCTP_BADCRC		0x02
+#define SCTP_PACKET_TRUNCATED	0x04
+
+/* Flag for ECN -CWR */
+#define SCTP_CWR_REDUCE_OVERRIDE 0x01
+#define SCTP_CWR_IN_SAME_WINDOW  0x02
+
+#define SCTP_SAT_NETWORK_MIN	400	/* min ms for RTT to set satellite
+					 * time */
+#define SCTP_SAT_NETWORK_BURST_INCR  2	/* how many times to multiply maxburst
+					 * in sat */
+
+/* Data Chunk Specific Flags */
+#define SCTP_DATA_FRAG_MASK        0x03
+#define SCTP_DATA_MIDDLE_FRAG      0x00
+#define SCTP_DATA_LAST_FRAG        0x01
+#define SCTP_DATA_FIRST_FRAG       0x02
+#define SCTP_DATA_NOT_FRAG         0x03
+#define SCTP_DATA_UNORDERED        0x04
+#define SCTP_DATA_SACK_IMMEDIATELY 0x08
+/* ECN Nonce: SACK Chunk Specific Flags */
+#define SCTP_SACK_NONCE_SUM        0x01
+
+/* CMT DAC algorithm SACK flag */
+#define SCTP_SACK_CMT_DAC          0x80
+
+/*
+ * PCB flags (in sctp_flags bitmask).
+ * Note the features and flags are meant
+ * for use by netstat.
+ */
+#define SCTP_PCB_FLAGS_UDPTYPE		0x00000001
+#define SCTP_PCB_FLAGS_TCPTYPE		0x00000002
+#define SCTP_PCB_FLAGS_BOUNDALL		0x00000004
+#define SCTP_PCB_FLAGS_ACCEPTING	0x00000008
+#define SCTP_PCB_FLAGS_UNBOUND		0x00000010
+#define SCTP_PCB_FLAGS_CLOSE_IP         0x00040000
+#define SCTP_PCB_FLAGS_WAS_CONNECTED    0x00080000
+#define SCTP_PCB_FLAGS_WAS_ABORTED      0x00100000
+/* TCP model support */
+
+#define SCTP_PCB_FLAGS_CONNECTED	0x00200000
+#define SCTP_PCB_FLAGS_IN_TCPPOOL	0x00400000
+#define SCTP_PCB_FLAGS_DONT_WAKE	0x00800000
+#define SCTP_PCB_FLAGS_WAKEOUTPUT	0x01000000
+#define SCTP_PCB_FLAGS_WAKEINPUT	0x02000000
+#define SCTP_PCB_FLAGS_BOUND_V6		0x04000000
+#define SCTP_PCB_FLAGS_BLOCKING_IO	0x08000000
+#define SCTP_PCB_FLAGS_SOCKET_GONE	0x10000000
+#define SCTP_PCB_FLAGS_SOCKET_ALLGONE	0x20000000
+#define SCTP_PCB_FLAGS_SOCKET_CANT_READ	0x40000000
+/* flags to copy to new PCB */
+#define SCTP_PCB_COPY_FLAGS		(SCTP_PCB_FLAGS_BOUNDALL|\
+					 SCTP_PCB_FLAGS_WAKEINPUT|\
+					 SCTP_PCB_FLAGS_BOUND_V6)
+
+
+/*
+ * PCB Features (in sctp_features bitmask)
+ */
+#define SCTP_PCB_FLAGS_DO_NOT_PMTUD      0x00000001
+#define SCTP_PCB_FLAGS_EXT_RCVINFO       0x00000002 /* deprecated */
+#define SCTP_PCB_FLAGS_DONOT_HEARTBEAT   0x00000004
+#define SCTP_PCB_FLAGS_FRAG_INTERLEAVE   0x00000008
+#define SCTP_PCB_FLAGS_INTERLEAVE_STRMS  0x00000010
+#define SCTP_PCB_FLAGS_DO_ASCONF         0x00000020
+#define SCTP_PCB_FLAGS_AUTO_ASCONF       0x00000040
+#define SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE  0x00000080
+/* socket options */
+#define SCTP_PCB_FLAGS_NODELAY           0x00000100
+#define SCTP_PCB_FLAGS_AUTOCLOSE         0x00000200
+#define SCTP_PCB_FLAGS_RECVDATAIOEVNT    0x00000400 /* deprecated */
+#define SCTP_PCB_FLAGS_RECVASSOCEVNT     0x00000800
+#define SCTP_PCB_FLAGS_RECVPADDREVNT     0x00001000
+#define SCTP_PCB_FLAGS_RECVPEERERR       0x00002000
+#define SCTP_PCB_FLAGS_RECVSENDFAILEVNT  0x00004000 /* deprecated */
+#define SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT  0x00008000
+#define SCTP_PCB_FLAGS_ADAPTATIONEVNT    0x00010000
+#define SCTP_PCB_FLAGS_PDAPIEVNT         0x00020000
+#define SCTP_PCB_FLAGS_AUTHEVNT          0x00040000
+#define SCTP_PCB_FLAGS_STREAM_RESETEVNT  0x00080000
+#define SCTP_PCB_FLAGS_NO_FRAGMENT       0x00100000
+#define SCTP_PCB_FLAGS_EXPLICIT_EOR      0x00400000
+#define SCTP_PCB_FLAGS_NEEDS_MAPPED_V4   0x00800000
+#define SCTP_PCB_FLAGS_MULTIPLE_ASCONFS  0x01000000
+#define SCTP_PCB_FLAGS_PORTREUSE         0x02000000
+#define SCTP_PCB_FLAGS_DRYEVNT           0x04000000
+#define SCTP_PCB_FLAGS_RECVRCVINFO       0x08000000
+#define SCTP_PCB_FLAGS_RECVNXTINFO       0x10000000
+#define SCTP_PCB_FLAGS_ASSOC_RESETEVNT   0x20000000
+#define SCTP_PCB_FLAGS_STREAM_CHANGEEVNT 0x40000000
+#define SCTP_PCB_FLAGS_RECVNSENDFAILEVNT 0x80000000
+
+/*-
+ * mobility_features parameters (by micchie). Note
+ * these features are applied against the
+ * sctp_mobility_features flags.. not the sctp_features
+ * flags.
+ */
+#define SCTP_MOBILITY_BASE               0x00000001
+#define SCTP_MOBILITY_FASTHANDOFF        0x00000002
+#define SCTP_MOBILITY_PRIM_DELETED       0x00000004
+
+
+#define SCTP_SMALLEST_PMTU 512	 /* smallest pmtu allowed when disabling PMTU discovery */
+
+#if defined(__Userspace_os_Windows)
+#pragma pack()
+#endif
+#undef SCTP_PACKED
+
+#include <netinet/sctp_uio.h>
+
+/* This dictates the size of the packet
+ * collection buffer. This only applies
+ * if SCTP_PACKET_LOGGING is enabled in
+ * your config.
+ */
+#define SCTP_PACKET_LOG_SIZE 65536
+
+/* Maximum delays and such a user can set for options that
+ * take ms.
+ */
+#define SCTP_MAX_SACK_DELAY 500 /* per RFC4960 */
+#define SCTP_MAX_HB_INTERVAL 14400000 /* 4 hours in ms */
+#define SCTP_MAX_COOKIE_LIFE  3600000 /* 1 hour in ms */
+
+
+/* Types of logging/KTR tracing  that can be enabled via the
+ * sysctl net.inet.sctp.sctp_logging. You must also enable
+ * SUBSYS tracing.
+ * Note that you must have the SCTP option in the kernel
+ * to enable these as well.
+ */
+#define SCTP_BLK_LOGGING_ENABLE				0x00000001
+#define SCTP_CWND_MONITOR_ENABLE			0x00000002
+#define SCTP_CWND_LOGGING_ENABLE			0x00000004
+#define SCTP_FLIGHT_LOGGING_ENABLE			0x00000020
+#define SCTP_FR_LOGGING_ENABLE				0x00000040
+#define SCTP_LOCK_LOGGING_ENABLE			0x00000080
+#define SCTP_MAP_LOGGING_ENABLE				0x00000100
+#define SCTP_MBCNT_LOGGING_ENABLE			0x00000200
+#define SCTP_MBUF_LOGGING_ENABLE			0x00000400
+#define SCTP_NAGLE_LOGGING_ENABLE			0x00000800
+#define SCTP_RECV_RWND_LOGGING_ENABLE			0x00001000
+#define SCTP_RTTVAR_LOGGING_ENABLE			0x00002000
+#define SCTP_SACK_LOGGING_ENABLE			0x00004000
+#define SCTP_SACK_RWND_LOGGING_ENABLE			0x00008000
+#define SCTP_SB_LOGGING_ENABLE				0x00010000
+#define SCTP_STR_LOGGING_ENABLE				0x00020000
+#define SCTP_WAKE_LOGGING_ENABLE			0x00040000
+#define SCTP_LOG_MAXBURST_ENABLE			0x00080000
+#define SCTP_LOG_RWND_ENABLE    			0x00100000
+#define SCTP_LOG_SACK_ARRIVALS_ENABLE			0x00200000
+#define SCTP_LTRACE_CHUNK_ENABLE			0x00400000
+#define SCTP_LTRACE_ERROR_ENABLE			0x00800000
+#define SCTP_LAST_PACKET_TRACING			0x01000000
+#define SCTP_THRESHOLD_LOGGING				0x02000000
+#define SCTP_LOG_AT_SEND_2_SCTP				0x04000000
+#define SCTP_LOG_AT_SEND_2_OUTQ				0x08000000
+#define SCTP_LOG_TRY_ADVANCE				0x10000000
+
+#endif				/* !_NETINET_SCTP_H_ */
new file mode 100755
--- /dev/null
+++ b/netwerk/sctp/src/netinet/sctp_asconf.c
@@ -0,0 +1,3487 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.c 240250 2012-09-08 20:54:54Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_timer.h>
+
+/*
+ * debug flags:
+ * SCTP_DEBUG_ASCONF1: protocol info, general info and errors
+ * SCTP_DEBUG_ASCONF2: detailed info
+ */
+
+#if defined(__APPLE__)
+#define APPLE_FILE_NO 1
+#endif
+
+/*
+ * RFC 5061
+ *
+ * An ASCONF parameter queue exists per association which holds the pending
+ * address operations.  Lists are updated upon receipt of ASCONF-ACK.
+ *
+ * A restricted_addrs list exists per association to hold local addresses
+ * that are not (yet) usable by the association as a source address.  These
+ * addresses are either pending an ASCONF operation (and exist on the ASCONF
+ * parameter queue), or they are permanently restricted (the peer has
+ * returned an ERROR indication to an ASCONF(ADD), or the peer does not
+ * support ASCONF).
+ *
+ * Deleted addresses are always immediately removed from the lists as they
+ * will (shortly) no longer exist in the kernel.  We send ASCONFs as a
+ * courtesy, only if allowed.
+ */
+
+/*
+ * ASCONF parameter processing.
+ * response_required: set if a reply is required (e.g. SUCCESS_REPORT).
+ * returns an mbuf holding an "error" response parameter, or NULL ("success") if ok.
+ * FIX: allocating this many mbufs on the fly is pretty inefficient...
+ */
+/*
+ * Build a SCTP_SUCCESS_REPORT reply parameter carrying correlation id 'id'.
+ * Returns the reply in a freshly allocated mbuf, or NULL if no mbuf
+ * could be obtained.
+ */
+static struct mbuf *
+sctp_asconf_success_response(uint32_t id)
+{
+	struct sctp_asconf_paramhdr *aph;
+	struct mbuf *reply;
+
+	reply = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_paramhdr),
+				      0, M_DONTWAIT, 1, MT_DATA);
+	if (reply == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"asconf_success_response: couldn't get mbuf!\n");
+		return (NULL);
+	}
+	aph = mtod(reply, struct sctp_asconf_paramhdr *);
+	aph->correlation_id = id;
+	aph->ph.param_type = htons(SCTP_SUCCESS_REPORT);
+	/* keep the length in host order long enough to set the mbuf
+	 * length from it, then convert for the wire */
+	aph->ph.param_length = sizeof(struct sctp_asconf_paramhdr);
+	SCTP_BUF_LEN(reply) = aph->ph.param_length;
+	aph->ph.param_length = htons(aph->ph.param_length);
+
+	return (reply);
+}
+
+/*
+ * Build an SCTP_ERROR_CAUSE_IND reply parameter for correlation id 'id'.
+ * 'cause' is the SCTP error cause code; error_tlv/tlv_length optionally
+ * carry the offending parameter, copied into the cause as extra data.
+ * Returns the reply in a freshly allocated mbuf, or NULL on failure
+ * (allocation failed, or the result would not fit in a single mbuf).
+ */
+static struct mbuf *
+sctp_asconf_error_response(uint32_t id, uint16_t cause, uint8_t *error_tlv,
+			   uint16_t tlv_length)
+{
+	struct mbuf *m_reply = NULL;
+	struct sctp_asconf_paramhdr *aph;
+	struct sctp_error_cause *error;
+	uint8_t *tlv;
+
+	m_reply = sctp_get_mbuf_for_msg((sizeof(struct sctp_asconf_paramhdr) +
+					 tlv_length +
+					 sizeof(struct sctp_error_cause)),
+					0, M_DONTWAIT, 1, MT_DATA);
+	if (m_reply == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"asconf_error_response: couldn't get mbuf!\n");
+		return (NULL);
+	}
+	aph = mtod(m_reply, struct sctp_asconf_paramhdr *);
+	/* the error cause immediately follows the asconf param header */
+	error = (struct sctp_error_cause *)(aph + 1);
+
+	aph->correlation_id = id;
+	aph->ph.param_type = htons(SCTP_ERROR_CAUSE_IND);
+	error->code = htons(cause);
+	/* lengths are kept in host order until the end so they can be used
+	 * for the MLEN size check and SCTP_BUF_LEN below */
+	error->length = tlv_length + sizeof(struct sctp_error_cause);
+	aph->ph.param_length = error->length +
+	    sizeof(struct sctp_asconf_paramhdr);
+
+	if (aph->ph.param_length > MLEN) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"asconf_error_response: tlv_length (%xh) too big\n",
+			tlv_length);
+		sctp_m_freem(m_reply);	/* discard */
+		return (NULL);
+	}
+	if (error_tlv != NULL) {
+		tlv = (uint8_t *) (error + 1);
+		memcpy(tlv, error_tlv, tlv_length);
+	}
+	SCTP_BUF_LEN(m_reply) = aph->ph.param_length;
+	/* now convert both lengths to network byte order for the wire */
+	error->length = htons(error->length);
+	aph->ph.param_length = htons(aph->ph.param_length);
+
+	return (m_reply);
+}
+
+/*
+ * Process an incoming ASCONF ADD-IP parameter.
+ * src is the packet's IP source address; aph points at the parameter
+ * (with its embedded address TLV); send_hb requests an immediate
+ * heartbeat on the newly added net; response_required forces a
+ * SUCCESS_REPORT reply.  Returns an error/success response mbuf,
+ * or NULL (nothing to report, or malformed parameter).
+ */
+static struct mbuf *
+sctp_process_asconf_add_ip(struct sockaddr *src, struct sctp_asconf_paramhdr *aph,
+                           struct sctp_tcb *stcb, int send_hb, int response_required)
+{
+	struct sctp_nets *net;
+	struct mbuf *m_reply = NULL;
+	struct sockaddr_storage sa_store;
+	struct sctp_paramhdr *ph;
+	uint16_t param_type, param_length, aparam_length;
+	struct sockaddr *sa;
+	int zero_address = 0;
+	int bad_address = 0;
+#ifdef INET
+	struct sockaddr_in *sin;
+	struct sctp_ipv4addr_param *v4addr;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *sin6;
+	struct sctp_ipv6addr_param *v6addr;
+#endif
+
+	aparam_length = ntohs(aph->ph.param_length);
+	/* the address TLV immediately follows the asconf param header */
+	ph = (struct sctp_paramhdr *)(aph + 1);
+	param_type = ntohs(ph->param_type);
+	param_length = ntohs(ph->param_length);
+
+	/* decode the embedded address TLV into a sockaddr on the stack */
+	sa = (struct sockaddr *)&sa_store;
+	switch (param_type) {
+#ifdef INET
+	case SCTP_IPV4_ADDRESS:
+		if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+			/* invalid param size */
+			return (NULL);
+		}
+		v4addr = (struct sctp_ipv4addr_param *)ph;
+		sin = (struct sockaddr_in *)&sa_store;
+		bzero(sin, sizeof(*sin));
+		sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+		sin->sin_len = sizeof(struct sockaddr_in);
+#endif
+		sin->sin_port = stcb->rport;
+		sin->sin_addr.s_addr = v4addr->addr;
+		/* broadcast and multicast addresses may never be added */
+		if ((sin->sin_addr.s_addr == INADDR_BROADCAST) ||
+		    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
+			bad_address = 1;
+		}
+		if (sin->sin_addr.s_addr == INADDR_ANY)
+			zero_address = 1;
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_add_ip: adding ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		break;
+#endif
+#ifdef INET6
+	case SCTP_IPV6_ADDRESS:
+		if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+			/* invalid param size */
+			return (NULL);
+		}
+		v6addr = (struct sctp_ipv6addr_param *)ph;
+		sin6 = (struct sockaddr_in6 *)&sa_store;
+		bzero(sin6, sizeof(*sin6));
+		sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+		sin6->sin6_len = sizeof(struct sockaddr_in6);
+#endif
+		sin6->sin6_port = stcb->rport;
+		memcpy((caddr_t)&sin6->sin6_addr, v6addr->addr,
+		    sizeof(struct in6_addr));
+		if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
+			bad_address = 1;
+		}
+		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+			zero_address = 1;
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_add_ip: adding ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		break;
+#endif
+	default:
+		/* unknown address family */
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_INVALID_PARAM, (uint8_t *) aph,
+		    aparam_length);
+		return (m_reply);
+	}			/* end switch */
+
+	/* if 0.0.0.0/::0, add the source address instead (NAT traversal) */
+	if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
+		sa = src;
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+		        "process_asconf_add_ip: using source addr ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, src);
+	}
+	/* add the address */
+	if (bad_address) {
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_INVALID_PARAM, (uint8_t *) aph,
+		    aparam_length);
+	} else if (sctp_add_remote_addr(stcb, sa, &net, SCTP_DONOT_SETSCOPE,
+	                         SCTP_ADDR_DYNAMIC_ADDED) != 0) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_asconf_add_ip: error adding address\n");
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_RESOURCE_SHORTAGE, (uint8_t *) aph,
+		    aparam_length);
+	} else {
+		/* notify upper layer */
+		sctp_ulp_notify(SCTP_NOTIFY_ASCONF_ADD_IP, stcb, 0, sa, SCTP_SO_NOT_LOCKED);
+		if (response_required) {
+			m_reply =
+			    sctp_asconf_success_response(aph->correlation_id);
+		}
+		/* start path-MTU discovery and heartbeats on the new net */
+		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
+		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
+		                 stcb, net);
+		if (send_hb) {
+			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
+		}
+	}
+	return (m_reply);
+}
+
+/*
+ * Delete every destination net of the association except the one
+ * matching 'src' (used for the wildcard-delete NAT case).
+ * Returns 0 on success, -1 if 'src' is not a known destination.
+ */
+static int
+sctp_asconf_del_remote_addrs_except(struct sctp_tcb *stcb, struct sockaddr *src)
+{
+	struct sctp_nets *src_net, *net, *nnet;
+
+	/* make sure the source address exists as a destination net */
+	src_net = sctp_findnet(stcb, src);
+	if (src_net == NULL) {
+		/* not found */
+		return (-1);
+	}
+
+	/*
+	 * delete all destination addresses except the source.
+	 * Use the _SAFE iterator: sctp_remove_net() unlinks 'net' from
+	 * the nets list (and may free it), so a plain TAILQ_FOREACH
+	 * would dereference a removed/freed element on the next step.
+	 */
+	TAILQ_FOREACH_SAFE(net, &stcb->asoc.nets, sctp_next, nnet) {
+		if (net != src_net) {
+			/* delete this address */
+			sctp_remove_net(stcb, net);
+			SCTPDBG(SCTP_DEBUG_ASCONF1,
+				"asconf_del_remote_addrs_except: deleting ");
+			SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1,
+				     (struct sockaddr *)&net->ro._l_addr);
+			/* notify upper layer */
+			sctp_ulp_notify(SCTP_NOTIFY_ASCONF_DELETE_IP, stcb, 0,
+			    (struct sockaddr *)&net->ro._l_addr, SCTP_SO_NOT_LOCKED);
+		}
+	}
+	return (0);
+}
+
+/*
+ * Process an incoming ASCONF DELETE-IP parameter.
+ * src is the packet's IP source address (which may never be deleted);
+ * aph points at the parameter with its embedded address TLV.
+ * Returns an error/success response mbuf, or NULL.
+ */
+static struct mbuf *
+sctp_process_asconf_delete_ip(struct sockaddr *src,
+                              struct sctp_asconf_paramhdr *aph,
+			      struct sctp_tcb *stcb, int response_required)
+{
+	struct mbuf *m_reply = NULL;
+	struct sockaddr_storage sa_store;
+	struct sctp_paramhdr *ph;
+	uint16_t param_type, param_length, aparam_length;
+	struct sockaddr *sa;
+	int zero_address = 0;
+	int result;
+#ifdef INET
+	struct sockaddr_in *sin;
+	struct sctp_ipv4addr_param *v4addr;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *sin6;
+	struct sctp_ipv6addr_param *v6addr;
+#endif
+
+	aparam_length = ntohs(aph->ph.param_length);
+	/* the address TLV immediately follows the asconf param header */
+	ph = (struct sctp_paramhdr *)(aph + 1);
+	param_type = ntohs(ph->param_type);
+	param_length = ntohs(ph->param_length);
+
+	/* decode the embedded address TLV into a sockaddr on the stack */
+	sa = (struct sockaddr *)&sa_store;
+	switch (param_type) {
+#ifdef INET
+	case SCTP_IPV4_ADDRESS:
+		if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+			/* invalid param size */
+			return (NULL);
+		}
+		v4addr = (struct sctp_ipv4addr_param *)ph;
+		sin = (struct sockaddr_in *)&sa_store;
+		bzero(sin, sizeof(*sin));
+		sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+		sin->sin_len = sizeof(struct sockaddr_in);
+#endif
+		sin->sin_port = stcb->rport;
+		sin->sin_addr.s_addr = v4addr->addr;
+		if (sin->sin_addr.s_addr == INADDR_ANY)
+			zero_address = 1;
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_asconf_delete_ip: deleting ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		break;
+#endif
+#ifdef INET6
+	case SCTP_IPV6_ADDRESS:
+		if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+			/* invalid param size */
+			return (NULL);
+		}
+		v6addr = (struct sctp_ipv6addr_param *)ph;
+		sin6 = (struct sockaddr_in6 *)&sa_store;
+		bzero(sin6, sizeof(*sin6));
+		sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+		sin6->sin6_len = sizeof(struct sockaddr_in6);
+#endif
+		sin6->sin6_port = stcb->rport;
+		memcpy(&sin6->sin6_addr, v6addr->addr,
+		    sizeof(struct in6_addr));
+		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+			zero_address = 1;
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_asconf_delete_ip: deleting ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		break;
+#endif
+	default:
+		/* unknown address family */
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+		    aparam_length);
+		return (m_reply);
+	}
+
+	/* make sure the source address is not being deleted */
+	if (sctp_cmpaddr(sa, src)) {
+		/* trying to delete the source address! */
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: tried to delete source addr\n");
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_DELETING_SRC_ADDR, (uint8_t *) aph,
+		    aparam_length);
+		return (m_reply);
+	}
+
+	/* if deleting 0.0.0.0/::0, delete all addresses except src addr */
+	if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
+		result = sctp_asconf_del_remote_addrs_except(stcb, src);
+
+		if (result) {
+			/* src address did not exist? */
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: src addr does not exist?\n");
+			/* what error to reply with?? */
+			m_reply =
+			    sctp_asconf_error_response(aph->correlation_id,
+			    SCTP_CAUSE_REQUEST_REFUSED, (uint8_t *) aph,
+			    aparam_length);
+		} else if (response_required) {
+			m_reply =
+			    sctp_asconf_success_response(aph->correlation_id);
+		}
+		return (m_reply);
+	}
+
+	/* delete the address */
+	result = sctp_del_remote_addr(stcb, sa);
+	/*
+	 * note if result == -2, the address doesn't exist in the asoc but
+	 * since it's being deleted anyways, we just ack the delete -- but
+	 * this probably means something has already gone awry
+	 */
+	if (result == -1) {
+		/* only one address in the asoc */
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: tried to delete last IP addr!\n");
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_DELETING_LAST_ADDR, (uint8_t *) aph,
+		    aparam_length);
+	} else {
+		if (response_required) {
+	 		m_reply = sctp_asconf_success_response(aph->correlation_id);
+		}
+		/* notify upper layer */
+		sctp_ulp_notify(SCTP_NOTIFY_ASCONF_DELETE_IP, stcb, 0, sa, SCTP_SO_NOT_LOCKED);
+	}
+	return (m_reply);
+}
+
+/*
+ * Process an incoming ASCONF SET-PRIMARY parameter.
+ * src is the packet's IP source address; aph points at the parameter
+ * with its embedded address TLV.  On success this may also trigger
+ * mobility fast-handover retransmission handling.
+ * Returns an error/success response mbuf, or NULL.
+ */
+static struct mbuf *
+sctp_process_asconf_set_primary(struct sockaddr *src,
+				struct sctp_asconf_paramhdr *aph,
+				struct sctp_tcb *stcb, int response_required)
+{
+	struct mbuf *m_reply = NULL;
+	struct sockaddr_storage sa_store;
+	struct sctp_paramhdr *ph;
+	uint16_t param_type, param_length, aparam_length;
+	struct sockaddr *sa;
+	int zero_address = 0;
+#ifdef INET
+	struct sockaddr_in *sin;
+	struct sctp_ipv4addr_param *v4addr;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *sin6;
+	struct sctp_ipv6addr_param *v6addr;
+#endif
+
+	aparam_length = ntohs(aph->ph.param_length);
+	/* the address TLV immediately follows the asconf param header */
+	ph = (struct sctp_paramhdr *)(aph + 1);
+	param_type = ntohs(ph->param_type);
+	param_length = ntohs(ph->param_length);
+
+	/* decode the embedded address TLV into a sockaddr on the stack */
+	sa = (struct sockaddr *)&sa_store;
+	switch (param_type) {
+#ifdef INET
+	case SCTP_IPV4_ADDRESS:
+		if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+			/* invalid param size */
+			return (NULL);
+		}
+		v4addr = (struct sctp_ipv4addr_param *)ph;
+		sin = (struct sockaddr_in *)&sa_store;
+		bzero(sin, sizeof(*sin));
+		sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+		sin->sin_len = sizeof(struct sockaddr_in);
+#endif
+		sin->sin_addr.s_addr = v4addr->addr;
+		if (sin->sin_addr.s_addr == INADDR_ANY)
+			zero_address = 1;
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_set_primary: ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		break;
+#endif
+#ifdef INET6
+	case SCTP_IPV6_ADDRESS:
+		if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+			/* invalid param size */
+			return (NULL);
+		}
+		v6addr = (struct sctp_ipv6addr_param *)ph;
+		sin6 = (struct sockaddr_in6 *)&sa_store;
+		bzero(sin6, sizeof(*sin6));
+		sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+		sin6->sin6_len = sizeof(struct sockaddr_in6);
+#endif
+		memcpy((caddr_t)&sin6->sin6_addr, v6addr->addr,
+		    sizeof(struct in6_addr));
+		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+			zero_address = 1;
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_set_primary: ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		break;
+#endif
+	default:
+		/* unknown address family */
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+		    aparam_length);
+		return (m_reply);
+	}
+
+	/* if 0.0.0.0/::0, use the source address instead (NAT traversal) */
+	if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
+		sa = src;
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_asconf_set_primary: using source addr ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, src);
+	}
+	/* set the primary address */
+	if (sctp_set_primary_addr(stcb, sa, NULL) == 0) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_asconf_set_primary: primary address set\n");
+		/* notify upper layer */
+		sctp_ulp_notify(SCTP_NOTIFY_ASCONF_SET_PRIMARY, stcb, 0, sa, SCTP_SO_NOT_LOCKED);
+		/* a reachable, non-PF new primary makes any alternate
+		 * destination obsolete */
+		if ((stcb->asoc.primary_destination->dest_state & SCTP_ADDR_REACHABLE) &&
+		    (!(stcb->asoc.primary_destination->dest_state & SCTP_ADDR_PF)) &&
+		    (stcb->asoc.alternate)) {
+			sctp_free_remote_addr(stcb->asoc.alternate);
+			stcb->asoc.alternate = NULL;
+		}
+		if (response_required) {
+			m_reply = sctp_asconf_success_response(aph->correlation_id);
+		}
+		/* Mobility adaptation.
+		   Ideally, when the reception of SET PRIMARY with DELETE IP
+		   ADDRESS of the previous primary destination, unacknowledged
+		   DATA are retransmitted immediately to the new primary
+		   destination for seamless handover.
+		   If the destination is UNCONFIRMED and marked to REQ_PRIM,
+		   The retransmission occur when reception of the
+		   HEARTBEAT-ACK.  (See sctp_handle_heartbeat_ack in
+		   sctp_input.c)
+		   Also, when change of the primary destination, it is better
+		   that all subsequent new DATA containing already queued DATA
+		   are transmitted to the new primary destination. (by micchie)
+		 */
+		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
+				       	SCTP_MOBILITY_BASE) ||
+		    sctp_is_mobility_feature_on(stcb->sctp_ep,
+			    		SCTP_MOBILITY_FASTHANDOFF)) &&
+		    sctp_is_mobility_feature_on(stcb->sctp_ep,
+			   		 SCTP_MOBILITY_PRIM_DELETED) &&
+		    (stcb->asoc.primary_destination->dest_state &
+		     SCTP_ADDR_UNCONFIRMED) == 0) {
+
+			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER+SCTP_LOC_7);
+			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+					SCTP_MOBILITY_FASTHANDOFF)) {
+				sctp_assoc_immediate_retrans(stcb,
+						stcb->asoc.primary_destination);
+			}
+			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+					SCTP_MOBILITY_BASE)) {
+				sctp_move_chunks_from_net(stcb,
+						stcb->asoc.deleted_primary);
+			}
+			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
+						stcb->asoc.deleted_primary);
+		}
+	} else {
+		/* couldn't set the requested primary address! */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_asconf_set_primary: set primary failed!\n");
+		/* must have been an invalid address, so report */
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+		    aparam_length);
+	}
+
+	return (m_reply);
+}
+
+/*
+ * handles an ASCONF chunk.
+ * if all parameters are processed ok, send a plain (empty) ASCONF-ACK.
+ * 'm'/'offset' locate the chunk in the received packet, 'src' is the
+ * packet's IP source address, 'cp' points at the (pulled-up) chunk
+ * header, and 'first' is set when this is the first ASCONF of a
+ * packet (triggers flushing of stale cached ACKs).
+ */
+void
+sctp_handle_asconf(struct mbuf *m, unsigned int offset,
+                   struct sockaddr *src,
+		   struct sctp_asconf_chunk *cp, struct sctp_tcb *stcb,
+		   int first)
+{
+	struct sctp_association *asoc;
+	uint32_t serial_num;
+	struct mbuf *n, *m_ack, *m_result, *m_tail;
+	struct sctp_asconf_ack_chunk *ack_cp;
+	struct sctp_asconf_paramhdr *aph, *ack_aph;
+	struct sctp_ipv6addr_param *p_addr;
+	unsigned int asconf_limit, cnt;
+	int error = 0;		/* did an error occur? */
+
+	/* asconf param buffer */
+	uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
+	struct sctp_asconf_ack *ack, *ack_next;
+
+	/* verify minimum length */
+	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_asconf_chunk)) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"handle_asconf: chunk too small = %xh\n",
+			ntohs(cp->ch.chunk_length));
+		return;
+	}
+	asoc = &stcb->asoc;
+	serial_num = ntohl(cp->serial_number);
+
+	if (SCTP_TSN_GE(asoc->asconf_seq_in, serial_num)) {
+		/* got a duplicate ASCONF */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"handle_asconf: got duplicate serial number = %xh\n",
+			serial_num);
+		return;
+	} else if (serial_num != (asoc->asconf_seq_in + 1)) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: incorrect serial number = %xh (expected next = %xh)\n",
+			serial_num, asoc->asconf_seq_in + 1);
+		return;
+	}
+
+	/* it's the expected "next" sequence number, so process it */
+	asoc->asconf_seq_in = serial_num;	/* update sequence */
+	/* get length of all the param's in the ASCONF */
+	asconf_limit = offset + ntohs(cp->ch.chunk_length);
+	SCTPDBG(SCTP_DEBUG_ASCONF1,
+		"handle_asconf: asconf_limit=%u, sequence=%xh\n",
+		asconf_limit, serial_num);
+
+	if (first) {
+		/* delete old cache */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,"handle_asconf: Now processing first ASCONF. Try to delete old cache\n");
+
+		TAILQ_FOREACH_SAFE(ack, &asoc->asconf_ack_sent, next, ack_next) {
+			if (ack->serial_number == serial_num)
+				break;
+			SCTPDBG(SCTP_DEBUG_ASCONF1,"handle_asconf: delete old(%u) < first(%u)\n",
+			    ack->serial_number, serial_num);
+			TAILQ_REMOVE(&asoc->asconf_ack_sent, ack, next);
+			if (ack->data != NULL) {
+				sctp_m_freem(ack->data);
+			}
+			SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), ack);
+		}
+	}
+
+	m_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_ack_chunk), 0,
+				      M_DONTWAIT, 1, MT_DATA);
+	if (m_ack == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"handle_asconf: couldn't get mbuf!\n");
+		return;
+	}
+	m_tail = m_ack;		/* current reply chain's tail */
+
+	/* fill in ASCONF-ACK header */
+	ack_cp = mtod(m_ack, struct sctp_asconf_ack_chunk *);
+	ack_cp->ch.chunk_type = SCTP_ASCONF_ACK;
+	ack_cp->ch.chunk_flags = 0;
+	ack_cp->serial_number = htonl(serial_num);
+	/* set initial lengths (eg. just an ASCONF-ACK), ntohx at the end! */
+	SCTP_BUF_LEN(m_ack) = sizeof(struct sctp_asconf_ack_chunk);
+	ack_cp->ch.chunk_length = sizeof(struct sctp_asconf_ack_chunk);
+
+	/* skip the lookup address parameter */
+	offset += sizeof(struct sctp_asconf_chunk);
+	p_addr = (struct sctp_ipv6addr_param *)sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr), (uint8_t *)&aparam_buf);
+	if (p_addr == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"handle_asconf: couldn't get lookup addr!\n");
+		/* respond with a missing/invalid mandatory parameter error */
+		return;
+	}
+	/* param_length is already validated in process_control... */
+	offset += ntohs(p_addr->ph.param_length);	/* skip lookup addr */
+
+	/* get pointer to first asconf param in ASCONF-ACK */
+	ack_aph = (struct sctp_asconf_paramhdr *)(mtod(m_ack, caddr_t) + sizeof(struct sctp_asconf_ack_chunk));
+	if (ack_aph == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "Gak in asconf2\n");
+		return;
+	}
+	/* get pointer to first asconf param in ASCONF */
+	aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_asconf_paramhdr), (uint8_t *)&aparam_buf);
+	if (aph == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "Empty ASCONF received?\n");
+		goto send_reply;
+	}
+	/* process through all parameters */
+	cnt = 0;
+	while (aph != NULL) {
+		unsigned int param_length, param_type;
+
+		param_type = ntohs(aph->ph.param_type);
+		param_length = ntohs(aph->ph.param_length);
+		if (offset + param_length > asconf_limit) {
+			/* parameter goes beyond end of chunk! */
+			sctp_m_freem(m_ack);
+			return;
+		}
+		m_result = NULL;
+
+		if (param_length > sizeof(aparam_buf)) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: param length (%u) larger than buffer size!\n", param_length);
+			sctp_m_freem(m_ack);
+			return;
+		}
+		if (param_length <= sizeof(struct sctp_paramhdr)) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: param length (%u) too short\n", param_length);
+			sctp_m_freem(m_ack);
+			/* bail out: m_ack was just freed; continuing would
+			 * use the freed chain via m_tail/ack_cp */
+			return;
+		}
+		/* get the entire parameter */
+		aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, param_length, aparam_buf);
+		if (aph == NULL) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: couldn't get entire param\n");
+			sctp_m_freem(m_ack);
+			return;
+		}
+		switch (param_type) {
+		case SCTP_ADD_IP_ADDRESS:
+			asoc->peer_supports_asconf = 1;
+			m_result = sctp_process_asconf_add_ip(src, aph, stcb,
+			    (cnt < SCTP_BASE_SYSCTL(sctp_hb_maxburst)), error);
+			cnt++;
+			break;
+		case SCTP_DEL_IP_ADDRESS:
+			asoc->peer_supports_asconf = 1;
+			m_result = sctp_process_asconf_delete_ip(src, aph, stcb,
+			    error);
+			break;
+		case SCTP_ERROR_CAUSE_IND:
+			/* not valid in an ASCONF chunk */
+			break;
+		case SCTP_SET_PRIM_ADDR:
+			asoc->peer_supports_asconf = 1;
+			m_result = sctp_process_asconf_set_primary(src, aph,
+			    stcb, error);
+			break;
+		case SCTP_NAT_VTAGS:
+		        SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: sees a NAT VTAG state parameter\n");
+		        break;
+		case SCTP_SUCCESS_REPORT:
+			/* not valid in an ASCONF chunk */
+			break;
+		case SCTP_ULP_ADAPTATION:
+			/* FIX */
+			break;
+		default:
+			if ((param_type & 0x8000) == 0) {
+				/* Been told to STOP at this param */
+				asconf_limit = offset;
+				/*
+				 * FIX FIX - We need to call
+				 * sctp_arethere_unrecognized_parameters()
+				 * to get a operr and send it for any
+				 * param's with the 0x4000 bit set OR do it
+				 * here ourselves... note we still must STOP
+				 * if the 0x8000 bit is clear.
+				 */
+			}
+			/* unknown/invalid param type */
+			break;
+		} /* switch */
+
+		/* add any (error) result to the reply mbuf chain */
+		if (m_result != NULL) {
+			SCTP_BUF_NEXT(m_tail) = m_result;
+			m_tail = m_result;
+			/* update lengths, make sure it's aligned too */
+			SCTP_BUF_LEN(m_result) = SCTP_SIZE32(SCTP_BUF_LEN(m_result));
+			ack_cp->ch.chunk_length += SCTP_BUF_LEN(m_result);
+			/* set flag to force success reports */
+			error = 1;
+		}
+		offset += SCTP_SIZE32(param_length);
+		/* update remaining ASCONF message length to process */
+		if (offset >= asconf_limit) {
+			/* no more data in the mbuf chain */
+			break;
+		}
+		/* get pointer to next asconf param */
+		aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset,
+		    sizeof(struct sctp_asconf_paramhdr),
+		    (uint8_t *)&aparam_buf);
+		if (aph == NULL) {
+			/* can't get an asconf paramhdr */
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: can't get asconf param hdr!\n");
+			/* FIX ME - add error here... */
+		}
+	}
+
+ send_reply:
+	ack_cp->ch.chunk_length = htons(ack_cp->ch.chunk_length);
+	/* save the ASCONF-ACK reply */
+	ack = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_asconf_ack),
+	    struct sctp_asconf_ack);
+	if (ack == NULL) {
+		sctp_m_freem(m_ack);
+		return;
+	}
+	ack->serial_number = serial_num;
+	ack->last_sent_to = NULL;
+	ack->data = m_ack;
+	ack->len = 0;
+	for (n = m_ack; n != NULL; n = SCTP_BUF_NEXT(n)) {
+		ack->len += SCTP_BUF_LEN(n);
+	}
+	TAILQ_INSERT_TAIL(&stcb->asoc.asconf_ack_sent, ack, next);
+
+	/* see if last_control_chunk_from is set properly (use IP src addr) */
+	if (stcb->asoc.last_control_chunk_from == NULL) {
+		/*
+		 * this could happen if the source address was just newly
+		 * added
+		 */
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: looking up net for IP source address\n");
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "Looking for IP source: ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, src);
+		/* look up the from address */
+		stcb->asoc.last_control_chunk_from = sctp_findnet(stcb, src);
+#ifdef SCTP_DEBUG
+		if (stcb->asoc.last_control_chunk_from == NULL) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: IP source address not found?!\n");
+		}
+#endif
+	}
+}
+
+/*
+ * does the address match? returns 0 if not, 1 if so
+ */
+/*
+ * Check whether the queued asconf address entry 'aa' refers to the same
+ * address as 'sa'.  Returns 1 on a match, 0 otherwise.
+ */
+static uint32_t
+sctp_asconf_addr_match(struct sctp_asconf_addr *aa, struct sockaddr *sa)
+{
+	switch (sa->sa_family) {
+#ifdef INET6
+	case AF_INET6:
+	{
+		/* XXX scopeid */
+		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
+
+		if (aa->ap.addrp.ph.param_type != SCTP_IPV6_ADDRESS)
+			break;
+		if (memcmp(&aa->ap.addrp.addr, &sin6->sin6_addr,
+			   sizeof(struct in6_addr)) == 0)
+			return (1);
+		break;
+	}
+#endif
+#ifdef INET
+	case AF_INET:
+	{
+		struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+		if (aa->ap.addrp.ph.param_type != SCTP_IPV4_ADDRESS)
+			break;
+		if (memcmp(&aa->ap.addrp.addr, &sin->sin_addr,
+			   sizeof(struct in_addr)) == 0)
+			return (1);
+		break;
+	}
+#endif
+	default:
+		break;
+	}
+	return (0);
+}
+
+/*
+ * does the address match? returns 0 if not, 1 if so
+ */
+/*
+ * Check whether the address parameter TLV at 'ph' carries the same
+ * address as 'sa' (type and length must match exactly as well).
+ * Returns 1 on a match, 0 otherwise.
+ */
+static uint32_t
+sctp_addr_match(struct sctp_paramhdr *ph, struct sockaddr *sa)
+{
+	uint16_t ptype, plen;
+
+	ptype = ntohs(ph->param_type);
+	plen = ntohs(ph->param_length);
+	switch (sa->sa_family) {
+#ifdef INET6
+	case AF_INET6:
+	{
+		/* XXX scopeid */
+		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
+		struct sctp_ipv6addr_param *v6addr =
+		    (struct sctp_ipv6addr_param *)ph;
+
+		if ((ptype != SCTP_IPV6_ADDRESS) ||
+		    (plen != sizeof(struct sctp_ipv6addr_param)))
+			break;
+		if (memcmp(&v6addr->addr, &sin6->sin6_addr,
+			   sizeof(struct in6_addr)) == 0)
+			return (1);
+		break;
+	}
+#endif
+#ifdef INET
+	case AF_INET:
+	{
+		struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+		struct sctp_ipv4addr_param *v4addr =
+		    (struct sctp_ipv4addr_param *)ph;
+
+		if ((ptype != SCTP_IPV4_ADDRESS) ||
+		    (plen != sizeof(struct sctp_ipv4addr_param)))
+			break;
+		if (memcmp(&v4addr->addr, &sin->sin_addr,
+			   sizeof(struct in_addr)) == 0)
+			return (1);
+		break;
+	}
+#endif
+	default:
+		break;
+	}
+	return (0);
+}
+/*
+ * Cleanup for non-responded/OP ERR'd ASCONF
+ */
+/*
+ * Cleanup for a non-responded / OP-ERR'd ASCONF: give up on ASCONF
+ * with this peer and throw away everything still pending.
+ */
+void
+sctp_asconf_cleanup(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	/* the peer never answered (or errored): treat it as ASCONF
+	 * incapable from now on */
+	stcb->asoc.peer_supports_asconf = 0;
+	/* stop the ASCONF timer and drop any in-flight state */
+	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net,
+			SCTP_FROM_SCTP_ASCONF+SCTP_LOC_2);
+	stcb->asoc.asconf_seq_out_acked = stcb->asoc.asconf_seq_out;
+	/* remove the old ASCONF on our outbound queue */
+	sctp_toss_old_asconf(stcb);
+}
+
+/*
+ * cleanup any cached source addresses that may be topologically
+ * incorrect after a new address has been added to this interface.
+ */
+/*
+ * Flush cached routes and source addresses that may have become
+ * topologically incorrect after an address was added on interface 'ifn'.
+ */
+static void
+sctp_asconf_nets_cleanup(struct sctp_tcb *stcb, struct sctp_ifn *ifn)
+{
+	struct sctp_nets *net;
+
+	/*
+	 * Ideally only routes/source addresses made stale by the change
+	 * would be cleared, but detecting that would require a fresh
+	 * routing lookup per net.  So (for now) simply drop any cached
+	 * route that egresses via a different ifn, and every cached
+	 * source address; output will re-select both on the next send.
+	 */
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		int stale_route;
+
+		/* a route is suspect when it uses some other interface
+		 * than the one the address change happened on (or when
+		 * no specific interface was given) */
+		stale_route = SCTP_ROUTE_HAS_VALID_IFN(&net->ro) &&
+		    ((ifn == NULL) ||
+		     (SCTP_GET_IF_INDEX_FROM_ROUTE(&net->ro) != ifn->ifn_index));
+		if (stale_route) {
+			RTFREE(net->ro.ro_rt);
+			net->ro.ro_rt = NULL;
+		}
+		/* always drop the cached source address */
+		if (net->src_addr_selected) {
+			sctp_free_ifa(net->ro._s_addr);
+			net->ro._s_addr = NULL;
+			net->src_addr_selected = 0;
+		}
+	}
+}
+
+
+/*
+ * Force immediate retransmission of DATA outstanding on the deleted
+ * primary destination: stop its send timer, run T3-rxt handling against
+ * it and push the queued chunks out right away.
+ */
+void
+sctp_assoc_immediate_retrans(struct sctp_tcb *stcb, struct sctp_nets *dstnet)
+{
+	int error;
+
+	/* nothing to do for an unconfirmed destination */
+	if (dstnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
+		return;
+	}
+	/* only applies while a deleted primary is recorded */
+	if (stcb->asoc.deleted_primary == NULL) {
+		return;
+	}
+
+	if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "assoc_immediate_retrans: Deleted primary is ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "Current Primary is ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.primary_destination->ro._l_addr.sa);
+		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb,
+				stcb->asoc.deleted_primary,
+				SCTP_FROM_SCTP_TIMER+SCTP_LOC_8);
+		/* one send timer was just stopped; clamp the count at zero */
+		stcb->asoc.num_send_timers_up--;
+		if (stcb->asoc.num_send_timers_up < 0) {
+			stcb->asoc.num_send_timers_up = 0;
+		}
+		SCTP_TCB_LOCK_ASSERT(stcb);
+		error = sctp_t3rxt_timer(stcb->sctp_ep, stcb,
+					stcb->asoc.deleted_primary);
+		if (error) {
+			/* on failure, drop the endpoint reference and bail */
+			SCTP_INP_DECR_REF(stcb->sctp_ep);
+			return;
+		}
+		SCTP_TCB_LOCK_ASSERT(stcb);
+#ifdef SCTP_AUDITING_ENABLED
+		sctp_auditing(4, stcb->sctp_ep, stcb, stcb->asoc.deleted_primary);
+#endif
+		sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+		/*
+		 * if no send timer is left running but data is still
+		 * outstanding, restart one on the first queued chunk's net
+		 */
+		if ((stcb->asoc.num_send_timers_up == 0) &&
+		    (stcb->asoc.sent_queue_cnt > 0)) {
+			struct sctp_tmit_chunk *chk;
+
+			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+					 stcb, chk->whoTo);
+		}
+	}
+	return;
+}
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
+static int
+sctp_asconf_queue_mgmt(struct sctp_tcb *, struct sctp_ifa *, uint16_t);
+
+/*
+ * Reset congestion state on the given net and mark every chunk still
+ * outstanding to it for retransmission, then kick the output path.
+ */
+void
+sctp_net_immediate_retrans(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	struct sctp_tmit_chunk *chunk;
+
+	SCTPDBG(SCTP_DEBUG_ASCONF1, "net_immediate_retrans: RTO is %d\n", net->RTO);
+	sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net,
+	    SCTP_FROM_SCTP_TIMER+SCTP_LOC_5);
+	stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
+	net->error_count = 0;
+	/* flag every not-yet-resent chunk bound for this net for RESEND */
+	TAILQ_FOREACH(chunk, &stcb->asoc.sent_queue, sctp_next) {
+		if ((chunk->whoTo != net) ||
+		    (chunk->sent >= SCTP_DATAGRAM_RESEND)) {
+			continue;
+		}
+		chunk->sent = SCTP_DATAGRAM_RESEND;
+		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+		sctp_flight_size_decrease(chunk);
+		sctp_total_flight_decrease(stcb, chunk);
+		net->marked_retrans++;
+		stcb->asoc.marked_retrans++;
+	}
+	if (net->marked_retrans) {
+		sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+	}
+}
+
+/*
+ * React to a newly usable local address (mobility support): flush cached
+ * routes/source addresses on all nets, retransmit outstanding DATA at
+ * once when fast-handoff is enabled, and queue a SET-PRIMARY for the new
+ * address when it matches the nexthop of the current primary path.
+ */
+static void
+sctp_path_check_and_react(struct sctp_tcb *stcb, struct sctp_ifa *newifa)
+{
+	struct sctp_nets *net;
+	int addrnum, changed;
+
+	/*   If number of local valid addresses is 1, the valid address is
+	     probably newly added address.
+	     Several valid addresses in this association.  A source address
+	     may not be changed.  Additionally, they can be configured on a
+	     same interface as "alias" addresses.  (by micchie)
+	 */
+	addrnum = sctp_local_addr_count(stcb);
+	SCTPDBG(SCTP_DEBUG_ASCONF1, "p_check_react(): %d local addresses\n",
+		addrnum);
+	if (addrnum == 1) {
+		/* single-address case: flush every net and retransmit */
+		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+			/* clear any cached route and source address */
+			if (net->ro.ro_rt) {
+				RTFREE(net->ro.ro_rt);
+				net->ro.ro_rt = NULL;
+			}
+			if (net->src_addr_selected) {
+				sctp_free_ifa(net->ro._s_addr);
+				net->ro._s_addr = NULL;
+				net->src_addr_selected = 0;
+			}
+			/* Retransmit unacknowledged DATA chunks immediately */
+			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+		    		SCTP_MOBILITY_FASTHANDOFF)) {
+				sctp_net_immediate_retrans(stcb, net);
+			}
+			/* also, SET PRIMARY is maybe already sent */
+		}
+		return;
+	}
+
+	/* Multiple local addresses exist in the association.  */
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		/* clear any cached route and source address */
+		if (net->ro.ro_rt) {
+			RTFREE(net->ro.ro_rt);
+			net->ro.ro_rt = NULL;
+		}
+		if (net->src_addr_selected) {
+			sctp_free_ifa(net->ro._s_addr);
+			net->ro._s_addr = NULL;
+			net->src_addr_selected = 0;
+		}
+		/* Check if the nexthop is corresponding to the new address.
+		   If the new address is corresponding to the current nexthop,
+		   the path will be changed.
+		   If the new address is NOT corresponding to the current
+		   nexthop, the path will not be changed.
+		 */
+		SCTP_RTALLOC((sctp_route_t *)&net->ro,
+			     stcb->sctp_ep->def_vrf_id);
+		if (net->ro.ro_rt == NULL)
+			continue;
+
+		changed = 0;
+		switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+		case AF_INET:
+			if (sctp_v4src_match_nexthop(newifa, (sctp_route_t *)&net->ro)) {
+				changed = 1;
+			}
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+			if (sctp_v6src_match_nexthop(
+			    &newifa->address.sin6, (sctp_route_t *)&net->ro)) {
+				changed = 1;
+			}
+			break;
+#endif
+		default:
+			break;
+		}
+		/* if the newly added address does not relate routing
+		   information, we skip.
+		 */
+		if (changed == 0)
+			continue;
+		/* Retransmit unacknowledged DATA chunks immediately */
+		if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+		   		SCTP_MOBILITY_FASTHANDOFF)) {
+			sctp_net_immediate_retrans(stcb, net);
+		}
+		/* Send SET PRIMARY for this new address */
+		if (net == stcb->asoc.primary_destination) {
+			(void)sctp_asconf_queue_mgmt(stcb, newifa,
+						     SCTP_SET_PRIM_ADDR);
+		}
+	}
+}
+#endif /* __FreeBSD__  __APPLE__  __Userspace__ */
+
+/*
+ * process an ADD/DELETE IP ack from peer.
+ * addr: corresponding sctp_ifa to the address being added/deleted.
+ * flag: 1=success, 0=failure.
+ * (note: only ADD acks reach here in practice -- see comment below)
+ */
+static void
+sctp_asconf_addr_mgmt_ack(struct sctp_tcb *stcb, struct sctp_ifa *addr, uint32_t flag)
+{
+	/*
+	 * do the necessary asoc list work- if we get a failure indication,
+	 * leave the address on the assoc's restricted list.  If we get a
+	 * success indication, remove the address from the restricted list.
+	 */
+	/*
+	 * Note: this will only occur for ADD_IP_ADDRESS, since
+	 * DEL_IP_ADDRESS is never actually added to the list...
+	 */
+	if (flag) {
+		/* success case, so remove from the restricted list */
+		sctp_del_local_addr_restricted(stcb, addr);
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
+		/* mobility: react to the confirmed address change */
+		if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+						SCTP_MOBILITY_BASE) ||
+		    sctp_is_mobility_feature_on(stcb->sctp_ep,
+			    			SCTP_MOBILITY_FASTHANDOFF)) {
+			sctp_path_check_and_react(stcb, addr);
+			return;
+		}
+#endif /* __FreeBSD__ __APPLE__ __Userspace__ */
+		/* clear any cached/topologically incorrect source addresses */
+		sctp_asconf_nets_cleanup(stcb, addr->ifn_p);
+	}
+	/* else, leave it on the list */
+}
+
+/*
+ * add an asconf add/delete/set primary IP address parameter to the queue.
+ * type = SCTP_ADD_IP_ADDRESS, SCTP_DEL_IP_ADDRESS, SCTP_SET_PRIM_ADDR.
+ * returns 0 if queued, -1 if not queued/removed.
+ * NOTE: if adding, but a delete for the same address is already scheduled
+ * (and not yet sent out), simply remove it from queue.  Same for deleting
+ * an address already scheduled for add.  If a duplicate operation is found,
+ * ignore the new one.
+ */
+static int
+sctp_asconf_queue_mgmt(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
+		       uint16_t type)
+{
+	struct sctp_asconf_addr *aa, *aa_next;
+	struct sockaddr *sa;
+
+	/* make sure the request isn't already in the queue */
+	TAILQ_FOREACH_SAFE(aa, &stcb->asoc.asconf_queue, next, aa_next) {
+		/* address match? */
+		if (sctp_asconf_addr_match(aa, &ifa->address.sa) == 0)
+			continue;
+		/* is the request already in queue but not sent?
+		 * pass the request already sent in order to resolve the following case:
+		 *  1. arrival of ADD, then sent
+		 *  2. arrival of DEL. we can't remove the ADD request already sent
+		 *  3. arrival of ADD
+		 */
+		if (aa->ap.aph.ph.param_type == type && aa->sent == 0) {
+			/* duplicate unsent request: ignore the new one */
+			return (-1);
+		}
+		/* is the negative request already in queue, and not sent */
+		if ((aa->sent == 0) && (type == SCTP_ADD_IP_ADDRESS) &&
+		    (aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS)) {
+			/* add requested, delete already queued */
+			TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+			/* remove the ifa from the restricted list */
+			sctp_del_local_addr_restricted(stcb, ifa);
+			/* free the asconf param */
+			SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+			SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_mgmt: add removes queued entry\n");
+			return (-1);
+		}
+		if ((aa->sent == 0) && (type == SCTP_DEL_IP_ADDRESS) &&
+		    (aa->ap.aph.ph.param_type == SCTP_ADD_IP_ADDRESS)) {
+			/* delete requested, add already queued */
+			TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+			/* remove the aa->ifa from the restricted list */
+			sctp_del_local_addr_restricted(stcb, aa->ifa);
+			/* free the asconf param */
+			SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+			SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_mgmt: delete removes queued entry\n");
+			return (-1);
+		}
+	} /* for each aa */
+
+	/* adding new request to the queue */
+	SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+		    SCTP_M_ASC_ADDR);
+	if (aa == NULL) {
+		/* didn't get memory */
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "asconf_queue_mgmt: failed to get memory!\n");
+		return (-1);
+	}
+	aa->special_del = 0;
+	/* fill in asconf address parameter fields */
+	/* top level elements are "networked" during send */
+	aa->ap.aph.ph.param_type = type;
+	aa->ifa = ifa;
+	/* the queue entry holds a reference on the ifa until freed */
+	atomic_add_int(&ifa->refcount, 1);
+	/* correlation_id filled in during send routine later... */
+	switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+	case AF_INET6:
+	{
+		struct sockaddr_in6 *sin6;
+
+		sin6 = (struct sockaddr_in6 *)&ifa->address.sa;
+		sa = (struct sockaddr *)sin6;
+		aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+		aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv6addr_param));
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) +
+		    sizeof(struct sctp_ipv6addr_param);
+		memcpy(&aa->ap.addrp.addr, &sin6->sin6_addr,
+		       sizeof(struct in6_addr));
+		break;
+	}
+#endif
+#ifdef INET
+	case AF_INET:
+	{
+		struct sockaddr_in *sin;
+
+		sin= (struct sockaddr_in *)&ifa->address.sa;
+		sa = (struct sockaddr *)sin;
+		aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+		aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv4addr_param));
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) +
+		    sizeof(struct sctp_ipv4addr_param);
+		memcpy(&aa->ap.addrp.addr, &sin->sin_addr,
+		       sizeof(struct in_addr));
+		break;
+	}
+#endif
+	default:
+		/* invalid family! free the entry and drop the ifa ref taken above */
+		SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+		sctp_free_ifa(ifa);
+		return (-1);
+	}
+	aa->sent = 0;		/* clear sent flag */
+
+	TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+#ifdef SCTP_DEBUG
+	if (SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_ASCONF2) {
+		if (type == SCTP_ADD_IP_ADDRESS) {
+			SCTP_PRINTF("asconf_queue_mgmt: inserted asconf ADD_IP_ADDRESS: ");
+			SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
+		} else if (type == SCTP_DEL_IP_ADDRESS) {
+			SCTP_PRINTF("asconf_queue_mgmt: appended asconf DEL_IP_ADDRESS: ");
+			SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
+		} else {
+			SCTP_PRINTF("asconf_queue_mgmt: appended asconf SET_PRIM_ADDR: ");
+			SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
+		}
+	}
+#endif
+
+	return (0);
+}
+
+
+/*
+ * add an asconf operation for the given ifa and type.
+ * type = SCTP_ADD_IP_ADDRESS, SCTP_DEL_IP_ADDRESS, SCTP_SET_PRIM_ADDR.
+ * returns 0 if completed, -1 if not completed, 1 if immediate send is
+ * advisable.
+ */
+static int
+sctp_asconf_queue_add(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
+		      uint16_t type)
+{
+	/*
+	 * status must be a plain int: sctp_asconf_queue_mgmt() returns -1
+	 * on failure, which a uint32_t local would only round-trip back to
+	 * -1 through implementation-defined conversion.
+	 */
+	int status;
+	int pending_delete_queued = 0;
+
+	/* see if peer supports ASCONF */
+	if (stcb->asoc.peer_supports_asconf == 0) {
+		return (-1);
+	}
+
+	/*
+	 * if this is deleting the last address from the assoc, mark it as
+	 * pending.
+	 */
+	if ((type == SCTP_DEL_IP_ADDRESS) && !stcb->asoc.asconf_del_pending &&
+	    (sctp_local_addr_count(stcb) < 2)) {
+		/* set the pending delete info only; hold an ifa reference */
+		stcb->asoc.asconf_del_pending = 1;
+		stcb->asoc.asconf_addr_del_pending = ifa;
+		atomic_add_int(&ifa->refcount, 1);
+		SCTPDBG(SCTP_DEBUG_ASCONF2,
+			"asconf_queue_add: mark delete last address pending\n");
+		return (-1);
+	}
+
+	/* queue an asconf parameter */
+	status = sctp_asconf_queue_mgmt(stcb, ifa, type);
+
+	/*
+	 * if this is an add, and there is a delete also pending (i.e. the
+	 * last local address is being changed), queue the pending delete too.
+	 */
+	if ((type == SCTP_ADD_IP_ADDRESS) && stcb->asoc.asconf_del_pending && (status == 0)) {
+		/* queue in the pending delete */
+		if (sctp_asconf_queue_mgmt(stcb,
+					   stcb->asoc.asconf_addr_del_pending,
+					   SCTP_DEL_IP_ADDRESS) == 0) {
+			SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_add: queing pending delete\n");
+			pending_delete_queued = 1;
+			/* clear out the pending delete info (drops our ifa ref) */
+			stcb->asoc.asconf_del_pending = 0;
+			sctp_free_ifa(stcb->asoc.asconf_addr_del_pending);
+			stcb->asoc.asconf_addr_del_pending = NULL;
+		}
+	}
+
+	if (pending_delete_queued) {
+		struct sctp_nets *net;
+		/*
+		 * since we know that the only/last address is now being
+		 * changed in this case, reset the cwnd/rto on all nets to
+		 * start as a new address and path.  Also clear the error
+		 * counts to give the assoc the best chance to complete the
+		 * address change.
+		 */
+		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+			stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb,
+									  net);
+			net->RTO = 0;
+			net->error_count = 0;
+		}
+		stcb->asoc.overall_error_count = 0;
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+				       stcb->asoc.overall_error_count,
+				       0,
+				       SCTP_FROM_SCTP_ASCONF,
+				       __LINE__);
+		}
+
+		/* queue in an advisory set primary too */
+		(void)sctp_asconf_queue_mgmt(stcb, ifa, SCTP_SET_PRIM_ADDR);
+		/* let caller know we should send this out immediately */
+		status = 1;
+	}
+	return (status);
+}
+
+/*-
+ * add an asconf delete IP address parameter to the queue by sockaddr and
+ * possibly with no sctp_ifa available.  This is only called by the routine
+ * that checks the addresses in an INIT-ACK against the current address list.
+ * returns 0 if completed, non-zero if not completed.
+ * NOTE: if an add is already scheduled (and not yet sent out), simply
+ * remove it from queue.  If a duplicate operation is found, ignore the
+ * new one.
+ */
+static int
+sctp_asconf_queue_sa_delete(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+	struct sctp_ifa *ifa;
+	struct sctp_asconf_addr *aa, *aa_next;
+	uint32_t vrf_id;
+
+	if (stcb == NULL) {
+		return (-1);
+	}
+	/* see if peer supports ASCONF */
+	if (stcb->asoc.peer_supports_asconf == 0) {
+		return (-1);
+	}
+	/* make sure the request isn't already in the queue */
+	TAILQ_FOREACH_SAFE(aa, &stcb->asoc.asconf_queue, next, aa_next) {
+		/* address match? */
+		if (sctp_asconf_addr_match(aa, sa) == 0)
+			continue;
+		/* is the request already in queue (sent or not) */
+		if (aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) {
+			return (-1);
+		}
+		/* is the negative request already in queue, and not sent */
+		if (aa->sent == 1)
+			continue;
+		if (aa->ap.aph.ph.param_type == SCTP_ADD_IP_ADDRESS) {
+			/* add already queued, so remove existing entry */
+			TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+			sctp_del_local_addr_restricted(stcb, aa->ifa);
+			/* free the entry */
+			SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+			return (-1);
+		}
+	} /* for each aa */
+
+	/* find any existing ifa-- NOTE ifa CAN be allowed to be NULL */
+	/*
+	 * stcb was already NULL-checked above, so there is no need for a
+	 * SCTP_DEFAULT_VRFID fallback here (the old else-branch was dead
+	 * code).
+	 */
+	vrf_id = stcb->asoc.vrf_id;
+	ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED);
+
+	/* adding new request to the queue */
+	SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+		    SCTP_M_ASC_ADDR);
+	if (aa == NULL) {
+		/* didn't get memory */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"sctp_asconf_queue_sa_delete: failed to get memory!\n");
+		return (-1);
+	}
+	aa->special_del = 0;
+	/* fill in asconf address parameter fields */
+	/* top level elements are "networked" during send */
+	aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
+	aa->ifa = ifa;
+	/* hold a reference on the ifa (if any) while it sits on the queue */
+	if (ifa)
+		atomic_add_int(&ifa->refcount, 1);
+	/* correlation_id filled in during send routine later... */
+	switch (sa->sa_family) {
+#ifdef INET6
+	case AF_INET6:
+	{
+		/* IPv6 address */
+		struct sockaddr_in6 *sin6;
+
+		sin6 = (struct sockaddr_in6 *)sa;
+		aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+		aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv6addr_param));
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_ipv6addr_param);
+		memcpy(&aa->ap.addrp.addr, &sin6->sin6_addr,
+		    sizeof(struct in6_addr));
+		break;
+	}
+#endif
+#ifdef INET
+	case AF_INET:
+	{
+		/* IPv4 address */
+		struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+		aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+		aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv4addr_param));
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_ipv4addr_param);
+		memcpy(&aa->ap.addrp.addr, &sin->sin_addr,
+		    sizeof(struct in_addr));
+		break;
+	}
+#endif
+	default:
+		/* invalid family! undo the allocation and the ifa ref */
+		SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+		if (ifa)
+			sctp_free_ifa(ifa);
+		return (-1);
+	}
+	aa->sent = 0;		/* clear sent flag */
+
+	/* delete goes to the back of the queue */
+	TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+
+	/* sa_ignore MEMLEAK {memory is put on the tailq} */
+	return (0);
+}
+
+/*
+ * find a specific asconf param on our "sent" queue
+ */
+static struct sctp_asconf_addr *
+sctp_asconf_find_param(struct sctp_tcb *stcb, uint32_t correlation_id)
+{
+	struct sctp_asconf_addr *entry;
+
+	/* scan the queue for a sent entry carrying this correlation id */
+	TAILQ_FOREACH(entry, &stcb->asoc.asconf_queue, next) {
+		if ((entry->sent == 1) &&
+		    (entry->ap.aph.correlation_id == correlation_id)) {
+			return (entry);
+		}
+	}
+	/* not found */
+	return (NULL);
+}
+
+/*
+ * process an SCTP_ERROR_CAUSE_IND for a ASCONF-ACK parameter and do
+ * notifications based on the error response
+ */
+static void
+sctp_asconf_process_error(struct sctp_tcb *stcb,
+			  struct sctp_asconf_paramhdr *aph)
+{
+	struct sctp_error_cause *eh;
+	struct sctp_paramhdr *ph;
+	uint16_t param_type;
+	uint16_t error_code;
+
+	/* the error cause follows the asconf paramhdr, the echoed TLV follows it */
+	eh = (struct sctp_error_cause *)(aph + 1);
+	ph = (struct sctp_paramhdr *)(eh + 1);
+	/*
+	 * validate lengths; use ntohs() (wire-to-host), not htons(), to
+	 * bring the on-the-wire values into host order before comparing
+	 */
+	if (ntohs(eh->length) + sizeof(struct sctp_error_cause) >
+	    ntohs(aph->ph.param_length)) {
+		/* invalid error cause length */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"asconf_process_error: cause element too long\n");
+		return;
+	}
+	if (ntohs(ph->param_length) + sizeof(struct sctp_paramhdr) >
+	    ntohs(eh->length)) {
+		/* invalid included TLV length */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"asconf_process_error: included TLV too long\n");
+		return;
+	}
+	/* which error code ? */
+	error_code = ntohs(eh->code);
+	param_type = ntohs(aph->ph.param_type);
+	/* FIX: this should go back up the REMOTE_ERROR ULP notify */
+	switch (error_code) {
+	case SCTP_CAUSE_RESOURCE_SHORTAGE:
+		/* we allow ourselves to "try again" for this error */
+		break;
+	default:
+		/* peer can't handle it... stop attempting ASCONF operations */
+		switch (param_type) {
+		case SCTP_ADD_IP_ADDRESS:
+		case SCTP_DEL_IP_ADDRESS:
+		case SCTP_SET_PRIM_ADDR:
+			stcb->asoc.peer_supports_asconf = 0;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * process an asconf queue param.
+ * aparam: parameter to process, will be removed from the queue.
+ * flag: 1=success case, 0=failure case
+ */
+static void
+sctp_asconf_process_param_ack(struct sctp_tcb *stcb,
+			      struct sctp_asconf_addr *aparam, uint32_t flag)
+{
+	uint16_t param_type;
+
+	/* process this param */
+	param_type = aparam->ap.aph.ph.param_type;
+	switch (param_type) {
+	case SCTP_ADD_IP_ADDRESS:
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_param_ack: added IP address\n");
+		sctp_asconf_addr_mgmt_ack(stcb, aparam->ifa, flag);
+		break;
+	case SCTP_DEL_IP_ADDRESS:
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_param_ack: deleted IP address\n");
+		/* nothing really to do... lists already updated */
+		break;
+	case SCTP_SET_PRIM_ADDR:
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_param_ack: set primary IP address\n");
+		/* nothing to do... peer may start using this addr */
+		/* a failed set-primary means the peer can't do ASCONF */
+		if (flag == 0)
+			stcb->asoc.peer_supports_asconf = 0;
+		break;
+	default:
+		/* should NEVER happen */
+		break;
+	}
+
+	/* remove the param and free it (drops the queue's ifa reference) */
+	TAILQ_REMOVE(&stcb->asoc.asconf_queue, aparam, next);
+	if (aparam->ifa)
+		sctp_free_ifa(aparam->ifa);
+	SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
+}
+
+/*
+ * cleanup from a bad asconf ack parameter: called whenever parsing an
+ * ASCONF-ACK fails, it disables any further ASCONF use with this peer.
+ */
+static void
+sctp_asconf_ack_clear(struct sctp_tcb *stcb)
+{
+	/* assume peer doesn't really know how to do asconfs */
+	stcb->asoc.peer_supports_asconf = 0;
+	/* XXX we could free the pending queue here */
+}
+
+/*
+ * Handle an incoming ASCONF-ACK chunk: validate the serial number, walk
+ * the contained parameters (success reports / error causes), complete the
+ * matching entries on our asconf queue, then send or re-time the next
+ * queued ASCONF if any remain.
+ */
+void
+sctp_handle_asconf_ack(struct mbuf *m, int offset,
+		       struct sctp_asconf_ack_chunk *cp, struct sctp_tcb *stcb,
+		       struct sctp_nets *net, int *abort_no_unlock)
+{
+	struct sctp_association *asoc;
+	uint32_t serial_num;
+	uint16_t ack_length;
+	struct sctp_asconf_paramhdr *aph;
+	struct sctp_asconf_addr *aa, *aa_next;
+	uint32_t last_error_id = 0;	/* last error correlation id */
+	uint32_t id;
+	struct sctp_asconf_addr *ap;
+
+	/* asconf param buffer */
+	uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
+
+	/* verify minimum length */
+	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_asconf_ack_chunk)) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"handle_asconf_ack: chunk too small = %xh\n",
+			ntohs(cp->ch.chunk_length));
+		return;
+	}
+	asoc = &stcb->asoc;
+	serial_num = ntohl(cp->serial_number);
+
+	/*
+	 * NOTE: we may want to handle this differently- currently, we will
+	 * abort when we get an ack for the expected serial number + 1 (eg.
+	 * we didn't send it), process an ack normally if it is the expected
+	 * serial number, and re-send the previous ack for *ALL* other
+	 * serial numbers
+	 */
+
+	/*
+	 * if the serial number is the next expected, but I didn't send it,
+	 * abort the asoc, since someone probably just hijacked us...
+	 */
+	if (serial_num == (asoc->asconf_seq_out + 1)) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got unexpected next serial number! Aborting asoc!\n");
+		sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, SCTP_SO_NOT_LOCKED);
+		*abort_no_unlock = 1;
+		return;
+	}
+	if (serial_num != asoc->asconf_seq_out_acked + 1) {
+		/* got a duplicate/unexpected ASCONF-ACK */
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got duplicate/unexpected serial number = %xh (expected = %xh)\n",
+			serial_num, asoc->asconf_seq_out_acked + 1);
+		return;
+	}
+
+	if (serial_num == asoc->asconf_seq_out - 1) {
+		/* stop our timer */
+		sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net,
+				SCTP_FROM_SCTP_ASCONF+SCTP_LOC_3);
+	}
+
+	/* process the ASCONF-ACK contents */
+	ack_length = ntohs(cp->ch.chunk_length) -
+	    sizeof(struct sctp_asconf_ack_chunk);
+	offset += sizeof(struct sctp_asconf_ack_chunk);
+	/* process through all parameters */
+	while (ack_length >= sizeof(struct sctp_asconf_paramhdr)) {
+		unsigned int param_length, param_type;
+
+		/* get pointer to next asconf parameter */
+		aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset,
+		    sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+		if (aph == NULL) {
+			/* can't get an asconf paramhdr */
+			sctp_asconf_ack_clear(stcb);
+			return;
+		}
+		param_type = ntohs(aph->ph.param_type);
+		param_length = ntohs(aph->ph.param_length);
+		if (param_length > ack_length) {
+			/* param runs past the end of the chunk */
+			sctp_asconf_ack_clear(stcb);
+			return;
+		}
+		if (param_length < sizeof(struct sctp_paramhdr)) {
+			sctp_asconf_ack_clear(stcb);
+			return;
+		}
+		/* get the complete parameter... */
+		if (param_length > sizeof(aparam_buf)) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1,
+				"param length (%u) larger than buffer size!\n", param_length);
+			sctp_asconf_ack_clear(stcb);
+			return;
+		}
+		aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, param_length, aparam_buf);
+		if (aph == NULL) {
+			sctp_asconf_ack_clear(stcb);
+			return;
+		}
+		/* correlation_id is transparent to peer, no ntohl needed */
+		id = aph->correlation_id;
+
+		switch (param_type) {
+		case SCTP_ERROR_CAUSE_IND:
+			last_error_id = id;
+			/* find the corresponding asconf param in our queue */
+			ap = sctp_asconf_find_param(stcb, id);
+			if (ap == NULL) {
+				/* hmm... can't find this in our queue! */
+				break;
+			}
+			/* process the parameter, failed flag */
+			sctp_asconf_process_param_ack(stcb, ap, 0);
+			/* process the error response */
+			sctp_asconf_process_error(stcb, aph);
+			break;
+		case SCTP_SUCCESS_REPORT:
+			/* find the corresponding asconf param in our queue */
+			ap = sctp_asconf_find_param(stcb, id);
+			if (ap == NULL) {
+				/* hmm... can't find this in our queue! */
+				break;
+			}
+			/* process the parameter, success flag */
+			sctp_asconf_process_param_ack(stcb, ap, 1);
+			break;
+		default:
+			break;
+		}		/* switch */
+
+		/*
+		 * update remaining ASCONF-ACK message length to process;
+		 * check BEFORE subtracting: SCTP_SIZE32() pads param_length
+		 * up to a 4-byte boundary, which can exceed the remaining
+		 * (unsigned) ack_length and would otherwise wrap around and
+		 * keep the loop parsing past the end of the chunk.
+		 */
+		if (ack_length <= SCTP_SIZE32(param_length)) {
+			/* no more data in the mbuf chain */
+			break;
+		}
+		ack_length -= SCTP_SIZE32(param_length);
+		offset += SCTP_SIZE32(param_length);
+	} /* while */
+
+	/*
+	 * if there are any "sent" params still on the queue, these are
+	 * implicitly "success", or "failed" (if we got an error back) ...
+	 * so process these appropriately
+	 *
+	 * we assume that the correlation_id's are monotonically increasing
+	 * beginning from 1 and that we don't have *that* many outstanding
+	 * at any given time
+	 */
+	if (last_error_id == 0)
+		last_error_id--;	/* set to "max" value */
+	TAILQ_FOREACH_SAFE(aa, &stcb->asoc.asconf_queue, next, aa_next) {
+		if (aa->sent == 1) {
+			/*
+			 * implicitly successful or failed if correlation_id
+			 * < last_error_id, then success else, failure
+			 */
+			if (aa->ap.aph.correlation_id < last_error_id)
+				sctp_asconf_process_param_ack(stcb, aa, 1);
+			else
+				sctp_asconf_process_param_ack(stcb, aa, 0);
+		} else {
+			/*
+			 * since we always process in order (FIFO queue) if
+			 * we reach one that hasn't been sent, the rest
+			 * should not have been sent either. so, we're
+			 * done...
+			 */
+			break;
+		}
+	}
+
+	/* update the next sequence number to use */
+	asoc->asconf_seq_out_acked++;
+	/* remove the old ASCONF on our outbound queue */
+	sctp_toss_old_asconf(stcb);
+	if (!TAILQ_EMPTY(&stcb->asoc.asconf_queue)) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+		/* we have more params, so restart our timer */
+		sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep,
+				 stcb, net);
+#else
+		/* we have more params, so send out more */
+		sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
+#endif
+	}
+}
+
+#ifdef INET6
+/*
+ * Returns 1 when the given link-local IPv6 address shares a scope id
+ * with at least one of the association's destination nets, 0 otherwise.
+ */
+static uint32_t
+sctp_is_scopeid_in_nets(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+	struct sockaddr_in6 *sin6, *net6;
+	struct sctp_nets *net;
+
+	/* only link-local IPv6 addresses carry a scope id worth matching */
+	if (sa->sa_family != AF_INET6) {
+		return (0);
+	}
+	sin6 = (struct sockaddr_in6 *)sa;
+	if (!IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+		return (0);
+	}
+	/* hunt through our destination nets list for this scope_id */
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		if (((struct sockaddr *)(&net->ro._l_addr))->sa_family !=
+		    AF_INET6) {
+			continue;
+		}
+		net6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+		if (!IN6_IS_ADDR_LINKLOCAL(&net6->sin6_addr)) {
+			continue;
+		}
+		if (sctp_is_same_scope(sin6, net6)) {
+			/* found one */
+			return (1);
+		}
+	}
+	/* didn't find one */
+	return (0);
+}
+#endif
+
+/*
+ * address management functions
+ */
+/*
+ * Queue an ASCONF add/delete for one local address on one association,
+ * after validating the address's family, usability and scope against the
+ * endpoint and the assoc.  Sends the ASCONF right away when the assoc is
+ * open (or starts the ASCONF timer when timer-based sending is compiled).
+ */
+static void
+sctp_addr_mgmt_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+		     struct sctp_ifa *ifa, uint16_t type, int addr_locked)
+{
+	int status;
+
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0 ||
+	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+		/* subset bound, no ASCONF allowed case, so ignore */
+		return;
+	}
+	/*
+	 * note: we know this is not the subset bound, no ASCONF case eg.
+	 * this is boundall or subset bound w/ASCONF allowed
+	 */
+
+	/* first, make sure it's a good address family */
+	switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+	case AF_INET6:
+		break;
+#endif
+#ifdef INET
+	case AF_INET:
+		break;
+#endif
+	default:
+		return;
+	}
+#ifdef INET6
+	/* make sure we're "allowed" to add this type of addr */
+	if (ifa->address.sa.sa_family == AF_INET6) {
+		/* invalid if we're not a v6 endpoint */
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0)
+			return;
+		/* is the v6 addr really valid ? */
+		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+			return;
+		}
+	}
+#endif
+	/* put this address on the "pending/do not use yet" list */
+	sctp_add_local_addr_restricted(stcb, ifa);
+	/*
+	 * check address scope if address is out of scope, don't queue
+	 * anything... note: this would leave the address on both inp and
+	 * asoc lists
+	 */
+	switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+	case AF_INET6:
+	{
+		struct sockaddr_in6 *sin6;
+
+		sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
+		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+			/* we skip unspecified addresses */
+			return;
+		}
+		if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+			/* link-locals are usable only when in scope */
+			if (stcb->asoc.local_scope == 0) {
+				return;
+			}
+			/* is it the right link local scope? */
+			if (sctp_is_scopeid_in_nets(stcb, &ifa->address.sa) == 0) {
+				return;
+			}
+		}
+		if (stcb->asoc.site_scope == 0 &&
+		    IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
+			return;
+		}
+		break;
+	}
+#endif
+#ifdef INET
+	case AF_INET:
+	{
+		struct sockaddr_in *sin;
+		struct in6pcb *inp6;
+
+		inp6 = (struct in6pcb *)&inp->ip_inp.inp;
+		/* invalid if we are a v6 only endpoint */
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+		    SCTP_IPV6_V6ONLY(inp6))
+			return;
+
+		sin = (struct sockaddr_in *)&ifa->address.sa;
+		if (sin->sin_addr.s_addr == 0) {
+			/* we skip unspecified addresses */
+			return;
+		}
+		if (stcb->asoc.ipv4_local_scope == 0 &&
+		    IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+			return;
+		}
+		break;
+	}
+#endif
+	default:
+		/* else, not AF_INET or AF_INET6, so skip */
+		return;
+	}
+
+	/* queue an asconf for this address add/delete */
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+		/* does the peer do asconf? */
+		if (stcb->asoc.peer_supports_asconf) {
+			/* queue an asconf for this addr */
+			status = sctp_asconf_queue_add(stcb, ifa, type);
+
+			/*
+			 * if queued ok, and in the open state, send out the
+			 * ASCONF.  If in the non-open state, these will be
+			 * sent when the state goes open.
+			 */
+			if (status == 0 &&
+			    SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+				sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
+				    stcb, stcb->asoc.primary_destination);
+#else
+				sctp_send_asconf(stcb, NULL, addr_locked);
+#endif
+			}
+		}
+	}
+}
+
+
+/*
+ * Iterator endpoint-check: walks the work list of addresses and returns 1
+ * (skip this endpoint) once every address in the list is invalid for it
+ * (wrong family for the endpoint's binding), 0 otherwise.
+ */
+int
+sctp_asconf_iterator_ep(struct sctp_inpcb *inp, void *ptr, uint32_t val SCTP_UNUSED)
+{
+	struct sctp_asconf_iterator *asc;
+	struct sctp_ifa *ifa;
+	struct sctp_laddr *l;
+	int cnt_invalid = 0;
+
+	asc = (struct sctp_asconf_iterator *)ptr;
+	LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) {
+		ifa = l->ifa;
+		switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+		case AF_INET6:
+			/* invalid if we're not a v6 endpoint */
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+				cnt_invalid++;
+				if (asc->cnt == cnt_invalid)
+					return (1);
+			}
+			break;
+#endif
+#ifdef INET
+		case AF_INET:
+		{
+			/* invalid if we are a v6 only endpoint */
+			struct in6pcb *inp6;
+			inp6 = (struct in6pcb *)&inp->ip_inp.inp;
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+			    SCTP_IPV6_V6ONLY(inp6)) {
+				cnt_invalid++;
+				if (asc->cnt == cnt_invalid)
+					return (1);
+			}
+			break;
+		}
+#endif
+		default:
+			/* invalid address family */
+			cnt_invalid++;
+			if (asc->cnt == cnt_invalid)
+				return (1);
+		}
+	}
+	return (0);
+}
+
+/*
+ * Iterator end-of-endpoint hook, used only for the subset-bound (not
+ * bound-all) case: once all associations of the endpoint were processed,
+ * clear the pending 'action' on added addresses and remove deleted
+ * addresses from the endpoint's local address list.
+ */
+static int
+sctp_asconf_iterator_ep_end(struct sctp_inpcb *inp, void *ptr, uint32_t val SCTP_UNUSED)
+{
+	struct sctp_ifa *ifa;
+	struct sctp_asconf_iterator *asc;
+	struct sctp_laddr *laddr, *nladdr, *l;
+
+	/* Only for specific case not bound all */
+	asc = (struct sctp_asconf_iterator *)ptr;
+	LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) {
+		ifa = l->ifa;
+		if (l->action == SCTP_ADD_IP_ADDRESS) {
+			LIST_FOREACH(laddr, &inp->sctp_addr_list,
+				     sctp_nxt_addr) {
+				if (laddr->ifa == ifa) {
+					laddr->action = 0;
+					break;
+				}
+
+			}
+		} else if (l->action == SCTP_DEL_IP_ADDRESS) {
+			/* SAFE variant: sctp_del_local_addr_ep unlinks entries */
+			LIST_FOREACH_SAFE(laddr, &inp->sctp_addr_list, sctp_nxt_addr, nladdr) {
+				/* remove only after all guys are done */
+				if (laddr->ifa == ifa) {
+					sctp_del_local_addr_ep(inp, ifa);
+				}
+			}
+		}
+	}
+	return (0);
+}
+
+/*
+ * Per-association iterator function: re-validates each address on the
+ * work list against this association's scopes and family bindings,
+ * applies the add/delete/set-primary side effects (restricted list,
+ * cached route and source address, set-primary validation) and queues
+ * the corresponding ASCONF parameters.  If anything was queued while
+ * the association is open, a single ASCONF is sent at the end.
+ */
+void
+sctp_asconf_iterator_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+			  void *ptr, uint32_t val SCTP_UNUSED)
+{
+	struct sctp_asconf_iterator *asc;
+	struct sctp_ifa *ifa;
+	struct sctp_laddr *l;
+	int cnt_invalid = 0;
+	int type, status;
+	int num_queued = 0;
+
+	asc = (struct sctp_asconf_iterator *)ptr;
+	LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) {
+		ifa = l->ifa;
+		type = l->action;
+
+		/* address's vrf_id must be the vrf_id of the assoc */
+		if (ifa->vrf_id != stcb->asoc.vrf_id) {
+			continue;
+		}
+
+		/* Same checks again for assoc */
+		switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+		case AF_INET6:
+		{
+			/* invalid if we're not a v6 endpoint */
+			struct sockaddr_in6 *sin6;
+
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+				cnt_invalid++;
+				if (asc->cnt == cnt_invalid)
+					return;
+				else
+					continue;
+			}
+			sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
+			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+				/* we skip unspecified addresses */
+				continue;
+			}
+			if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+				if (stcb->asoc.local_scope == 0) {
+					continue;
+				}
+				/* is it the right link local scope? */
+				if (sctp_is_scopeid_in_nets(stcb, &ifa->address.sa) == 0) {
+					continue;
+				}
+			}
+			break;
+		}
+#endif
+#ifdef INET
+		case AF_INET:
+		{
+			/* invalid if we are a v6 only endpoint */
+			struct in6pcb *inp6;
+			struct sockaddr_in *sin;
+
+			inp6 = (struct in6pcb *)&inp->ip_inp.inp;
+			/* invalid if we are a v6 only endpoint */
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+			    SCTP_IPV6_V6ONLY(inp6))
+				continue;
+
+			sin = (struct sockaddr_in *)&ifa->address.sa;
+			if (sin->sin_addr.s_addr == 0) {
+				/* we skip unspecified addresses */
+				continue;
+			}
+			if (stcb->asoc.ipv4_local_scope == 0 &&
+			    IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+				continue;
+			}
+			/*
+			 * NOTE(review): unreachable duplicate of the v6-only
+			 * check above (that check already 'continue's on the
+			 * same condition), so cnt_invalid can never be bumped
+			 * here; kept as-is from upstream.
+			 */
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+			    SCTP_IPV6_V6ONLY(inp6)) {
+				cnt_invalid++;
+				if (asc->cnt == cnt_invalid)
+					return;
+				else
+					continue;
+			}
+			break;
+		}
+#endif
+		default:
+			/* invalid address family */
+			cnt_invalid++;
+			if (asc->cnt == cnt_invalid)
+				return;
+			else
+				continue;
+			break;
+		}
+
+		if (type == SCTP_ADD_IP_ADDRESS) {
+			/* prevent this address from being used as a source */
+			sctp_add_local_addr_restricted(stcb, ifa);
+		} else if (type == SCTP_DEL_IP_ADDRESS) {
+			struct sctp_nets *net;
+			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+				sctp_rtentry_t *rt;
+
+				/* delete this address if cached */
+				if (net->ro._s_addr == ifa) {
+					sctp_free_ifa(net->ro._s_addr);
+					net->ro._s_addr = NULL;
+					net->src_addr_selected = 0;
+					rt = net->ro.ro_rt;
+					if (rt) {
+						RTFREE(rt);
+						net->ro.ro_rt = NULL;
+					}
+					/*
+					 * Now we deleted our src address,
+					 * should we not also now reset the
+					 * cwnd/rto to start as if its a new
+					 * address?
+					 */
+					stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
+					net->RTO = 0;
+
+				}
+			}
+		} else if (type == SCTP_SET_PRIM_ADDR) {
+			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+				/* must validate the ifa is in the ep */
+				if (sctp_is_addr_in_ep(stcb->sctp_ep,ifa) == 0) {
+					continue;
+				}
+			} else {
+				/* Need to check scopes for this guy */
+				if (sctp_is_address_in_scope(ifa,
+					stcb->asoc.ipv4_addr_legal,
+					stcb->asoc.ipv6_addr_legal,
+					stcb->asoc.loopback_scope,
+					stcb->asoc.ipv4_local_scope,
+					stcb->asoc.local_scope,
+					stcb->asoc.site_scope,0) == 0) {
+					continue;
+				}
+			}
+		}
+		/* queue an asconf for this address add/delete */
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF) &&
+		    stcb->asoc.peer_supports_asconf) {
+			/* queue an asconf for this addr */
+			status = sctp_asconf_queue_add(stcb, ifa, type);
+			/*
+			 * if queued ok, and in the open state, update the
+			 * count of queued params.  If in the non-open state,
+			 * these get sent when the assoc goes open.
+			 */
+			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+				if (status >= 0) {
+					num_queued++;
+				}
+			}
+		}
+	}
+	/*
+	 * If we have queued params in the open state, send out an ASCONF.
+	 */
+	if (num_queued > 0) {
+		sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED);
+	}
+}
+
+/*
+ * Iterator completion hook: releases every work-list entry, clearing the
+ * defer-use flag on added addresses, dropping the ifa reference and
+ * freeing the laddr entries and the iterator structure itself.
+ */
+void
+sctp_asconf_iterator_end(void *ptr, uint32_t val SCTP_UNUSED)
+{
+	struct sctp_asconf_iterator *asc;
+	struct sctp_ifa *ifa;
+	struct sctp_laddr *l, *nl;
+
+	asc = (struct sctp_asconf_iterator *)ptr;
+	LIST_FOREACH_SAFE(l, &asc->list_of_work, sctp_nxt_addr, nl) {
+		ifa = l->ifa;
+		if (l->action == SCTP_ADD_IP_ADDRESS) {
+			/* Clear the defer use flag */
+			ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+		}
+		sctp_free_ifa(ifa);
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), l);
+		SCTP_DECR_LADDR_COUNT();
+	}
+	SCTP_FREE(asc, SCTP_M_ASC_IT);
+}
+
+/*
+ * sa is the sockaddr to ask the peer to set primary to.
+ * returns: 0 = completed, -1 = error
+ */
+int32_t
+sctp_set_primary_ip_address_sa(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+ 	uint32_t vrf_id;
+	struct sctp_ifa *ifa;
+
+	/* find the ifa for the desired set primary */
+	vrf_id = stcb->asoc.vrf_id;
+	ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED);
+	if (ifa == NULL) {
+		/* Invalid address */
+		return (-1);
+	}
+
+	/* queue an ASCONF:SET_PRIM_ADDR to be sent */
+	if (!sctp_asconf_queue_add(stcb, ifa, SCTP_SET_PRIM_ADDR)) {
+		/* set primary queuing succeeded */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"set_primary_ip_address_sa: queued on tcb=%p, ",
+			(void *)stcb);
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+					 stcb->sctp_ep, stcb,
+					 stcb->asoc.primary_destination);
+#else
+			sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED);
+#endif
+		}
+	} else {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "set_primary_ip_address_sa: failed to add to queue on tcb=%p, ",
+			(void *)stcb);
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		return (-1);
+	}
+	return (0);
+}
+
+/*
+ * Queue an ASCONF SET_PRIM_ADDR for 'ifa' on every association of every
+ * endpoint in the system, sending the ASCONF immediately where the
+ * association is already established.
+ */
+void
+sctp_set_primary_ip_address(struct sctp_ifa *ifa)
+{
+	struct sctp_inpcb *inp;
+
+	/* go through all our PCB's */
+	LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
+		struct sctp_tcb *stcb;
+
+		/* process for all associations for this endpoint */
+		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+			/* queue an ASCONF:SET_PRIM_ADDR to be sent */
+			if (!sctp_asconf_queue_add(stcb, ifa,
+						   SCTP_SET_PRIM_ADDR)) {
+				/* set primary queuing succeeded */
+				SCTPDBG(SCTP_DEBUG_ASCONF1, "set_primary_ip_address: queued on stcb=%p, ",
+					(void *)stcb);
+				SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &ifa->address.sa);
+				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+					sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+							 stcb->sctp_ep, stcb,
+							 stcb->asoc.primary_destination);
+#else
+					sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED);
+#endif
+				}
+			}
+		} /* for each stcb */
+	} /* for each inp */
+}
+
+/*
+ * Walks the not-yet-acked ASCONF chunks on the send queue and counts
+ * queued ADD/DELETE parameters matching 'sctp_ifa'.  Returns 1 when the
+ * net effect of the pending operations leaves the address added (more
+ * adds than deletes, or equal counts with an ADD as the most recent
+ * operation), else 0.  Parameters are pulled out of the mbuf chain via
+ * sctp_m_getptr() into a local bounce buffer.
+ */
+int
+sctp_is_addr_pending(struct sctp_tcb *stcb, struct sctp_ifa *sctp_ifa)
+{
+	struct sctp_tmit_chunk *chk, *nchk;
+	unsigned int offset, asconf_limit;
+	struct sctp_asconf_chunk *acp;
+	struct sctp_asconf_paramhdr *aph;
+	uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
+	struct sctp_paramhdr *ph;
+	int add_cnt, del_cnt;
+	uint16_t last_param_type;
+
+	add_cnt = del_cnt = 0;
+	last_param_type = 0;
+	TAILQ_FOREACH_SAFE(chk, &stcb->asoc.asconf_send_queue, sctp_next, nchk) {
+		if (chk->data == NULL) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: No mbuf data?\n");
+			continue;
+		}
+		offset = 0;
+		acp = mtod(chk->data, struct sctp_asconf_chunk *);
+		offset += sizeof(struct sctp_asconf_chunk);
+		asconf_limit = ntohs(acp->ch.chunk_length);
+		/* skip the lookup-address parameter that follows the header */
+		ph = (struct sctp_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_paramhdr), aparam_buf);
+		if (ph == NULL) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: couldn't get lookup addr!\n");
+			continue;
+		}
+		offset += ntohs(ph->param_length);
+
+		aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+		if (aph == NULL) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: Empty ASCONF will be sent?\n");
+			continue;
+		}
+		while (aph != NULL) {
+			unsigned int param_length, param_type;
+
+			param_type = ntohs(aph->ph.param_type);
+			param_length = ntohs(aph->ph.param_length);
+			if (offset + param_length > asconf_limit) {
+				/* parameter goes beyond end of chunk! */
+				break;
+			}
+			if (param_length > sizeof(aparam_buf)) {
+				SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: param length (%u) larger than buffer size!\n", param_length);
+				break;
+			}
+			if (param_length <= sizeof(struct sctp_paramhdr)) {
+				SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: param length(%u) too short\n", param_length);
+				break;
+			}
+
+			aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, param_length, aparam_buf);
+			if (aph == NULL) {
+				SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: couldn't get entire param\n");
+				break;
+			}
+
+			/* the address param immediately follows the asconf paramhdr */
+			ph = (struct sctp_paramhdr *)(aph + 1);
+			if (sctp_addr_match(ph, &sctp_ifa->address.sa) != 0) {
+				switch (param_type) {
+				case SCTP_ADD_IP_ADDRESS:
+					add_cnt++;
+					break;
+				case SCTP_DEL_IP_ADDRESS:
+					del_cnt++;
+					break;
+				default:
+					break;
+				}
+				last_param_type = param_type;
+			}
+
+			offset += SCTP_SIZE32(param_length);
+			if (offset >= asconf_limit) {
+				/* no more data in the mbuf chain */
+				break;
+			}
+			/* get pointer to next asconf param */
+			aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+		}
+	}
+
+	/* we want to find the sequences which consist of ADD -> DEL -> ADD or DEL -> ADD */
+	if (add_cnt > del_cnt ||
+	    (add_cnt == del_cnt && last_param_type == SCTP_ADD_IP_ADDRESS)) {
+		return (1);
+	}
+	return (0);
+}
+
+/*
+ * Find a usable local address (respecting the association's scopes and
+ * the restricted/pending lists) to use as the ASCONF lookup address in
+ * the bound-all case.  Takes and releases the address read lock unless
+ * the caller indicates it is already held.  Returns NULL if no suitable
+ * address exists.
+ */
+static struct sockaddr *
+sctp_find_valid_localaddr(struct sctp_tcb *stcb, int addr_locked)
+{
+	struct sctp_vrf *vrf = NULL;
+	struct sctp_ifn *sctp_ifn;
+	struct sctp_ifa *sctp_ifa;
+
+	if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+		SCTP_IPI_ADDR_RLOCK();
+	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
+	if (vrf == NULL) {
+		if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+			SCTP_IPI_ADDR_RUNLOCK();
+		return (NULL);
+	}
+	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+		if (stcb->asoc.loopback_scope == 0 &&
+		    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+			/* Skip if loopback_scope not set */
+			continue;
+		}
+		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+			switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+			case AF_INET:
+				if (stcb->asoc.ipv4_addr_legal) {
+					struct sockaddr_in *sin;
+
+					sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
+					if (sin->sin_addr.s_addr == 0) {
+						/* skip unspecified addresses */
+						continue;
+					}
+					if (stcb->asoc.ipv4_local_scope == 0 &&
+					    IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))
+						continue;
+
+					/* restricted and not pending-add: unusable */
+					if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
+					    (!sctp_is_addr_pending(stcb, sctp_ifa)))
+						continue;
+					/* found a valid local v4 address to use */
+					if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+						SCTP_IPI_ADDR_RUNLOCK();
+					return (&sctp_ifa->address.sa);
+				}
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				if (stcb->asoc.ipv6_addr_legal) {
+					struct sockaddr_in6 *sin6;
+
+					if (sctp_ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+						continue;
+					}
+
+					sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
+					if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+						/* we skip unspecified addresses */
+						continue;
+					}
+					if (stcb->asoc.local_scope == 0 &&
+					    IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))
+						continue;
+					if (stcb->asoc.site_scope == 0 &&
+					    IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))
+						continue;
+
+					/* restricted and not pending-add: unusable */
+					if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
+					    (!sctp_is_addr_pending(stcb, sctp_ifa)))
+						continue;
+					/* found a valid local v6 address to use */
+					if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+						SCTP_IPI_ADDR_RUNLOCK();
+					return (&sctp_ifa->address.sa);
+				}
+				break;
+#endif
+			default:
+				break;
+			}
+		}
+	}
+	/* no valid addresses found */
+	if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+		SCTP_IPI_ADDR_RUNLOCK();
+	return (NULL);
+}
+
+/*
+ * Subset-bound variant of sctp_find_valid_localaddr(): pick the first
+ * usable address from the endpoint's own local address list, skipping
+ * entries that are restricted and not pending an add.  Returns NULL if
+ * none qualifies.
+ */
+static struct sockaddr *
+sctp_find_valid_localaddr_ep(struct sctp_tcb *stcb)
+{
+	struct sctp_laddr *laddr;
+
+	LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+		if (laddr->ifa == NULL) {
+			continue;
+		}
+		/* is the address restricted ? */
+		if (sctp_is_addr_restricted(stcb, laddr->ifa) &&
+		    (!sctp_is_addr_pending(stcb, laddr->ifa)))
+			continue;
+
+		/* found a valid local address to use */
+		return (&laddr->ifa->address.sa);
+	}
+	/* no valid addresses found */
+	return (NULL);
+}
+
+/*
+ * builds an ASCONF chunk from queued ASCONF params.
+ * returns NULL on error (no mbuf, no ASCONF params queued, etc).
+ */
+struct mbuf *
+sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen, int addr_locked)
+{
+	struct mbuf *m_asconf, *m_asconf_chk;
+	struct sctp_asconf_addr *aa;
+	struct sctp_asconf_chunk *acp;
+	struct sctp_asconf_paramhdr *aph;
+	struct sctp_asconf_addr_param *aap;
+	uint32_t p_length;
+	uint32_t correlation_id = 1;	/* 0 is reserved... */
+	caddr_t ptr, lookup_ptr;
+	uint8_t lookup_used = 0;
+
+	/* are there any asconf params to send? */
+	TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+		if (aa->sent == 0)
+			break;
+	}
+	if (aa == NULL)
+		return (NULL);
+
+	/*
+	 * get a chunk header mbuf and a cluster for the asconf params since
+	 * it's simpler to fill in the asconf chunk header lookup address on
+	 * the fly
+	 */
+	m_asconf_chk = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_chunk), 0, M_DONTWAIT, 1, MT_DATA);
+	if (m_asconf_chk == NULL) {
+		/* no mbuf's */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"compose_asconf: couldn't get chunk mbuf!\n");
+		return (NULL);
+	}
+	m_asconf = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
+	if (m_asconf == NULL) {
+		/* no mbuf's */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"compose_asconf: couldn't get mbuf!\n");
+		sctp_m_freem(m_asconf_chk);
+		return (NULL);
+	}
+	SCTP_BUF_LEN(m_asconf_chk) = sizeof(struct sctp_asconf_chunk);
+	SCTP_BUF_LEN(m_asconf) = 0;
+	acp = mtod(m_asconf_chk, struct sctp_asconf_chunk *);
+	bzero(acp, sizeof(struct sctp_asconf_chunk));
+	/* save pointers to lookup address and asconf params */
+	lookup_ptr = (caddr_t)(acp + 1);	/* after the header */
+	ptr = mtod(m_asconf, caddr_t);	/* beginning of cluster */
+
+	/* fill in chunk header info */
+	acp->ch.chunk_type = SCTP_ASCONF;
+	acp->ch.chunk_flags = 0;
+	acp->serial_number = htonl(stcb->asoc.asconf_seq_out);
+	stcb->asoc.asconf_seq_out++;
+
+	/* add parameters... up to smallest MTU allowed */
+	TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+		if (aa->sent)
+			continue;
+		/* get the parameter length */
+		p_length = SCTP_SIZE32(aa->ap.aph.ph.param_length);
+		/* will it fit in current chunk? */
+		if (SCTP_BUF_LEN(m_asconf) + p_length > stcb->asoc.smallest_mtu) {
+			/* won't fit, so we're done with this chunk */
+			break;
+		}
+		/* assign (and store) a correlation id */
+		aa->ap.aph.correlation_id = correlation_id++;
+
+		/*
+		 * fill in address if we're doing a delete this is a simple
+		 * way for us to fill in the correlation address, which
+		 * should only be used by the peer if we're deleting our
+		 * source address and adding a new address (e.g. renumbering
+		 * case)
+		 */
+		if (lookup_used == 0 &&
+		    (aa->special_del == 0) &&
+		    aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) {
+			struct sctp_ipv6addr_param *lookup;
+			uint16_t p_size, addr_size;
+
+			lookup = (struct sctp_ipv6addr_param *)lookup_ptr;
+			lookup->ph.param_type =
+			    htons(aa->ap.addrp.ph.param_type);
+			if (aa->ap.addrp.ph.param_type == SCTP_IPV6_ADDRESS) {
+				/* copy IPv6 address */
+				p_size = sizeof(struct sctp_ipv6addr_param);
+				addr_size = sizeof(struct in6_addr);
+			} else {
+				/* copy IPv4 address */
+				p_size = sizeof(struct sctp_ipv4addr_param);
+				addr_size = sizeof(struct in_addr);
+			}
+			lookup->ph.param_length = htons(SCTP_SIZE32(p_size));
+			memcpy(lookup->addr, &aa->ap.addrp.addr, addr_size);
+			SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(p_size);
+			lookup_used = 1;
+		}
+		/* copy into current space */
+		memcpy(ptr, &aa->ap, p_length);
+
+		/* network elements and update lengths */
+		aph = (struct sctp_asconf_paramhdr *)ptr;
+		aap = (struct sctp_asconf_addr_param *)ptr;
+		/* correlation_id is transparent to peer, no htonl needed */
+		aph->ph.param_type = htons(aph->ph.param_type);
+		aph->ph.param_length = htons(aph->ph.param_length);
+		aap->addrp.ph.param_type = htons(aap->addrp.ph.param_type);
+		aap->addrp.ph.param_length = htons(aap->addrp.ph.param_length);
+
+		SCTP_BUF_LEN(m_asconf) += SCTP_SIZE32(p_length);
+		ptr += SCTP_SIZE32(p_length);
+
+		/*
+		 * these params are removed off the pending list upon
+		 * getting an ASCONF-ACK back from the peer, just set flag
+		 */
+		aa->sent = 1;
+	}
+	/* check to see if the lookup addr has been populated yet */
+	if (lookup_used == 0) {
+		/* NOTE: if the address param is optional, can skip this... */
+		/* add any valid (existing) address... */
+		struct sctp_ipv6addr_param *lookup;
+		uint16_t p_size, addr_size;
+		struct sockaddr *found_addr;
+		caddr_t addr_ptr;
+
+		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)
+			found_addr = sctp_find_valid_localaddr(stcb,
+							       addr_locked);
+		else
+			found_addr = sctp_find_valid_localaddr_ep(stcb);
+
+		lookup = (struct sctp_ipv6addr_param *)lookup_ptr;
+		if (found_addr != NULL) {
+			switch (found_addr->sa_family) {
+#ifdef INET6
+			case AF_INET6:
+				/* copy IPv6 address */
+				lookup->ph.param_type =
+				    htons(SCTP_IPV6_ADDRESS);
+				p_size = sizeof(struct sctp_ipv6addr_param);
+				addr_size = sizeof(struct in6_addr);
+				addr_ptr = (caddr_t)&((struct sockaddr_in6 *)
+				    found_addr)->sin6_addr;
+				break;
+#endif
+#ifdef INET
+			case AF_INET:
+				/* copy IPv4 address */
+				lookup->ph.param_type =
+				    htons(SCTP_IPV4_ADDRESS);
+				p_size = sizeof(struct sctp_ipv4addr_param);
+				addr_size = sizeof(struct in_addr);
+				addr_ptr = (caddr_t)&((struct sockaddr_in *)
+				    found_addr)->sin_addr;
+				break;
+#endif
+			default:
+				p_size = 0;
+				addr_size = 0;
+				addr_ptr = NULL;
+				break;
+			}
+			lookup->ph.param_length = htons(SCTP_SIZE32(p_size));
+			memcpy(lookup->addr, addr_ptr, addr_size);
+			SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(p_size);
+		} else {
+			/* uh oh... don't have any address?? */
+			SCTPDBG(SCTP_DEBUG_ASCONF1,
+				"compose_asconf: no lookup addr!\n");
+			/* XXX for now, we send a IPv4 address of 0.0.0.0 */
+			lookup->ph.param_type = htons(SCTP_IPV4_ADDRESS);
+			lookup->ph.param_length = htons(SCTP_SIZE32(sizeof(struct sctp_ipv4addr_param)));
+			bzero(lookup->addr, sizeof(struct in_addr));
+			SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(sizeof(struct sctp_ipv4addr_param));
+		}
+	}
+	/* chain it all together */
+	SCTP_BUF_NEXT(m_asconf_chk) = m_asconf;
+	*retlen = SCTP_BUF_LEN(m_asconf_chk) + SCTP_BUF_LEN(m_asconf);
+	acp->ch.chunk_length = ntohs(*retlen);
+
+	return (m_asconf_chk);
+}
+
+/*
+ * section to handle address changes before an association is up eg. changes
+ * during INIT/INIT-ACK/COOKIE-ECHO handshake
+ */
+
+/*
+ * processes the (local) addresses in the INIT-ACK chunk
+ */
+static void
+sctp_process_initack_addresses(struct sctp_tcb *stcb, struct mbuf *m,
+    unsigned int offset, unsigned int length)
+{
+	struct sctp_paramhdr tmp_param, *ph;
+	uint16_t plen, ptype;
+	struct sctp_ifa *sctp_ifa;
+#ifdef INET6
+	struct sctp_ipv6addr_param addr6_store;
+	struct sockaddr_in6 sin6;
+#endif
+#ifdef INET
+	struct sctp_ipv4addr_param addr4_store;
+	struct sockaddr_in sin;
+#endif
+	struct sockaddr *sa;
+	uint32_t vrf_id;
+
+	SCTPDBG(SCTP_DEBUG_ASCONF2, "processing init-ack addresses\n");
+	if (stcb == NULL) /* Un-needed check for SA */
+		return;
+
+	/* convert to upper bound */
+	length += offset;
+
+	if ((offset + sizeof(struct sctp_paramhdr)) > length) {
+		return;
+	}
+	/* init the addresses */
+#ifdef INET6
+	bzero(&sin6, sizeof(sin6));
+	sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+	sin6.sin6_len = sizeof(sin6);
+#endif
+	sin6.sin6_port = stcb->rport;
+#endif
+
+#ifdef INET
+	bzero(&sin, sizeof(sin));
+	sin.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+	sin.sin_len = sizeof(sin);
+#endif
+	sin.sin_port = stcb->rport;
+#endif
+
+	/* go through the addresses in the init-ack */
+	ph = (struct sctp_paramhdr *)
+	     sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr),
+	                   (uint8_t *)&tmp_param);
+	while (ph != NULL) {
+		ptype = ntohs(ph->param_type);
+		plen = ntohs(ph->param_length);
+		switch (ptype) {
+#ifdef INET6
+		case SCTP_IPV6_ADDRESS:
+		{
+			struct sctp_ipv6addr_param *a6p;
+
+			/* get the entire IPv6 address param */
+			a6p = (struct sctp_ipv6addr_param *)
+			    sctp_m_getptr(m, offset,
+			    sizeof(struct sctp_ipv6addr_param),
+			    (uint8_t *)&addr6_store);
+			if (plen != sizeof(struct sctp_ipv6addr_param) ||
+			    a6p == NULL) {
+				return;
+			}
+			memcpy(&sin6.sin6_addr, a6p->addr,
+			    sizeof(struct in6_addr));
+			sa = (struct sockaddr *)&sin6;
+			break;
+		}
+#endif
+#ifdef INET
+		case SCTP_IPV4_ADDRESS:
+		{
+			struct sctp_ipv4addr_param *a4p;
+
+			/* get the entire IPv4 address param */
+			a4p = (struct sctp_ipv4addr_param *)sctp_m_getptr(m, offset,
+									  sizeof(struct sctp_ipv4addr_param),
+									  (uint8_t *)&addr4_store);
+			if (plen != sizeof(struct sctp_ipv4addr_param) ||
+			    a4p == NULL) {
+				return;
+			}
+			sin.sin_addr.s_addr = a4p->addr;
+			sa = (struct sockaddr *)&sin;
+			break;
+		}
+#endif
+		default:
+			goto next_addr;
+		}
+
+		/* see if this address really (still) exists */
+		if (stcb) {
+			vrf_id = stcb->asoc.vrf_id;
+		} else {
+			vrf_id = SCTP_DEFAULT_VRFID;
+		}
+		sctp_ifa = sctp_find_ifa_by_addr(sa, vrf_id,
+						 SCTP_ADDR_NOT_LOCKED);
+		if (sctp_ifa == NULL) {
+			/* address doesn't exist anymore */
+			int status;
+
+			/* are ASCONFs allowed ? */
+			if ((sctp_is_feature_on(stcb->sctp_ep,
+			    SCTP_PCB_FLAGS_DO_ASCONF)) &&
+			    stcb->asoc.peer_supports_asconf) {
+				/* queue an ASCONF DEL_IP_ADDRESS */
+				status = sctp_asconf_queue_sa_delete(stcb, sa);
+				/*
+				 * if queued ok, and in correct state, send
+				 * out the ASCONF.
+				 */
+				if (status == 0 &&
+				    SCTP_GET_STATE(&stcb->asoc) ==
+				    SCTP_STATE_OPEN) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+					sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+							 stcb->sctp_ep, stcb,
+							 stcb->asoc.primary_destination);
+#else
+					sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED);
+#endif
+				}
+			}
+		}
+
+next_addr:
+		/*
+		 * Sanity check:  Make sure the length isn't 0, otherwise
+		 * we'll be stuck in this loop for a long time...
+		 */
+		if (SCTP_SIZE32(plen) == 0) {
+			SCTP_PRINTF("process_initack_addrs: bad len (%d) type=%xh\n",
+				    plen, ptype);
+			return;
+		}
+		/* get next parameter */
+		offset += SCTP_SIZE32(plen);
+		if ((offset + sizeof(struct sctp_paramhdr)) > length)
+			return;
+		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+		    sizeof(struct sctp_paramhdr), (uint8_t *) & tmp_param);
+	} /* while */
+}
+
+/* FIX ME: need to verify return result for v6 address type if v6 disabled */
+/*
+ * checks to see if a specific address is in the initack address list returns
+ * 1 if found, 0 if not
+ */
+static uint32_t
+sctp_addr_in_initack(struct mbuf *m, uint32_t offset, uint32_t length, struct sockaddr *sa)
+{
+	struct sctp_paramhdr tmp_param, *ph;
+	uint16_t plen, ptype;
+#ifdef INET
+	struct sockaddr_in *sin;
+	struct sctp_ipv4addr_param *a4p;
+	/* NOTE(review): v6-param-sized scratch buffer used for the v4
+	 * param; oversized but harmless — presumably intentional? */
+	struct sctp_ipv6addr_param addr4_store;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *sin6;
+	struct sctp_ipv6addr_param *a6p;
+	struct sctp_ipv6addr_param addr6_store;
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+	struct sockaddr_in6 sin6_tmp;
+#endif
+#endif
+
+	/* only v4/v6 addresses can ever match */
+	switch (sa->sa_family) {
+#ifdef INET
+	case AF_INET:
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		break;
+#endif
+	default:
+		return (0);
+	}
+
+	SCTPDBG(SCTP_DEBUG_ASCONF2, "find_initack_addr: starting search for ");
+	SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
+	/* convert to upper bound */
+	length += offset;
+
+	if ((offset + sizeof(struct sctp_paramhdr)) > length) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"find_initack_addr: invalid offset?\n");
+		return (0);
+	}
+	/* go through the addresses in the init-ack */
+	ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+	    sizeof(struct sctp_paramhdr), (uint8_t *) & tmp_param);
+	while (ph != NULL) {
+		ptype = ntohs(ph->param_type);
+		plen = ntohs(ph->param_length);
+		switch (ptype) {
+#ifdef INET6
+		case SCTP_IPV6_ADDRESS:
+			if (sa->sa_family == AF_INET6) {
+				/* get the entire IPv6 address param */
+				if (plen != sizeof(struct sctp_ipv6addr_param)) {
+					break;
+				}
+				/* get the entire IPv6 address param */
+				a6p = (struct sctp_ipv6addr_param *)
+				      sctp_m_getptr(m, offset,
+				                    sizeof(struct sctp_ipv6addr_param),
+				                    (uint8_t *)&addr6_store);
+				if (a6p == NULL) {
+					return (0);
+				}
+				sin6 = (struct sockaddr_in6 *)sa;
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+				if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
+					/* create a copy and clear scope */
+					memcpy(&sin6_tmp, sin6,
+					       sizeof(struct sockaddr_in6));
+					sin6 = &sin6_tmp;
+					in6_clearscope(&sin6->sin6_addr);
+				}
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+				if (memcmp(&sin6->sin6_addr, a6p->addr,
+				           sizeof(struct in6_addr)) == 0) {
+					/* found it */
+					return (1);
+				}
+			}
+			break;
+#endif /* INET6 */
+#ifdef INET
+		case SCTP_IPV4_ADDRESS:
+			if (sa->sa_family == AF_INET) {
+				if (plen != sizeof(struct sctp_ipv4addr_param)) {
+					break;
+				}
+				/* get the entire IPv4 address param */
+				a4p = (struct sctp_ipv4addr_param *)
+				      sctp_m_getptr(m, offset,
+				                    sizeof(struct sctp_ipv4addr_param),
+				                    (uint8_t *)&addr4_store);
+				if (a4p == NULL) {
+					return (0);
+				}
+				sin = (struct sockaddr_in *)sa;
+				if (sin->sin_addr.s_addr == a4p->addr) {
+					/* found it */
+					return (1);
+				}
+			}
+			break;
+#endif
+		default:
+			break;
+		}
+		/* get next parameter */
+		offset += SCTP_SIZE32(plen);
+		if (offset + sizeof(struct sctp_paramhdr) > length) {
+			return (0);
+		}
+		ph = (struct sctp_paramhdr *)
+		    sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr),
+		    (uint8_t *) & tmp_param);
+	} /* while */
+	/* not found! */
+	return (0);
+}
+
+/*
+ * makes sure that the current endpoint local addr list is consistent with
+ * the new association (eg. subset bound, asconf allowed) adds addresses as
+ * necessary
+ */
+static void
+sctp_check_address_list_ep(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+    int length, struct sockaddr *init_addr)
+{
+	struct sctp_laddr *laddr;
+
+	/* go through the endpoint list */
+	LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+		/* be paranoid and validate the laddr */
+		if (laddr->ifa == NULL) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1,
+				"check_addr_list_ep: laddr->ifa is NULL");
+			continue;
+		}
+		if (laddr->ifa == NULL) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "check_addr_list_ep: laddr->ifa->ifa_addr is NULL");
+			continue;
+		}
+		/* do i have it implicitly? */
+		if (sctp_cmpaddr(&laddr->ifa->address.sa, init_addr)) {
+			continue;
+		}
+		/* check to see if in the init-ack */
+		if (!sctp_addr_in_initack(m, offset, length, &laddr->ifa->address.sa)) {
+			/* try to add it */
+			sctp_addr_mgmt_assoc(stcb->sctp_ep, stcb, laddr->ifa,
+			    SCTP_ADD_IP_ADDRESS, SCTP_ADDR_NOT_LOCKED);
+		}
+	}
+}
+
+/*
+ * makes sure that the current kernel address list is consistent with the new
+ * association (with all addrs bound) adds addresses as necessary
+ */
+static void
+sctp_check_address_list_all(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+    int length, struct sockaddr *init_addr,
+    uint16_t local_scope, uint16_t site_scope,
+    uint16_t ipv4_scope, uint16_t loopback_scope)
+{
+	struct sctp_vrf *vrf = NULL;
+	struct sctp_ifn *sctp_ifn;
+	struct sctp_ifa *sctp_ifa;
+	uint32_t vrf_id;
+#ifdef INET
+	struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *sin6;
+#endif
+
+	if (stcb) {
+		vrf_id = stcb->asoc.vrf_id;
+	} else {
+		return;
+	}
+	SCTP_IPI_ADDR_RLOCK();
+	vrf = sctp_find_vrf(vrf_id);
+	if (vrf == NULL) {
+		SCTP_IPI_ADDR_RUNLOCK();
+		return;
+	}
+	/* go through all our known interfaces */
+	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+		if (loopback_scope == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+			/* skip loopback interface */
+			continue;
+		}
+		/* go through each interface address */
+		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+			/* do i have it implicitly? */
+			if (sctp_cmpaddr(&sctp_ifa->address.sa, init_addr)) {
+				continue;
+			}
+			switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+			case AF_INET:
+				sin = (struct sockaddr_in *)&sctp_ifa->address.sin;
+				if ((ipv4_scope == 0) &&
+				    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+					/* private address not in scope */
+					continue;
+				}
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sin6;
+				if ((local_scope == 0) &&
+				    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
+					continue;
+				}
+				if ((site_scope == 0) &&
+				    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+					continue;
+				}
+				break;
+#endif
+			default:
+				break;
+			}
+			/* check to see if in the init-ack */
+			if (!sctp_addr_in_initack(m, offset, length, &sctp_ifa->address.sa)) {
+				/* try to add it */
+				sctp_addr_mgmt_assoc(stcb->sctp_ep, stcb,
+				    sctp_ifa, SCTP_ADD_IP_ADDRESS,
+				    SCTP_ADDR_LOCKED);
+			}
+		} /* end foreach ifa */
+	} /* end foreach ifn */
+	SCTP_IPI_ADDR_RUNLOCK();
+}
+
+/*
+ * validates an init-ack chunk (from a cookie-echo) with current addresses
+ * adds addresses from the init-ack into our local address list, if needed
+ * queues asconf adds/deletes addresses as needed and makes appropriate list
+ * changes for source address selection m, offset: points to the start of the
+ * address list in an init-ack chunk length: total length of the address
+ * params only init_addr: address where my INIT-ACK was sent from
+ */
+void
+sctp_check_address_list(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+    int length, struct sockaddr *init_addr,
+    uint16_t local_scope, uint16_t site_scope,
+    uint16_t ipv4_scope, uint16_t loopback_scope)
+{
+	/* process the local addresses in the initack */
+	sctp_process_initack_addresses(stcb, m, offset, length);
+
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		/* bound all case */
+		sctp_check_address_list_all(stcb, m, offset, length, init_addr,
+		    local_scope, site_scope, ipv4_scope, loopback_scope);
+	} else {
+		/* subset bound case */
+		if (sctp_is_feature_on(stcb->sctp_ep,
+		    SCTP_PCB_FLAGS_DO_ASCONF)) {
+			/* asconf's allowed */
+			sctp_check_address_list_ep(stcb, m, offset, length,
+			    init_addr);
+		}
+		/* else, no asconfs allowed, so what we sent is what we get */
+	}
+}
+
/*
 * sctp_bindx() support: add or delete address 'sa' on endpoint 'inp'.
 *
 * type:      SCTP_ADD_IP_ADDRESS or SCTP_DEL_IP_ADDRESS
 * vrf_id:    VRF to look the address up in (adds only)
 * sctp_ifap: optional pre-resolved ifa; if non-NULL the lookup is skipped
 *
 * Returns 0 on success, or an errno-style code: EINVAL (bad sockaddr, or
 * attempt to delete the last local address), ENOMEM, EADDRNOTAVAIL (address
 * not found).  When the endpoint has associations, the actual per-assoc
 * work is deferred to the asconf iterator.
 */
uint32_t
sctp_addr_mgmt_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa,
    uint32_t type, uint32_t vrf_id, struct sctp_ifa *sctp_ifap)
{
	struct sctp_ifa *ifa;
	struct sctp_laddr *laddr, *nladdr;

#ifdef HAVE_SA_LEN
	if (sa->sa_len == 0) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EINVAL);
		return (EINVAL);
	}
#endif
	if (sctp_ifap) {
		/* caller already resolved the interface address */
		ifa = sctp_ifap;
	} else 	if (type == SCTP_ADD_IP_ADDRESS) {
		/* For an add the address MUST be on the system */
		ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED);
	} else if (type == SCTP_DEL_IP_ADDRESS) {
		/* For a delete we need to find it in the inp */
		ifa = sctp_find_ifa_in_ep(inp, sa, SCTP_ADDR_NOT_LOCKED);
	} else {
		ifa = NULL;
	}
	if (ifa != NULL) {
		if (type == SCTP_ADD_IP_ADDRESS) {
			sctp_add_local_addr_ep(inp, ifa, type);
		} else if (type == SCTP_DEL_IP_ADDRESS) {
			if (inp->laddr_count < 2) {
				/* can't delete the last local address */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EINVAL);
				return (EINVAL);
			}
			/* mark (don't yet remove) every matching laddr */
			LIST_FOREACH(laddr, &inp->sctp_addr_list,
				     sctp_nxt_addr) {
				if (ifa == laddr->ifa) {
					/* Mark in the delete */
					laddr->action = type;
				}
			}
		}
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			/*
			 * There is no need to start the iterator if
			 * the inp has no associations.
			 */
			if (type == SCTP_DEL_IP_ADDRESS) {
				/* safe-variant: we remove while walking */
				LIST_FOREACH_SAFE(laddr, &inp->sctp_addr_list, sctp_nxt_addr, nladdr) {
					if (laddr->ifa == ifa) {
						sctp_del_local_addr_ep(inp, ifa);
					}
				}
			}
		} else {
			/*
			 * Associations exist: queue a work item and kick off
			 * the asconf iterator, which applies the add/delete
			 * to each association asynchronously.
			 */
			struct sctp_asconf_iterator *asc;
			struct sctp_laddr *wi;

			SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
			            sizeof(struct sctp_asconf_iterator),
			            SCTP_M_ASC_IT);
			if (asc == NULL) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, ENOMEM);
				return (ENOMEM);
			}
			wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
			if (wi == NULL) {
				SCTP_FREE(asc, SCTP_M_ASC_IT);
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, ENOMEM);
				return (ENOMEM);
			}
			LIST_INIT(&asc->list_of_work);
			asc->cnt = 1;
			SCTP_INCR_LADDR_COUNT();
			wi->ifa = ifa;
			wi->action = type;
			/* the work item holds a reference on the ifa */
			atomic_add_int(&ifa->refcount, 1);
			LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
			(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
			                             sctp_asconf_iterator_stcb,
			                             sctp_asconf_iterator_ep_end,
			                             SCTP_PCB_ANY_FLAGS,
			                             SCTP_PCB_ANY_FEATURES,
			                             SCTP_ASOC_ANY_STATE,
			                             (void *)asc, 0,
			                             sctp_asconf_iterator_end, inp, 0);
		}
		return (0);
	} else {
		/* invalid address! */
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
}
+
+void
+sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb,
+				  struct sctp_nets *net)
+{
+	struct sctp_asconf_addr *aa;
+	struct sctp_ifa *sctp_ifap;
+	struct sctp_asconf_tag_param *vtag;
+#ifdef INET
+	struct sockaddr_in *to;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *to6;
+#endif
+	if (net == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing net\n");
+		return;
+	}
+	if (stcb == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing stcb\n");
+		return;
+	}
+  /* Need to have in the asconf:
+   * - vtagparam(my_vtag/peer_vtag)
+   * - add(0.0.0.0)
+   * - del(0.0.0.0)
+   * - Any global addresses add(addr)
+   */
+	SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+	            SCTP_M_ASC_ADDR);
+	if (aa == NULL) {
+		/* didn't get memory */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+		        "sctp_asconf_send_nat_state_update: failed to get memory!\n");
+		return;
+	}
+	aa->special_del = 0;
+	/* fill in asconf address parameter fields */
+	/* top level elements are "networked" during send */
+	aa->ifa = NULL;
+	aa->sent = 0;		/* clear sent flag */
+	vtag = (struct sctp_asconf_tag_param *)&aa->ap.aph;
+	vtag->aph.ph.param_type = SCTP_NAT_VTAGS;
+	vtag->aph.ph.param_length = sizeof(struct sctp_asconf_tag_param);
+	vtag->local_vtag = htonl(stcb->asoc.my_vtag);
+	vtag->remote_vtag = htonl(stcb->asoc.peer_vtag);
+	TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+
+	SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+	            SCTP_M_ASC_ADDR);
+	if (aa == NULL) {
+		/* didn't get memory */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+		        "sctp_asconf_send_nat_state_update: failed to get memory!\n");
+		return;
+	}
+	memset(aa, 0, sizeof(struct sctp_asconf_addr));
+	/* fill in asconf address parameter fields */
+	/* ADD(0.0.0.0) */
+	switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+	case AF_INET:
+		aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS;
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param);
+		aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+		aa->ap.addrp.ph.param_length = sizeof (struct sctp_ipv4addr_param);
+		/* No need to add an address, we are using 0.0.0.0 */
+		TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS;
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param);
+		aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+		aa->ap.addrp.ph.param_length = sizeof (struct sctp_ipv6addr_param);
+		/* No need to add an address, we are using 0.0.0.0 */
+		TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+		break;
+#endif
+	}
+	SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+	            SCTP_M_ASC_ADDR);
+	if (aa == NULL) {
+		/* didn't get memory */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+		        "sctp_asconf_send_nat_state_update: failed to get memory!\n");
+		return;
+	}
+	memset(aa, 0, sizeof(struct sctp_asconf_addr));
+	/* fill in asconf address parameter fields */
+	/* ADD(0.0.0.0) */
+	switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+	case AF_INET:
+		aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS;
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param);
+		aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+		aa->ap.addrp.ph.param_length = sizeof (struct sctp_ipv4addr_param);
+		/* No need to add an address, we are using 0.0.0.0 */
+		TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param);
+		aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+		aa->ap.addrp.ph.param_length = sizeof (struct sctp_ipv6addr_param);
+		/* No need to add an address, we are using 0.0.0.0 */
+		TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+		break;
+#endif
+	}
+	/* Now we must hunt the addresses and add all global addresses */
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		struct sctp_vrf *vrf = NULL;
+		struct sctp_ifn *sctp_ifnp;
+		uint32_t vrf_id;
+
+		vrf_id = stcb->sctp_ep->def_vrf_id;
+		vrf = sctp_find_vrf(vrf_id);
+		if (vrf == NULL) {
+			goto skip_rest;
+		}
+
+		SCTP_IPI_ADDR_RLOCK();
+		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
+			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
+				switch (sctp_ifap->address.sa.sa_family) {
+#ifdef INET
+				case AF_INET:
+					to = &sctp_ifap->address.sin;
+					if (IN4_ISPRIVATE_ADDRESS(&to->sin_addr)) {
+						continue;
+					}
+					if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+						continue;
+					}
+					break;
+#endif
+#ifdef INET6
+				case AF_INET6:
+					to6 = &sctp_ifap->address.sin6;
+					if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
+						continue;
+					}
+					if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
+						continue;
+					}
+					break;
+#endif
+				default:
+					continue;
+				}
+				sctp_asconf_queue_mgmt(stcb, sctp_ifap, SCTP_ADD_IP_ADDRESS);
+			}
+		}
+		SCTP_IPI_ADDR_RUNLOCK();
+	} else {
+		struct sctp_laddr *laddr;
+
+		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+			if (laddr->ifa == NULL) {
+				continue;
+			}
+			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
+				/* Address being deleted by the system, dont
+				 * list.
+				 */
+				continue;
+			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+				/* Address being deleted on this ep
+				 * don't list.
+				 */
+				continue;
+			}
+			sctp_ifap = laddr->ifa;
+			switch (sctp_ifap->address.sa.sa_family) {
+#ifdef INET
+			case AF_INET:
+				to = &sctp_ifap->address.sin;
+				if (IN4_ISPRIVATE_ADDRESS(&to->sin_addr)) {
+					continue;
+				}
+				if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+					continue;
+				}
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				to6 = &sctp_ifap->address.sin6;
+				if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
+					continue;
+				}
+				if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
+					continue;
+				}
+				break;
+#endif
+			default:
+				continue;
+			}
+			sctp_asconf_queue_mgmt(stcb, sctp_ifap, SCTP_ADD_IP_ADDRESS);
+		}
+	}
+ skip_rest:
+	/* Now we must send the asconf into the queue */
+	sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
+}
new file mode 100755
--- /dev/null
+++ b/netwerk/sctp/src/netinet/sctp_asconf.h
@@ -0,0 +1,97 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.h 237715 2012-06-28 16:01:08Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_ASCONF_H_
+#define _NETINET_SCTP_ASCONF_H_
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+/*
+ * function prototypes
+ */
+extern void sctp_asconf_cleanup(struct sctp_tcb *, struct sctp_nets *);
+
+extern struct mbuf *sctp_compose_asconf(struct sctp_tcb *, int *, int);
+
+extern void
+sctp_handle_asconf(struct mbuf *, unsigned int, struct sockaddr *,
+                   struct sctp_asconf_chunk *, struct sctp_tcb *, int);
+
+extern void
+sctp_handle_asconf_ack(struct mbuf *, int, struct sctp_asconf_ack_chunk *,
+     struct sctp_tcb *, struct sctp_nets *, int *);
+
+extern uint32_t
+sctp_addr_mgmt_ep_sa(struct sctp_inpcb *, struct sockaddr *,
+		     uint32_t, uint32_t, struct sctp_ifa *);
+
+
+extern int sctp_asconf_iterator_ep(struct sctp_inpcb *inp, void *ptr,
+				   uint32_t val);
+extern void sctp_asconf_iterator_stcb(struct sctp_inpcb *inp,
+				      struct sctp_tcb *stcb,
+				      void *ptr, uint32_t type);
+extern void sctp_asconf_iterator_end(void *ptr, uint32_t val);
+
+
+extern int32_t
+sctp_set_primary_ip_address_sa(struct sctp_tcb *,
+    struct sockaddr *);
+
+extern void
+sctp_set_primary_ip_address(struct sctp_ifa *ifa);
+
+extern void
+sctp_check_address_list(struct sctp_tcb *, struct mbuf *, int, int,
+    struct sockaddr *, uint16_t, uint16_t, uint16_t, uint16_t);
+
+extern void
+sctp_assoc_immediate_retrans(struct sctp_tcb *, struct sctp_nets *);
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
+extern void
+sctp_net_immediate_retrans(struct sctp_tcb *, struct sctp_nets *);
+#endif
+
+extern void
+sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb,
+				  struct sctp_nets *net);
+
+extern int
+sctp_is_addr_pending(struct sctp_tcb *, struct sctp_ifa *);
+#endif				/* _KERNEL || __Userspace__ */
+
+#endif				/* !_NETINET_SCTP_ASCONF_H_ */
new file mode 100755
--- /dev/null
+++ b/netwerk/sctp/src/netinet/sctp_auth.c
@@ -0,0 +1,2417 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.c 240148 2012-09-05 18:52:01Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_auth.h>
+
+#ifdef SCTP_DEBUG
+#define SCTP_AUTH_DEBUG		(SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_AUTH1)
+#define SCTP_AUTH_DEBUG2	(SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_AUTH2)
+#endif /* SCTP_DEBUG */
+
+
+void
+sctp_clear_chunklist(sctp_auth_chklist_t *chklist)
+{
+	bzero(chklist, sizeof(*chklist));
+	/* chklist->num_chunks = 0; */
+}
+
+sctp_auth_chklist_t *
+sctp_alloc_chunklist(void)
+{
+	sctp_auth_chklist_t *chklist;
+
+	SCTP_MALLOC(chklist, sctp_auth_chklist_t *, sizeof(*chklist),
+		    SCTP_M_AUTH_CL);
+	if (chklist == NULL) {
+		SCTPDBG(SCTP_DEBUG_AUTH1, "sctp_alloc_chunklist: failed to get memory!\n");
+	} else {
+		sctp_clear_chunklist(chklist);
+	}
+	return (chklist);
+}
+
+void
+sctp_free_chunklist(sctp_auth_chklist_t *list)
+{
+	if (list != NULL)
+		SCTP_FREE(list, SCTP_M_AUTH_CL);
+}
+
+sctp_auth_chklist_t *
+sctp_copy_chunklist(sctp_auth_chklist_t *list)
+{
+	sctp_auth_chklist_t *new_list;
+
+	if (list == NULL)
+		return (NULL);
+
+	/* get a new list */
+	new_list = sctp_alloc_chunklist();
+	if (new_list == NULL)
+		return (NULL);
+	/* copy it */
+	bcopy(list, new_list, sizeof(*new_list));
+
+	return (new_list);
+}
+
+
+/*
+ * add a chunk to the required chunks list
+ */
+int
+sctp_auth_add_chunk(uint8_t chunk, sctp_auth_chklist_t *list)
+{
+	if (list == NULL)
+		return (-1);
+
+	/* is chunk restricted? */
+	if ((chunk == SCTP_INITIATION) ||
+	    (chunk == SCTP_INITIATION_ACK) ||
+	    (chunk == SCTP_SHUTDOWN_COMPLETE) ||
+	    (chunk == SCTP_AUTHENTICATION)) {
+		return (-1);
+	}
+	if (list->chunks[chunk] == 0) {
+		list->chunks[chunk] = 1;
+		list->num_chunks++;
+		SCTPDBG(SCTP_DEBUG_AUTH1,
+			"SCTP: added chunk %u (0x%02x) to Auth list\n",
+			chunk, chunk);
+	}
+	return (0);
+}
+
+/*
+ * delete a chunk from the required chunks list
+ */
+int
+sctp_auth_delete_chunk(uint8_t chunk, sctp_auth_chklist_t *list)
+{
+	if (list == NULL)
+		return (-1);
+
+	/* is chunk restricted? */
+	if ((chunk == SCTP_ASCONF) ||
+	    (chunk == SCTP_ASCONF_ACK)) {
+		return (-1);
+	}
+	if (list->chunks[chunk] == 1) {
+		list->chunks[chunk] = 0;
+		list->num_chunks--;
+		SCTPDBG(SCTP_DEBUG_AUTH1,
+			"SCTP: deleted chunk %u (0x%02x) from Auth list\n",
+			chunk, chunk);
+	}
+	return (0);
+}
+
+size_t
+sctp_auth_get_chklist_size(const sctp_auth_chklist_t *list)
+{
+	if (list == NULL)
+		return (0);
+	else
+		return (list->num_chunks);
+}
+
/*
 * set the default list of chunks requiring AUTH
 * (RFC 4895 mandates that ASCONF and ASCONF-ACK be authenticated)
 */
void
sctp_auth_set_default_chunks(sctp_auth_chklist_t *list)
{
	(void)sctp_auth_add_chunk(SCTP_ASCONF, list);
	(void)sctp_auth_add_chunk(SCTP_ASCONF_ACK, list);
}
+
+/*
+ * return the current number and list of required chunks caller must
+ * guarantee ptr has space for up to 256 bytes
+ */
+int
+sctp_serialize_auth_chunks(const sctp_auth_chklist_t *list, uint8_t *ptr)
+{
+	int i, count = 0;
+
+	if (list == NULL)
+		return (0);
+
+	for (i = 0; i < 256; i++) {
+		if (list->chunks[i] != 0) {
+			*ptr++ = i;
+			count++;
+		}
+	}
+	return (count);
+}
+
+int
+sctp_pack_auth_chunks(const sctp_auth_chklist_t *list, uint8_t *ptr)
+{
+	int i, size = 0;
+
+	if (list == NULL)
+		return (0);
+
+	if (list->num_chunks <= 32) {
+		/* just list them, one byte each */
+		for (i = 0; i < 256; i++) {
+			if (list->chunks[i] != 0) {
+				*ptr++ = i;
+				size++;
+			}
+		}
+	} else {
+		int index, offset;
+
+		/* pack into a 32 byte bitfield */
+		for (i = 0; i < 256; i++) {
+			if (list->chunks[i] != 0) {
+				index = i / 8;
+				offset = i % 8;
+				ptr[index] |= (1 << offset);
+			}
+		}
+		size = 32;
+	}
+	return (size);
+}
+
+int
+sctp_unpack_auth_chunks(const uint8_t *ptr, uint8_t num_chunks,
+    sctp_auth_chklist_t *list)
+{
+	int i;
+	int size;
+
+	if (list == NULL)
+		return (0);
+
+	if (num_chunks <= 32) {
+		/* just pull them, one byte each */
+		for (i = 0; i < num_chunks; i++) {
+			(void)sctp_auth_add_chunk(*ptr++, list);
+		}
+		size = num_chunks;
+	} else {
+		int index, offset;
+
+		/* unpack from a 32 byte bitfield */
+		for (index = 0; index < 32; index++) {
+			for (offset = 0; offset < 8; offset++) {
+				if (ptr[index] & (1 << offset)) {
+					(void)sctp_auth_add_chunk((index * 8) + offset, list);
+				}
+			}
+		}
+		size = 32;
+	}
+	return (size);
+}
+
+
+/*
+ * allocate structure space for a key of length keylen
+ */
+sctp_key_t *
+sctp_alloc_key(uint32_t keylen)
+{
+	sctp_key_t *new_key;
+
+	SCTP_MALLOC(new_key, sctp_key_t *, sizeof(*new_key) + keylen,
+		    SCTP_M_AUTH_KY);
+	if (new_key == NULL) {
+		/* out of memory */
+		return (NULL);
+	}
+	new_key->keylen = keylen;
+	return (new_key);
+}
+
+void
+sctp_free_key(sctp_key_t *key)
+{
+	if (key != NULL)
+		SCTP_FREE(key,SCTP_M_AUTH_KY);
+}
+
+void
+sctp_print_key(sctp_key_t *key, const char *str)
+{
+	uint32_t i;
+
+	if (key == NULL) {
+		SCTP_PRINTF("%s: [Null key]\n", str);
+		return;
+	}
+	SCTP_PRINTF("%s: len %u, ", str, key->keylen);
+	if (key->keylen) {
+		for (i = 0; i < key->keylen; i++)
+			SCTP_PRINTF("%02x", key->key[i]);
+		SCTP_PRINTF("\n");
+	} else {
+		SCTP_PRINTF("[Null key]\n");
+	}
+}
+
+void
+sctp_show_key(sctp_key_t *key, const char *str)
+{
+	uint32_t i;
+
+	if (key == NULL) {
+		SCTP_PRINTF("%s: [Null key]\n", str);
+		return;
+	}
+	SCTP_PRINTF("%s: len %u, ", str, key->keylen);
+	if (key->keylen) {
+		for (i = 0; i < key->keylen; i++)
+			SCTP_PRINTF("%02x", key->key[i]);
+		SCTP_PRINTF("\n");
+	} else {
+		SCTP_PRINTF("[Null key]\n");
+	}
+}
+
+static uint32_t
+sctp_get_keylen(sctp_key_t *key)
+{
+	if (key != NULL)
+		return (key->keylen);
+	else
+		return (0);
+}
+
+/*
+ * generate a new random key of length 'keylen'
+ */
+sctp_key_t *
+sctp_generate_random_key(uint32_t keylen)
+{
+	sctp_key_t *new_key;
+
+	/* validate keylen */
+	if (keylen > SCTP_AUTH_RANDOM_SIZE_MAX)
+		keylen = SCTP_AUTH_RANDOM_SIZE_MAX;
+
+	new_key = sctp_alloc_key(keylen);
+	if (new_key == NULL) {
+		/* out of memory */
+		return (NULL);
+	}
+	SCTP_READ_RANDOM(new_key->key, keylen);
+	new_key->keylen = keylen;
+	return (new_key);
+}
+
+sctp_key_t *
+sctp_set_key(uint8_t *key, uint32_t keylen)
+{
+	sctp_key_t *new_key;
+
+	new_key = sctp_alloc_key(keylen);
+	if (new_key == NULL) {
+		/* out of memory */
+		return (NULL);
+	}
+	bcopy(key, new_key->key, keylen);
+	return (new_key);
+}
+
+/*-
+ * given two keys of variable size, compute which key is "larger/smaller"
+ * returns:  1 if key1 > key2
+ *          -1 if key1 < key2
+ *           0 if key1 = key2
+ */
+static int
+sctp_compare_key(sctp_key_t *key1, sctp_key_t *key2)
+{
+	uint32_t maxlen;
+	uint32_t i;
+	uint32_t key1len, key2len;
+	uint8_t *key_1, *key_2;
+	uint8_t temp[SCTP_AUTH_RANDOM_SIZE_MAX];
+
+	/* sanity/length check */
+	key1len = sctp_get_keylen(key1);
+	key2len = sctp_get_keylen(key2);
+	if ((key1len == 0) && (key2len == 0))
+		return (0);
+	else if (key1len == 0)
+		return (-1);
+	else if (key2len == 0)
+		return (1);
+
+	if (key1len != key2len) {
+		if (key1len >= key2len)
+			maxlen = key1len;
+		else
+			maxlen = key2len;
+		bzero(temp, maxlen);
+		if (key1len < maxlen) {
+			/* prepend zeroes to key1 */
+			bcopy(key1->key, temp + (maxlen - key1len), key1len);
+			key_1 = temp;
+			key_2 = key2->key;
+		} else {
+			/* prepend zeroes to key2 */
+			bcopy(key2->key, temp + (maxlen - key2len), key2len);
+			key_1 = key1->key;
+			key_2 = temp;
+		}
+	} else {
+		maxlen = key1len;
+		key_1 = key1->key;
+		key_2 = key2->key;
+	}
+
+	for (i = 0; i < maxlen; i++) {
+		if (*key_1 > *key_2)
+			return (1);
+		else if (*key_1 < *key_2)
+			return (-1);
+		key_1++;
+		key_2++;
+	}
+
+	/* keys are equal value, so check lengths */
+	if (key1len == key2len)
+		return (0);
+	else if (key1len < key2len)
+		return (-1);
+	else
+		return (1);
+}
+
/*
 * generate the concatenated keying material based on the two keys and the
 * shared key (if available). draft-ietf-tsvwg-auth specifies the specific
 * order for concatenation: shared key first, then the numerically smaller
 * of key1/key2 (per sctp_compare_key), then the larger.
 * Returns a newly allocated key, or NULL if all inputs are empty or
 * allocation fails.  The caller owns (and must free) the result.
 */
sctp_key_t *
sctp_compute_hashkey(sctp_key_t *key1, sctp_key_t *key2, sctp_key_t *shared)
{
	uint32_t keylen;
	sctp_key_t *new_key;
	uint8_t *key_ptr;

	/* total length of all (possibly NULL/empty) component keys */
	keylen = sctp_get_keylen(key1) + sctp_get_keylen(key2) +
	    sctp_get_keylen(shared);

	if (keylen > 0) {
		/* get space for the new key */
		new_key = sctp_alloc_key(keylen);
		if (new_key == NULL) {
			/* out of memory */
			return (NULL);
		}
		new_key->keylen = keylen;
		key_ptr = new_key->key;
	} else {
		/* all keys empty/null?! */
		return (NULL);
	}

	/* concatenate the keys */
	if (sctp_compare_key(key1, key2) <= 0) {
		/* key is shared + key1 + key2 */
		if (sctp_get_keylen(shared)) {
			bcopy(shared->key, key_ptr, shared->keylen);
			key_ptr += shared->keylen;
		}
		if (sctp_get_keylen(key1)) {
			bcopy(key1->key, key_ptr, key1->keylen);
			key_ptr += key1->keylen;
		}
		if (sctp_get_keylen(key2)) {
			bcopy(key2->key, key_ptr, key2->keylen);
		}
	} else {
		/* key is shared + key2 + key1 */
		if (sctp_get_keylen(shared)) {
			bcopy(shared->key, key_ptr, shared->keylen);
			key_ptr += shared->keylen;
		}
		if (sctp_get_keylen(key2)) {
			bcopy(key2->key, key_ptr, key2->keylen);
			key_ptr += key2->keylen;
		}
		if (sctp_get_keylen(key1)) {
			bcopy(key1->key, key_ptr, key1->keylen);
		}
	}
	return (new_key);
}
+
+
+sctp_sharedkey_t *
+sctp_alloc_sharedkey(void)
+{
+	sctp_sharedkey_t *new_key;
+
+	SCTP_MALLOC(new_key, sctp_sharedkey_t *, sizeof(*new_key),
+		    SCTP_M_AUTH_KY);
+	if (new_key == NULL) {
+		/* out of memory */
+		return (NULL);
+	}
+	new_key->keyid = 0;
+	new_key->key = NULL;
+	new_key->refcount = 1;
+	new_key->deactivated = 0;
+	return (new_key);
+}
+
+void
+sctp_free_sharedkey(sctp_sharedkey_t *skey)
+{
+	if (skey == NULL)
+		return;
+
+	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&skey->refcount)) {
+		if (skey->key != NULL)
+			sctp_free_key(skey->key);
+		SCTP_FREE(skey, SCTP_M_AUTH_KY);
+	}
+}
+
+sctp_sharedkey_t *
+sctp_find_sharedkey(struct sctp_keyhead *shared_keys, uint16_t key_id)
+{
+	sctp_sharedkey_t *skey;
+
+	LIST_FOREACH(skey, shared_keys, next) {
+		if (skey->keyid == key_id)
+			return (skey);
+	}
+	return (NULL);
+}
+
/*
 * Insert 'new_skey' into 'shared_keys', keeping the list sorted by key id.
 * An existing entry with the same id is replaced (and its reference
 * dropped) unless it is deactivated but still in use, in which case EBUSY
 * is returned.  Returns 0 on success, EINVAL on NULL arguments.
 */
int
sctp_insert_sharedkey(struct sctp_keyhead *shared_keys,
		      sctp_sharedkey_t *new_skey)
{
	sctp_sharedkey_t *skey;

	if ((shared_keys == NULL) || (new_skey == NULL))
		return (EINVAL);

	/* insert into an empty list? */
	if (LIST_EMPTY(shared_keys)) {
		LIST_INSERT_HEAD(shared_keys, new_skey, next);
		return (0);
	}
	/* insert into the existing list, ordered by key id */
	LIST_FOREACH(skey, shared_keys, next) {
		if (new_skey->keyid < skey->keyid) {
			/* insert it before here */
			LIST_INSERT_BEFORE(skey, new_skey, next);
			return (0);
		} else if (new_skey->keyid == skey->keyid) {
			/* replace the existing key */
			/* verify this key *can* be replaced */
			if ((skey->deactivated) && (skey->refcount > 1)) {
				SCTPDBG(SCTP_DEBUG_AUTH1,
					"can't replace shared key id %u\n",
					new_skey->keyid);
				return (EBUSY);
			}
			SCTPDBG(SCTP_DEBUG_AUTH1,
				"replacing shared key id %u\n",
				new_skey->keyid);
			/* link the new entry in, then unlink and drop the old */
			LIST_INSERT_BEFORE(skey, new_skey, next);
			LIST_REMOVE(skey, next);
			sctp_free_sharedkey(skey);
			return (0);
		}
		if (LIST_NEXT(skey, next) == NULL) {
			/* belongs at the end of the list */
			LIST_INSERT_AFTER(skey, new_skey, next);
			return (0);
		}
	}
	/* shouldn't reach here */
	return (0);
}
+
/*
 * Take a reference on the shared key with id 'key_id' on this association
 * (e.g. while a chunk authenticated with it is in flight).  Silently does
 * nothing if the key id is unknown.
 */
void
sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t key_id)
{
	sctp_sharedkey_t *skey;

	/* find the shared key */
	skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, key_id);

	/* bump the ref count */
	if (skey) {
		atomic_add_int(&skey->refcount, 1);
		SCTPDBG(SCTP_DEBUG_AUTH2,
			"%s: stcb %p key %u refcount acquire to %d\n",
			__FUNCTION__, (void *)stcb, key_id, skey->refcount);
	}
}
+
/*
 * Release one reference on the shared key with id 'key_id' (counterpart
 * of sctp_auth_key_acquire).  When a deactivated key drops to its final
 * reference, notify the ULP that the key is no longer in use.
 * so_locked tells sctp_ulp_notify() whether the socket is already locked
 * (only meaningful on platforms that need the socket lock).
 */
void
sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t key_id, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	SCTP_UNUSED
#endif
)
{
	sctp_sharedkey_t *skey;

	/* find the shared key */
	skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, key_id);

	/* decrement the ref count */
	if (skey) {
		sctp_free_sharedkey(skey);
		SCTPDBG(SCTP_DEBUG_AUTH2,
			"%s: stcb %p key %u refcount release to %d\n",
			__FUNCTION__, (void *)stcb, key_id, skey->refcount);

		/* see if a notification should be generated */
		if ((skey->refcount <= 1) && (skey->deactivated)) {
			/* notify ULP that key is no longer used */
			sctp_ulp_notify(SCTP_NOTIFY_AUTH_FREE_KEY, stcb,
					key_id, 0, so_locked);
			SCTPDBG(SCTP_DEBUG_AUTH2,
				"%s: stcb %p key %u no longer used, %d\n",
				__FUNCTION__, (void *)stcb, key_id, skey->refcount);
		}
	}
}
+
+static sctp_sharedkey_t *
+sctp_copy_sharedkey(const sctp_sharedkey_t *skey)
+{
+	sctp_sharedkey_t *new_skey;
+
+	if (skey == NULL)
+		return (NULL);
+	new_skey = sctp_alloc_sharedkey();
+	if (new_skey == NULL)
+		return (NULL);
+	if (skey->key != NULL)
+		new_skey->key = sctp_set_key(skey->key->key, skey->key->keylen);
+	else
+		new_skey->key = NULL;
+	new_skey->keyid = skey->keyid;
+	return (new_skey);
+}
+
+int
+sctp_copy_skeylist(const struct sctp_keyhead *src, struct sctp_keyhead *dest)
+{
+	sctp_sharedkey_t *skey, *new_skey;
+	int count = 0;
+
+	if ((src == NULL) || (dest == NULL))
+		return (0);
+	LIST_FOREACH(skey, src, next) {
+		new_skey = sctp_copy_sharedkey(skey);
+		if (new_skey != NULL) {
+			(void)sctp_insert_sharedkey(dest, new_skey);
+			count++;
+		}
+	}
+	return (count);
+}
+
+
+sctp_hmaclist_t *
+sctp_alloc_hmaclist(uint8_t num_hmacs)
+{
+	sctp_hmaclist_t *new_list;
+	int alloc_size;
+
+	alloc_size = sizeof(*new_list) + num_hmacs * sizeof(new_list->hmac[0]);
+	SCTP_MALLOC(new_list, sctp_hmaclist_t *, alloc_size,
+		    SCTP_M_AUTH_HL);
+	if (new_list == NULL) {
+		/* out of memory */
+		return (NULL);
+	}
+	new_list->max_algo = num_hmacs;
+	new_list->num_algo = 0;
+	return (new_list);
+}
+
+void
+sctp_free_hmaclist(sctp_hmaclist_t *list)
+{
+	if (list != NULL) {
+		SCTP_FREE(list,SCTP_M_AUTH_HL);
+		list = NULL;
+	}
+}
+
/*
 * Append an HMAC algorithm id to 'list'.
 * Returns 0 on success, -1 if the list is NULL/full, the id is not a
 * supported algorithm for this build, or the id is already present.
 */
int
sctp_auth_add_hmacid(sctp_hmaclist_t *list, uint16_t hmac_id)
{
	int i;
	if (list == NULL)
		return (-1);
	if (list->num_algo == list->max_algo) {
		SCTPDBG(SCTP_DEBUG_AUTH1,
			"SCTP: HMAC id list full, ignoring add %u\n", hmac_id);
		return (-1);
	}
	/*
	 * accept only algorithms compiled into this build; the trailing
	 * "&& 1" keeps the expression valid whatever the #ifdef selection
	 */
	if ((hmac_id != SCTP_AUTH_HMAC_ID_SHA1) &&
#ifdef HAVE_SHA224
	    (hmac_id != SCTP_AUTH_HMAC_ID_SHA224) &&
#endif
#ifdef HAVE_SHA2
	    (hmac_id != SCTP_AUTH_HMAC_ID_SHA256) &&
	    (hmac_id != SCTP_AUTH_HMAC_ID_SHA384) &&
	    (hmac_id != SCTP_AUTH_HMAC_ID_SHA512) &&
#endif
	    1) {
		return (-1);
	}
	/* Now is it already in the list */
	for (i = 0; i < list->num_algo; i++) {
		if (list->hmac[i] == hmac_id) {
			/* already in list */
			return (-1);
		}
	}
	SCTPDBG(SCTP_DEBUG_AUTH1, "SCTP: add HMAC id %u to list\n", hmac_id);
	list->hmac[list->num_algo++] = hmac_id;
	return (0);
}
+
+sctp_hmaclist_t *
+sctp_copy_hmaclist(sctp_hmaclist_t *list)
+{
+	sctp_hmaclist_t *new_list;
+	int i;
+
+	if (list == NULL)
+		return (NULL);
+	/* get a new list */
+	new_list = sctp_alloc_hmaclist(list->max_algo);
+	if (new_list == NULL)
+		return (NULL);
+	/* copy it */
+	new_list->max_algo = list->max_algo;
+	new_list->num_algo = list->num_algo;
+	for (i = 0; i < list->num_algo; i++)
+		new_list->hmac[i] = list->hmac[i];
+	return (new_list);
+}
+
+sctp_hmaclist_t *
+sctp_default_supported_hmaclist(void)
+{
+	sctp_hmaclist_t *new_list;
+
+	new_list = sctp_alloc_hmaclist(2);
+	if (new_list == NULL)
+		return (NULL);
+	(void)sctp_auth_add_hmacid(new_list, SCTP_AUTH_HMAC_ID_SHA1);
+	(void)sctp_auth_add_hmacid(new_list, SCTP_AUTH_HMAC_ID_SHA256);
+	return (new_list);
+}
+
+/*-
+ * HMAC algos are listed in priority/preference order
+ * find the best HMAC id to use for the peer based on local support
+ */
+uint16_t
+sctp_negotiate_hmacid(sctp_hmaclist_t *peer, sctp_hmaclist_t *local)
+{
+	int i, j;
+
+	if ((local == NULL) || (peer == NULL))
+		return (SCTP_AUTH_HMAC_ID_RSVD);
+
+	for (i = 0; i < peer->num_algo; i++) {
+		for (j = 0; j < local->num_algo; j++) {
+			if (peer->hmac[i] == local->hmac[j]) {
+				/* found the "best" one */
+				SCTPDBG(SCTP_DEBUG_AUTH1,
+					"SCTP: negotiated peer HMAC id %u\n",
+					peer->hmac[i]);
+				return (peer->hmac[i]);
+			}
+		}
+	}
+	/* didn't find one! */
+	return (SCTP_AUTH_HMAC_ID_RSVD);
+}
+
+/*-
+ * serialize the HMAC algo list and return space used
+ * caller must guarantee ptr has appropriate space
+ */
+int
+sctp_serialize_hmaclist(sctp_hmaclist_t *list, uint8_t *ptr)
+{
+	int i;
+	uint16_t hmac_id;
+
+	if (list == NULL)
+		return (0);
+
+	for (i = 0; i < list->num_algo; i++) {
+		hmac_id = htons(list->hmac[i]);
+		bcopy(&hmac_id, ptr, sizeof(hmac_id));
+		ptr += sizeof(hmac_id);
+	}
+	return (list->num_algo * sizeof(hmac_id));
+}
+
+int
+sctp_verify_hmac_param (struct sctp_auth_hmac_algo *hmacs, uint32_t num_hmacs)
+{
+	uint32_t i;
+	uint16_t hmac_id;
+	uint32_t sha1_supported = 0;
+
+	for (i = 0; i < num_hmacs; i++) {
+		hmac_id = ntohs(hmacs->hmac_ids[i]);
+		if (hmac_id == SCTP_AUTH_HMAC_ID_SHA1)
+	 		sha1_supported = 1;
+	}
+	/* all HMAC id's are supported */
+	if (sha1_supported == 0)
+		return (-1);
+	else
+		return (0);
+}
+
+sctp_authinfo_t *
+sctp_alloc_authinfo(void)
+{
+	sctp_authinfo_t *new_authinfo;
+
+	SCTP_MALLOC(new_authinfo, sctp_authinfo_t *, sizeof(*new_authinfo),
+		    SCTP_M_AUTH_IF);
+
+	if (new_authinfo == NULL) {
+		/* out of memory */
+		return (NULL);
+	}
+	bzero(new_authinfo, sizeof(*new_authinfo));
+	return (new_authinfo);
+}
+
/*
 * Release the keys held inside 'authinfo'.  The structure itself is
 * NOT freed -- per the note below it is currently embedded, not
 * dynamically allocated.  Safe to call with NULL.
 */
void
sctp_free_authinfo(sctp_authinfo_t *authinfo)
{
	if (authinfo == NULL)
		return;

	if (authinfo->random != NULL)
		sctp_free_key(authinfo->random);
	if (authinfo->peer_random != NULL)
		sctp_free_key(authinfo->peer_random);
	if (authinfo->assoc_key != NULL)
		sctp_free_key(authinfo->assoc_key);
	if (authinfo->recv_key != NULL)
		sctp_free_key(authinfo->recv_key);

	/* We are NOT dynamically allocating authinfo's right now... */
	/* SCTP_FREE(authinfo, SCTP_M_AUTH_??); */
}
+
+
+uint32_t
+sctp_get_auth_chunk_len(uint16_t hmac_algo)
+{
+	int size;
+
+	size = sizeof(struct sctp_auth_chunk) + sctp_get_hmac_digest_len(hmac_algo);
+	return (SCTP_SIZE32(size));
+}
+
/*
 * Digest length, in bytes, produced by the given HMAC algorithm id.
 * Returns 0 for ids not compiled into this build; callers rely on
 * that to validate algorithm ids.
 */
uint32_t
sctp_get_hmac_digest_len(uint16_t hmac_algo)
{
	switch (hmac_algo) {
	case SCTP_AUTH_HMAC_ID_SHA1:
		return (SCTP_AUTH_DIGEST_LEN_SHA1);
#ifdef HAVE_SHA224
	case SCTP_AUTH_HMAC_ID_SHA224:
		return (SCTP_AUTH_DIGEST_LEN_SHA224);
#endif
#ifdef HAVE_SHA2
	case SCTP_AUTH_HMAC_ID_SHA256:
		return (SCTP_AUTH_DIGEST_LEN_SHA256);
	case SCTP_AUTH_HMAC_ID_SHA384:
		return (SCTP_AUTH_DIGEST_LEN_SHA384);
	case SCTP_AUTH_HMAC_ID_SHA512:
		return (SCTP_AUTH_DIGEST_LEN_SHA512);
#endif
	default:
		/* unknown HMAC algorithm: can't do anything */
		return (0);
	} /* end switch */
}
+
/*
 * Input block size, in bytes, of the underlying hash (64 for
 * SHA-1/224/256, 128 for SHA-384/512), used for the HMAC key padding
 * in sctp_hmac()/sctp_hmac_m().  Returns 0 for unknown ids.
 */
static inline int
sctp_get_hmac_block_len(uint16_t hmac_algo)
{
	switch (hmac_algo) {
	case SCTP_AUTH_HMAC_ID_SHA1:
#ifdef HAVE_SHA224
	case SCTP_AUTH_HMAC_ID_SHA224:
#endif
		return (64);
#ifdef HAVE_SHA2
	case SCTP_AUTH_HMAC_ID_SHA256:
		return (64);
	case SCTP_AUTH_HMAC_ID_SHA384:
	case SCTP_AUTH_HMAC_ID_SHA512:
		return (128);
#endif
	case SCTP_AUTH_HMAC_ID_RSVD:
	default:
		/* unknown HMAC algorithm: can't do anything */
		return (0);
	} /* end switch */
}
+
#if defined(__Userspace__)
/* __Userspace__ SHA1_Init is defined in libcrypto.a (libssl-dev on Ubuntu) */
#endif
/*
 * Initialize the hash context for the given HMAC algorithm id.
 * Unknown ids are silently ignored (callers are expected to have
 * validated the id via sctp_get_hmac_digest_len() first).
 * NOTE(review): the SHA-224 case is an empty stub -- no init call is
 * made, so HAVE_SHA224 builds would hash with an uninitialized
 * context; confirm whether SHA-224 support is actually wired up.
 */
static void
sctp_hmac_init(uint16_t hmac_algo, sctp_hash_context_t *ctx)
{
	switch (hmac_algo) {
	case SCTP_AUTH_HMAC_ID_SHA1:
		SHA1_Init(&ctx->sha1);
		break;
#ifdef HAVE_SHA224
	case SCTP_AUTH_HMAC_ID_SHA224:
		break;
#endif
#ifdef HAVE_SHA2
	case SCTP_AUTH_HMAC_ID_SHA256:
		SHA256_Init(&ctx->sha256);
		break;
	case SCTP_AUTH_HMAC_ID_SHA384:
		SHA384_Init(&ctx->sha384);
		break;
	case SCTP_AUTH_HMAC_ID_SHA512:
		SHA512_Init(&ctx->sha512);
		break;
#endif
	case SCTP_AUTH_HMAC_ID_RSVD:
	default:
		/* unknown HMAC algorithm: can't do anything */
		return;
	} /* end switch */
}
+
/*
 * Feed 'textlen' bytes at 'text' into the running hash in 'ctx'.
 * Unknown algorithm ids are silently ignored, as in sctp_hmac_init().
 * NOTE(review): the SHA-224 case is an empty stub -- data would be
 * silently dropped on HAVE_SHA224 builds; confirm intended.
 */
static void
sctp_hmac_update(uint16_t hmac_algo, sctp_hash_context_t *ctx,
    uint8_t *text, uint32_t textlen)
{
	switch (hmac_algo) {
	case SCTP_AUTH_HMAC_ID_SHA1:
		SHA1_Update(&ctx->sha1, text, textlen);
		break;
#ifdef HAVE_SHA224
	case SCTP_AUTH_HMAC_ID_SHA224:
		break;
#endif
#ifdef HAVE_SHA2
	case SCTP_AUTH_HMAC_ID_SHA256:
		SHA256_Update(&ctx->sha256, text, textlen);
		break;
	case SCTP_AUTH_HMAC_ID_SHA384:
		SHA384_Update(&ctx->sha384, text, textlen);
		break;
	case SCTP_AUTH_HMAC_ID_SHA512:
		SHA512_Update(&ctx->sha512, text, textlen);
		break;
#endif
	case SCTP_AUTH_HMAC_ID_RSVD:
	default:
		/* unknown HMAC algorithm: can't do anything */
		return;
	} /* end switch */
}
+
/*
 * Finalize the hash in 'ctx' and write the digest to 'digest' (the
 * caller must provide space for the algorithm's full digest length).
 * Unknown algorithm ids leave 'digest' untouched.
 * NOTE(review): the SHA-224 case is an empty stub, consistent with
 * sctp_hmac_init()/sctp_hmac_update() above.
 */
static void
sctp_hmac_final(uint16_t hmac_algo, sctp_hash_context_t *ctx,
    uint8_t *digest)
{
	switch (hmac_algo) {
	case SCTP_AUTH_HMAC_ID_SHA1:
		SHA1_Final(digest, &ctx->sha1);
		break;
#ifdef HAVE_SHA224
	case SCTP_AUTH_HMAC_ID_SHA224:
		break;
#endif
#ifdef HAVE_SHA2
	case SCTP_AUTH_HMAC_ID_SHA256:
		SHA256_Final(digest, &ctx->sha256);
		break;
	case SCTP_AUTH_HMAC_ID_SHA384:
		/* SHA384 is truncated SHA512 */
		SHA384_Final(digest, &ctx->sha384);
		break;
	case SCTP_AUTH_HMAC_ID_SHA512:
		SHA512_Final(digest, &ctx->sha512);
		break;
#endif
	case SCTP_AUTH_HMAC_ID_RSVD:
	default:
		/* unknown HMAC algorithm: can't do anything */
		return;
	} /* end switch */
}
+
+/*-
+ * Keyed-Hashing for Message Authentication: FIPS 198 (RFC 2104)
+ *
+ * Compute the HMAC digest using the desired hash key, text, and HMAC
+ * algorithm.  Resulting digest is placed in 'digest' and digest length
+ * is returned, if the HMAC was performed.
+ *
+ * WARNING: it is up to the caller to supply sufficient space to hold the
+ * resultant digest.
+ */
+uint32_t
+sctp_hmac(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+    uint8_t *text, uint32_t textlen, uint8_t *digest)
+{
+	uint32_t digestlen;
+	uint32_t blocklen;
+	sctp_hash_context_t ctx;
+	uint8_t ipad[128], opad[128];	/* keyed hash inner/outer pads */
+	uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+	uint32_t i;
+
+	/* sanity check the material and length */
+	if ((key == NULL) || (keylen == 0) || (text == NULL) ||
+	    (textlen == 0) || (digest == NULL)) {
+		/* can't do HMAC with empty key or text or digest store */
+		return (0);
+	}
+	/* validate the hmac algo and get the digest length */
+	digestlen = sctp_get_hmac_digest_len(hmac_algo);
+	if (digestlen == 0)
+		return (0);
+
+	/* hash the key if it is longer than the hash block size */
+	blocklen = sctp_get_hmac_block_len(hmac_algo);
+	if (keylen > blocklen) {
+		sctp_hmac_init(hmac_algo, &ctx);
+		sctp_hmac_update(hmac_algo, &ctx, key, keylen);
+		sctp_hmac_final(hmac_algo, &ctx, temp);
+		/* set the hashed key as the key */
+		keylen = digestlen;
+		key = temp;
+	}
+	/* initialize the inner/outer pads with the key and "append" zeroes */
+	bzero(ipad, blocklen);
+	bzero(opad, blocklen);
+	bcopy(key, ipad, keylen);
+	bcopy(key, opad, keylen);
+
+	/* XOR the key with ipad and opad values */
+	for (i = 0; i < blocklen; i++) {
+		ipad[i] ^= 0x36;
+		opad[i] ^= 0x5c;
+	}
+
+	/* perform inner hash */
+	sctp_hmac_init(hmac_algo, &ctx);
+	sctp_hmac_update(hmac_algo, &ctx, ipad, blocklen);
+	sctp_hmac_update(hmac_algo, &ctx, text, textlen);
+	sctp_hmac_final(hmac_algo, &ctx, temp);
+
+	/* perform outer hash */
+	sctp_hmac_init(hmac_algo, &ctx);
+	sctp_hmac_update(hmac_algo, &ctx, opad, blocklen);
+	sctp_hmac_update(hmac_algo, &ctx, temp, digestlen);
+	sctp_hmac_final(hmac_algo, &ctx, digest);
+
+	return (digestlen);
+}
+
/* mbuf version */
/*-
 * Compute HMAC(key, text) where the text lives in an mbuf chain
 * starting 'm_offset' bytes into 'm'.  A nonzero 'trailer' excludes
 * that many bytes at the end of the LAST mbuf (e.g. to skip a
 * trailing digest field).  Writes the digest to 'digest' and returns
 * its length, or 0 on any parameter/algorithm error.
 * WARNING: caller supplies the digest buffer (up to
 * SCTP_AUTH_DIGEST_LEN_MAX bytes).
 */
uint32_t
sctp_hmac_m(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
    struct mbuf *m, uint32_t m_offset, uint8_t *digest, uint32_t trailer)
{
	uint32_t digestlen;
	uint32_t blocklen;
	sctp_hash_context_t ctx;
	uint8_t ipad[128], opad[128];	/* keyed hash inner/outer pads */
	uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
	uint32_t i;
	struct mbuf *m_tmp;

	/* sanity check the material and length */
	if ((key == NULL) || (keylen == 0) || (m == NULL) || (digest == NULL)) {
		/* can't do HMAC with empty key or text or digest store */
		return (0);
	}
	/* validate the hmac algo and get the digest length */
	digestlen = sctp_get_hmac_digest_len(hmac_algo);
	if (digestlen == 0)
		return (0);

	/* hash the key if it is longer than the hash block size */
	blocklen = sctp_get_hmac_block_len(hmac_algo);
	if (keylen > blocklen) {
		sctp_hmac_init(hmac_algo, &ctx);
		sctp_hmac_update(hmac_algo, &ctx, key, keylen);
		sctp_hmac_final(hmac_algo, &ctx, temp);
		/* set the hashed key as the key */
		keylen = digestlen;
		key = temp;
	}
	/* initialize the inner/outer pads with the key and "append" zeroes */
	bzero(ipad, blocklen);
	bzero(opad, blocklen);
	bcopy(key, ipad, keylen);
	bcopy(key, opad, keylen);

	/* XOR the key with ipad and opad values */
	for (i = 0; i < blocklen; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}

	/* perform inner hash */
	sctp_hmac_init(hmac_algo, &ctx);
	sctp_hmac_update(hmac_algo, &ctx, ipad, blocklen);
	/* find the correct starting mbuf and offset (get start of text) */
	m_tmp = m;
	while ((m_tmp != NULL) && (m_offset >= (uint32_t) SCTP_BUF_LEN(m_tmp))) {
		m_offset -= SCTP_BUF_LEN(m_tmp);
		m_tmp = SCTP_BUF_NEXT(m_tmp);
	}
	/* now use the rest of the mbuf chain for the text */
	while (m_tmp != NULL) {
		if ((SCTP_BUF_NEXT(m_tmp) == NULL) && trailer) {
			/* last mbuf in the chain: stop 'trailer' bytes early */
			sctp_hmac_update(hmac_algo, &ctx, mtod(m_tmp, uint8_t *) + m_offset,
					 SCTP_BUF_LEN(m_tmp) - (trailer+m_offset));
		} else {
			sctp_hmac_update(hmac_algo, &ctx, mtod(m_tmp, uint8_t *) + m_offset,
					 SCTP_BUF_LEN(m_tmp) - m_offset);
		}

		/* clear the offset since it's only for the first mbuf */
		m_offset = 0;
		m_tmp = SCTP_BUF_NEXT(m_tmp);
	}
	sctp_hmac_final(hmac_algo, &ctx, temp);

	/* perform outer hash */
	sctp_hmac_init(hmac_algo, &ctx);
	sctp_hmac_update(hmac_algo, &ctx, opad, blocklen);
	sctp_hmac_update(hmac_algo, &ctx, temp, digestlen);
	sctp_hmac_final(hmac_algo, &ctx, digest);

	return (digestlen);
}
+
+/*-
+ * verify the HMAC digest using the desired hash key, text, and HMAC
+ * algorithm.
+ * Returns -1 on error, 0 on success.
+ */
+int
+sctp_verify_hmac(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+    uint8_t *text, uint32_t textlen,
+    uint8_t *digest, uint32_t digestlen)
+{
+	uint32_t len;
+	uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+
+	/* sanity check the material and length */
+	if ((key == NULL) || (keylen == 0) ||
+	    (text == NULL) || (textlen == 0) || (digest == NULL)) {
+		/* can't do HMAC with empty key or text or digest */
+		return (-1);
+	}
+	len = sctp_get_hmac_digest_len(hmac_algo);
+	if ((len == 0) || (digestlen != len))
+		return (-1);
+
+	/* compute the expected hash */
+	if (sctp_hmac(hmac_algo, key, keylen, text, textlen, temp) != len)
+		return (-1);
+
+	if (memcmp(digest, temp, digestlen) != 0)
+		return (-1);
+	else
+		return (0);
+}
+
+
+/*
+ * computes the requested HMAC using a key struct (which may be modified if
+ * the keylen exceeds the HMAC block len).
+ */
+uint32_t
+sctp_compute_hmac(uint16_t hmac_algo, sctp_key_t *key, uint8_t *text,
+    uint32_t textlen, uint8_t *digest)
+{
+	uint32_t digestlen;
+	uint32_t blocklen;
+	sctp_hash_context_t ctx;
+	uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+
+	/* sanity check */
+	if ((key == NULL) || (text == NULL) || (textlen == 0) ||
+	    (digest == NULL)) {
+		/* can't do HMAC with empty key or text or digest store */
+		return (0);
+	}
+	/* validate the hmac algo and get the digest length */
+	digestlen = sctp_get_hmac_digest_len(hmac_algo);
+	if (digestlen == 0)
+		return (0);
+
+	/* hash the key if it is longer than the hash block size */
+	blocklen = sctp_get_hmac_block_len(hmac_algo);
+	if (key->keylen > blocklen) {
+		sctp_hmac_init(hmac_algo, &ctx);
+		sctp_hmac_update(hmac_algo, &ctx, key->key, key->keylen);
+		sctp_hmac_final(hmac_algo, &ctx, temp);
+		/* save the hashed key as the new key */
+		key->keylen = digestlen;
+		bcopy(temp, key->key, key->keylen);
+	}
+	return (sctp_hmac(hmac_algo, key->key, key->keylen, text, textlen,
+	    digest));
+}
+
+/* mbuf version */
+uint32_t
+sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t *key, struct mbuf *m,
+    uint32_t m_offset, uint8_t *digest)
+{
+	uint32_t digestlen;
+	uint32_t blocklen;
+	sctp_hash_context_t ctx;
+	uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+
+	/* sanity check */
+	if ((key == NULL) || (m == NULL) || (digest == NULL)) {
+		/* can't do HMAC with empty key or text or digest store */
+		return (0);
+	}
+	/* validate the hmac algo and get the digest length */
+	digestlen = sctp_get_hmac_digest_len(hmac_algo);
+	if (digestlen == 0)
+		return (0);
+
+	/* hash the key if it is longer than the hash block size */
+	blocklen = sctp_get_hmac_block_len(hmac_algo);
+	if (key->keylen > blocklen) {
+		sctp_hmac_init(hmac_algo, &ctx);
+		sctp_hmac_update(hmac_algo, &ctx, key->key, key->keylen);
+		sctp_hmac_final(hmac_algo, &ctx, temp);
+		/* save the hashed key as the new key */
+		key->keylen = digestlen;
+		bcopy(temp, key->key, key->keylen);
+	}
+	return (sctp_hmac_m(hmac_algo, key->key, key->keylen, m, m_offset, digest, 0));
+}
+
+int
+sctp_auth_is_supported_hmac(sctp_hmaclist_t *list, uint16_t id)
+{
+	int i;
+
+	if ((list == NULL) || (id == SCTP_AUTH_HMAC_ID_RSVD))
+		return (0);
+
+	for (i = 0; i < list->num_algo; i++)
+		if (list->hmac[i] == id)
+			return (1);
+
+	/* not in the list */
+	return (0);
+}
+
+
+/*-
+ * clear any cached key(s) if they match the given key id on an association.
+ * the cached key(s) will be recomputed and re-cached at next use.
+ * ASSUMES TCB_LOCK is already held
+ */
+void
+sctp_clear_cachedkeys(struct sctp_tcb *stcb, uint16_t keyid)
+{
+	if (stcb == NULL)
+		return;
+
+	if (keyid == stcb->asoc.authinfo.assoc_keyid) {
+		sctp_free_key(stcb->asoc.authinfo.assoc_key);
+		stcb->asoc.authinfo.assoc_key = NULL;
+	}
+	if (keyid == stcb->asoc.authinfo.recv_keyid) {
+		sctp_free_key(stcb->asoc.authinfo.recv_key);
+		stcb->asoc.authinfo.recv_key = NULL;
+	}
+}
+
+/*-
+ * clear any cached key(s) if they match the given key id for all assocs on
+ * an endpoint.
+ * ASSUMES INP_WLOCK is already held
+ */
+void
+sctp_clear_cachedkeys_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+	struct sctp_tcb *stcb;
+
+	if (inp == NULL)
+		return;
+
+	/* clear the cached keys on all assocs on this instance */
+	LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+		SCTP_TCB_LOCK(stcb);
+		sctp_clear_cachedkeys(stcb, keyid);
+		SCTP_TCB_UNLOCK(stcb);
+	}
+}
+
+/*-
+ * delete a shared key from an association
+ * ASSUMES TCB_LOCK is already held
+ */
+int
+sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
+{
+	sctp_sharedkey_t *skey;
+
+	if (stcb == NULL)
+		return (-1);
+
+	/* is the keyid the assoc active sending key */
+	if (keyid == stcb->asoc.authinfo.active_keyid)
+		return (-1);
+
+	/* does the key exist? */
+	skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+	if (skey == NULL)
+		return (-1);
+
+	/* are there other refcount holders on the key? */
+	if (skey->refcount > 1)
+		return (-1);
+
+	/* remove it */
+	LIST_REMOVE(skey, next);
+	sctp_free_sharedkey(skey);	/* frees skey->key as well */
+
+	/* clear any cached keys */
+	sctp_clear_cachedkeys(stcb, keyid);
+	return (0);
+}
+
+/*-
+ * deletes a shared key from the endpoint
+ * ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+	sctp_sharedkey_t *skey;
+
+	if (inp == NULL)
+		return (-1);
+
+	/* is the keyid the active sending key on the endpoint */
+	if (keyid == inp->sctp_ep.default_keyid)
+		return (-1);
+
+	/* does the key exist? */
+	skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
+	if (skey == NULL)
+		return (-1);
+
+	/* endpoint keys are not refcounted */
+
+	/* remove it */
+	LIST_REMOVE(skey, next);
+	sctp_free_sharedkey(skey);	/* frees skey->key as well */
+
+	/* clear any cached keys */
+	sctp_clear_cachedkeys_ep(inp, keyid);
+	return (0);
+}
+
+/*-
+ * set the active key on an association
+ * ASSUMES TCB_LOCK is already held
+ */
+int
+sctp_auth_setactivekey(struct sctp_tcb *stcb, uint16_t keyid)
+{
+	sctp_sharedkey_t *skey = NULL;
+
+	/* find the key on the assoc */
+	skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+	if (skey == NULL) {
+		/* that key doesn't exist */
+		return (-1);
+	}
+	if ((skey->deactivated) && (skey->refcount > 1)) {
+		/* can't reactivate a deactivated key with other refcounts */
+		return (-1);
+	}
+
+	/* set the (new) active key */
+	stcb->asoc.authinfo.active_keyid = keyid;
+	/* reset the deactivated flag */
+	skey->deactivated = 0;
+
+	return (0);
+}
+
+/*-
+ * set the active key on an endpoint
+ * ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+	sctp_sharedkey_t *skey;
+
+	/* find the key */
+	skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
+	if (skey == NULL) {
+		/* that key doesn't exist */
+		return (-1);
+	}
+	inp->sctp_ep.default_keyid = keyid;
+	return (0);
+}
+
/*-
 * deactivates a shared key from the association
 * ASSUMES INP_WLOCK is already held
 * NOTE(review): this operates on a tcb like sctp_delete_sharedkey(),
 * whose header says "ASSUMES TCB_LOCK" -- confirm which lock is
 * actually required here; the INP_WLOCK wording looks copied from
 * the _ep variant.
 */
int
sctp_deact_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
{
	sctp_sharedkey_t *skey;

	if (stcb == NULL)
		return (-1);

	/* is the keyid the assoc active sending key */
	if (keyid == stcb->asoc.authinfo.active_keyid)
		return (-1);

	/* does the key exist? */
	skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
	if (skey == NULL)
		return (-1);

	/* are there other refcount holders on the key? */
	if (skey->refcount == 1) {
		/* no other users, send a notification for this key */
		sctp_ulp_notify(SCTP_NOTIFY_AUTH_FREE_KEY, stcb, keyid, 0,
				SCTP_SO_LOCKED);
	}

	/* mark the key as deactivated */
	skey->deactivated = 1;

	return (0);
}
+
/*-
 * deactivates a shared key from the endpoint
 * ASSUMES INP_WLOCK is already held
 * NOTE(review): unlike the assoc variant (which only sets the
 * 'deactivated' flag), this one removes and frees the key outright --
 * endpoint keys carry no refcount, so there is nothing to defer.
 */
int
sctp_deact_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
{
	sctp_sharedkey_t *skey;

	if (inp == NULL)
		return (-1);

	/* is the keyid the active sending key on the endpoint */
	if (keyid == inp->sctp_ep.default_keyid)
		return (-1);

	/* does the key exist? */
	skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
	if (skey == NULL)
		return (-1);

	/* endpoint keys are not refcounted */

	/* remove it */
	LIST_REMOVE(skey, next);
	sctp_free_sharedkey(skey);	/* frees skey->key as well */

	return (0);
}
+
/*
 * get local authentication parameters from cookie (from INIT-ACK)
 *
 * Walks the TLV parameter list in mbuf 'm' between 'offset' and
 * 'offset' + 'length', extracting the RANDOM, HMAC-ALGO and
 * CHUNK-LIST parameters, rebuilds the concatenated endpoint key
 * (random || chunks || hmacs), and refreshes the assoc's auth state
 * (negotiated HMAC, active key id, shared key list).
 */
void
sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
    uint32_t offset, uint32_t length)
{
	struct sctp_paramhdr *phdr, tmp_param;
	uint16_t plen, ptype;
	uint8_t random_store[SCTP_PARAM_BUFFER_SIZE];
	struct sctp_auth_random *p_random = NULL;
	uint16_t random_len = 0;
	uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE];
	struct sctp_auth_hmac_algo *hmacs = NULL;
	uint16_t hmacs_len = 0;
	uint8_t chunks_store[SCTP_PARAM_BUFFER_SIZE];
	struct sctp_auth_chunk_list *chunks = NULL;
	uint16_t num_chunks = 0;
	sctp_key_t *new_key;
	uint32_t keylen;

	/* convert to upper bound */
	length += offset;

	phdr = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
	    sizeof(struct sctp_paramhdr), (uint8_t *)&tmp_param);
	while (phdr != NULL) {
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);

		/* stop on a zero-length or truncated parameter */
		if ((plen == 0) || (offset + plen > length))
			break;

		if (ptype == SCTP_RANDOM) {
			if (plen > sizeof(random_store))
				break;
			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)random_store, min(plen, sizeof(random_store)));
			if (phdr == NULL)
				return;
			/* save the random and length for the key */
			p_random = (struct sctp_auth_random *)phdr;
			random_len = plen - sizeof(*p_random);
		} else if (ptype == SCTP_HMAC_LIST) {
			int num_hmacs;
			int i;

			if (plen > sizeof(hmacs_store))
				break;
			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)hmacs_store, min(plen,sizeof(hmacs_store)));
			if (phdr == NULL)
				return;
			/* save the hmacs list and num for the key */
			hmacs = (struct sctp_auth_hmac_algo *)phdr;
			hmacs_len = plen - sizeof(*hmacs);
			num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]);
			/* rebuild the local HMAC list from the cookie */
			if (stcb->asoc.local_hmacs != NULL)
				sctp_free_hmaclist(stcb->asoc.local_hmacs);
			stcb->asoc.local_hmacs = sctp_alloc_hmaclist(num_hmacs);
			if (stcb->asoc.local_hmacs != NULL) {
				for (i = 0; i < num_hmacs; i++) {
					(void)sctp_auth_add_hmacid(stcb->asoc.local_hmacs,
					    ntohs(hmacs->hmac_ids[i]));
				}
			}
		} else if (ptype == SCTP_CHUNK_LIST) {
			int i;

			if (plen > sizeof(chunks_store))
				break;
			phdr = sctp_get_next_param(m, offset,
			    (struct sctp_paramhdr *)chunks_store, min(plen,sizeof(chunks_store)));
			if (phdr == NULL)
				return;
			chunks = (struct sctp_auth_chunk_list *)phdr;
			num_chunks = plen - sizeof(*chunks);
			/* save chunks list and num for the key */
			if (stcb->asoc.local_auth_chunks != NULL)
				sctp_clear_chunklist(stcb->asoc.local_auth_chunks);
			else
				stcb->asoc.local_auth_chunks = sctp_alloc_chunklist();
			for (i = 0; i < num_chunks; i++) {
				(void)sctp_auth_add_chunk(chunks->chunk_types[i],
				    stcb->asoc.local_auth_chunks);
			}
		}
		/* get next parameter */
		offset += SCTP_SIZE32(plen);
		if (offset + sizeof(struct sctp_paramhdr) > length)
			break;
		phdr = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr),
		    (uint8_t *)&tmp_param);
	}
	/* concatenate the full random key */
	/*
	 * NOTE(review): the header sizes are counted even when the
	 * corresponding parameter was absent (p_random/hmacs NULL), so
	 * keylen can over-allocate slightly -- confirm intended.
	 */
	keylen = sizeof(*p_random) + random_len + sizeof(*hmacs) + hmacs_len;
	if (chunks != NULL) {
		keylen += sizeof(*chunks) + num_chunks;
	}
	new_key = sctp_alloc_key(keylen);
	if (new_key != NULL) {
	    /* copy in the RANDOM */
	    if (p_random != NULL) {
		keylen = sizeof(*p_random) + random_len;
		bcopy(p_random, new_key->key, keylen);
	    }
	    /* append in the AUTH chunks */
	    if (chunks != NULL) {
		bcopy(chunks, new_key->key + keylen,
		      sizeof(*chunks) + num_chunks);
		keylen += sizeof(*chunks) + num_chunks;
	    }
	    /* append in the HMACs */
	    if (hmacs != NULL) {
		bcopy(hmacs, new_key->key + keylen,
		      sizeof(*hmacs) + hmacs_len);
	    }
	}
	/* swap in the rebuilt random key and invalidate cached keys */
	if (stcb->asoc.authinfo.random != NULL)
	    sctp_free_key(stcb->asoc.authinfo.random);
	stcb->asoc.authinfo.random = new_key;
	stcb->asoc.authinfo.random_len = random_len;
	sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid);
	sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid);

	/* negotiate what HMAC to use for the peer */
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);

	/* copy defaults from the endpoint */
	/* FIX ME: put in cookie? */
	stcb->asoc.authinfo.active_keyid = stcb->sctp_ep->sctp_ep.default_keyid;
	/* copy out the shared key list (by reference) from the endpoint */
	(void)sctp_copy_skeylist(&stcb->sctp_ep->sctp_ep.shared_keys,
				 &stcb->asoc.shared_keys);
}
+
/*
 * compute and fill in the HMAC digest for a packet
 *
 * Uses the assoc key cached in stcb->asoc.authinfo when its key id
 * matches 'keyid'; otherwise (re)derives the assoc key from the
 * local/peer randoms plus the shared key and caches it.  Writes the
 * key id and digest into the AUTH chunk pointed to by 'auth'; the
 * digest covers the mbuf chain starting at 'auth_offset'.
 */
void
sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
    struct sctp_auth_chunk *auth, struct sctp_tcb *stcb, uint16_t keyid)
{
	uint32_t digestlen;
	sctp_sharedkey_t *skey;
	sctp_key_t *key;

	if ((stcb == NULL) || (auth == NULL))
		return;

	/* zero the digest + chunk padding */
	digestlen = sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
	bzero(auth->hmac, SCTP_SIZE32(digestlen));

	/* is the desired key cached? */
	if ((keyid != stcb->asoc.authinfo.assoc_keyid) ||
	    (stcb->asoc.authinfo.assoc_key == NULL)) {
		if (stcb->asoc.authinfo.assoc_key != NULL) {
			/* free the old cached key */
			sctp_free_key(stcb->asoc.authinfo.assoc_key);
		}
		skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
		/* the only way skey is NULL is if null key id 0 is used */
		if (skey != NULL)
			key = skey->key;
		else
			key = NULL;
		/* compute a new assoc key and cache it */
		stcb->asoc.authinfo.assoc_key =
		    sctp_compute_hashkey(stcb->asoc.authinfo.random,
					 stcb->asoc.authinfo.peer_random, key);
		stcb->asoc.authinfo.assoc_keyid = keyid;
		SCTPDBG(SCTP_DEBUG_AUTH1, "caching key id %u\n",
			stcb->asoc.authinfo.assoc_keyid);
#ifdef SCTP_DEBUG
		if (SCTP_AUTH_DEBUG)
			sctp_print_key(stcb->asoc.authinfo.assoc_key,
				       "Assoc Key");
#endif
	}

	/* set in the active key id */
	auth->shared_key_id = htons(keyid);

	/* compute and fill in the digest */
	(void)sctp_compute_hmac_m(stcb->asoc.peer_hmac_id, stcb->asoc.authinfo.assoc_key,
				  m, auth_offset, auth->hmac);
}
+
+
+static void
+sctp_bzero_m(struct mbuf *m, uint32_t m_offset, uint32_t size)
+{
+	struct mbuf *m_tmp;
+	uint8_t *data;
+
+	/* sanity check */
+	if (m == NULL)
+		return;
+
+	/* find the correct starting mbuf and offset (get start position) */
+	m_tmp = m;
+	while ((m_tmp != NULL) && (m_offset >= (uint32_t) SCTP_BUF_LEN(m_tmp))) {
+		m_offset -= SCTP_BUF_LEN(m_tmp);
+		m_tmp = SCTP_BUF_NEXT(m_tmp);
+	}
+	/* now use the rest of the mbuf chain */
+	while ((m_tmp != NULL) && (size > 0)) {
+		data = mtod(m_tmp, uint8_t *) + m_offset;
+		if (size > (uint32_t) SCTP_BUF_LEN(m_tmp)) {
+			bzero(data, SCTP_BUF_LEN(m_tmp));
+			size -= SCTP_BUF_LEN(m_tmp);
+		} else {
+			bzero(data, size);
+			size = 0;
+		}
+		/* clear the offset since it's only for the first mbuf */
+		m_offset = 0;
+		m_tmp = SCTP_BUF_NEXT(m_tmp);
+	}
+}
+
/*-
 * process the incoming Authentication chunk
 * return codes:
 *   -1 on any authentication error
 *    0 on authentication verification
 *
 * Validates chunk length, HMAC id and key id, derives/caches the
 * receive key if needed, zeroes the digest field in the packet, and
 * recomputes and compares the HMAC.
 */
int
sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth,
    struct mbuf *m, uint32_t offset)
{
	uint16_t chunklen;
	uint16_t shared_key_id;
	uint16_t hmac_id;
	sctp_sharedkey_t *skey;
	uint32_t digestlen;
	uint8_t digest[SCTP_AUTH_DIGEST_LEN_MAX];
	uint8_t computed_digest[SCTP_AUTH_DIGEST_LEN_MAX];

	/* auth is checked for NULL by caller */
	chunklen = ntohs(auth->ch.chunk_length);
	if (chunklen < sizeof(*auth)) {
		SCTP_STAT_INCR(sctps_recvauthfailed);
		return (-1);
	}
	SCTP_STAT_INCR(sctps_recvauth);

	/* get the auth params */
	shared_key_id = ntohs(auth->shared_key_id);
	hmac_id = ntohs(auth->hmac_id);
	SCTPDBG(SCTP_DEBUG_AUTH1,
		"SCTP AUTH Chunk: shared key %u, HMAC id %u\n",
		shared_key_id, hmac_id);

	/* is the indicated HMAC supported? */
	if (!sctp_auth_is_supported_hmac(stcb->asoc.local_hmacs, hmac_id)) {
		struct mbuf *m_err;
		struct sctp_auth_invalid_hmac *err;

		SCTP_STAT_INCR(sctps_recvivalhmacid);
		SCTPDBG(SCTP_DEBUG_AUTH1,
			"SCTP Auth: unsupported HMAC id %u\n",
			hmac_id);
		/*
		 * report this in an Error Chunk: Unsupported HMAC
		 * Identifier
		 */
		m_err = sctp_get_mbuf_for_msg(sizeof(*err), 0, M_DONTWAIT,
					      1, MT_HEADER);
		if (m_err != NULL) {
			/* pre-reserve some space */
			SCTP_BUF_RESV_UF(m_err, sizeof(struct sctp_chunkhdr));
			/* fill in the error */
			err = mtod(m_err, struct sctp_auth_invalid_hmac *);
			bzero(err, sizeof(*err));
			err->ph.param_type = htons(SCTP_CAUSE_UNSUPPORTED_HMACID);
			err->ph.param_length = htons(sizeof(*err));
			/*
			 * NOTE(review): htons() would be the conventional
			 * direction when writing to the wire; ntohs() gives
			 * the identical bit result on all platforms.
			 */
			err->hmac_id = ntohs(hmac_id);
			SCTP_BUF_LEN(m_err) = sizeof(*err);
			/* queue it */
			sctp_queue_op_err(stcb, m_err);
		}
		return (-1);
	}
	/* get the indicated shared key, if available */
	if ((stcb->asoc.authinfo.recv_key == NULL) ||
	    (stcb->asoc.authinfo.recv_keyid != shared_key_id)) {
		/* find the shared key on the assoc first */
		skey = sctp_find_sharedkey(&stcb->asoc.shared_keys,
					   shared_key_id);
		/* if the shared key isn't found, discard the chunk */
		if (skey == NULL) {
			SCTP_STAT_INCR(sctps_recvivalkeyid);
			SCTPDBG(SCTP_DEBUG_AUTH1,
				"SCTP Auth: unknown key id %u\n",
				shared_key_id);
			return (-1);
		}
		/* generate a notification if this is a new key id */
		if (stcb->asoc.authinfo.recv_keyid != shared_key_id)
			/*
			 * sctp_ulp_notify(SCTP_NOTIFY_AUTH_NEW_KEY, stcb,
			 * shared_key_id, (void
			 * *)stcb->asoc.authinfo.recv_keyid);
			 */
			sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY,
			    shared_key_id, stcb->asoc.authinfo.recv_keyid,
			    SCTP_SO_NOT_LOCKED);
		/* compute a new recv assoc key and cache it */
		if (stcb->asoc.authinfo.recv_key != NULL)
			sctp_free_key(stcb->asoc.authinfo.recv_key);
		stcb->asoc.authinfo.recv_key =
		    sctp_compute_hashkey(stcb->asoc.authinfo.random,
		    stcb->asoc.authinfo.peer_random, skey->key);
		stcb->asoc.authinfo.recv_keyid = shared_key_id;
#ifdef SCTP_DEBUG
		if (SCTP_AUTH_DEBUG)
			sctp_print_key(stcb->asoc.authinfo.recv_key, "Recv Key");
#endif
	}
	/* validate the digest length */
	digestlen = sctp_get_hmac_digest_len(hmac_id);
	if (chunklen < (sizeof(*auth) + digestlen)) {
		/* invalid digest length */
		SCTP_STAT_INCR(sctps_recvauthfailed);
		SCTPDBG(SCTP_DEBUG_AUTH1,
			"SCTP Auth: chunk too short for HMAC\n");
		return (-1);
	}
	/* save a copy of the digest, zero the pseudo header, and validate */
	bcopy(auth->hmac, digest, digestlen);
	sctp_bzero_m(m, offset + sizeof(*auth), SCTP_SIZE32(digestlen));
	(void)sctp_compute_hmac_m(hmac_id, stcb->asoc.authinfo.recv_key,
	    m, offset, computed_digest);

	/* compare the computed digest with the one in the AUTH chunk */
	/*
	 * NOTE(review): memcmp() is not constant-time; consider a
	 * timing-safe comparison here as well.
	 */
	if (memcmp(digest, computed_digest, digestlen) != 0) {
		SCTP_STAT_INCR(sctps_recvauthfailed);
		SCTPDBG(SCTP_DEBUG_AUTH1,
			"SCTP Auth: HMAC digest check failed\n");
		return (-1);
	}
	return (0);
}
+
/*
 * Generate NOTIFICATION
 *
 * Builds an SCTP_AUTHENTICATION_EVENT (sctp_authkey_event) carrying
 * 'indication' for key 'keyid' (with 'alt_keyid' as the companion
 * key) and queues it on the association's read queue so the ULP can
 * receive it.  Silently returns if the socket is gone, AUTH events
 * are disabled, or memory is unavailable.
 */
void
sctp_notify_authentication(struct sctp_tcb *stcb, uint32_t indication,
			   uint16_t keyid, uint16_t alt_keyid, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_authkey_event *auth;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	   (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	   (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	   (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
		) {
		/* If the socket is gone we are out of here */
		return;
	}

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_AUTHEVNT))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_authkey_event),
					  0, M_DONTWAIT, 1, MT_HEADER);
	if (m_notify == NULL)
		/* no space left */
		return;

	/* fill in the event payload */
	SCTP_BUF_LEN(m_notify) = 0;
	auth = mtod(m_notify, struct sctp_authkey_event *);
	auth->auth_type = SCTP_AUTHENTICATION_EVENT;
	auth->auth_flags = 0;
	auth->auth_length = sizeof(*auth);
	auth->auth_keynumber = keyid;
	auth->auth_altkeynumber = alt_keyid;
	auth->auth_indication = indication;
	auth->auth_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(*auth);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0, m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
+
+
+/*-
+ * validates the AUTHentication related parameters in an INIT/INIT-ACK
+ * Note: currently only used for INIT as INIT-ACK is handled inline
+ * with sctp_load_addresses_from_init()
+ */
+int
+sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit)
+{
+	struct sctp_paramhdr *phdr, parm_buf;
+	uint16_t ptype, plen;
+	int peer_supports_asconf = 0;
+	int peer_supports_auth = 0;
+	int got_random = 0, got_hmacs = 0, got_chklist = 0;
+	uint8_t saw_asconf = 0;
+	uint8_t saw_asconf_ack = 0;
+
+	/* go through each of the params. */
+	phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
+	while (phdr) {
+		ptype = ntohs(phdr->param_type);
+		plen = ntohs(phdr->param_length);
+
+		if (offset + plen > limit) {
+			break;
+		}
+		if (plen < sizeof(struct sctp_paramhdr)) {
+			break;
+		}
+		if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
+			/* A supported extension chunk */
+			struct sctp_supported_chunk_types_param *pr_supported;
+			uint8_t local_store[SCTP_PARAM_BUFFER_SIZE];
+			int num_ent, i;
+
+			/* num_ent below is derived from plen, so an
+			 * oversized parameter would walk stack memory past
+			 * the min()-bounded copy; reject it (same guard as
+			 * the SCTP_HMAC_LIST branch)
+			 */
+			if (plen > sizeof(local_store)) {
+				break;
+			}
+			phdr = sctp_get_next_param(m, offset,
+			    (struct sctp_paramhdr *)&local_store, min(plen,sizeof(local_store)));
+			if (phdr == NULL) {
+				return (-1);
+			}
+			pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
+			num_ent = plen - sizeof(struct sctp_paramhdr);
+			for (i = 0; i < num_ent; i++) {
+				switch (pr_supported->chunk_types[i]) {
+				case SCTP_ASCONF:
+				case SCTP_ASCONF_ACK:
+					peer_supports_asconf = 1;
+					break;
+				default:
+					/* one we don't care about */
+					break;
+				}
+			}
+		} else if (ptype == SCTP_RANDOM) {
+			got_random = 1;
+			/* enforce the random length */
+			if (plen != (sizeof(struct sctp_auth_random) +
+				     SCTP_AUTH_RANDOM_SIZE_REQUIRED)) {
+				SCTPDBG(SCTP_DEBUG_AUTH1,
+					"SCTP: invalid RANDOM len\n");
+				return (-1);
+			}
+		} else if (ptype == SCTP_HMAC_LIST) {
+			uint8_t store[SCTP_PARAM_BUFFER_SIZE];
+			struct sctp_auth_hmac_algo *hmacs;
+			int num_hmacs;
+
+			if (plen > sizeof(store))
+				break;
+			phdr = sctp_get_next_param(m, offset,
+			    (struct sctp_paramhdr *)store, min(plen,sizeof(store)));
+			if (phdr == NULL)
+				return (-1);
+			hmacs = (struct sctp_auth_hmac_algo *)phdr;
+			num_hmacs = (plen - sizeof(*hmacs)) /
+			    sizeof(hmacs->hmac_ids[0]);
+			/* validate the hmac list */
+			if (sctp_verify_hmac_param(hmacs, num_hmacs)) {
+				SCTPDBG(SCTP_DEBUG_AUTH1,
+					"SCTP: invalid HMAC param\n");
+				return (-1);
+			}
+			got_hmacs = 1;
+		} else if (ptype == SCTP_CHUNK_LIST) {
+			int i, num_chunks;
+			uint8_t chunks_store[SCTP_SMALL_CHUNK_STORE];
+			/* did the peer send a non-empty chunk list? */
+			struct sctp_auth_chunk_list *chunks = NULL;
+
+			/* num_chunks below is derived from plen; guard the
+			 * stack buffer just like the other branches to avoid
+			 * reading past the copied data
+			 */
+			if (plen > sizeof(chunks_store))
+				break;
+			phdr = sctp_get_next_param(m, offset,
+						   (struct sctp_paramhdr *)chunks_store,
+						   min(plen,sizeof(chunks_store)));
+			if (phdr == NULL)
+				return (-1);
+
+			/*-
+			 * Flip through the list and mark that the
+			 * peer supports asconf/asconf_ack.
+			 */
+			chunks = (struct sctp_auth_chunk_list *)phdr;
+			num_chunks = plen - sizeof(*chunks);
+			for (i = 0; i < num_chunks; i++) {
+				/* record asconf/asconf-ack if listed */
+				if (chunks->chunk_types[i] == SCTP_ASCONF)
+					saw_asconf = 1;
+				if (chunks->chunk_types[i] == SCTP_ASCONF_ACK)
+					saw_asconf_ack = 1;
+
+			}
+			if (num_chunks)
+				got_chklist = 1;
+		}
+
+		offset += SCTP_SIZE32(plen);
+		if (offset >= limit) {
+			break;
+		}
+		phdr = sctp_get_next_param(m, offset, &parm_buf,
+		    sizeof(parm_buf));
+	}
+	/* validate authentication required parameters */
+	if (got_random && got_hmacs) {
+		peer_supports_auth = 1;
+	} else {
+		peer_supports_auth = 0;
+	}
+	if (!peer_supports_auth && got_chklist) {
+		SCTPDBG(SCTP_DEBUG_AUTH1,
+			"SCTP: peer sent chunk list w/o AUTH\n");
+		return (-1);
+	}
+	if (!SCTP_BASE_SYSCTL(sctp_asconf_auth_nochk) && peer_supports_asconf &&
+	    !peer_supports_auth) {
+		SCTPDBG(SCTP_DEBUG_AUTH1,
+			"SCTP: peer supports ASCONF but not AUTH\n");
+		return (-1);
+	} else if ((peer_supports_asconf) && (peer_supports_auth) &&
+		   ((saw_asconf == 0) || (saw_asconf_ack == 0))) {
+		return (-2);
+	}
+	return (0);
+}
+
+void
+sctp_initialize_auth_params(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+	uint16_t chunks_len = 0;
+	uint16_t hmacs_len = 0;
+	uint16_t random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
+	sctp_key_t *new_key;
+	uint16_t keylen;
+
+	/*
+	 * Seed the association's AUTH state from the endpoint defaults:
+	 * copy the HMAC list, auth-chunk list, active keyid and shared-key
+	 * list, then build the local "random" key as the concatenation
+	 * RANDOM || CHUNK_LIST || HMAC_LIST (each with its own parameter
+	 * header) and store it in stcb->asoc.authinfo.random.
+	 */
+
+	/* initialize hmac list from endpoint */
+	stcb->asoc.local_hmacs = sctp_copy_hmaclist(inp->sctp_ep.local_hmacs);
+	if (stcb->asoc.local_hmacs != NULL) {
+		hmacs_len = stcb->asoc.local_hmacs->num_algo *
+		    sizeof(stcb->asoc.local_hmacs->hmac[0]);
+	}
+	/* initialize auth chunks list from endpoint */
+	stcb->asoc.local_auth_chunks =
+	    sctp_copy_chunklist(inp->sctp_ep.local_auth_chunks);
+	if (stcb->asoc.local_auth_chunks != NULL) {
+		int i;
+		/* one list byte per chunk type flagged as AUTH-required */
+		for (i = 0; i < 256; i++) {
+			if (stcb->asoc.local_auth_chunks->chunks[i])
+				chunks_len++;
+		}
+	}
+	/* copy defaults from the endpoint */
+	stcb->asoc.authinfo.active_keyid = inp->sctp_ep.default_keyid;
+
+	/* copy out the shared key list (by reference) from the endpoint */
+	(void)sctp_copy_skeylist(&inp->sctp_ep.shared_keys,
+				 &stcb->asoc.shared_keys);
+
+	/* now set the concatenated key (random + chunks + hmacs) */
+	/* key includes parameter headers */
+	keylen = (3 * sizeof(struct sctp_paramhdr)) + random_len + chunks_len +
+	    hmacs_len;
+	new_key = sctp_alloc_key(keylen);
+	if (new_key != NULL) {
+		struct sctp_paramhdr *ph;
+		int plen;
+		/* generate and copy in the RANDOM */
+		ph = (struct sctp_paramhdr *)new_key->key;
+		ph->param_type = htons(SCTP_RANDOM);
+		plen = sizeof(*ph) + random_len;
+		ph->param_length = htons(plen);
+		SCTP_READ_RANDOM(new_key->key + sizeof(*ph), random_len);
+		/* keylen is reused from here on as the write offset */
+		keylen = plen;
+
+		/* append in the AUTH chunks */
+		/* NOTE: currently we always have chunks to list */
+		ph = (struct sctp_paramhdr *)(new_key->key + keylen);
+		ph->param_type = htons(SCTP_CHUNK_LIST);
+		plen = sizeof(*ph) + chunks_len;
+		ph->param_length = htons(plen);
+		keylen += sizeof(*ph);
+		if (stcb->asoc.local_auth_chunks) {
+			int i;
+			/* list each flagged chunk type as one byte */
+			for (i = 0; i < 256; i++) {
+				if (stcb->asoc.local_auth_chunks->chunks[i])
+					new_key->key[keylen++] = i;
+			}
+		}
+
+		/* append in the HMACs */
+		ph = (struct sctp_paramhdr *)(new_key->key + keylen);
+		ph->param_type = htons(SCTP_HMAC_LIST);
+		plen = sizeof(*ph) + hmacs_len;
+		ph->param_length = htons(plen);
+		keylen += sizeof(*ph);
+		(void)sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
+					new_key->key + keylen);
+	}
+	/* NOTE(review): if sctp_alloc_key failed, authinfo.random is set to
+	 * NULL here — confirm downstream key computations handle that.
+	 */
+	if (stcb->asoc.authinfo.random != NULL)
+	    sctp_free_key(stcb->asoc.authinfo.random);
+	stcb->asoc.authinfo.random = new_key;
+	stcb->asoc.authinfo.random_len = random_len;
+}
+
+
+#ifdef SCTP_HMAC_TEST
+/*
+ * HMAC and key concatenation tests
+ */
+static void
+sctp_print_digest(uint8_t *digest, uint32_t digestlen, const char *str)
+{
+	uint32_t pos;
+
+	/* Print "<str>: 0x" followed by the digest bytes as lowercase hex.
+	 * A NULL digest prints just the label and prefix.
+	 */
+	SCTP_PRINTF("\n%s: 0x", str);
+	if (digest == NULL) {
+		return;
+	}
+	for (pos = 0; pos < digestlen; pos++) {
+		SCTP_PRINTF("%02x", digest[pos]);
+	}
+}
+
+static int
+sctp_test_hmac(const char *str, uint16_t hmac_id, uint8_t *key,
+    uint32_t keylen, uint8_t *text, uint32_t textlen,
+    uint8_t *digest, uint32_t digestlen)
+{
+	uint8_t computed_digest[SCTP_AUTH_DIGEST_LEN_MAX];
+	int matches;
+
+	/* Run one HMAC test vector: compute HMAC(key, text), print both the
+	 * expected and computed digests, and report PASSED (0) or FAILED (-1).
+	 */
+	SCTP_PRINTF("\n%s:", str);
+	sctp_hmac(hmac_id, key, keylen, text, textlen, computed_digest);
+	sctp_print_digest(digest, digestlen, "Expected digest");
+	sctp_print_digest(computed_digest, digestlen, "Computed digest");
+	matches = (memcmp(digest, computed_digest, digestlen) == 0);
+	if (matches) {
+		SCTP_PRINTF("\nPASSED");
+	} else {
+		SCTP_PRINTF("\nFAILED");
+	}
+	return (matches ? 0 : -1);
+}
+
+
+/*
+ * RFC 2202: HMAC-SHA1 test cases
+ */
+void
+sctp_test_hmac_sha1(void)
+{
+	uint8_t *digest;
+	uint8_t key[128];
+	uint32_t keylen;
+	uint8_t text[128];
+	uint32_t textlen;
+	uint32_t digestlen = 20;
+	int failed = 0;
+
+	/* Runs the seven HMAC-SHA1 test cases from RFC 2202 section 3 and
+	 * prints a PASSED/FAILED line for each via sctp_test_hmac().
+	 * NOTE(review): string literals are stored via strcpy() into the
+	 * uint8_t buffers and assigned to uint8_t *digest — compilers may
+	 * warn about char/uint8_t signedness here.
+	 */
+
+	/*-
+	 * test_case =     1
+	 * key =           0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
+	 * key_len =       20
+	 * data =          "Hi There"
+	 * data_len =      8
+	 * digest =        0xb617318655057264e28bc0b6fb378c8ef146be00
+	 */
+	keylen = 20;
+	memset(key, 0x0b, keylen);
+	textlen = 8;
+	strcpy(text, "Hi There");
+	digest = "\xb6\x17\x31\x86\x55\x05\x72\x64\xe2\x8b\xc0\xb6\xfb\x37\x8c\x8e\xf1\x46\xbe\x00";
+	if (sctp_test_hmac("SHA1 test case 1", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+	    text, textlen, digest, digestlen) < 0)
+		failed++;
+
+	/*-
+	 * test_case =     2
+	 * key =           "Jefe"
+	 * key_len =       4
+	 * data =          "what do ya want for nothing?"
+	 * data_len =      28
+	 * digest =        0xeffcdf6ae5eb2fa2d27416d5f184df9c259a7c79
+	 */
+	keylen = 4;
+	strcpy(key, "Jefe");
+	textlen = 28;
+	strcpy(text, "what do ya want for nothing?");
+	digest = "\xef\xfc\xdf\x6a\xe5\xeb\x2f\xa2\xd2\x74\x16\xd5\xf1\x84\xdf\x9c\x25\x9a\x7c\x79";
+	if (sctp_test_hmac("SHA1 test case 2", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+	    text, textlen, digest, digestlen) < 0)
+		failed++;
+
+	/*-
+	 * test_case =     3
+	 * key =           0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+	 * key_len =       20
+	 * data =          0xdd repeated 50 times
+	 * data_len =      50
+	 * digest =        0x125d7342b9ac11cd91a39af48aa17b4f63f175d3
+	 */
+	keylen = 20;
+	memset(key, 0xaa, keylen);
+	textlen = 50;
+	memset(text, 0xdd, textlen);
+	digest = "\x12\x5d\x73\x42\xb9\xac\x11\xcd\x91\xa3\x9a\xf4\x8a\xa1\x7b\x4f\x63\xf1\x75\xd3";
+	if (sctp_test_hmac("SHA1 test case 3", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+	    text, textlen, digest, digestlen) < 0)
+		failed++;
+
+	/*-
+	 * test_case =     4
+	 * key =           0x0102030405060708090a0b0c0d0e0f10111213141516171819
+	 * key_len =       25
+	 * data =          0xcd repeated 50 times
+	 * data_len =      50
+	 * digest =        0x4c9007f4026250c6bc8414f9bf50c86c2d7235da
+	 */
+	keylen = 25;
+	memcpy(key, "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19", keylen);
+	textlen = 50;
+	memset(text, 0xcd, textlen);
+	digest = "\x4c\x90\x07\xf4\x02\x62\x50\xc6\xbc\x84\x14\xf9\xbf\x50\xc8\x6c\x2d\x72\x35\xda";
+	if (sctp_test_hmac("SHA1 test case 4", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+	    text, textlen, digest, digestlen) < 0)
+		failed++;
+
+	/*-
+	 * test_case =     5
+	 * key =           0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c
+	 * key_len =       20
+	 * data =          "Test With Truncation"
+	 * data_len =      20
+	 * digest =        0x4c1a03424b55e07fe7f27be1d58bb9324a9a5a04
+	 * digest-96 =     0x4c1a03424b55e07fe7f27be1
+	 */
+	keylen = 20;
+	memset(key, 0x0c, keylen);
+	textlen = 20;
+	strcpy(text, "Test With Truncation");
+	digest = "\x4c\x1a\x03\x42\x4b\x55\xe0\x7f\xe7\xf2\x7b\xe1\xd5\x8b\xb9\x32\x4a\x9a\x5a\x04";
+	if (sctp_test_hmac("SHA1 test case 5", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+	    text, textlen, digest, digestlen) < 0)
+		failed++;
+
+	/*-
+	 * test_case =     6
+	 * key =           0xaa repeated 80 times
+	 * key_len =       80
+	 * data =          "Test Using Larger Than Block-Size Key - Hash Key First"
+	 * data_len =      54
+	 * digest =        0xaa4ae5e15272d00e95705637ce8a3b55ed402112
+	 */
+	keylen = 80;
+	memset(key, 0xaa, keylen);
+	textlen = 54;
+	strcpy(text, "Test Using Larger Than Block-Size Key - Hash Key First");
+	digest = "\xaa\x4a\xe5\xe1\x52\x72\xd0\x0e\x95\x70\x56\x37\xce\x8a\x3b\x55\xed\x40\x21\x12";
+	if (sctp_test_hmac("SHA1 test case 6", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+	    text, textlen, digest, digestlen) < 0)
+		failed++;
+
+	/*-
+	 * test_case =     7
+	 * key =           0xaa repeated 80 times
+	 * key_len =       80
+	 * data =          "Test Using Larger Than Block-Size Key and Larger Than One Block-Size Data"
+	 * data_len =      73
+	 * digest =        0xe8e99d0f45237d786d6bbaa7965c7808bbff1a91
+	 */
+	keylen = 80;
+	memset(key, 0xaa, keylen);
+	textlen = 73;
+	strcpy(text, "Test Using Larger Than Block-Size Key and Larger Than One Block-Size Data");
+	digest = "\xe8\xe9\x9d\x0f\x45\x23\x7d\x78\x6d\x6b\xba\xa7\x96\x5c\x78\x08\xbb\xff\x1a\x91";
+	if (sctp_test_hmac("SHA1 test case 7", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+	    text, textlen, digest, digestlen) < 0)
+		failed++;
+
+	/* done with all tests */
+	if (failed)
+		SCTP_PRINTF("\nSHA1 test results: %d cases failed", failed);
+	else
+		SCTP_PRINTF("\nSHA1 test results: all test cases passed");
+}
+
+/*
+ * test assoc key concatenation
+ */
+static int
+sctp_test_key_concatenation(sctp_key_t *key1, sctp_key_t *key2,
+    sctp_key_t *expected_key)
+{
+	sctp_key_t *key;
+	int ret_val;
+
+	/* Compute the concatenation of key1 and key2 and compare it with
+	 * the expected result; frees all four keys before returning.
+	 * Returns 0 on match, -1 on mismatch (or allocation failure).
+	 */
+	sctp_show_key(key1, "\nkey1");
+	sctp_show_key(key2, "\nkey2");
+	key = sctp_compute_hashkey(key1, key2, NULL);
+	sctp_show_key(expected_key, "\nExpected");
+	sctp_show_key(key, "\nComputed");
+	/* compare the key lengths and contents, not the raw structs: the
+	 * old memcmp over the structs included the keylen field and so
+	 * never compared the last sizeof(keylen) bytes of key data
+	 */
+	if ((key == NULL) ||
+	    (key->keylen != expected_key->keylen) ||
+	    (memcmp(key->key, expected_key->key, expected_key->keylen) != 0)) {
+		SCTP_PRINTF("\nFAILED");
+		ret_val = -1;
+	} else {
+		SCTP_PRINTF("\nPASSED");
+		ret_val = 0;
+	}
+	sctp_free_key(key1);
+	sctp_free_key(key2);
+	sctp_free_key(expected_key);
+	sctp_free_key(key);
+	return (ret_val);
+}
+
+
+void
+sctp_test_authkey(void)
+{
+	/* Key concatenation test vectors: each entry gives key1, key2 and
+	 * the expected concatenated key (lengths passed explicitly since
+	 * the byte strings contain embedded NULs).
+	 */
+	static const struct {
+		const char *key1;
+		uint32_t len1;
+		const char *key2;
+		uint32_t len2;
+		const char *expected;
+		uint32_t len_expected;
+	} test_vectors[] = {
+		/* test case 1 */
+		{ "\x01\x01\x01\x01", 4, "\x01\x02\x03\x04", 4,
+		  "\x01\x01\x01\x01\x01\x02\x03\x04", 8 },
+		/* test case 2 */
+		{ "\x00\x00\x00\x01", 4, "\x02", 1,
+		  "\x00\x00\x00\x01\x02", 5 },
+		/* test case 3 */
+		{ "\x01", 1, "\x00\x00\x00\x02", 4,
+		  "\x01\x00\x00\x00\x02", 5 },
+		/* test case 4 */
+		{ "\x00\x00\x00\x01", 4, "\x01", 1,
+		  "\x01\x00\x00\x00\x01", 5 },
+		/* test case 5 */
+		{ "\x01", 1, "\x00\x00\x00\x01", 4,
+		  "\x01\x00\x00\x00\x01", 5 },
+		/* test case 6 */
+		{ "\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07", 11,
+		  "\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 11,
+		  "\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 22 },
+		/* test case 7 */
+		{ "\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 11,
+		  "\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07", 11,
+		  "\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 22 },
+	};
+	unsigned int tc;
+	int failed = 0;
+
+	for (tc = 0; tc < sizeof(test_vectors) / sizeof(test_vectors[0]); tc++) {
+		sctp_key_t *key1, *key2, *expected_key;
+
+		key1 = sctp_set_key((uint8_t *)test_vectors[tc].key1,
+		    test_vectors[tc].len1);
+		key2 = sctp_set_key((uint8_t *)test_vectors[tc].key2,
+		    test_vectors[tc].len2);
+		expected_key = sctp_set_key((uint8_t *)test_vectors[tc].expected,
+		    test_vectors[tc].len_expected);
+		if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+			failed++;
+	}
+
+	/* done with all tests */
+	if (failed)
+		SCTP_PRINTF("\nKey concatenation test results: %d cases failed", failed);
+	else
+		SCTP_PRINTF("\nKey concatenation test results: all test cases passed");
+}
+
+
+#if defined(STANDALONE_HMAC_TEST)
+int
+main(void)
+{
+	/* standalone driver: run the HMAC-SHA1 and key concatenation tests */
+	sctp_test_hmac_sha1();
+	sctp_test_authkey();
+	/* explicit return: falling off the end of main is only well-defined
+	 * from C99 onward */
+	return (0);
+}
+
+#endif /* STANDALONE_HMAC_TEST */
+
+#endif /* SCTP_HMAC_TEST */
new file mode 100755
--- /dev/null
+++ b/netwerk/sctp/src/netinet/sctp_auth.h
@@ -0,0 +1,222 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.h 235828 2012-05-23 11:26:28Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_AUTH_H_
+#define _NETINET_SCTP_AUTH_H_
+
+
+/* digest lengths */
+#define SCTP_AUTH_DIGEST_LEN_SHA1	20
+#define SCTP_AUTH_DIGEST_LEN_SHA224	28
+#define SCTP_AUTH_DIGEST_LEN_SHA256	32
+#define SCTP_AUTH_DIGEST_LEN_SHA384	48
+#define SCTP_AUTH_DIGEST_LEN_SHA512	64
+#define SCTP_AUTH_DIGEST_LEN_MAX	64
+
+/* random sizes */
+#define SCTP_AUTH_RANDOM_SIZE_DEFAULT	32
+#define SCTP_AUTH_RANDOM_SIZE_REQUIRED	32
+#define SCTP_AUTH_RANDOM_SIZE_MAX	256
+
+/* union of all supported HMAC algorithm contexts */
+typedef union sctp_hash_context {
+	SHA1_CTX sha1;
+#ifdef HAVE_SHA2
+	SHA256_CTX sha256;
+	SHA384_CTX sha384;
+	SHA512_CTX sha512;
+#endif
+} sctp_hash_context_t;
+
+/* variable-length key: keylen bytes of key material follow the header
+ * (flexible array member, so allocations are sizeof(sctp_key_t) + keylen) */
+typedef struct sctp_key {
+	uint32_t keylen;
+	uint8_t key[];
+} sctp_key_t;
+
+/* one entry in an endpoint's/association's shared-key list */
+typedef struct sctp_shared_key {
+	LIST_ENTRY(sctp_shared_key) next;
+	sctp_key_t *key;	/* key text */
+	uint32_t refcount;	/* reference count */
+	uint16_t keyid;		/* shared key ID */
+	uint8_t deactivated;	/* key is deactivated */
+} sctp_sharedkey_t;
+
+/* head type for lists of sctp_shared_key entries */
+LIST_HEAD(sctp_keyhead, sctp_shared_key);
+
+/* authentication chunks list: chunks[type] != 0 means that chunk type
+ * requires authentication */
+typedef struct sctp_auth_chklist {
+	uint8_t chunks[256];
+	uint8_t num_chunks;
+} sctp_auth_chklist_t;
+
+/* hmac algos supported list (flexible array of HMAC identifiers) */
+typedef struct sctp_hmaclist {
+	uint16_t max_algo;	/* max algorithms allocated */
+	uint16_t num_algo;	/* num algorithms used */
+	uint16_t hmac[];
+} sctp_hmaclist_t;
+
+/* authentication info */
+typedef struct sctp_authinformation {
+	sctp_key_t *random;	/* local random key (concatenated) */
+	uint32_t random_len;	/* local random number length for param */
+	sctp_key_t *peer_random;/* peer's random key (concatenated) */
+	sctp_key_t *assoc_key;	/* cached concatenated send key */
+	sctp_key_t *recv_key;	/* cached concatenated recv key */
+	uint16_t active_keyid;	/* active send keyid */
+	uint16_t assoc_keyid;	/* current send keyid (cached) */
+	uint16_t recv_keyid;	/* last recv keyid (cached) */
+} sctp_authinfo_t;
+
+
+
+/*
+ * Macros
+ */
+/* non-zero iff 'chunk' is flagged in the AUTH-required chunk list 'list'
+ * (NULL list means no chunk requires AUTH); arguments are parenthesized so
+ * the macro is safe with expression arguments */
+#define sctp_auth_is_required_chunk(chunk, list) (((list) == NULL) ? (0) : ((list)->chunks[chunk] != 0))
+
+/*
+ * function prototypes
+ */
+
+/* socket option api functions */
+extern sctp_auth_chklist_t *sctp_alloc_chunklist(void);
+extern void sctp_free_chunklist(sctp_auth_chklist_t *chklist);
+extern void sctp_clear_chunklist(sctp_auth_chklist_t *chklist);
+extern sctp_auth_chklist_t *sctp_copy_chunklist(sctp_auth_chklist_t *chklist);
+extern int sctp_auth_add_chunk(uint8_t chunk, sctp_auth_chklist_t *list);
+extern int sctp_auth_delete_chunk(uint8_t chunk, sctp_auth_chklist_t *list);
+extern size_t sctp_auth_get_chklist_size(const sctp_auth_chklist_t *list);
+extern void sctp_auth_set_default_chunks(sctp_auth_chklist_t *list);
+extern int sctp_serialize_auth_chunks(const sctp_auth_chklist_t *list,
+    uint8_t *ptr);
+extern int sctp_pack_auth_chunks(const sctp_auth_chklist_t *list,
+    uint8_t *ptr);
+extern int sctp_unpack_auth_chunks(const uint8_t *ptr, uint8_t num_chunks,
+    sctp_auth_chklist_t *list);
+
+/* key handling */
+extern sctp_key_t *sctp_alloc_key(uint32_t keylen);
+extern void sctp_free_key(sctp_key_t *key);
+extern void sctp_print_key(sctp_key_t *key, const char *str);
+extern void sctp_show_key(sctp_key_t *key, const char *str);
+extern sctp_key_t *sctp_generate_random_key(uint32_t keylen);
+extern sctp_key_t *sctp_set_key(uint8_t *key, uint32_t keylen);
+extern sctp_key_t *sctp_compute_hashkey(sctp_key_t *key1, sctp_key_t *key2,
+    sctp_key_t *shared);
+
+/* shared key handling */
+extern sctp_sharedkey_t *sctp_alloc_sharedkey(void);
+extern void sctp_free_sharedkey(sctp_sharedkey_t *skey);
+extern sctp_sharedkey_t *sctp_find_sharedkey(struct sctp_keyhead *shared_keys,
+    uint16_t key_id);
+extern int sctp_insert_sharedkey(struct sctp_keyhead *shared_keys,
+    sctp_sharedkey_t *new_skey);
+extern int sctp_copy_skeylist(const struct sctp_keyhead *src,
+    struct sctp_keyhead *dest);
+/* ref counts on shared keys, by key id */
+extern void sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t keyid);
+extern void sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t keyid,
+    int so_locked);
+
+
+/* hmac list handling */
+extern sctp_hmaclist_t *sctp_alloc_hmaclist(uint8_t num_hmacs);
+extern void sctp_free_hmaclist(sctp_hmaclist_t *list);
+extern int sctp_auth_add_hmacid(sctp_hmaclist_t *list, uint16_t hmac_id);
+extern sctp_hmaclist_t *sctp_copy_hmaclist(sctp_hmaclist_t *list);
+extern sctp_hmaclist_t *sctp_default_supported_hmaclist(void);
+extern uint16_t sctp_negotiate_hmacid(sctp_hmaclist_t *peer,
+    sctp_hmaclist_t *local);
+extern int sctp_serialize_hmaclist(sctp_hmaclist_t *list, uint8_t *ptr);
+extern int sctp_verify_hmac_param(struct sctp_auth_hmac_algo *hmacs,
+    uint32_t num_hmacs);
+
+extern sctp_authinfo_t *sctp_alloc_authinfo(void);
+extern void sctp_free_authinfo(sctp_authinfo_t *authinfo);
+
+/* keyed-HMAC functions */
+extern uint32_t sctp_get_auth_chunk_len(uint16_t hmac_algo);
+extern uint32_t sctp_get_hmac_digest_len(uint16_t hmac_algo);
+extern uint32_t sctp_hmac(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+    uint8_t *text, uint32_t textlen, uint8_t *digest);
+extern int sctp_verify_hmac(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+    uint8_t *text, uint32_t textlen, uint8_t *digest, uint32_t digestlen);
+extern uint32_t sctp_compute_hmac(uint16_t hmac_algo, sctp_key_t *key,
+    uint8_t *text, uint32_t textlen, uint8_t *digest);
+extern int sctp_auth_is_supported_hmac(sctp_hmaclist_t *list, uint16_t id);
+
+/* mbuf versions */
+extern uint32_t sctp_hmac_m(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+    struct mbuf *m, uint32_t m_offset, uint8_t *digest, uint32_t trailer);
+extern uint32_t sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t *key,
+    struct mbuf *m, uint32_t m_offset, uint8_t *digest);
+
+/*
+ * authentication routines
+ */
+extern void sctp_clear_cachedkeys(struct sctp_tcb *stcb, uint16_t keyid);
+extern void sctp_clear_cachedkeys_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_auth_setactivekey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_deact_sharedkey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_deact_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+
+extern void sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
+    uint32_t offset, uint32_t length);
+extern void sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
+    struct sctp_auth_chunk *auth, struct sctp_tcb *stcb, uint16_t key_id);
+extern struct mbuf *sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
+    struct sctp_auth_chunk **auth_ret, uint32_t *offset,
+    struct sctp_tcb *stcb, uint8_t chunk);
+extern int sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *ch,
+    struct mbuf *m, uint32_t offset);
+extern void sctp_notify_authentication(struct sctp_tcb *stcb,
+    uint32_t indication, uint16_t keyid, uint16_t alt_keyid, int so_locked);
+extern int sctp_validate_init_auth_params(struct mbuf *m, int offset,
+    int limit);
+extern void sctp_initialize_auth_params(struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb);
+
+/* test functions */
+#ifdef SCTP_HMAC_TEST
+extern void sctp_test_hmac_sha1(void);
+extern void sctp_test_authkey(void);
+#endif
+#endif /* _NETINET_SCTP_AUTH_H_ */
new file mode 100755
--- /dev/null
+++ b/netwerk/sctp/src/netinet/sctp_bsd_addr.c
@@ -0,0 +1,1108 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.c 239035 2012-08-04 08:03:30Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_indata.h>
+#if !defined(__Userspace_os_Windows)
+#include <sys/unistd.h>
+#endif
+
+/* Declare all of our malloc named types */
+/*
+ * MALLOC_DEFINE() registers a named allocation bucket with the kernel
+ * malloc statistics framework; the short tag and description appear in
+ * per-type allocation accounting.  Not available on Panda builds.
+ */
+#ifndef __Panda__
+MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
+MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
+MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
+MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
+MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
+MALLOC_DEFINE(SCTP_M_AUTH_CL, "sctp_atcl", "sctp auth chunklist");
+MALLOC_DEFINE(SCTP_M_AUTH_KY, "sctp_atky", "sctp auth key");
+MALLOC_DEFINE(SCTP_M_AUTH_HL, "sctp_athm", "sctp auth hmac list");
+MALLOC_DEFINE(SCTP_M_AUTH_IF, "sctp_athi", "sctp auth info");
+MALLOC_DEFINE(SCTP_M_STRESET, "sctp_stre", "sctp stream reset");
+MALLOC_DEFINE(SCTP_M_CMSG, "sctp_cmsg", "sctp CMSG buffer");
+MALLOC_DEFINE(SCTP_M_COPYAL, "sctp_cpal", "sctp copy all");
+MALLOC_DEFINE(SCTP_M_VRF, "sctp_vrf", "sctp vrf struct");
+MALLOC_DEFINE(SCTP_M_IFA, "sctp_ifa", "sctp ifa struct");
+MALLOC_DEFINE(SCTP_M_IFN, "sctp_ifn", "sctp ifn struct");
+MALLOC_DEFINE(SCTP_M_TIMW, "sctp_timw", "sctp time block");
+MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list");
+MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control");
+MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");
+MALLOC_DEFINE(SCTP_M_MCORE, "sctp_mcore", "sctp mcore queue");
+#endif
+
+/* Global NON-VNET structure that controls the iterator */
+struct iterator_control sctp_it_ctl;
+
+#if !defined(__FreeBSD__)
+/*
+ * Drain the pending iterator queue during iterator-thread shutdown
+ * (non-FreeBSD builds only; on FreeBSD the thread never terminates).
+ * For each queued iterator: invoke its at-end callback if one was
+ * registered, unlink it from the queue, and free it.
+ */
+static void
+sctp_cleanup_itqueue(void)
+{
+	struct sctp_iterator *it, *nit;
+
+	/* _SAFE variant: entries are removed while walking the list. */
+	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
+		if (it->function_atend != NULL) {
+			(*it->function_atend) (it->pointer, it->val);
+		}
+		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
+		SCTP_FREE(it, SCTP_M_ITER);
+	}
+}
+#endif
+#if defined(__Userspace__)
+/*__Userspace__ TODO if we use thread based iterator
+ * then the implementation of wakeup will need to change.
+ * Currently we are using timeo_cond for ident so_timeo
+ * but that is not sufficient if we need to use another ident
+ * like wakeup(&sctppcbinfo.iterator_running);
+ */
+#endif
+
+/*
+ * Wake the iterator worker thread so it re-examines its work queue.
+ * The wakeup primitive depends on the build: a Windows condition
+ * variable, a pthread condition variable, or the kernel wakeup()
+ * channel keyed on iterator_running.
+ */
+void
+sctp_wakeup_iterator(void)
+{
+#if defined(SCTP_PROCESS_LEVEL_LOCKS)
+#if defined(__Userspace_os_Windows)
+	WakeAllConditionVariable(&sctp_it_ctl.iterator_wakeup);
+#else
+	pthread_cond_broadcast(&sctp_it_ctl.iterator_wakeup);
+#endif
+#else
+	wakeup(&sctp_it_ctl.iterator_running);
+#endif
+}
+
+/*
+ * Body of the iterator worker thread.  It sleeps on the iterator wakeup
+ * channel/condvar with the work-queue lock held, and each time it is
+ * woken runs sctp_iterator_worker() to process queued iterators.
+ *
+ * On FreeBSD the loop never exits.  Everywhere else the loop ends when
+ * SCTP_ITERATOR_MUST_EXIT is set; the thread then drains the queue,
+ * advertises SCTP_ITERATOR_EXITED, and terminates itself.
+ * Return type is void* for pthread-based userspace builds, void otherwise.
+ */
+#if defined(__Userspace__)
+static void *
+#else
+static void
+#endif
+sctp_iterator_thread(void *v SCTP_UNUSED)
+{
+	SCTP_IPI_ITERATOR_WQ_LOCK();
+	/* In FreeBSD this thread never terminates. */
+#if defined(__FreeBSD__)
+	for (;;) {
+#else
+	while ((sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) == 0) {
+#endif
+		/* Sleep until new work is signalled; msleep/cond-wait drops
+		 * and re-acquires the work-queue lock around the wait. */
+#if !defined(__Userspace__)
+		msleep(&sctp_it_ctl.iterator_running,
+#if defined(__FreeBSD__)
+		       &sctp_it_ctl.ipi_iterator_wq_mtx,
+#elif defined(__APPLE__) || defined(__Userspace_os_Darwin)
+		       sctp_it_ctl.ipi_iterator_wq_mtx,
+#endif
+		       0, "waiting_for_work", 0);
+#else
+#if defined(__Userspace_os_Windows)
+		SleepConditionVariableCS(&sctp_it_ctl.iterator_wakeup, &sctp_it_ctl.ipi_iterator_wq_mtx, INFINITE);
+#else
+		pthread_cond_wait(&sctp_it_ctl.iterator_wakeup, &sctp_it_ctl.ipi_iterator_wq_mtx);
+#endif
+#endif
+#if !defined(__FreeBSD__)
+		/* Re-check the exit flag: a shutdown request may have been
+		 * the reason for the wakeup. */
+		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
+			break;
+		}
+#endif
+		sctp_iterator_worker();
+	}
+#if !defined(__FreeBSD__)
+	/* Now this thread needs to be terminated */
+	sctp_cleanup_itqueue();
+	sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_EXITED;
+	SCTP_IPI_ITERATOR_WQ_UNLOCK();
+#if defined(__Userspace__)
+	/* Wake anyone waiting on the exit handshake, then self-terminate. */
+	sctp_wakeup_iterator();
+#if !defined(__Userspace_os_Windows)
+	pthread_exit(NULL);
+#else
+	ExitThread(0);
+#endif
+#else
+	wakeup(&sctp_it_ctl.iterator_flags);
+	thread_terminate(current_thread());
+#endif
+#ifdef INVARIANTS
+	panic("Hmm. thread_terminate() continues...");
+#endif
+#if defined(__Userspace__)
+	/* not reached; silences "no return value" warnings */
+	return NULL;
+#endif
+#endif
+}
+
+/*
+ * One-shot initialization of the global iterator machinery: sets up the
+ * control structure, its locks and queue, then spawns the iterator
+ * worker thread using the platform's thread-creation API.  Subsequent
+ * calls are no-ops (guarded by the static 'called' flag — NOTE(review):
+ * the guard is not itself synchronized; presumably callers serialize
+ * startup).
+ */
+void
+sctp_startup_iterator(void)
+{
+	static int called = 0;
+#if defined(__FreeBSD__) || (defined(__Userspace__) && !defined(__Userspace_os_Windows))
+	int ret;
+#endif
+
+	if (called) {
+		/* You only get one */
+		return;
+	}
+	/* init the iterator head */
+	called = 1;
+	sctp_it_ctl.iterator_running = 0;
+	sctp_it_ctl.iterator_flags = 0;
+	sctp_it_ctl.cur_it = NULL;
+	SCTP_ITERATOR_LOCK_INIT();
+	SCTP_IPI_ITERATOR_WQ_INIT();
+	TAILQ_INIT(&sctp_it_ctl.iteratorhead);
+#if defined(__FreeBSD__)
+	/* kthread_create was renamed kproc_create after FreeBSD 7.1 */
+#if __FreeBSD_version <= 701000
+	ret = kthread_create(sctp_iterator_thread,
+#else
+	ret = kproc_create(sctp_iterator_thread,
+#endif
+			   (void *)NULL,
+			   &sctp_it_ctl.thread_proc,
+			   RFPROC,
+			   SCTP_KTHREAD_PAGES,
+			   SCTP_KTRHEAD_NAME);
+#elif defined(__APPLE__)
+        (void)kernel_thread_start((thread_continue_t)sctp_iterator_thread, NULL, &sctp_it_ctl.thread_proc);
+#elif defined(__Userspace__)
+#if defined(__Userspace_os_Windows)
+	if ((sctp_it_ctl.thread_proc = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)&sctp_iterator_thread, NULL, 0, NULL)) == NULL) {
+		SCTP_PRINTF("ERROR; Creating sctp_iterator_thread failed\n");
+	}
+#else
+	if ((ret = pthread_create(&sctp_it_ctl.thread_proc, NULL, &sctp_iterator_thread, NULL))) {
+		SCTP_PRINTF("ERROR; return code from sctp_iterator_thread pthread_create() is %d\n", ret);
+	}
+#endif
+#endif
+}
+
+#ifdef INET6
+
+#if defined(__Userspace__)
+/* __Userspace__ TODO. struct in6_ifaddr is defined in sys/netinet6/in6_var.h
+   ip6_use_deprecated is defined as  int ip6_use_deprecated = 1; in /src/sys/netinet6/in6_proto.c
+ */
+/* Userspace stub: IPv6 interface-address flags are not gathered. */
+void
+sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
+{
+    return; /* stub */
+}
+#else
+/*
+ * Derive SCTP usability flags for an IPv6 interface address from the
+ * kernel's in6_ifaddr flags.  Marks the address unusable when it is
+ * deprecated (and deprecated-address use is disabled system-wide), or
+ * when it is detached, anycast, or not yet ready (e.g. DAD pending).
+ */
+void
+sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
+{
+	struct in6_ifaddr *ifa6;
+
+	ifa6 = (struct in6_ifaddr *)ifa->ifa;
+	ifa->flags = ifa6->ia6_flags;
+	if (!MODULE_GLOBAL(ip6_use_deprecated)) {
+		if (ifa->flags &
+		    IN6_IFF_DEPRECATED) {
+			ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
+		} else {
+			ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
+		}
+	} else {
+		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
+	}
+	if (ifa->flags &
+	    (IN6_IFF_DETACHED |
+	     IN6_IFF_ANYCAST |
+	     IN6_IFF_NOTREADY)) {
+		ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
+	} else {
+		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
+	}
+}
+#endif /* __Userspace__ */
+#endif /* INET6 */
+
+
+#if !defined(__Userspace__)
+/*
+ * Return 1 if the interface's link type is one SCTP is willing to bind
+ * addresses on (ethernet-like, serial, loopback, tunnel, VLAN, ...),
+ * 0 otherwise.  Apple builds query the type via ifnet_type() and omit
+ * a few types not defined in the Darwin headers.
+ */
+static uint32_t
+sctp_is_desired_interface_type(struct ifnet *ifn)
+{
+	int result;
+
+	/* check the interface type to see if it's one we care about */
+#if defined(__APPLE__)
+	switch(ifnet_type(ifn)) {
+#else
+	switch (ifn->if_type) {
+#endif
+	case IFT_ETHER:
+	case IFT_ISO88023:
+	case IFT_ISO88024:
+	case IFT_ISO88025:
+	case IFT_ISO88026:
+	case IFT_STARLAN:
+	case IFT_P10:
+	case IFT_P80:
+	case IFT_HY:
+	case IFT_FDDI:
+	case IFT_XETHER:
+	case IFT_ISDNBASIC:
+	case IFT_ISDNPRIMARY:
+	case IFT_PTPSERIAL:
+	case IFT_OTHER:
+	case IFT_PPP:
+	case IFT_LOOP:
+	case IFT_SLIP:
+	case IFT_GIF:
+	case IFT_L2VLAN:
+	case IFT_STF:
+#if !defined(__APPLE__)
+	case IFT_IP:
+	case IFT_IPOVERCDLC:
+	case IFT_IPOVERCLAW:
+	case IFT_PROPVIRTUAL: /* NetGraph Virtual too */
+	case IFT_VIRTUALIPADDRESS:
+#endif
+		result = 1;
+		break;
+	default:
+		result = 0;
+	}
+
+	return (result);
+}
+#endif
+
+#if defined(__APPLE__)
+/*
+ * Heuristic used on Mac OS X: treat any interface whose name starts
+ * with "vmnet" as a VMware virtual interface, so it can be skipped
+ * when the sctp_ignore_vmware_interfaces sysctl is enabled.
+ */
+int
+sctp_is_vmware_interface(struct ifnet *ifn)
+{
+	return (strncmp(ifnet_name(ifn), "vmnet", 5) == 0);
+}
+#endif
+
+#if defined(__Userspace_os_Windows)
+#ifdef MALLOC
+#undef MALLOC
+#define MALLOC(x) HeapAlloc(GetProcessHeap(), 0, (x))
+#endif
+#ifdef FREE
+#undef FREE
+#define FREE(x) HeapFree(GetProcessHeap(), 0, (x))
+#endif
+/*
+ * Enumerate the host's IPv4/IPv6 adapters via GetAdaptersAddresses()
+ * and register each unicast address of Ethernet/802.11 adapters with
+ * VRF 0 through sctp_add_addr_to_vrf().
+ *
+ * Fixes over the previous revision:
+ *  - the adapter tables are allocated with GlobalAlloc(GPTR, ...), so
+ *    they must be released with GlobalFree(); the FREE() macro above
+ *    maps to HeapFree() and was the wrong deallocator for them;
+ *  - the GetAdaptersAddresses() failure paths no longer leak the
+ *    already-allocated adapter table;
+ *  - the IPv4 branch no longer allocates (and leaked) an ifaddrs entry
+ *    for adapters without any unicast address.
+ */
+static void
+sctp_init_ifns_for_vrf(int vrfid)
+{
+	struct ifaddrs *ifa;
+	struct sctp_ifa *sctp_ifa;
+	DWORD Err, AdapterAddrsSize;
+	PIP_ADAPTER_ADDRESSES pAdapterAddrs, pAdapterAddrs6, pAdapt;
+	PIP_ADAPTER_UNICAST_ADDRESS pUnicast;
+
+#ifdef INET
+	AdapterAddrsSize = 0;
+
+	/* First call only sizes the buffer; a buffer-too-small status is expected. */
+	if ((Err = GetAdaptersAddresses(AF_INET, 0, NULL, NULL, &AdapterAddrsSize)) != 0) {
+		if ((Err != ERROR_BUFFER_OVERFLOW) && (Err != ERROR_INSUFFICIENT_BUFFER)) {
+			SCTP_PRINTF("GetAdaptersV4Addresses() sizing failed with error code %d\n", Err);
+			SCTP_PRINTF("err = %d; AdapterAddrsSize = %d\n", Err, AdapterAddrsSize);
+			return;
+		}
+	}
+	/* Allocate memory from sizing information */
+	if ((pAdapterAddrs = (PIP_ADAPTER_ADDRESSES) GlobalAlloc(GPTR, AdapterAddrsSize)) == NULL) {
+		SCTP_PRINTF("Memory allocation error!\n");
+		return;
+	}
+	/* Get actual adapter information */
+	if ((Err = GetAdaptersAddresses(AF_INET, 0, NULL, pAdapterAddrs, &AdapterAddrsSize)) != ERROR_SUCCESS) {
+		SCTP_PRINTF("GetAdaptersV4Addresses() failed with error code %d\n", Err);
+		GlobalFree(pAdapterAddrs);	/* don't leak the table on failure */
+		return;
+	}
+	/* Enumerate through each returned adapter and save its information */
+	for (pAdapt = pAdapterAddrs; pAdapt; pAdapt = pAdapt->Next) {
+		if (pAdapt->IfType == IF_TYPE_IEEE80211 || pAdapt->IfType == IF_TYPE_ETHERNET_CSMACD) {
+			if (pAdapt->FirstUnicastAddress == NULL) {
+				/* no address to register; avoid allocating an ifaddrs we'd leak */
+				continue;
+			}
+			ifa = (struct ifaddrs*)malloc(sizeof(struct ifaddrs));
+			ifa->ifa_name = strdup(pAdapt->AdapterName);
+			ifa->ifa_flags = pAdapt->Flags;
+			ifa->ifa_addr = (struct sockaddr *)malloc(sizeof(struct sockaddr_in));
+			memcpy(ifa->ifa_addr, pAdapt->FirstUnicastAddress->Address.lpSockaddr, sizeof(struct sockaddr_in));
+			sctp_ifa = sctp_add_addr_to_vrf(0,
+			                                ifa,
+			                                pAdapt->IfIndex,
+			                                (pAdapt->IfType == IF_TYPE_IEEE80211)?MIB_IF_TYPE_ETHERNET:pAdapt->IfType,
+			                                ifa->ifa_name,
+			                                (void *)ifa,
+			                                ifa->ifa_addr,
+			                                ifa->ifa_flags,
+			                                0);
+			if (sctp_ifa) {
+				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+			}
+		}
+	}
+	GlobalFree(pAdapterAddrs);
+#endif
+#ifdef INET6
+	if (SCTP_BASE_VAR(userspace_rawsctp6) != -1) {
+		AdapterAddrsSize = 0;
+
+		if ((Err = GetAdaptersAddresses(AF_INET6, 0, NULL, NULL, &AdapterAddrsSize)) != 0) {
+			if ((Err != ERROR_BUFFER_OVERFLOW) && (Err != ERROR_INSUFFICIENT_BUFFER)) {
+				SCTP_PRINTF("GetAdaptersV6Addresses() sizing failed with error code %d\n", Err);
+				SCTP_PRINTF("err = %d; AdapterAddrsSize = %d\n", Err, AdapterAddrsSize);
+				return;
+			}
+		}
+		/* Allocate memory from sizing information */
+		if ((pAdapterAddrs6 = (PIP_ADAPTER_ADDRESSES) GlobalAlloc(GPTR, AdapterAddrsSize)) == NULL) {
+			SCTP_PRINTF("Memory allocation error!\n");
+			return;
+		}
+		/* Get actual adapter information */
+		if ((Err = GetAdaptersAddresses(AF_INET6, 0, NULL, pAdapterAddrs6, &AdapterAddrsSize)) != ERROR_SUCCESS) {
+			SCTP_PRINTF("GetAdaptersV6Addresses() failed with error code %d\n", Err);
+			GlobalFree(pAdapterAddrs6);	/* don't leak the table on failure */
+			return;
+		}
+		/* Enumerate through each returned adapter and save its information;
+		 * unlike IPv4, every unicast address of the adapter is registered. */
+		for (pAdapt = pAdapterAddrs6; pAdapt; pAdapt = pAdapt->Next) {
+			if (pAdapt->IfType == IF_TYPE_IEEE80211 || pAdapt->IfType == IF_TYPE_ETHERNET_CSMACD) {
+				for (pUnicast = pAdapt->FirstUnicastAddress; pUnicast; pUnicast = pUnicast->Next) {
+					ifa = (struct ifaddrs*)malloc(sizeof(struct ifaddrs));
+					ifa->ifa_name = strdup(pAdapt->AdapterName);
+					ifa->ifa_flags = pAdapt->Flags;
+					ifa->ifa_addr = (struct sockaddr *)malloc(sizeof(struct sockaddr_in6));
+					memcpy(ifa->ifa_addr, pUnicast->Address.lpSockaddr, sizeof(struct sockaddr_in6));
+					sctp_ifa = sctp_add_addr_to_vrf(0,
+					                                ifa,
+					                                pAdapt->Ipv6IfIndex,
+					                                (pAdapt->IfType == IF_TYPE_IEEE80211)?MIB_IF_TYPE_ETHERNET:pAdapt->IfType,
+					                                ifa->ifa_name,
+					                                (void *)ifa,
+					                                ifa->ifa_addr,
+					                                ifa->ifa_flags,
+					                                0);
+					if (sctp_ifa) {
+						sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+					}
+				}
+			}
+		}
+		GlobalFree(pAdapterAddrs6);
+	}
+#endif
+}
+#elif defined(__Userspace__)
+/*
+ * Userspace (non-Windows) variant: walk the getifaddrs() list (kept in
+ * the global g_interfaces so the entries stay alive) and register every
+ * usable inet/inet6 address with the given VRF.  Wildcard addresses
+ * (0.0.0.0 and ::) are skipped.
+ */
+static void
+sctp_init_ifns_for_vrf(int vrfid)
+{
+	/* __Userspace__ TODO struct ifaddr is defined in net/if_var.h
+	 * This struct contains struct ifnet, which is also defined in
+	 * net/if_var.h. Currently a zero byte if_var.h file is present for Linux boxes
+	 */
+	int rc;
+	struct ifaddrs *ifa = NULL;
+	struct sctp_ifa *sctp_ifa;
+	uint32_t ifa_flags;
+
+	rc = getifaddrs(&g_interfaces);
+	if (rc != 0) {
+		return;
+	}
+
+	for (ifa = g_interfaces; ifa; ifa = ifa->ifa_next) {
+		if (ifa->ifa_addr == NULL) {
+			continue;
+		}
+		if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
+			/* non inet/inet6 skip */
+			continue;
+		}
+		if ((ifa->ifa_addr->sa_family == AF_INET6) &&
+		    IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
+			/* skip unspecified addresses */
+			continue;
+		}
+		if (ifa->ifa_addr->sa_family == AF_INET &&
+		    ((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
+			continue;
+		}
+		ifa_flags = 0;
+		sctp_ifa = sctp_add_addr_to_vrf(vrfid,
+		                                ifa,
+		                                if_nametoindex(ifa->ifa_name),
+		                                0,
+		                                ifa->ifa_name,
+		                                (void *)ifa,
+		                                ifa->ifa_addr,
+		                                ifa_flags,
+		                                0);
+		if (sctp_ifa) {
+			sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+		}
+	}
+}
+#endif
+
+#if defined(__APPLE__)
+/*
+ * Mac OS X variant: enumerate interfaces with ifnet_list_get() and
+ * their addresses with ifnet_get_address_list(), registering each
+ * usable inet/inet6 address with the given VRF.  VMware interfaces are
+ * skipped when the corresponding sysctl is set, as are undesired link
+ * types, wildcard addresses, and unspecified IPv6 addresses.
+ */
+static void
+sctp_init_ifns_for_vrf(int vrfid)
+{
+	/* Here we must apply ANY locks needed by the
+	 * IFN we access and also make sure we lock
+	 * any IFA that exists as we float through the
+	 * list of IFA's
+	 */
+	struct ifnet **ifnetlist;
+	uint32_t i, j, count;
+	char name[SCTP_IFNAMSIZ];
+	struct ifnet *ifn;
+	struct ifaddr **ifaddrlist;
+	struct ifaddr *ifa;
+	struct in6_ifaddr *ifa6;
+	struct sctp_ifa *sctp_ifa;
+	uint32_t ifa_flags;
+
+	if (ifnet_list_get(IFNET_FAMILY_ANY, &ifnetlist, &count) != 0) {
+		return;
+	}
+	for (i = 0; i < count; i++) {
+		ifn = ifnetlist[i];
+		if (SCTP_BASE_SYSCTL(sctp_ignore_vmware_interfaces) && sctp_is_vmware_interface(ifn)) {
+			continue;
+		}
+		if (sctp_is_desired_interface_type(ifn) == 0) {
+			/* non desired type */
+			continue;
+		}
+		if (ifnet_get_address_list(ifn, &ifaddrlist) != 0) {
+			continue;
+		}
+		for (j = 0; ifaddrlist[j] != NULL; j++) {
+			ifa = ifaddrlist[j];
+			if (ifa->ifa_addr == NULL) {
+				continue;
+			}
+			if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
+				/* non inet/inet6 skip */
+				continue;
+			}
+			if (ifa->ifa_addr->sa_family == AF_INET6) {
+				if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
+					/* skip unspecified addresses */
+					continue;
+				}
+			} else {
+				if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY) {
+					continue;
+				}
+			}
+			/* IPv6 addresses carry their in6_ifaddr flags (deprecated,
+			 * anycast, ...); IPv4 addresses have none. */
+			if (ifa->ifa_addr->sa_family == AF_INET6) {
+				ifa6 = (struct in6_ifaddr *)ifa;
+				ifa_flags = ifa6->ia6_flags;
+			} else {
+				ifa_flags = 0;
+			}
+			/* Build the canonical "en0"-style name from name + unit. */
+			snprintf(name, SCTP_IFNAMSIZ, "%s%d", ifnet_name(ifn), ifnet_unit(ifn));
+			sctp_ifa = sctp_add_addr_to_vrf(vrfid,
+			                                (void *)ifn,
+			                                ifnet_index(ifn),
+			                                ifnet_type(ifn),
+			                                name,
+			                                (void *)ifa,
+			                                ifa->ifa_addr,
+			                                ifa_flags,
+			                                0);
+			if (sctp_ifa) {
+				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+			}
+		}
+		ifnet_free_address_list(ifaddrlist);
+	}
+	ifnet_list_free(ifnetlist);
+}
+#endif
+
+#if defined(__FreeBSD__)
+/*
+ * FreeBSD variant: walk the global ifnet list under IFNET_RLOCK and
+ * each interface's address list under its per-ifnet lock, registering
+ * every usable inet/inet6 address with the given VRF.  Wildcard and
+ * unspecified addresses, and undesired link types, are skipped.
+ */
+static void
+sctp_init_ifns_for_vrf(int vrfid)
+{
+	/* Here we must apply ANY locks needed by the
+	 * IFN we access and also make sure we lock
+	 * any IFA that exists as we float through the
+	 * list of IFA's
+	 */
+	struct ifnet *ifn;
+	struct ifaddr *ifa;
+	struct sctp_ifa *sctp_ifa;
+	uint32_t ifa_flags;
+#ifdef INET6
+	struct in6_ifaddr *ifa6;
+#endif
+
+	IFNET_RLOCK();
+	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
+		if (sctp_is_desired_interface_type(ifn) == 0) {
+			/* non desired type */
+			continue;
+		}
+		/* The per-ifnet address lock became a read lock in 8.3/9.x. */
+#if (__FreeBSD_version >= 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000
+		IF_ADDR_RLOCK(ifn);
+#else
+		IF_ADDR_LOCK(ifn);
+#endif
+		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+			if (ifa->ifa_addr == NULL) {
+				continue;
+			}
+			/* First switch: filter out unusable addresses. */
+			switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+			case AF_INET:
+				if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
+					continue;
+				}
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
+					/* skip unspecified addresses */
+					continue;
+				}
+				break;
+#endif
+			default:
+				continue;
+			}
+			/* Second switch: pick up per-family address flags. */
+			switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+			case AF_INET:
+				ifa_flags = 0;
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				ifa6 = (struct in6_ifaddr *)ifa;
+				ifa_flags = ifa6->ia6_flags;
+				break;
+#endif
+			default:
+				ifa_flags = 0;
+				break;
+			}
+			sctp_ifa = sctp_add_addr_to_vrf(vrfid,
+			                                (void *)ifn,
+			                                ifn->if_index,
+			                                ifn->if_type,
+			                                ifn->if_xname,
+			                                (void *)ifa,
+			                                ifa->ifa_addr,
+			                                ifa_flags,
+			                                0);
+			if (sctp_ifa) {
+				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+			}
+		}
+#if (__FreeBSD_version >= 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000
+		IF_ADDR_RUNLOCK(ifn);
+#else
+		IF_ADDR_UNLOCK(ifn);
+#endif
+	}
+	IFNET_RUNLOCK();
+}
+#endif
+
+/*
+ * Create the VRF (virtual routing/forwarding) table entry for 'vrfid'
+ * and then populate it with the platform's interfaces and addresses.
+ * Out-of-range VRF ids are silently ignored.
+ */
+void
+sctp_init_vrf_list(int vrfid)
+{
+	if (vrfid > SCTP_MAX_VRF_ID)
+		/* can't do that */
+		return;
+
+	/* Don't care about return here */
+	(void)sctp_allocate_vrf(vrfid);
+
+	/* Now we need to build all the ifn's
+	 * for this vrf and their addresses
+	 */
+	sctp_init_ifns_for_vrf(vrfid);
+}
+
+/*
+ * React to a kernel interface-address change notification.  RTM_ADD
+ * registers the address with the default VRF; RTM_DELETE removes it.
+ * Unusable addresses (wildcards, unspecified IPv6, non-inet families,
+ * undesired interface types) and other commands are ignored.  No-op in
+ * userspace builds, which learn addresses via getifaddrs() instead.
+ */
+void
+sctp_addr_change(struct ifaddr *ifa, int cmd)
+{
+#if defined(__Userspace__)
+        return;
+#else
+	uint32_t ifa_flags = 0;
+	/* BSD only has one VRF, if this changes
+	 * we will need to hook in the right
+	 * things here to get the id to pass to
+	 * the address managment routine.
+	 */
+	if (SCTP_BASE_VAR(first_time) == 0) {
+		/* Special test to see if my ::1 will showup with this */
+		SCTP_BASE_VAR(first_time) = 1;
+		sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
+	}
+
+	if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
+		/* don't know what to do with this */
+		return;
+	}
+
+	if (ifa->ifa_addr == NULL) {
+		return;
+	}
+	if (sctp_is_desired_interface_type(ifa->ifa_ifp) == 0) {
+		/* non desired type */
+		return;
+	}
+	switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+	case AF_INET:
+		if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
+			return;
+		}
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
+		if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
+			/* skip unspecified addresses */
+			return;
+		}
+		break;
+#endif
+	default:
+		/* non inet/inet6 skip */
+		return;
+	}
+	/* Note: the #if below selects the middle arguments of one call;
+	 * Apple uses ifnet_* accessors, other BSDs direct field access. */
+	if (cmd == RTM_ADD) {
+		(void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp,
+#if defined(__APPLE__)
+		                           ifnet_index(ifa->ifa_ifp), ifnet_type(ifa->ifa_ifp), ifnet_name(ifa->ifa_ifp),
+#else
+		                           ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type, ifa->ifa_ifp->if_xname,
+#endif
+		                           (void *)ifa, ifa->ifa_addr, ifa_flags, 1);
+	} else {
+
+		sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
+#if defined(__APPLE__)
+		                       ifnet_index(ifa->ifa_ifp),
+		                       ifnet_name(ifa->ifa_ifp));
+#else
+		                       ifa->ifa_ifp->if_index,
+		                       ifa->ifa_ifp->if_xname);
+#endif
+
+		/* We don't bump refcount here so when it completes
+		 * the final delete will happen.
+		 */
+	}
+#endif
+}
+
+#if defined(__FreeBSD__)
+/*
+ * For every interface matching the caller-supplied predicate, replay an
+ * address-change event (RTM_ADD when 'add' is non-zero, RTM_DELETE
+ * otherwise) for each of its addresses, re-registering or removing them
+ * in bulk.  The ifnet list is walked under IFNET_RLOCK.
+ */
+void
+sctp_add_or_del_interfaces(int (*pred)(struct ifnet *), int add)
+{
+	struct ifnet *ifn;
+	struct ifaddr *ifa;
+
+	IFNET_RLOCK();
+	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
+		if (!(*pred)(ifn)) {
+			continue;
+		}
+		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+			sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE);
+		}
+	}
+	IFNET_RUNLOCK();
+}
+#endif
+#if defined(__APPLE__)
+/*
+ * Mac OS X counterpart of the FreeBSD version above: for every
+ * interface matching the predicate, replay an address-change event
+ * (RTM_ADD or RTM_DELETE) for each of its addresses, using the Darwin
+ * ifnet_list_get()/ifnet_get_address_list() accessor APIs.
+ */
+void
+sctp_add_or_del_interfaces(int (*pred)(struct ifnet *), int add)
+{
+	struct ifnet **ifnetlist;
+	struct ifaddr **ifaddrlist;
+	uint32_t i, j, count;
+
+	if (ifnet_list_get(IFNET_FAMILY_ANY, &ifnetlist, &count) != 0) {
+		return;
+	}
+	for (i = 0; i < count; i++) {
+		if (!(*pred)(ifnetlist[i])) {
+			continue;
+		}
+		if (ifnet_get_address_list(ifnetlist[i], &ifaddrlist) != 0) {
+			continue;
+		}
+		for (j = 0; ifaddrlist[j] != NULL; j++) {
+			sctp_addr_change(ifaddrlist[j], add ? RTM_ADD : RTM_DELETE);
+		}
+		ifnet_free_address_list(ifaddrlist);
+	}
+	ifnet_list_free(ifnetlist);
+	return;
+}
+#endif
+
+/*
+ * Allocate an mbuf (chain) with room for 'space_needed' bytes.
+ * want_header: allocate a packet header mbuf; allonebuf: require the
+ * space to fit in a single buffer (no chain); 'how' is the wait/nowait
+ * allocation policy, 'type' the mbuf type.  Returns NULL on failure.
+ * Three implementations are selected by preprocessor: userspace,
+ * FreeBSD > 6.2 (m_getm2-based), and a legacy cluster-allocating path.
+ */
+struct mbuf *
+sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
+		      int how, int allonebuf, int type)
+{
+    struct mbuf *m = NULL;
+#if defined(__Userspace__)
+
+  /*
+   * __Userspace__
+   * Using m_clget, which creates and mbuf and a cluster and
+   * hooks those together.
+   * TODO: This does not yet have functionality for jumbo packets.
+   *
+   */
+
+	int mbuf_threshold;
+	if (want_header) {
+		MGETHDR(m, how, type);
+	} else {
+		MGET(m, how, type);
+	}
+	if (m == NULL) {
+		return (NULL);
+	}
+	/* threshold = how many plain mbufs we'd chain before switching
+	 * to a cluster; forced to 1 when a single buffer is required */
+	if (allonebuf == 0)
+                mbuf_threshold = SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count);
+	else
+		mbuf_threshold = 1;
+
+
+	if (space_needed > (((mbuf_threshold - 1) * MLEN) + MHLEN)) {
+		MCLGET(m, how);
+		/* NOTE(review): MCLGET does not normally set m to NULL on
+		 * failure; the SCTP_BUF_IS_EXTENDED check below is the real
+		 * cluster-attach failure test. */
+		if (m == NULL) {
+			return (NULL);
+		}
+
+		if (SCTP_BUF_IS_EXTENDED(m) == 0) {
+		  sctp_m_freem(m);
+		  return (NULL);
+		}
+	}
+	SCTP_BUF_LEN(m) = 0;
+	SCTP_BUF_NEXT(m) = SCTP_BUF_NEXT_PKT(m) = NULL;
+
+#if defined(__Userspace__)
+	/* __Userspace__
+	 * Check if anything need to be done to ensure logging works
+	 */
+#endif
+#ifdef SCTP_MBUF_LOGGING
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+		if (SCTP_BUF_IS_EXTENDED(m)) {
+			sctp_log_mb(m, SCTP_MBUF_IALLOC);
+		}
+	}
+#endif
+#elif defined(__FreeBSD__) && __FreeBSD_version > 602000
+	/* m_getm2() builds a chain large enough for space_needed. */
+	m =  m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
+	if (m == NULL) {
+		/* bad, no memory */
+		return (m);
+	}
+	if (allonebuf) {
+		/* single-buffer requirement: verify the first mbuf alone
+		 * can hold space_needed */
+		int siz;
+		if (SCTP_BUF_IS_EXTENDED(m)) {
+			siz = SCTP_BUF_EXTEND_SIZE(m);
+		} else {
+			if (want_header)
+				siz = MHLEN;
+			else
+				siz = MLEN;
+		}
+		if (siz < space_needed) {
+			m_freem(m);
+			return (NULL);
+		}
+	}
+	if (SCTP_BUF_NEXT(m)) {
+		sctp_m_freem( SCTP_BUF_NEXT(m));
+		SCTP_BUF_NEXT(m) = NULL;
+	}
+#ifdef SCTP_MBUF_LOGGING
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+		if (SCTP_BUF_IS_EXTENDED(m)) {
+			sctp_log_mb(m, SCTP_MBUF_IALLOC);
+		}
+	}
+#endif
+#else
+	/* legacy path (older FreeBSD, other kernels) */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 601000
+	int aloc_size;
+	int index = 0;
+#endif
+	int mbuf_threshold;
+	if (want_header) {
+		MGETHDR(m, how, type);
+	} else {
+		MGET(m, how, type);
+	}
+	if (m == NULL) {
+		return (NULL);
+	}
+	if (allonebuf == 0)
+		mbuf_threshold = SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count);
+	else
+		mbuf_threshold = 1;
+
+
+	if (space_needed > (((mbuf_threshold - 1) * MLEN) + MHLEN)) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 601000
+	try_again:
+		/* prefer a page-size jumbo cluster when a plain cluster
+		 * is too small */
+		index = 4;
+		if (space_needed <= MCLBYTES) {
+			aloc_size = MCLBYTES;
+		} else {
+			aloc_size = MJUMPAGESIZE;
+			index = 5;
+		}
+		m_cljget(m, how, aloc_size);
+		if (m == NULL) {
+			return (NULL);
+		}
+		if (SCTP_BUF_IS_EXTENDED(m) == 0) {
+			/* NOTE(review): aloc_size -= 10 only forces the
+			 * retry to pick MCLBYTES; the decremented value is
+			 * recomputed at try_again — confirm intent. */
+			if ((aloc_size != MCLBYTES) &&
+			   (allonebuf == 0)) {
+				aloc_size -= 10;
+				goto try_again;
+			}
+			sctp_m_freem(m);
+			return (NULL);
+		}
+#else
+		MCLGET(m, how);
+		if (m == NULL) {
+			return (NULL);
+		}
+		if (SCTP_BUF_IS_EXTENDED(m) == 0) {
+			sctp_m_freem(m);
+			return (NULL);
+		}
+#endif
+	}
+	SCTP_BUF_LEN(m) = 0;
+	SCTP_BUF_NEXT(m) = SCTP_BUF_NEXT_PKT(m) = NULL;
+#ifdef SCTP_MBUF_LOGGING
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+		if (SCTP_BUF_IS_EXTENDED(m)) {
+			sctp_log_mb(m, SCTP_MBUF_IALLOC);
+		}
+	}
+#endif
+#endif
+	return (m);
+}
+
+
+#ifdef SCTP_PACKET_LOGGING
+/*
+ * Append a copy of packet 'm' to the global circular packet-log buffer.
+ * Writers normally reserve space lock-free with atomic_cmpset_int on
+ * packet_log_end; when the writer count exceeds
+ * SCTP_PKTLOG_WRITERS_NEED_LOCK they fall back to reserving under
+ * SCTP_IP_PKTLOG_LOCK.  Packets too large for the buffer, or smaller
+ * than a minimal SCTP packet, are dropped.
+ */
+void
+sctp_packet_log(struct mbuf *m)
+{
+	int *lenat, thisone;
+	void *copyto;
+	uint32_t *tick_tock;
+	int length;
+	int total_len;
+	int grabbed_lock = 0;
+	int value, newval, thisend, thisbegin;
+	/*
+	 * Buffer layout.
+	 * -sizeof this entry (total_len)
+	 * -previous end      (value)
+	 * -ticks of log      (ticks)
+	 * o -ip packet
+	 * o -as logged
+	 * - where this started (thisbegin)
+	 * x <--end points here
+	 */
+	length = SCTP_HEADER_LEN(m);
+	total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
+	/* Log a packet to the buffer. */
+	if (total_len> SCTP_PACKET_LOG_SIZE) {
+		/* Can't log this packet I have not a buffer big enough */
+		return;
+	}
+	if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
+		return;
+	}
+	/* announce ourselves as an active writer */
+	atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
+ try_again:
+	if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
+		/* contended: reserve our region under the lock */
+		SCTP_IP_PKTLOG_LOCK();
+		grabbed_lock = 1;
+	again_locked:
+		value = SCTP_BASE_VAR(packet_log_end);
+		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
+		if (newval >= SCTP_PACKET_LOG_SIZE) {
+			/* we wrapped */
+			thisbegin = 0;
+			thisend = total_len;
+		} else {
+			thisbegin = SCTP_BASE_VAR(packet_log_end);
+			thisend = newval;
+		}
+		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
+			goto again_locked;
+		}
+	} else {
+		/* uncontended: lock-free CAS reservation */
+		value = SCTP_BASE_VAR(packet_log_end);
+		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
+		if (newval >= SCTP_PACKET_LOG_SIZE) {
+			/* we wrapped */
+			thisbegin = 0;
+			thisend = total_len;
+		} else {
+			thisbegin = SCTP_BASE_VAR(packet_log_end);
+			thisend = newval;
+		}
+		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
+			goto try_again;
+		}
+	}
+	/* Sanity check */
+	if (thisend >= SCTP_PACKET_LOG_SIZE) {
+		SCTP_PRINTF("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
+		            thisbegin,
+		            thisend,
+		            SCTP_BASE_VAR(packet_log_writers),
+		            grabbed_lock,
+		            SCTP_BASE_VAR(packet_log_end));
+		SCTP_BASE_VAR(packet_log_end) = 0;
+		goto no_log;
+
+	}
+	/* fill the entry header: total length, previous end, timestamp */
+	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
+	*lenat = total_len;
+	lenat++;
+	*lenat = value;
+	lenat++;
+	tick_tock = (uint32_t *)lenat;
+	lenat++;
+	*tick_tock = sctp_get_tick_count();
+	copyto = (void *)lenat;
+	/* trailer word records where this entry started */
+	thisone = thisend - sizeof(int);
+	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
+	*lenat = thisbegin;
+	if (grabbed_lock) {
+		SCTP_IP_PKTLOG_UNLOCK();
+		grabbed_lock = 0;
+	}
+	/* NOTE(review): the copy happens after the lock is released; the
+	 * reserved region protects it from other writers. */
+	m_copydata(m, 0, length, (caddr_t)copyto);
+ no_log:
+	if (grabbed_lock) {
+		SCTP_IP_PKTLOG_UNLOCK();
+	}
+	atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
+}
+
+
+/*
+ * Copy the packet-log ring buffer out to 'target' (up to 'length'
+ * bytes).  The first int written is the current end offset of the ring,
+ * followed by the raw buffer contents.  Returns the number of bytes
+ * written, or 0 if 'target' is too small for even the header.
+ */
+int
+sctp_copy_out_packet_log(uint8_t *target, int length)
+{
+	/* We wind through the packet log starting at
+	 * start copying up to length bytes out.
+	 * We return the number of bytes copied.
+	 */
+	int tocopy, this_copy;
+	int *lenat;
+	int did_delay = 0;
+
+	tocopy = length;
+	if (length < (int)(2 * sizeof(int))) {
+		/* not enough room */
+		return (0);
+	}
+	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
+		/* force concurrent writers onto the locked path, then give
+		 * pre-existing lock-free writers a moment to drain */
+		atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
+	again:
+		if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
+			/* we delay here for just a moment hoping the writer(s) that were
+			 * present when we entered will have left and we only have
+			 * locking ones that will contend with us for the lock. This
+			 * does not assure 100% access, but its good enough for
+			 * a logging facility like this.
+			 */
+			did_delay = 1;
+			DELAY(10);
+			goto again;
+		}
+	}
+	SCTP_IP_PKTLOG_LOCK();
+	lenat = (int *)target;
+	*lenat = SCTP_BASE_VAR(packet_log_end);
+	lenat++;
+	this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
+	memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
+	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
+		atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
+				    SCTP_PKTLOG_WRITERS_NEED_LOCK);
+	}
+	SCTP_IP_PKTLOG_UNLOCK();
+	return (this_copy + sizeof(int));
+}
+
+#endif
new file mode 100755
--- /dev/null
+++ b/netwerk/sctp/src/netinet/sctp_bsd_addr.h
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#ifdef __FreeBSD__
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.h 237540 2012-06-24 21:25:54Z tuexen $");
#endif

#ifndef _NETINET_SCTP_BSD_ADDR_H_
#define _NETINET_SCTP_BSD_ADDR_H_

#include <netinet/sctp_pcb.h>

#if defined(_KERNEL) || defined(__Userspace__)

/* Control block for the PCB/address iterator (defined elsewhere). */
extern struct iterator_control sctp_it_ctl;
/* Wake the iterator so it processes pending work. */
void sctp_wakeup_iterator(void);

/* Start the iterator machinery -- presumably spawns the iterator
 * thread on platforms that use one; confirm in sctp_bsd_addr.c. */
void sctp_startup_iterator(void);


#ifdef INET6
/* Gather kernel interface-address flags into the sctp_ifa (IPv6 builds). */
void sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa);
#endif

#ifdef  SCTP_PACKET_LOGGING

/* Append mbuf chain 'm' to the circular packet log. */
void sctp_packet_log(struct mbuf *m);
/* Copy up to 'length' bytes of the packet log (preceded by the current
 * end index as an int) into 'target'; returns bytes copied. */
int sctp_copy_out_packet_log(uint8_t *target, int length);

#endif

#if !defined(__Panda__)
/* Callback for interface address add/delete events ('cmd'). */
void sctp_addr_change(struct ifaddr *ifa, int cmd);
#endif

/* Add (add != 0) or delete interfaces for which 'pred' returns true. */
void sctp_add_or_del_interfaces(int (*pred)(struct ifnet *), int add);

#endif
#endif
new file mode 100755
--- /dev/null
+++ b/netwerk/sctp/src/netinet/sctp_callout.c
@@ -0,0 +1,164 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_callout.h>
+#include <netinet/sctp_pcb.h>
+
+/*
+ * Callout/Timer routines for OS that doesn't have them
+ */
+#if defined(__APPLE__) || defined(__Userspace__)
+int ticks = 0;
+#else
+extern int ticks;
+#endif
+
+/*
+ * SCTP_TIMERQ_LOCK protects:
+ * - SCTP_BASE_INFO(callqueue)
+ * - sctp_os_timer_current: current timer in process
+ * - sctp_os_timer_next: next timer to check
+ */
+static sctp_os_timer_t *sctp_os_timer_current = NULL;
+static sctp_os_timer_t *sctp_os_timer_next = NULL;
+
+void
+sctp_os_timer_init(sctp_os_timer_t *c)
+{
+	bzero(c, sizeof(*c));
+}
+
+void
+sctp_os_timer_start(sctp_os_timer_t *c, int to_ticks, void (*ftn) (void *),
+                    void *arg)
+{
+	/* paranoia */
+	if ((c == NULL) || (ftn == NULL))
+	    return;
+
+	SCTP_TIMERQ_LOCK();
+	/* check to see if we're rescheduling a timer */
+	if (c->c_flags & SCTP_CALLOUT_PENDING) {
+		if (c == sctp_os_timer_next) {
+			sctp_os_timer_next = TAILQ_NEXT(c, tqe);
+		}
+		TAILQ_REMOVE(&SCTP_BASE_INFO(callqueue), c, tqe);
+		/*
+		 * part of the normal "stop a pending callout" process
+		 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
+		 * flags.  We don't bother since we are setting these
+		 * below and we still hold the lock.
+		 */
+	}
+
+	/*
+	 * We could unlock/splx here and lock/spl at the TAILQ_INSERT_TAIL,
+	 * but there's no point since doing this setup doesn't take much time.
+	 */
+	if (to_ticks <= 0)
+		to_ticks = 1;
+
+	c->c_arg = arg;
+	c->c_flags = (SCTP_CALLOUT_ACTIVE | SCTP_CALLOUT_PENDING);
+	c->c_func = ftn;
+	c->c_time = ticks + to_ticks;
+	TAILQ_INSERT_TAIL(&SCTP_BASE_INFO(callqueue), c, tqe);
+	SCTP_TIMERQ_UNLOCK();
+}
+
+int
+sctp_os_timer_stop(sctp_os_timer_t *c)
+{
+	SCTP_TIMERQ_LOCK();
+	/*
+	 * Don't attempt to delete a callout that's not on the queue.
+	 */
+	if (!(c->c_flags & SCTP_CALLOUT_PENDING)) {
+		c->c_flags &= ~SCTP_CALLOUT_ACTIVE;
+		SCTP_TIMERQ_UNLOCK();
+		return (0);
+	}
+	c->c_flags &= ~(SCTP_CALLOUT_ACTIVE | SCTP_CALLOUT_PENDING);
+	if (c == sctp_os_timer_next) {
+		sctp_os_timer_next = TAILQ_NEXT(c, tqe);
+	}
+	TAILQ_REMOVE(&SCTP_BASE_INFO(callqueue), c, tqe);
+	SCTP_TIMERQ_UNLOCK();
+	return (1);
+}
+
#if defined(__APPLE__)
/*
 * For __APPLE__, use a single main timer at a faster resolution than
 * fastim.  The timer just calls this existing callout infrastructure.
 */
#endif
/*
 * Dispatch all expired callouts on the timer queue.  Runs each expired
 * entry's callback with the timer-queue lock dropped, using the module
 * level cursor sctp_os_timer_next so that start/stop calls made from
 * inside a callback can safely unlink the entry we would visit next.
 */
void
sctp_timeout(void *arg SCTP_UNUSED)
{
	sctp_os_timer_t *c;
	void (*c_func)(void *);
	void *c_arg;

#if defined(__APPLE__)
	/* update our tick count */
	ticks += SCTP_BASE_VAR(sctp_main_timer_ticks);
#endif

	SCTP_TIMERQ_LOCK();
	c = TAILQ_FIRST(&SCTP_BASE_INFO(callqueue));
	while (c) {
		if (c->c_time <= ticks) {
			/* NOTE(review): c_time/ticks are plain ints, so this
			 * comparison does not account for tick wraparound --
			 * confirm that is acceptable for the deployment. */
			sctp_os_timer_next = TAILQ_NEXT(c, tqe);
			TAILQ_REMOVE(&SCTP_BASE_INFO(callqueue), c, tqe);
			c_func = c->c_func;
			c_arg = c->c_arg;
			c->c_flags &= ~SCTP_CALLOUT_PENDING;
			sctp_os_timer_current = c;
			/* Drop the lock across the callback so it may
			 * (re)schedule or stop timers without deadlocking. */
			SCTP_TIMERQ_UNLOCK();
			c_func(c_arg);
			SCTP_TIMERQ_LOCK();
			sctp_os_timer_current = NULL;
			/* Resume from the (possibly fixed-up) cursor. */
			c = sctp_os_timer_next;
		} else {
			c = TAILQ_NEXT(c, tqe);
		}
	}
	sctp_os_timer_next = NULL;
	SCTP_TIMERQ_UNLOCK();

#if defined(__APPLE__)
	/* restart the main timer */
	sctp_start_main_timer();
#endif
}
new file mode 100755
--- /dev/null
+++ b/netwerk/sctp/src/netinet/sctp_callout.h
@@ -0,0 +1,99 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
#ifdef __FreeBSD__
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#endif

#ifndef _NETINET_SCTP_CALLOUT_
#define _NETINET_SCTP_CALLOUT_

/*
 * NOTE: the following MACROS are required for locking the callout
 * queue along with a lock/mutex in the OS specific headers and
 * implementation files::
 * - SCTP_TIMERQ_LOCK()
 * - SCTP_TIMERQ_UNLOCK()
 * - SCTP_TIMERQ_LOCK_INIT()
 * - SCTP_TIMERQ_LOCK_DESTROY()
 */

#define _SCTP_NEEDS_CALLOUT_ 1

#define SCTP_TICKS_PER_FASTTIMO 20	/* called about every 20ms */

#if defined(__Userspace__)
#if defined(__Userspace_os_Windows)
/* Windows userland: guard the timer queue with a critical section. */
#define SCTP_TIMERQ_LOCK()          EnterCriticalSection(&SCTP_BASE_VAR(timer_mtx))
#define SCTP_TIMERQ_UNLOCK()        LeaveCriticalSection(&SCTP_BASE_VAR(timer_mtx))
#define SCTP_TIMERQ_LOCK_INIT()     InitializeCriticalSection(&SCTP_BASE_VAR(timer_mtx))
#define SCTP_TIMERQ_LOCK_DESTROY()  DeleteCriticalSection(&SCTP_BASE_VAR(timer_mtx))
#else
/* POSIX userland: guard the timer queue with a pthread mutex. */
#define SCTP_TIMERQ_LOCK()          (void)pthread_mutex_lock(&SCTP_BASE_VAR(timer_mtx))
#define SCTP_TIMERQ_UNLOCK()        (void)pthread_mutex_unlock(&SCTP_BASE_VAR(timer_mtx))
#define SCTP_TIMERQ_LOCK_INIT()     (void)pthread_mutex_init(&SCTP_BASE_VAR(timer_mtx), NULL)
#define SCTP_TIMERQ_LOCK_DESTROY()  (void)pthread_mutex_destroy(&SCTP_BASE_VAR(timer_mtx))
#endif

/* Tick counter maintained by the userland timer (see sctp_callout.c). */
extern int ticks;
/* NOTE(review): old-style declaration without a prototype; should
 * probably be sctp_start_timer(void) -- confirm against callers. */
extern void sctp_start_timer();
#endif

TAILQ_HEAD(calloutlist, sctp_callout);

/* One pending/active timer entry on the global callout queue. */
struct sctp_callout {
	TAILQ_ENTRY(sctp_callout) tqe;
	int c_time;		/* ticks to the event */
	void *c_arg;		/* function argument */
	void (*c_func)(void *);	/* function to call */
	int c_flags;		/* state of this entry */
};
typedef struct sctp_callout sctp_os_timer_t;

#define	SCTP_CALLOUT_ACTIVE	0x0002	/* callout is currently active */
#define	SCTP_CALLOUT_PENDING	0x0004	/* callout is waiting for timeout */

void sctp_os_timer_init(sctp_os_timer_t *tmr);
void sctp_os_timer_start(sctp_os_timer_t *, int, void (*)(void *), void *);
int sctp_os_timer_stop(sctp_os_timer_t *);

#define SCTP_OS_TIMER_INIT	sctp_os_timer_init
#define SCTP_OS_TIMER_START	sctp_os_timer_start
#define SCTP_OS_TIMER_STOP	sctp_os_timer_stop
/* MT FIXME: Is the following correct? */
#define SCTP_OS_TIMER_STOP_DRAIN SCTP_OS_TIMER_STOP
#define	SCTP_OS_TIMER_PENDING(tmr) ((tmr)->c_flags & SCTP_CALLOUT_PENDING)
#define	SCTP_OS_TIMER_ACTIVE(tmr) ((tmr)->c_flags & SCTP_CALLOUT_ACTIVE)
#define	SCTP_OS_TIMER_DEACTIVATE(tmr) ((tmr)->c_flags &= ~SCTP_CALLOUT_ACTIVE)

/* Dispatch expired callouts; see sctp_callout.c. */
void sctp_timeout(void *);

#endif
new file mode 100755
--- /dev/null
+++ b/netwerk/sctp/src/netinet/sctp_cc_functions.c
@@ -0,0 +1,2483 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_cc_functions.c 240158 2012-09-06 07:03:56Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_asconf.h>
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+#include <netinet/sctp_dtrace_declare.h>
+#endif
+
+#define SHIFT_MPTCP_MULTI_N 40
+#define SHIFT_MPTCP_MULTI_Z 16
+#define SHIFT_MPTCP_MULTI 8
+
+static void
+sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	struct sctp_association *assoc;
+	uint32_t cwnd_in_mtu;
+
+	assoc = &stcb->asoc;
+	cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
+	if (cwnd_in_mtu == 0) {
+		/* Using 0 means that the value of RFC 4960 is used. */
+		net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
+	} else {
+		/*
+		 * We take the minimum of the burst limit and the
+		 * initial congestion window.
+		 */
+		if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
+			cwnd_in_mtu = assoc->max_burst;
+		net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
+	}
+	if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
+	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
+		/* In case of resource pooling initialize appropriately */
+		net->cwnd /= assoc->numnets;
+		if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
+			net->cwnd = net->mtu - sizeof(struct sctphdr);
+		}
+	}
+	net->ssthresh = assoc->peers_rwnd;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	SDT_PROBE(sctp, cwnd, net, init,
+	          stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
+	          0, net->cwnd);
+#endif
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) &
+	    (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
+		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
+	}
+}
+
/*
 * Apply the fast-retransmit cwnd/ssthresh reduction (RFC 4960 section
 * 7.2.3) to every destination that saw a fast retransmit, and open the
 * fast-recovery window.  For CMT resource-pooling variants (RPV1/RPV2)
 * the new ssthresh is scaled by association-wide aggregates.
 */
static void
sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
                          struct sctp_association *asoc)
{
	struct sctp_nets *net;
	uint32_t t_ssthresh, t_cwnd;
	uint64_t t_ucwnd_sbw;

	/* MT FIXME: Don't compute this over and over again */
	t_ssthresh = 0;
	t_cwnd = 0;
	t_ucwnd_sbw = 0;
	if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
	    (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
		/* Association-wide sums used by the RPV scaling below. */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			t_ssthresh += net->ssthresh;
			t_cwnd += net->cwnd;
			if (net->lastsa > 0) {
				t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)net->lastsa;
			}
		}
		/* Avoid dividing by zero below. */
		if (t_ucwnd_sbw == 0) {
			t_ucwnd_sbw = 1;
		}
	}

	/*-
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of a RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * per section 7.2.3, are there any
				 * destinations that had a fast retransmit
				 * to them. If so what we need to do is
				 * adjust ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;
				int old_cwnd = net->cwnd;

				if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
				    (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
					if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) {
						/* RPV1: scale by this path's share
						 * of the total ssthresh. */
						net->ssthresh = (uint32_t)(((uint64_t)4 *
					                                    (uint64_t)net->mtu *
					                                    (uint64_t)net->ssthresh) /
						                           (uint64_t)t_ssthresh);

					}
					if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2) {
						uint32_t srtt;

						srtt = net->lastsa;
						/* lastsa>>3;  we don't need to devide ...*/
						if (srtt == 0) {
							srtt = 1;
						}
						/* Short Version => Equal to Contel Version MBe */
						net->ssthresh = (uint32_t) (((uint64_t)4 *
						                             (uint64_t)net->mtu *
						                             (uint64_t)net->cwnd) /
						                            ((uint64_t)srtt *
						                             t_ucwnd_sbw));
									     /* INCREASE FACTOR */;
					}
					/* Clamp: ssthresh between one MTU and this
					 * path's "fair" excess over half the total. */
					if ((net->cwnd > t_cwnd / 2) &&
					    (net->ssthresh < net->cwnd - t_cwnd / 2)) {
						net->ssthresh = net->cwnd - t_cwnd / 2;
					}
					if (net->ssthresh < net->mtu) {
						net->ssthresh = net->mtu;
					}
				} else {
					/* Plain RFC 4960: halve, floor at 2 MTU. */
					net->ssthresh = net->cwnd / 2;
					if (net->ssthresh < (net->mtu * 2)) {
						net->ssthresh = 2 * net->mtu;
					}
				}
				net->cwnd = net->ssthresh;
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
				SDT_PROBE(sctp, cwnd, net, fr,
					  stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
					  old_cwnd, net->cwnd);
#endif
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
						SCTP_CWND_LOG_FROM_FR);
				}
				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/* Restart the retransmission timer for this path. */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
						stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA+SCTP_LOC_32 );
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
						 stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}
+
+/* Defines for instantaneous bw decisions */
+#define SCTP_INST_LOOSING 1 /* Loosing to other flows */
+#define SCTP_INST_NEUTRAL 2 /* Neutral, no indication */
+#define SCTP_INST_GAINING 3 /* Gaining, step down possible */
+
+
/*
 * RTCC helper for the "bandwidth unchanged" case.  Returns 1 to freeze
 * cwnd (skip the normal CC update) or 0 to allow it; see the decision
 * table in cc_bw_limit().  On FreeBSD >= 803000 it also fires dtrace
 * probes, hence the extra vtag parameter in that signature.
 */
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
static int
cc_bw_same(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw,
	   uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind)
#else
static int
cc_bw_same(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw,
	   uint64_t rtt_offset, uint8_t inst_ind)
#endif
{
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
	uint64_t oth, probepoint;
#endif

#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
	probepoint = (((uint64_t)net->cwnd) << 32);
#endif
	if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
		/*
		 * rtt increased
		 * we don't update bw.. so we don't
		 * update the rtt either.
		 */
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
		/* Probe point 5 */
		probepoint |=  ((5 << 16) | 1);
		SDT_PROBE(sctp, cwnd, net, rttvar,
			  vtag,
			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
			  net->flight_size,
			  probepoint);
#endif
		if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
			/* Count consecutive visits to step-state 5; every
			 * steady_step-th visit we try a voluntary step down. */
			if (net->cc_mod.rtcc.last_step_state == 5)
				net->cc_mod.rtcc.step_cnt++;
			else
				net->cc_mod.rtcc.step_cnt = 1;
			net->cc_mod.rtcc.last_step_state = 5;
			if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
			    ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
			     ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
				/* Try a step down */
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
				oth = net->cc_mod.rtcc.vol_reduce;
				oth <<= 16;
				oth |= net->cc_mod.rtcc.step_cnt;
				oth <<= 16;
				oth |= net->cc_mod.rtcc.last_step_state;
				SDT_PROBE(sctp, cwnd, net, rttstep,
					  vtag,
					  ((net->cc_mod.rtcc.lbw << 32) | nbw),
					  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
					  oth,
					  probepoint);
#endif
				/* Give back one MTU, but never shrink below 4 MTU. */
				if (net->cwnd > (4 * net->mtu)) {
					net->cwnd -= net->mtu;
					net->cc_mod.rtcc.vol_reduce++;
				} else {
					net->cc_mod.rtcc.step_cnt = 0;
				}
			}
		}
		return (1);
	}
	if (net->rtt  < net->cc_mod.rtcc.lbw_rtt-rtt_offset) {
		/*
		 * rtt decreased, there could be more room.
		 * we update both the bw and the rtt here to
		 * lock this in as a good step down.
		 */
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
		/* Probe point 6 */
		probepoint |=  ((6 << 16) | 0);
		SDT_PROBE(sctp, cwnd, net, rttvar,
			  vtag,
			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
			  net->flight_size,
			  probepoint);
#endif
		if (net->cc_mod.rtcc.steady_step) {
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
			oth = net->cc_mod.rtcc.vol_reduce;
			oth <<= 16;
			oth |= net->cc_mod.rtcc.step_cnt;
			oth <<= 16;
			oth |= net->cc_mod.rtcc.last_step_state;
			SDT_PROBE(sctp, cwnd, net, rttstep,
				  vtag,
				  ((net->cc_mod.rtcc.lbw << 32) | nbw),
				  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
				  oth,
				  probepoint);
#endif
			if ((net->cc_mod.rtcc.last_step_state == 5) &&
			    (net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step)) {
				/* Step down worked */
				net->cc_mod.rtcc.step_cnt = 0;
				return (1);
			} else {
				net->cc_mod.rtcc.last_step_state = 6;
				net->cc_mod.rtcc.step_cnt = 0;
			}
		}
		/* Record this measurement as the new baseline. */
		net->cc_mod.rtcc.lbw = nbw;
		net->cc_mod.rtcc.lbw_rtt = net->rtt;
		net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
		if (inst_ind == SCTP_INST_GAINING)
			return (1);
		else if (inst_ind == SCTP_INST_NEUTRAL)
			return (1);
		else
			return (0);
	}
	/* Ok bw and rtt remained the same .. no update to any
	 */
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
	/* Probe point 7 */
	probepoint |=  ((7 << 16) | net->cc_mod.rtcc.ret_from_eq);
	SDT_PROBE(sctp, cwnd, net, rttvar,
		  vtag,
		  ((net->cc_mod.rtcc.lbw << 32) | nbw),
		  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
		  net->flight_size,
		  probepoint);
#endif
	if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
		/* Same consecutive-visit bookkeeping as state 5 above. */
		if (net->cc_mod.rtcc.last_step_state == 5)
			net->cc_mod.rtcc.step_cnt++;
		else
			net->cc_mod.rtcc.step_cnt = 1;
		net->cc_mod.rtcc.last_step_state = 5;
		if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
		    ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
		     ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
			/* Try a step down */
			if (net->cwnd > (4 * net->mtu)) {
				net->cwnd -= net->mtu;
				net->cc_mod.rtcc.vol_reduce++;
				return (1);
			} else {
				net->cc_mod.rtcc.step_cnt = 0;
			}
		}
	}
	if (inst_ind == SCTP_INST_GAINING)
		return (1);
	else if (inst_ind == SCTP_INST_NEUTRAL)
		return (1);
	else
		return ((int)net->cc_mod.rtcc.ret_from_eq);
}
+
/*
 * RTCC helper for the "bandwidth decreased" case.  Returns 1 to freeze
 * cwnd or 0 to allow the normal CC update (see the table in
 * cc_bw_limit()).  Always records the new bandwidth/rtt baseline at
 * out_decision.
 */
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
static int
cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
	       uint64_t vtag, uint8_t inst_ind)
#else
static int
cc_bw_decrease(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
	       uint8_t inst_ind)
#endif
{
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
	uint64_t oth, probepoint;
#endif

	/* Bandwidth decreased.*/
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
	probepoint = (((uint64_t)net->cwnd) << 32);
#endif
	if (net->rtt  > net->cc_mod.rtcc.lbw_rtt+rtt_offset) {
		/* rtt increased */
		/* Did we add more */
		if ((net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) &&
		    (inst_ind != SCTP_INST_LOOSING)) {
			/* We caused it maybe.. back off? */
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
			/* PROBE POINT 1 */
			probepoint |=  ((1 << 16) | 1);
			SDT_PROBE(sctp, cwnd, net, rttvar,
				  vtag,
				  ((net->cc_mod.rtcc.lbw << 32) | nbw),
				  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
				  net->flight_size,
				  probepoint);
#endif
			if (net->cc_mod.rtcc.ret_from_eq) {
				/* Switch over to CA if we are less aggressive */
				net->ssthresh = net->cwnd-1;
				net->partial_bytes_acked = 0;
			}
			return (1);
		}
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
		/* Probe point 2 */
		probepoint |=  ((2 << 16) | 0);
		SDT_PROBE(sctp, cwnd, net, rttvar,
			  vtag,
			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
			  net->flight_size,
			  probepoint);
#endif
		/* Someone else - fight for more? */
		if (net->cc_mod.rtcc.steady_step) {
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
			oth = net->cc_mod.rtcc.vol_reduce;
			oth <<= 16;
			oth |= net->cc_mod.rtcc.step_cnt;
			oth <<= 16;
			oth |= net->cc_mod.rtcc.last_step_state;
			SDT_PROBE(sctp, cwnd, net, rttstep,
				  vtag,
				  ((net->cc_mod.rtcc.lbw << 32) | nbw),
				  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
				  oth,
				  probepoint);
#endif
			/* Did we voluntarily give up some? if so take
			 * one back please
			 */
			if ((net->cc_mod.rtcc.vol_reduce) &&
			    (inst_ind != SCTP_INST_GAINING)) {
				net->cwnd += net->mtu;
				net->cc_mod.rtcc.vol_reduce--;
			}
			net->cc_mod.rtcc.last_step_state = 2;
			net->cc_mod.rtcc.step_cnt = 0;
		}
		goto out_decision;
	} else  if (net->rtt  < net->cc_mod.rtcc.lbw_rtt-rtt_offset) {
		/* bw & rtt decreased */
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
		/* Probe point 3 */
		probepoint |=  ((3 << 16) | 0);
		SDT_PROBE(sctp, cwnd, net, rttvar,
			  vtag,
			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
			  net->flight_size,
			  probepoint);
#endif
		if (net->cc_mod.rtcc.steady_step) {
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
			oth = net->cc_mod.rtcc.vol_reduce;
			oth <<= 16;
			oth |= net->cc_mod.rtcc.step_cnt;
			oth <<= 16;
			oth |= net->cc_mod.rtcc.last_step_state;
			SDT_PROBE(sctp, cwnd, net, rttstep,
				  vtag,
				  ((net->cc_mod.rtcc.lbw << 32) | nbw),
				  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
				  oth,
				  probepoint);
#endif
			/* Reclaim one voluntarily-given-up MTU, as above. */
			if ((net->cc_mod.rtcc.vol_reduce) &&
			    (inst_ind != SCTP_INST_GAINING)) {
				net->cwnd += net->mtu;
				net->cc_mod.rtcc.vol_reduce--;
			}
			net->cc_mod.rtcc.last_step_state = 3;
			net->cc_mod.rtcc.step_cnt = 0;
		}
		goto out_decision;
	}
	/* The bw decreased but rtt stayed the same */
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
	/* Probe point 4 */
	probepoint |=  ((4 << 16) | 0);
	SDT_PROBE(sctp, cwnd, net, rttvar,
		  vtag,
		  ((net->cc_mod.rtcc.lbw << 32) | nbw),
		  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
		  net->flight_size,
		  probepoint);
#endif
	if (net->cc_mod.rtcc.steady_step) {
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
		oth = net->cc_mod.rtcc.vol_reduce;
		oth <<= 16;
		oth |= net->cc_mod.rtcc.step_cnt;
		oth <<= 16;
		oth |= net->cc_mod.rtcc.last_step_state;
		SDT_PROBE(sctp, cwnd, net, rttstep,
			  vtag,
			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
			  oth,
			  probepoint);
#endif
		/* Reclaim one voluntarily-given-up MTU, as above. */
		if ((net->cc_mod.rtcc.vol_reduce) &&
		    (inst_ind != SCTP_INST_GAINING)) {
			net->cwnd += net->mtu;
			net->cc_mod.rtcc.vol_reduce--;
		}
		net->cc_mod.rtcc.last_step_state = 4;
		net->cc_mod.rtcc.step_cnt = 0;
	}
out_decision:
	/* Lock in the new measurement as the baseline. */
	net->cc_mod.rtcc.lbw = nbw;
	net->cc_mod.rtcc.lbw_rtt = net->rtt;
	net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
	if (inst_ind == SCTP_INST_GAINING) {
		return (1);
	} else {
		return (0);
	}
}
+
/*
 * RTCC helper for the "bandwidth increased" case.  Always returns 0 so
 * the normal CC update proceeds (see the table in cc_bw_limit()), after
 * resetting the step-down state and recording the new baseline.
 */
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
static int
cc_bw_increase(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t vtag)
#else
static int
cc_bw_increase(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw)
#endif
{
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
	uint64_t oth, probepoint;

#endif
	/* BW increased, so update and
	 * return 0, since all actions in
	 * our table say to do the normal CC
	 * update. Note that we pay no attention to
	 * the inst_ind since our overall sum is increasing.
	 */
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
	/* PROBE POINT 0 */
	probepoint = (((uint64_t)net->cwnd) << 32);
	SDT_PROBE(sctp, cwnd, net, rttvar,
		  vtag,
		  ((net->cc_mod.rtcc.lbw << 32) | nbw),
		  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
		  net->flight_size,
		  probepoint);
#endif
	if (net->cc_mod.rtcc.steady_step) {
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
		oth = net->cc_mod.rtcc.vol_reduce;
		oth <<= 16;
		oth |= net->cc_mod.rtcc.step_cnt;
		oth <<= 16;
		oth |= net->cc_mod.rtcc.last_step_state;
		SDT_PROBE(sctp, cwnd, net, rttstep,
			  vtag,
			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
			  oth,
			  probepoint);
#endif
		/* Growth cancels any pending/accumulated step-down state. */
		net->cc_mod.rtcc.last_step_state = 0;
		net->cc_mod.rtcc.step_cnt = 0;
		net->cc_mod.rtcc.vol_reduce = 0;
	}
	/* Record the new baseline. */
	net->cc_mod.rtcc.lbw = nbw;
	net->cc_mod.rtcc.lbw_rtt = net->rtt;
	net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
	return (0);
}
+
+/* RTCC Algorithm to limit growth of cwnd, return
+ * true if you want to NOT allow cwnd growth
+ */
+static int
+cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw)
+{
+	uint64_t bw_offset, rtt_offset;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	uint64_t probepoint, rtt, vtag;
+#endif
+	uint64_t bytes_for_this_rtt, inst_bw;
+	uint64_t div, inst_off;
+	int bw_shift;
+	uint8_t inst_ind;
+	int ret;
+	/*-
+	 * Here we need to see if we want
+	 * to limit cwnd growth due to increase
+	 * in overall rtt but no increase in bw.
+	 * We use the following table to figure
+	 * out what we should do. When we return
+	 * 0, cc update goes on as planned. If we
+	 * return 1, then no cc update happens and cwnd
+	 * stays where it is at.
+	 * ----------------------------------
+	 *   BW    |    RTT   | Action
+	 * *********************************
+	 *   INC   |    INC   | return 0
+	 * ----------------------------------
+	 *   INC   |    SAME  | return 0
+	 * ----------------------------------
+	 *   INC   |    DECR  | return 0
+	 * ----------------------------------
+	 *   SAME  |    INC   | return 1
+	 * ----------------------------------
+	 *   SAME  |    SAME  | return 1
+	 * ----------------------------------
+	 *   SAME  |    DECR  | return 0
+	 * ----------------------------------
+	 *   DECR  |    INC   | return 0 or 1 based on if we caused.
+	 * ----------------------------------
+	 *   DECR  |    SAME  | return 0
+	 * ----------------------------------
+	 *   DECR  |    DECR  | return 0
+	 * ----------------------------------
+	 *
+	 * We are a bit fuzz on what an increase or
+	 * decrease is. For BW it is the same if
+	 * it did not change within 1/64th. For
+	 * RTT it stayed the same if it did not
+	 * change within 1/32nd
+	 */
+	bw_shift = SCTP_BASE_SYSCTL(sctp_rttvar_bw);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	/* vtag probe argument: my_vtag | lport | rport. */
+	rtt = stcb->asoc.my_vtag;
+	vtag = (rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport);
+	probepoint = (((uint64_t)net->cwnd) << 32);
+	rtt = net->rtt;
+#endif
+	if (net->cc_mod.rtcc.rtt_set_this_sack) {
+		/* A fresh RTT sample arrived with this SACK: classify the
+		 * instantaneous bandwidth over just this RTT's bytes as
+		 * gaining / loosing / neutral relative to nbw.
+		 */
+		net->cc_mod.rtcc.rtt_set_this_sack = 0;
+		bytes_for_this_rtt = net->cc_mod.rtcc.bw_bytes - net->cc_mod.rtcc.bw_bytes_at_last_rttc;
+		net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
+		if (net->rtt) {
+			div = net->rtt / 1000;
+			if (div) {
+				inst_bw = bytes_for_this_rtt / div;
+				inst_off = inst_bw >> bw_shift;
+				if (inst_bw > nbw)
+					inst_ind = SCTP_INST_GAINING;
+				else if ((inst_bw+inst_off) < nbw)
+					inst_ind = SCTP_INST_LOOSING;
+				else
+					inst_ind = SCTP_INST_NEUTRAL;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+				probepoint |=  ((0xb << 16) | inst_ind);
+#endif
+			} else {
+				/* rtt < 1ms: too little resolution, keep
+				 * the previous classification.
+				 */
+				inst_ind = net->cc_mod.rtcc.last_inst_ind;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+				inst_bw = bytes_for_this_rtt / (uint64_t)(net->rtt);
+				/* Can't determine do not change */
+				probepoint |=  ((0xc << 16) | inst_ind);
+#endif
+			}
+		} else {
+			inst_ind = net->cc_mod.rtcc.last_inst_ind;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+			inst_bw = bytes_for_this_rtt;
+			/* Can't determine do not change */
+			probepoint |=  ((0xd << 16) | inst_ind);
+#endif
+		}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		SDT_PROBE(sctp, cwnd, net, rttvar,
+			  vtag,
+			  ((nbw << 32) | inst_bw),
+			  ((net->cc_mod.rtcc.lbw_rtt << 32) | rtt),
+			  net->flight_size,
+			  probepoint);
+#endif
+	} else {
+		/* No rtt measurement, use last one */
+		inst_ind = net->cc_mod.rtcc.last_inst_ind;
+	}
+	/* "Same" band around last bandwidth: 1/2^bw_shift of lbw. */
+	bw_offset = net->cc_mod.rtcc.lbw >> bw_shift;
+	if (nbw > net->cc_mod.rtcc.lbw + bw_offset) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		ret = cc_bw_increase(stcb, net, nbw, vtag);
+#else
+		ret = cc_bw_increase(stcb, net, nbw);
+#endif
+		goto out;
+	}
+	rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt);
+	if (nbw < net->cc_mod.rtcc.lbw - bw_offset) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, vtag, inst_ind);
+#else
+		ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, inst_ind);
+#endif
+		goto out;
+	}
+	/* If we reach here then
+	 * we are in a situation where
+	 * the bw stayed the same.
+	 */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	ret = cc_bw_same(stcb, net, nbw, rtt_offset, vtag, inst_ind);
+#else
+	ret = cc_bw_same(stcb, net, nbw, rtt_offset, inst_ind);
+#endif
+out:
+	/* Remember the classification for the next SACK round. */
+	net->cc_mod.rtcc.last_inst_ind = inst_ind;
+	return (ret);
+}
+
+/*
+ * Common SACK-driven cwnd update shared by the standard (RFC 4960) and
+ * RTCC congestion-control modules; use_rtcc selects the RTCC extras.
+ * For the CMT resource-pooling variants (RPV1/RPV2/MPTCP) it first sums
+ * ssthresh, cwnd and the cwnd/srtt ratios over all destinations, then
+ * walks every net and applies slow-start or congestion-avoidance growth
+ * scaled by the per-variant pooling factors.
+ */
+static void
+sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb,
+				   struct sctp_association *asoc,
+				   int accum_moved, int reneged_all SCTP_UNUSED, int will_exit, int use_rtcc)
+{
+	struct sctp_nets *net;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	int old_cwnd;
+#endif
+	/* NOTE(review): t_cwnd is accumulated below but never read in this
+	 * function — confirm against upstream before removing.
+	 */
+	uint32_t t_ssthresh, t_cwnd, incr;
+	uint64_t t_ucwnd_sbw;
+	uint64_t t_path_mptcp;
+	uint64_t mptcp_like_alpha;
+	uint32_t srtt;
+	uint64_t max_path;
+
+	/* MT FIXME: Don't compute this over and over again */
+	t_ssthresh = 0;
+	t_cwnd = 0;
+	t_ucwnd_sbw = 0;
+	t_path_mptcp = 0;
+	mptcp_like_alpha = 1;
+	if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
+	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2) ||
+	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_MPTCP)) {
+		max_path = 0;
+		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+			t_ssthresh += net->ssthresh;
+			t_cwnd += net->cwnd;
+			/* lastsa>>3;  we don't need to divide ...*/
+			srtt = net->lastsa;
+			if (srtt > 0) {
+				uint64_t tmp;
+
+				t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)srtt;
+				t_path_mptcp += (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_Z) /
+				                (((uint64_t)net->mtu) * (uint64_t)srtt);
+				tmp = (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_N) /
+				      ((uint64_t)net->mtu * (uint64_t)(srtt * srtt));
+				if (tmp > max_path) {
+					max_path = tmp;
+				}
+			}
+		}
+		/* MPTCP-style alpha: best path rate over the square of the
+		 * total rate (fixed-point, see SHIFT_MPTCP_MULTI_*).
+		 */
+		if (t_path_mptcp > 0) {
+			mptcp_like_alpha = max_path / (t_path_mptcp * t_path_mptcp);
+		} else {
+			mptcp_like_alpha = 1;
+		}
+	}
+	/* Avoid divide-by-zero in the per-net scaling below. */
+	if (t_ssthresh == 0) {
+		t_ssthresh = 1;
+	}
+	if (t_ucwnd_sbw == 0) {
+		t_ucwnd_sbw = 1;
+	}
+	/******************************/
+	/* update cwnd and Early FR   */
+	/******************************/
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+
+#ifdef JANA_CMT_FAST_RECOVERY
+		/*
+		 * CMT fast recovery code. Need to debug.
+		 */
+		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
+			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
+			    SCTP_TSN_GE(net->pseudo_cumack,net->fast_recovery_tsn)) {
+				net->will_exit_fast_recovery = 1;
+			}
+		}
+#endif
+		/* if nothing was acked on this destination skip it */
+		if (net->net_ack == 0) {
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
+			}
+			continue;
+		}
+#ifdef JANA_CMT_FAST_RECOVERY
+                /* CMT fast recovery code
+		 */
+		/*
+		  if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery && net->will_exit_fast_recovery == 0) {
+		  @@@ Do something
+		  }
+		  else if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
+		*/
+#endif
+
+		if (asoc->fast_retran_loss_recovery &&
+		    (will_exit == 0) &&
+		    (asoc->sctp_cmt_on_off == 0)) {
+			/*
+			 * If we are in loss recovery we skip any cwnd
+			 * update
+			 */
+			return;
+		}
+		/*
+		 * Did any measurements go on for this network?
+		 */
+		if (use_rtcc && (net->cc_mod.rtcc.tls_needs_set > 0)) {
+			uint64_t nbw;
+			/*
+			 * At this point our bw_bytes has been updated
+			 * by incoming sack information.
+			 *
+			 * But our bw may not yet be set.
+			 *
+			 */
+			/* nbw = bytes per millisecond of measured time
+			 * (new_tot_time is in usec); fall back to the raw
+			 * byte count when under 1ms has elapsed.
+			 */
+			if ((net->cc_mod.rtcc.new_tot_time/1000) > 0) {
+				nbw = net->cc_mod.rtcc.bw_bytes/(net->cc_mod.rtcc.new_tot_time/1000);
+			} else {
+				nbw = net->cc_mod.rtcc.bw_bytes;
+			}
+			if (net->cc_mod.rtcc.lbw) {
+				if (cc_bw_limit(stcb, net, nbw)) {
+					/* Hold here, no update */
+					continue;
+				}
+			} else {
+				/* First measurement: just record it as the
+				 * baseline, no limiting decision yet.
+				 */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+				uint64_t vtag, probepoint;
+
+				probepoint = (((uint64_t)net->cwnd) << 32);
+				probepoint |=  ((0xa << 16) | 0);
+				vtag = (net->rtt << 32) |
+					(((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
+					(stcb->rport);
+
+				SDT_PROBE(sctp, cwnd, net, rttvar,
+					  vtag,
+					  nbw,
+					  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+					  net->flight_size,
+					  probepoint);
+#endif
+				net->cc_mod.rtcc.lbw = nbw;
+				net->cc_mod.rtcc.lbw_rtt = net->rtt;
+				if (net->cc_mod.rtcc.rtt_set_this_sack) {
+					net->cc_mod.rtcc.rtt_set_this_sack = 0;
+					net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
+				}
+			}
+		}
+		/*
+		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
+		 * moved.
+		 */
+		if (accum_moved ||
+		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
+			/* If the cumulative ack moved we can proceed */
+			if (net->cwnd <= net->ssthresh) {
+				/* We are in slow start */
+				if (net->flight_size + net->net_ack >= net->cwnd) {
+					uint32_t limit;
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+					old_cwnd = net->cwnd;
+#endif
+					/* ABC-style slow-start increase,
+					 * scaled per CMT variant.
+					 */
+					switch (asoc->sctp_cmt_on_off) {
+					case SCTP_CMT_RPV1:
+						limit = (uint32_t)(((uint64_t)net->mtu *
+						                    (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
+						                    (uint64_t)net->ssthresh) /
+						                   (uint64_t)t_ssthresh);
+						incr = (uint32_t)(((uint64_t)net->net_ack *
+						                   (uint64_t)net->ssthresh) /
+						                  (uint64_t)t_ssthresh);
+						if (incr > limit) {
+							incr = limit;
+						}
+						if (incr == 0) {
+							incr = 1;
+						}
+						break;
+					case SCTP_CMT_RPV2:
+						/* lastsa>>3;  we don't need to divide ...*/
+						srtt = net->lastsa;
+						if (srtt == 0) {
+							srtt = 1;
+						}
+						limit = (uint32_t)(((uint64_t)net->mtu *
+						                    (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
+						                    (uint64_t)net->cwnd) /
+						                   ((uint64_t)srtt * t_ucwnd_sbw));
+						                   /* INCREASE FACTOR */
+						incr = (uint32_t)(((uint64_t)net->net_ack *
+						                   (uint64_t)net->cwnd) /
+						                  ((uint64_t)srtt * t_ucwnd_sbw));
+						                  /* INCREASE FACTOR */
+						if (incr > limit) {
+							incr = limit;
+						}
+						if (incr == 0) {
+							incr = 1;
+						}
+						break;
+					case SCTP_CMT_MPTCP:
+						limit = (uint32_t)(((uint64_t)net->mtu *
+						                    mptcp_like_alpha *
+						                    (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) >>
+						                   SHIFT_MPTCP_MULTI);
+						incr  = (uint32_t)(((uint64_t)net->net_ack *
+						                    mptcp_like_alpha) >>
+						                   SHIFT_MPTCP_MULTI);
+						if (incr > limit) {
+							incr = limit;
+						}
+						if (incr > net->net_ack) {
+							incr = net->net_ack;
+						}
+						if (incr > net->mtu) {
+							incr = net->mtu;
+						}
+						break;
+					default:
+						incr = net->net_ack;
+						if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) {
+							incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable);
+						}
+						break;
+					}
+					net->cwnd += incr;
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+						sctp_log_cwnd(stcb, net, incr,
+						              SCTP_CWND_LOG_FROM_SS);
+					}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+					SDT_PROBE(sctp, cwnd, net, ack,
+					          stcb->asoc.my_vtag,
+					          ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+					          net,
+					          old_cwnd, net->cwnd);
+#endif
+				} else {
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+						sctp_log_cwnd(stcb, net, net->net_ack,
+							      SCTP_CWND_LOG_NOADV_SS);
+					}
+				}
+			} else {
+				/* We are in congestion avoidance */
+				/*
+				 * Add to pba
+				 */
+			        net->partial_bytes_acked += net->net_ack;
+
+				if ((net->flight_size + net->net_ack >= net->cwnd) &&
+                                    (net->partial_bytes_acked >= net->cwnd)) {
+					net->partial_bytes_acked -= net->cwnd;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+					old_cwnd = net->cwnd;
+#endif
+					switch (asoc->sctp_cmt_on_off) {
+					case SCTP_CMT_RPV1:
+						incr = (uint32_t)(((uint64_t)net->mtu *
+						                   (uint64_t)net->ssthresh) /
+						                  (uint64_t)t_ssthresh);
+						if (incr == 0) {
+							incr = 1;
+						}
+						break;
+					case SCTP_CMT_RPV2:
+						/* lastsa>>3;  we don't need to divide ... */
+						srtt = net->lastsa;
+						if (srtt == 0) {
+							srtt = 1;
+						}
+						incr = (uint32_t)((uint64_t)net->mtu *
+						                  (uint64_t)net->cwnd /
+						                  ((uint64_t)srtt *
+						                   t_ucwnd_sbw));
+						                  /* INCREASE FACTOR */
+						if (incr == 0) {
+							incr = 1;
+						}
+						break;
+					case SCTP_CMT_MPTCP:
+						incr = (uint32_t)((mptcp_like_alpha *
+						                   (uint64_t) net->cwnd) >>
+						                  SHIFT_MPTCP_MULTI);
+						if (incr > net->mtu) {
+							incr = net->mtu;
+						}
+						break;
+					default:
+						incr = net->mtu;
+						break;
+					}
+					net->cwnd += incr;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+					SDT_PROBE(sctp, cwnd, net, ack,
+						  stcb->asoc.my_vtag,
+						  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+						  net,
+						  old_cwnd, net->cwnd);
+#endif
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+						sctp_log_cwnd(stcb, net, net->mtu,
+							      SCTP_CWND_LOG_FROM_CA);
+					}
+				} else {
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+						sctp_log_cwnd(stcb, net, net->net_ack,
+							      SCTP_CWND_LOG_NOADV_CA);
+					}
+				}
+			}
+		} else {
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+				sctp_log_cwnd(stcb, net, net->mtu,
+					      SCTP_CWND_LOG_NO_CUMACK);
+			}
+		}
+	}
+}
+
+/*
+ * A destination just left the potentially-failed (PF) state: collapse
+ * its cwnd to one MTU so it restarts probing conservatively.  On
+ * FreeBSD >= 8.3 stcb is also used for the DTrace ack probe; elsewhere
+ * it is unused.
+ */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+static void
+sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb, struct sctp_nets *net)
+#else
+static void
+sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net)
+#endif
+{
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	int old_cwnd;
+
+	old_cwnd = net->cwnd;
+#endif
+	net->cwnd = net->mtu;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	SDT_PROBE(sctp, cwnd, net, ack,
+	          stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
+	          old_cwnd, net->cwnd);
+#endif
+	SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
+	        (void *)net, net->cwnd);
+}
+
+
+/*
+ * T3 retransmission timeout on a destination: recompute ssthresh and
+ * reset cwnd to one MTU (RFC 4960 style).  Under CMT RPV1 the new
+ * ssthresh is this net's share of the association-wide ssthresh; under
+ * RPV2 it is derived from the total cwnd and the summed cwnd/srtt
+ * ratio.  Otherwise the classic max(cwnd/2, 4*MTU) rule applies.
+ */
+static void
+sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	int old_cwnd = net->cwnd;
+	uint32_t t_ssthresh, t_cwnd;
+	uint64_t t_ucwnd_sbw;
+
+	/* MT FIXME: Don't compute this over and over again */
+	t_ssthresh = 0;
+	t_cwnd = 0;
+	if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
+	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
+		struct sctp_nets *lnet;
+		uint32_t srtt;
+
+		t_ucwnd_sbw = 0;
+		/* Sum ssthresh, cwnd and cwnd/srtt over all destinations. */
+		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+			t_ssthresh += lnet->ssthresh;
+			t_cwnd += lnet->cwnd;
+			srtt = lnet->lastsa;
+			/* lastsa>>3;  we don't need to divide ... */
+			if (srtt > 0) {
+				t_ucwnd_sbw += (uint64_t)lnet->cwnd / (uint64_t)srtt;
+			}
+		}
+		/* Guard the divisions below. */
+		if (t_ssthresh < 1) {
+			t_ssthresh = 1;
+		}
+		if (t_ucwnd_sbw < 1) {
+			t_ucwnd_sbw = 1;
+		}
+		if (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) {
+			net->ssthresh = (uint32_t)(((uint64_t)4 *
+			                            (uint64_t)net->mtu *
+			                            (uint64_t)net->ssthresh) /
+			                           (uint64_t)t_ssthresh);
+		} else {
+			uint64_t cc_delta;
+
+			srtt = net->lastsa;
+			/* lastsa>>3;  we don't need to divide ... */
+			if (srtt == 0) {
+				srtt = 1;
+			}
+			cc_delta = t_ucwnd_sbw * (uint64_t)srtt / 2;
+			if (cc_delta < t_cwnd) {
+				net->ssthresh = (uint32_t)((uint64_t)t_cwnd - cc_delta);
+			} else {
+				net->ssthresh  = net->mtu;
+			}
+		}
+		/* Keep ssthresh within [mtu, cwnd - t_cwnd/2] bounds. */
+		if ((net->cwnd > t_cwnd / 2) &&
+		    (net->ssthresh < net->cwnd - t_cwnd / 2)) {
+			net->ssthresh = net->cwnd - t_cwnd / 2;
+		}
+		if (net->ssthresh < net->mtu) {
+			net->ssthresh = net->mtu;
+		}
+	} else {
+		net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
+	}
+	/* Restart from one MTU per RFC 4960 section 7.2.3. */
+	net->cwnd = net->mtu;
+	net->partial_bytes_acked = 0;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	SDT_PROBE(sctp, cwnd, net, to,
+		  stcb->asoc.my_vtag,
+		  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+		  net,
+		  old_cwnd, net->cwnd);
+#endif
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
+	}
+}
+
+/*
+ * React to an ECN-Echo.  With the RTCC module on a local LAN and DCCC
+ * enabled, apply the data-center-style proportional reduction; in the
+ * classic path, halve ssthresh (floor one MTU, doubling RTO when the
+ * floor is hit) and drop cwnd to ssthresh — but only once per window
+ * (in_window == 0).
+ */
+static void
+sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net,
+					    int in_window, int num_pkt_lost, int use_rtcc)
+{
+	int old_cwnd = net->cwnd;
+	if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) {
+		/* Data center Congestion Control */
+		if (in_window == 0) {
+			/* Go to CA with the cwnd at the point we sent
+			 * the TSN that was marked with a CE.
+			 */
+			if (net->ecn_prev_cwnd < net->cwnd) {
+				/* Restore to prev cwnd */
+				net->cwnd = net->ecn_prev_cwnd - (net->mtu * num_pkt_lost);
+			} else {
+				/* Just cut in 1/2 */
+				net->cwnd /= 2;
+			}
+			/* Drop to CA */
+			net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu);
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+			}
+		} else {
+			/* Further tuning down required over the drastic original cut */
+			net->ssthresh -= (net->mtu * num_pkt_lost);
+			net->cwnd -= (net->mtu * num_pkt_lost);
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+			}
+
+		}
+		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
+	}  else {
+		if (in_window == 0) {
+			SCTP_STAT_INCR(sctps_ecnereducedcwnd);
+			net->ssthresh = net->cwnd / 2;
+			if (net->ssthresh < net->mtu) {
+				net->ssthresh = net->mtu;
+				/* here back off the timer as well, to slow us down */
+				net->RTO <<= 1;
+			}
+			net->cwnd = net->ssthresh;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+			SDT_PROBE(sctp, cwnd, net, ecn,
+				  stcb->asoc.my_vtag,
+				  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+				  net,
+				  old_cwnd, net->cwnd);
+#endif
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+			}
+		}
+	}
+
+}
+
+/*
+ * Process a PKTDROP chunk from a router: estimate the bottleneck's
+ * available pipe (bottle_bw * rtt) and either back cwnd down by our
+ * share of the queue overage (forcing congestion avoidance) or grow by
+ * up to 1/4 of the free space, capped by max_burst.  bottle_bw and
+ * on_queue are out-parameters reporting the values parsed from cp.
+ */
+static void
+sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
+	struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
+	uint32_t *bottle_bw, uint32_t *on_queue)
+{
+	uint32_t bw_avail;
+	int rtt;
+	unsigned int incr;
+	int old_cwnd = net->cwnd;
+
+	/* need real RTT in msd for this calc */
+	rtt = net->rtt / 1000;
+	/* get bottle neck bw */
+	*bottle_bw = ntohl(cp->bottle_bw);
+	/* and whats on queue */
+	*on_queue = ntohl(cp->current_onq);
+	/*
+	 * adjust the on-queue if our flight is more it could be
+	 * that the router has not yet gotten data "in-flight" to it
+	 */
+	if (*on_queue < net->flight_size)
+		*on_queue = net->flight_size;
+	/* calculate the available space */
+	bw_avail = (*bottle_bw * rtt) / 1000;
+	if (bw_avail > *bottle_bw) {
+		/*
+		 * Cap the growth to no more than the bottle neck.
+		 * This can happen as RTT slides up due to queues.
+		 * It also means if you have more than a 1 second
+		 * RTT with a empty queue you will be limited to the
+		 * bottle_bw per second no matter if other points
+		 * have 1/2 the RTT and you could get more out...
+		 */
+		bw_avail = *bottle_bw;
+	}
+	if (*on_queue > bw_avail) {
+		/*
+		 * No room for anything else don't allow anything
+		 * else to be "added to the fire".
+		 */
+		int seg_inflight, seg_onqueue, my_portion;
+			net->partial_bytes_acked = 0;
+
+		/* how much are we over queue size? */
+		incr = *on_queue - bw_avail;
+		if (stcb->asoc.seen_a_sack_this_pkt) {
+			/*
+			 * undo any cwnd adjustment that the sack
+			 * might have made
+			 */
+			net->cwnd = net->prev_cwnd;
+		}
+		/* Now how much of that is mine? */
+		seg_inflight = net->flight_size / net->mtu;
+		seg_onqueue = *on_queue / net->mtu;
+		/* NOTE(review): seg_onqueue can be 0 when *on_queue < mtu,
+		 * which would divide by zero here — presumably *on_queue >
+		 * bw_avail keeps it >= 1 MTU in practice; verify upstream.
+		 */
+		my_portion = (incr * seg_inflight) / seg_onqueue;
+
+		/* Have I made an adjustment already */
+		if (net->cwnd > net->flight_size) {
+			/*
+			 * for this flight I made an adjustment we
+			 * need to decrease the portion by a share
+			 * our previous adjustment.
+			 */
+			int diff_adj;
+
+			diff_adj = net->cwnd - net->flight_size;
+			if (diff_adj > my_portion)
+				my_portion = 0;
+			else
+				my_portion -= diff_adj;
+		}
+		/*
+		 * back down to the previous cwnd (assume we have
+		 * had a sack before this packet). minus what ever
+		 * portion of the overage is my fault.
+		 */
+		net->cwnd -= my_portion;
+
+		/* we will NOT back down more than 1 MTU */
+		if (net->cwnd <= net->mtu) {
+			net->cwnd = net->mtu;
+		}
+		/* force into CA */
+		net->ssthresh = net->cwnd - 1;
+	} else {
+		/*
+		 * Take 1/4 of the space left or max burst up ..
+		 * whichever is less.
+		 */
+		incr = (bw_avail - *on_queue) >> 2;
+		if ((stcb->asoc.max_burst > 0) &&
+		    (stcb->asoc.max_burst * net->mtu < incr)) {
+			incr = stcb->asoc.max_burst * net->mtu;
+		}
+		net->cwnd += incr;
+	}
+	if (net->cwnd > bw_avail) {
+		/* We can't exceed the pipe size */
+		net->cwnd = bw_avail;
+	}
+	if (net->cwnd < net->mtu) {
+		/* We always have 1 MTU */
+		net->cwnd = net->mtu;
+	}
+
+	if (net->cwnd - old_cwnd != 0) {
+		/* log only changes */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		SDT_PROBE(sctp, cwnd, net, pd,
+			  stcb->asoc.my_vtag,
+			  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+			  net,
+			  old_cwnd, net->cwnd);
+#endif
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
+				SCTP_CWND_LOG_FROM_SAT);
+		}
+	}
+}
+
+static void
+sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
+			      struct sctp_nets *net, int burst_limit)
+{
+	int old_cwnd = net->cwnd;
+
+	if (net->ssthresh < net->cwnd)
+		net->ssthresh = net->cwnd;
+	if (burst_limit) {
+		net->cwnd = (net->flight_size + (burst_limit * net->mtu));
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		SDT_PROBE(sctp, cwnd, net, bl,
+			  stcb->asoc.my_vtag,
+			  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+			  net,
+			  old_cwnd, net-