Merge mozilla-inbound to mozilla-central. r=merge a=merge
author: shindli <shindli@mozilla.com>
Thu, 30 Nov 2017 12:01:52 +0200
changeset 394296 84d925e10c0d939865fd9e92219bddfc9d4cd5c2
parent 394273 1762ac4166c92a39fc8eaac2f77fd346ab9a7da3 (current diff)
parent 394295 84bcb5742e0c0d8d49c8c9605405a5e1dce671e0 (diff)
child 394297 1f9c36a7909a15536b6faa403a85c9e3b912af30
push id: 32998
push user: shindli@mozilla.com
push date: Thu, 30 Nov 2017 10:02:29 +0000
treeherder: mozilla-central@84d925e10c0d [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: merge, merge
milestone: 59.0a1
first release with
nightly linux32
84d925e10c0d / 59.0a1 / 20171130101246 / files
nightly linux64
84d925e10c0d / 59.0a1 / 20171130101246 / files
nightly mac
84d925e10c0d / 59.0a1 / 20171130101246 / files
nightly win32
84d925e10c0d / 59.0a1 / 20171130101246 / files
nightly win64
84d925e10c0d / 59.0a1 / 20171130101246 / files
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
releases
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge mozilla-inbound to mozilla-central. r=merge a=merge
js/src/gc/DeletePolicy.h
testing/web-platform/meta/secure-contexts/basic-popup-and-iframe-tests.html.ini
--- a/browser/app/macbuild/Contents/Info.plist.in
+++ b/browser/app/macbuild/Contents/Info.plist.in
@@ -233,16 +233,14 @@
   <true/>
   <key>NSPrincipalClass</key>
   <string>GeckoNSApplication</string>
 	<key>SMPrivilegedExecutables</key>
 	<dict>
 		<key>org.mozilla.updater</key>
 		<string>identifier "org.mozilla.updater" and ((anchor apple generic and certificate leaf[field.1.2.840.113635.100.6.1.9]) or (anchor apple generic and certificate 1[field.1.2.840.113635.100.6.2.6] and certificate leaf[field.1.2.840.113635.100.6.1.13] and certificate leaf[subject.OU] = "43AQ936H96"))</string>
 	</dict>
-  <key>NSDisablePersistence</key>
-  <true/>
   <key>MozillaDeveloperRepoPath</key>
   <string>%MOZ_DEVELOPER_REPO_PATH%</string>
   <key>MozillaDeveloperObjPath</key>
   <string>%MOZ_DEVELOPER_OBJ_PATH%</string>
 </dict>
 </plist>
--- a/dom/base/nsDocument.cpp
+++ b/dom/base/nsDocument.cpp
@@ -10766,17 +10766,17 @@ nsIDocument::IsPotentiallyScrollable(HTM
   return true;
 }
 
 Element*
 nsIDocument::GetScrollingElement()
 {
   // Keep this in sync with IsScrollingElement.
   if (GetCompatibilityMode() == eCompatibility_NavQuirks) {
-    HTMLBodyElement* body = GetBodyElement();
+    RefPtr<HTMLBodyElement> body = GetBodyElement();
     if (body && !IsPotentiallyScrollable(body)) {
       return body;
     }
 
     return nullptr;
   }
 
   return GetRootElement();
@@ -10787,25 +10787,27 @@ nsIDocument::IsScrollingElement(Element*
 {
   // Keep this in sync with GetScrollingElement.
   MOZ_ASSERT(aElement);
 
   if (GetCompatibilityMode() != eCompatibility_NavQuirks) {
     return aElement == GetRootElement();
   }
 
+  // In the common case when aElement != body, avoid refcounting.
   HTMLBodyElement* body = GetBodyElement();
   if (aElement != body) {
     return false;
   }
 
   // Now we know body is non-null, since aElement is not null.  It's the
   // scrolling element for the document if it itself is not potentially
   // scrollable.
-  return !IsPotentiallyScrollable(body);
+  RefPtr<HTMLBodyElement> strongBody(body);
+  return !IsPotentiallyScrollable(strongBody);
 }
 
 void
 nsIDocument::ObsoleteSheet(nsIURI *aSheetURI, ErrorResult& rv)
 {
   nsresult res = CSSLoader()->ObsoleteSheet(aSheetURI);
   if (NS_FAILED(res)) {
     rv.Throw(res);
--- a/dom/base/nsGlobalWindowInner.cpp
+++ b/dom/base/nsGlobalWindowInner.cpp
@@ -859,17 +859,16 @@ nsGlobalWindowInner::IsBackgroundInterna
 //*****************************************************************************
 
 nsGlobalWindowInner::nsGlobalWindowInner(nsGlobalWindowOuter *aOuterWindow)
   : nsPIDOMWindowInner(aOuterWindow->AsOuter()),
     mIdleFuzzFactor(0),
     mIdleCallbackIndex(-1),
     mCurrentlyIdle(false),
     mAddActiveEventFuzzTime(true),
-    mIsSecureContextIfOpenerIgnored(false),
     mWasOffline(false),
     mHasHadSlowScript(false),
     mNotifyIdleObserversIdleOnThaw(false),
     mNotifyIdleObserversActiveOnThaw(false),
     mIsChrome(false),
     mCleanMessageManager(false),
     mNeedsFocus(true),
     mHasFocus(false),
@@ -2236,22 +2235,16 @@ nsPIDOMWindowInner::CreatePerformanceObj
 }
 
 bool
 nsPIDOMWindowInner::IsSecureContext() const
 {
   return nsGlobalWindowInner::Cast(this)->IsSecureContext();
 }
 
-bool
-nsPIDOMWindowInner::IsSecureContextIfOpenerIgnored() const
-{
-  return nsGlobalWindowInner::Cast(this)->IsSecureContextIfOpenerIgnored();
-}
-
 void
 nsPIDOMWindowInner::Suspend()
 {
   nsGlobalWindowInner::Cast(this)->Suspend();
 }
 
 void
 nsPIDOMWindowInner::Resume()
@@ -7234,22 +7227,16 @@ nsGlobalWindowInner::GetConsole(ErrorRes
 }
 
 bool
 nsGlobalWindowInner::IsSecureContext() const
 {
   return JS_GetIsSecureContext(js::GetObjectCompartment(GetWrapperPreserveColor()));
 }
 
-bool
-nsGlobalWindowInner::IsSecureContextIfOpenerIgnored() const
-{
-  return mIsSecureContextIfOpenerIgnored;
-}
-
 already_AddRefed<External>
 nsGlobalWindowInner::GetExternal(ErrorResult& aRv)
 {
 #ifdef HAVE_SIDEBAR
   if (!mExternal) {
     AutoJSContext cx;
     JS::Rooted<JSObject*> jsImplObj(cx);
     ConstructJSImplementation("@mozilla.org/sidebar;1", this, &jsImplObj, aRv);
--- a/dom/base/nsGlobalWindowInner.h
+++ b/dom/base/nsGlobalWindowInner.h
@@ -680,17 +680,16 @@ public:
 #if defined(MOZ_WIDGET_ANDROID)
   int16_t Orientation(mozilla::dom::CallerType aCallerType) const;
 #endif
 
   mozilla::dom::Console* GetConsole(mozilla::ErrorResult& aRv);
 
   // https://w3c.github.io/webappsec-secure-contexts/#dom-window-issecurecontext
   bool IsSecureContext() const;
-  bool IsSecureContextIfOpenerIgnored() const;
 
   void GetSidebar(mozilla::dom::OwningExternalOrWindowProxy& aResult,
                   mozilla::ErrorResult& aRv);
   already_AddRefed<mozilla::dom::External> GetExternal(mozilla::ErrorResult& aRv);
 
   // Exposed only for testing
   static bool
   TokenizeDialogOptions(nsAString& aToken, nsAString::const_iterator& aIter,
@@ -1252,17 +1251,16 @@ public:
   void ScheduleIdleRequestDispatch();
   void SuspendIdleRequests();
   void ResumeIdleRequests();
 
   typedef mozilla::LinkedList<RefPtr<mozilla::dom::IdleRequest>> IdleRequests;
   void RemoveIdleCallback(mozilla::dom::IdleRequest* aRequest);
 
 protected:
-  bool                          mIsSecureContextIfOpenerIgnored : 1;
 
   // Window offline status. Checked to see if we need to fire offline event
   bool                          mWasOffline : 1;
 
   // Represents whether the inner window's page has had a slow script notice.
   // Only used by inner windows; will always be false for outer windows.
   // This is used to implement Telemetry measures such as SLOW_SCRIPT_PAGE_COUNT.
   bool                          mHasHadSlowScript : 1;
--- a/dom/base/nsGlobalWindowOuter.cpp
+++ b/dom/base/nsGlobalWindowOuter.cpp
@@ -832,17 +832,16 @@ nsGlobalWindowOuter::nsGlobalWindowOuter
     mCurrentlyIdle(false),
     mAddActiveEventFuzzTime(true),
     mFullScreen(false),
     mFullscreenMode(false),
     mIsClosed(false),
     mInClose(false),
     mHavePendingClose(false),
     mHadOriginalOpener(false),
-    mOriginalOpenerWasSecureContext(false),
     mIsPopupSpam(false),
     mBlockScriptedClosingFlag(false),
     mWasOffline(false),
     mCreatingInnerWindow(false),
     mIsChrome(false),
     mAllowScriptsToClose(false),
     mTopLevelOuterContentWindow(false),
     mSerial(0),
@@ -1488,25 +1487,17 @@ nsGlobalWindowOuter::ComputeIsSecureCont
     nsGlobalWindowInner* parentWin =
       nsGlobalWindowInner::Cast(creatorDoc->GetInnerWindow());
     if (!parentWin) {
       return false; // we must be tearing down
     }
     MOZ_ASSERT(parentWin ==
                nsGlobalWindowInner::Cast(parentOuterWin->GetCurrentInnerWindow()),
                "Creator window mismatch while setting Secure Context state");
-    if (aFlags != SecureContextFlags::eIgnoreOpener) {
-      hadNonSecureContextCreator = !parentWin->IsSecureContext();
-    } else {
-      hadNonSecureContextCreator = !parentWin->IsSecureContextIfOpenerIgnored();
-    }
-  } else if (mHadOriginalOpener) {
-    if (aFlags != SecureContextFlags::eIgnoreOpener) {
-      hadNonSecureContextCreator = !mOriginalOpenerWasSecureContext;
-    }
+    hadNonSecureContextCreator = !parentWin->IsSecureContext();
   }
 
   if (hadNonSecureContextCreator) {
     return false;
   }
 
   if (nsContentUtils::HttpsStateIsModern(aDocument)) {
     return true;
@@ -1854,18 +1845,16 @@ nsGlobalWindowOuter::SetNewDocument(nsID
       rv = CreateNativeGlobalForInner(cx, newInnerWindow,
                                       aDocument->GetDocumentURI(),
                                       aDocument->NodePrincipal(),
                                       &newInnerGlobal,
                                       ComputeIsSecureContext(aDocument));
       NS_ASSERTION(NS_SUCCEEDED(rv) && newInnerGlobal &&
                    newInnerWindow->GetWrapperPreserveColor() == newInnerGlobal,
                    "Failed to get script global");
-      newInnerWindow->mIsSecureContextIfOpenerIgnored =
-        ComputeIsSecureContext(aDocument, SecureContextFlags::eIgnoreOpener);
 
       mCreatingInnerWindow = false;
       createdInnerWindow = true;
 
       NS_ENSURE_SUCCESS(rv, rv);
     }
 
     if (currentInner && currentInner->GetWrapperPreserveColor()) {
@@ -2303,18 +2292,16 @@ nsGlobalWindowOuter::SetOpenerWindow(nsP
   mozilla::Unused << contentOpener;
   MOZ_DIAGNOSTIC_ASSERT(!contentOpener || !mTabGroup ||
     mTabGroup == nsGlobalWindowOuter::Cast(contentOpener)->mTabGroup);
 
   if (aOriginalOpener) {
     MOZ_ASSERT(!mHadOriginalOpener,
                "Probably too late to call ComputeIsSecureContext again");
     mHadOriginalOpener = true;
-    mOriginalOpenerWasSecureContext =
-      aOpener->GetCurrentInnerWindow()->IsSecureContext();
   }
 
 #ifdef DEBUG
   mSetOpenerWindowCalled = true;
 #endif
 }
 
 void
--- a/dom/base/nsGlobalWindowOuter.h
+++ b/dom/base/nsGlobalWindowOuter.h
@@ -1067,17 +1067,16 @@ protected:
   bool                          mFullscreenMode : 1;
   bool                          mIsClosed : 1;
   bool                          mInClose : 1;
   // mHavePendingClose means we've got a termination function set to
   // close us when the JS stops executing or that we have a close
   // event posted.  If this is set, just ignore window.close() calls.
   bool                          mHavePendingClose : 1;
   bool                          mHadOriginalOpener : 1;
-  bool                          mOriginalOpenerWasSecureContext : 1;
   bool                          mIsPopupSpam : 1;
 
   // Indicates whether scripts are allowed to close this window.
   bool                          mBlockScriptedClosingFlag : 1;
 
   // Window offline status. Checked to see if we need to fire offline event
   bool                          mWasOffline : 1;
 
--- a/dom/geolocation/nsGeolocation.cpp
+++ b/dom/geolocation/nsGeolocation.cpp
@@ -1202,17 +1202,17 @@ Geolocation::ShouldBlockInsecureRequests
     return false;
   }
 
   nsCOMPtr<nsIDocument> doc = win->GetDoc();
   if (!doc) {
     return false;
   }
 
-  if (!nsGlobalWindowInner::Cast(win)->IsSecureContextIfOpenerIgnored()) {
+  if (!nsGlobalWindowInner::Cast(win)->IsSecureContext()) {
     nsContentUtils::ReportToConsole(nsIScriptError::errorFlag,
                                     NS_LITERAL_CSTRING("DOM"), doc,
                                     nsContentUtils::eDOM_PROPERTIES,
                                     "GeolocationInsecureRequestIsForbidden");
     return true;
   }
 
   return false;
--- a/dom/webidl/Window.webidl
+++ b/dom/webidl/Window.webidl
@@ -486,29 +486,16 @@ partial interface Window {
 };
 
 dictionary IdleRequestOptions {
   unsigned long timeout;
 };
 
 callback IdleRequestCallback = void (IdleDeadline deadline);
 
-/**
- * Similar to |isSecureContext|, but doesn't pay attention to whether the
- * window's opener (if any) is a secure context or not.
- *
- * WARNING: Do not use this unless you are familiar with the issues that
- * taking opener state into account is designed to address (or else you may
- * introduce security issues).  If in doubt, use |isSecureContext|.  In
- * particular do not use this to gate access to JavaScript APIs.
- */
-partial interface Window {
-  [ChromeOnly] readonly attribute boolean isSecureContextIfOpenerIgnored;
-};
-
 partial interface Window {
   /**
    * Returns a list of locales that the internationalization components
    * should be localized to.
    *
    * The function name refers to Regional Preferences which can be either
    * fetched from the internal internationalization database (CLDR), or
    * from the host environment.
--- a/dom/xml/nsXMLContentSink.cpp
+++ b/dom/xml/nsXMLContentSink.cpp
@@ -147,16 +147,17 @@ NS_IMPL_CYCLE_COLLECTION_CLASS(nsXMLCont
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(nsXMLContentSink,
                                                   nsContentSink)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mCurrentHead)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDocElement)
   for (uint32_t i = 0, count = tmp->mContentStack.Length(); i < count; i++) {
     const StackNode& node = tmp->mContentStack.ElementAt(i);
     cb.NoteXPCOMChild(node.mContent);
   }
+  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDocumentChildren)
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
 
 // nsIContentSink
 NS_IMETHODIMP
 nsXMLContentSink::WillParse(void)
 {
   return WillParseImpl();
 }
@@ -288,18 +289,17 @@ nsXMLContentSink::DidBuildModel(bool aTe
         CheckXSLTParamPI(pi, mXSLTProcessor, mDocument);
       }
       else if (child->IsElement()) {
         // Only honor PIs in the prolog
         break;
       }
     }
 
-    nsCOMPtr<nsIDOMDocument> currentDOMDoc(do_QueryInterface(mDocument));
-    mXSLTProcessor->SetSourceContentModel(currentDOMDoc);
+    mXSLTProcessor->SetSourceContentModel(mDocument, mDocumentChildren);
     // Since the processor now holds a reference to us we drop our reference
     // to it to avoid owning cycles
     mXSLTProcessor = nullptr;
   }
   else {
     // Kick off layout for non-XSLT transformed documents.
 
     // Check if we want to prettyprint
@@ -353,48 +353,38 @@ nsXMLContentSink::OnDocumentCreated(nsID
   }
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsXMLContentSink::OnTransformDone(nsresult aResult,
                                   nsIDocument* aResultDocument)
 {
-  NS_ASSERTION(NS_FAILED(aResult) || aResultDocument,
-               "Don't notify about transform success without a document.");
+  MOZ_ASSERT(aResultDocument, "Don't notify about transform end without a document.");
+
+  mDocumentChildren.Clear();
 
   nsCOMPtr<nsIDOMDocument> domDoc = do_QueryInterface(aResultDocument);
 
   nsCOMPtr<nsIContentViewer> contentViewer;
   mDocShell->GetContentViewer(getter_AddRefs(contentViewer));
 
   if (NS_FAILED(aResult) && contentViewer) {
     // Transform failed.
-    if (domDoc) {
-      aResultDocument->SetMayStartLayout(false);
-      // We have an error document.
-      contentViewer->SetDOMDocument(domDoc);
-    }
-    else {
-      // We don't have an error document, display the
-      // untransformed source document.
-      nsCOMPtr<nsIDOMDocument> document = do_QueryInterface(mDocument);
-      contentViewer->SetDOMDocument(document);
-    }
+    aResultDocument->SetMayStartLayout(false);
+    // We have an error document.
+    contentViewer->SetDOMDocument(domDoc);
   }
 
   nsCOMPtr<nsIDocument> originalDocument = mDocument;
-  if (NS_SUCCEEDED(aResult) || aResultDocument) {
-    // Transform succeeded or it failed and we have an error
-    // document to display.
-    mDocument = aResultDocument;
-    nsCOMPtr<nsIHTMLDocument> htmlDoc = do_QueryInterface(mDocument);
-    if (htmlDoc) {
-      htmlDoc->SetDocWriteDisabled(false);
-    }
+  // Transform succeeded, or it failed and we have an error document to display.
+  mDocument = aResultDocument;
+  nsCOMPtr<nsIHTMLDocument> htmlDoc = do_QueryInterface(mDocument);
+  if (htmlDoc) {
+    htmlDoc->SetDocWriteDisabled(false);
   }
 
   // Notify document observers that all the content has been stuck
   // into the document.
   // XXX do we need to notify for things like PIs?  Or just the
   // documentElement?
   nsIContent *rootElement = mDocument->GetRootElement();
   if (rootElement) {
@@ -610,22 +600,27 @@ nsXMLContentSink::CloseElement(nsIConten
   return rv;
 }
 
 nsresult
 nsXMLContentSink::AddContentAsLeaf(nsIContent *aContent)
 {
   nsresult result = NS_OK;
 
-  if ((eXMLContentSinkState_InProlog == mState) ||
-      (eXMLContentSinkState_InEpilog == mState)) {
-    NS_ASSERTION(mDocument, "Fragments have no prolog or epilog");
-    mDocument->AppendChildTo(aContent, false);
-  }
-  else {
+  if (mState == eXMLContentSinkState_InProlog) {
+    NS_ASSERTION(mDocument, "Fragments have no prolog");
+    mDocumentChildren.AppendElement(aContent);
+  } else if (mState == eXMLContentSinkState_InEpilog) {
+    NS_ASSERTION(mDocument, "Fragments have no epilog");
+    if (mXSLTProcessor) {
+      mDocumentChildren.AppendElement(aContent);
+    } else {
+      mDocument->AppendChildTo(aContent, false);
+    }
+  } else {
     nsCOMPtr<nsIContent> parent = GetCurrentContent();
 
     if (parent) {
       result = parent->AppendChildTo(aContent, false);
     }
   }
   return result;
 }
@@ -870,16 +865,30 @@ nsXMLContentSink::MaybeStartLayout(bool 
 bool
 nsXMLContentSink::SetDocElement(int32_t aNameSpaceID,
                                 nsAtom* aTagName,
                                 nsIContent *aContent)
 {
   if (mDocElement)
     return false;
 
+  mDocElement = aContent;
+
+  if (mXSLTProcessor) {
+    mDocumentChildren.AppendElement(aContent);
+    return true;
+  }
+
+  if (!mDocumentChildren.IsEmpty()) {
+    for (nsIContent* child : mDocumentChildren) {
+      mDocument->AppendChildTo(child, false);
+    }
+    mDocumentChildren.Clear();
+  }
+
   // check for root elements that needs special handling for
   // prettyprinting
   if ((aNameSpaceID == kNameSpaceID_XBL &&
        aTagName == nsGkAtoms::bindings) ||
       (aNameSpaceID == kNameSpaceID_XSLT &&
        (aTagName == nsGkAtoms::stylesheet ||
         aTagName == nsGkAtoms::transform))) {
     mPrettyPrintHasSpecialRoot = true;
@@ -888,17 +897,16 @@ nsXMLContentSink::SetDocElement(int32_t 
       // loading, and auto XLinks since we plan to prettyprint.
       mDocument->ScriptLoader()->SetEnabled(false);
       if (mCSSLoader) {
         mCSSLoader->SetEnabled(false);
       }
     }
   }
 
-  mDocElement = aContent;
   nsresult rv = mDocument->AppendChildTo(mDocElement, NotifyForDocElement());
   if (NS_FAILED(rv)) {
     // If we return false here, the caller will bail out because it won't
     // find a parent content node to append to, which is fine.
     return false;
   }
 
   if (aTagName == nsGkAtoms::html &&
@@ -996,27 +1004,27 @@ nsXMLContentSink::HandleStartElement(con
       mCurrentHead = content;
     }
   }
 
   if (IsMonolithicContainer(nodeInfo)) {
     mInMonolithicContainer++;
   }
 
-  if (content != mDocElement && !mCurrentHead) {
-    // This isn't the root and we're not inside an XHTML <head>.
-    // Might need to start layout
-    MaybeStartLayout(false);
-  }
+  if (!mXSLTProcessor) {
+    if (content == mDocElement) {
+      NotifyDocElementCreated(mDocument);
 
-  if (content == mDocElement) {
-    NotifyDocElementCreated(mDocument);
-
-    if (aInterruptable && NS_SUCCEEDED(result) && mParser && !mParser->IsParserEnabled()) {
-      return NS_ERROR_HTMLPARSER_BLOCK;
+      if (aInterruptable && NS_SUCCEEDED(result) && mParser && !mParser->IsParserEnabled()) {
+        return NS_ERROR_HTMLPARSER_BLOCK;
+      }
+    } else if (!mCurrentHead) {
+      // This isn't the root and we're not inside an XHTML <head>.
+      // Might need to start layout
+      MaybeStartLayout(false);
     }
   }
 
   return aInterruptable && NS_SUCCEEDED(result) ? DidProcessATokenImpl() :
                                                   result;
 }
 
 NS_IMETHODIMP
@@ -1170,19 +1178,19 @@ nsXMLContentSink::HandleDoctypeDecl(cons
   }
 
   MOZ_ASSERT(!aCatalogData, "Need to add back support for catalog style "
                             "sheets");
 
   nsCOMPtr<nsIContent> content = do_QueryInterface(docType);
   NS_ASSERTION(content, "doctype isn't content?");
 
-  rv = mDocument->AppendChildTo(content, false);
+  mDocumentChildren.AppendElement(content);
   DidAddContent();
-  return NS_SUCCEEDED(rv) ? DidProcessATokenImpl() : rv;
+  return DidProcessATokenImpl();
 }
 
 NS_IMETHODIMP
 nsXMLContentSink::HandleCharacterData(const char16_t *aData,
                                       uint32_t aLength)
 {
   return HandleCharacterData(aData, aLength, true);
 }
@@ -1322,16 +1330,17 @@ nsXMLContentSink::ReportError(const char
 
   // XXX need to stop scripts here -- hsivonen
 
   // stop observing in order to avoid crashing when removing content
   mDocument->RemoveObserver(this);
   mIsDocumentObserver = false;
 
   // Clear the current content
+  mDocumentChildren.Clear();
   nsCOMPtr<nsIDOMNode> node(do_QueryInterface(mDocument));
   if (node) {
     for (;;) {
       nsCOMPtr<nsIDOMNode> child, dummy;
       node->GetLastChild(getter_AddRefs(child));
       if (!child)
         break;
       node->RemoveChild(child, getter_AddRefs(dummy));
--- a/dom/xml/nsXMLContentSink.h
+++ b/dom/xml/nsXMLContentSink.h
@@ -134,17 +134,17 @@ protected:
   bool HaveNotifiedForCurrentContent() const;
 
   nsresult FlushTags() override;
 
   void UpdateChildCounts() override;
 
   void DidAddContent()
   {
-    if (IsTimeToNotify()) {
+    if (!mXSLTProcessor && IsTimeToNotify()) {
       FlushTags();
     }
   }
 
   // nsContentSink override
   virtual nsresult ProcessStyleLink(nsIContent* aElement,
                                     const nsAString& aHref,
                                     bool aAlternate,
@@ -186,14 +186,20 @@ protected:
                                 // decided we should in fact prettyprint.
   // True to call prevent script execution in the fragment mode.
   uint8_t mPreventScriptExecution : 1;
 
   nsTArray<StackNode>              mContentStack;
 
   nsCOMPtr<nsIDocumentTransformer> mXSLTProcessor;
 
+  // Holds the children in the prolog until the root element is added, after which they're
+  // inserted in the document. However, if we're doing an XSLT transform this will
+  // actually hold all the children of the source document, until the transform is
+  // finished. After the transform is finished we'll just discard the children. 
+  nsTArray<nsCOMPtr<nsIContent>> mDocumentChildren;
+
   static const int NS_ACCUMULATION_BUFFER_SIZE = 4096;
   // Our currently accumulated text that we have not flushed to a textnode yet.
   char16_t mText[NS_ACCUMULATION_BUFFER_SIZE];
 };
 
 #endif // nsXMLContentSink_h__
--- a/dom/xslt/base/txURIUtils.cpp
+++ b/dom/xslt/base/txURIUtils.cpp
@@ -38,26 +38,19 @@ void URIUtils::resolveHref(const nsAStri
     if (NS_SUCCEEDED(result)) {
         NS_MakeAbsoluteURI(resultHref, href, pURL);
         dest.Append(resultHref);
     }
 } //-- resolveHref
 
 // static
 void
-URIUtils::ResetWithSource(nsIDocument *aNewDoc, nsIDOMNode *aSourceNode)
+URIUtils::ResetWithSource(nsIDocument *aNewDoc, nsINode *aSourceNode)
 {
-    nsCOMPtr<nsINode> node = do_QueryInterface(aSourceNode);
-    if (!node) {
-        // XXXbz passing nullptr as the first arg to Reset is illegal
-        aNewDoc->Reset(nullptr, nullptr);
-        return;
-    }
-
-    nsCOMPtr<nsIDocument> sourceDoc = node->OwnerDoc();
+    nsCOMPtr<nsIDocument> sourceDoc = aSourceNode->OwnerDoc();
     nsIPrincipal* sourcePrincipal = sourceDoc->NodePrincipal();
 
     // Copy the channel and loadgroup from the source document.
     nsCOMPtr<nsILoadGroup> loadGroup = sourceDoc->GetDocumentLoadGroup();
     nsCOMPtr<nsIChannel> channel = sourceDoc->GetChannel();
     if (!channel) {
         // Need to synthesize one
         nsresult rv = NS_NewChannel(getter_AddRefs(channel),
--- a/dom/xslt/base/txURIUtils.h
+++ b/dom/xslt/base/txURIUtils.h
@@ -4,30 +4,30 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef TRANSFRMX_URIUTILS_H
 #define TRANSFRMX_URIUTILS_H
 
 #include "txCore.h"
 
 class nsIDocument;
-class nsIDOMNode;
+class nsINode;
 
 /**
  * A utility class for URI handling
  * Not yet finished, only handles file URI at this point
 **/
 
 class URIUtils {
 public:
 
     /**
      * Reset the given document with the document of the source node
      */
-    static void ResetWithSource(nsIDocument *aNewDoc, nsIDOMNode *aSourceNode);
+    static void ResetWithSource(nsIDocument *aNewDoc, nsINode *aSourceNode);
 
     /**
      * Resolves the given href argument, using the given documentBase
      * if necessary.
      * The new resolved href will be appended to the given dest String
     **/
     static void resolveHref(const nsAString& href, const nsAString& base,
                             nsAString& dest);
--- a/dom/xslt/nsIDocumentTransformer.h
+++ b/dom/xslt/nsIDocumentTransformer.h
@@ -3,19 +3,22 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef nsIDocumentTransformer_h__
 #define nsIDocumentTransformer_h__
 
 #include "nsISupports.h"
 #include "nsStringFwd.h"
 
+template<class> class nsCOMPtr;
+class nsIContent;
 class nsIDocument;
 class nsIDOMNode;
 class nsIURI;
+template<class> class nsTArray;
 
 #define NS_ITRANSFORMOBSERVER_IID \
 { 0x04b2d17c, 0xe98d, 0x45f5, \
   { 0x9a, 0x67, 0xb7, 0x01, 0x19, 0x59, 0x7d, 0xe7 } }
 
 class nsITransformObserver : public nsISupports
 {
 public:
@@ -38,17 +41,18 @@ NS_DEFINE_STATIC_IID_ACCESSOR(nsITransfo
 class nsIDocumentTransformer : public nsISupports
 {
 public:
 
   NS_DECLARE_STATIC_IID_ACCESSOR(NS_IDOCUMENTTRANSFORMER_IID)
 
   NS_IMETHOD SetTransformObserver(nsITransformObserver* aObserver) = 0;
   NS_IMETHOD LoadStyleSheet(nsIURI* aUri, nsIDocument* aLoaderDocument) = 0;
-  NS_IMETHOD SetSourceContentModel(nsIDOMNode* aSource) = 0;
+  NS_IMETHOD SetSourceContentModel(nsIDocument* aDocument,
+                                   const nsTArray<nsCOMPtr<nsIContent>>& aSource) = 0;
   NS_IMETHOD CancelLoads() = 0;
 
   NS_IMETHOD AddXSLTParamNamespace(const nsString& aPrefix,
                                    const nsString& aNamespace) = 0;
   NS_IMETHOD AddXSLTParam(const nsString& aName,
                           const nsString& aNamespace,
                           const nsString& aValue,
                           const nsString& aSelect,
--- a/dom/xslt/xslt/txExecutionState.cpp
+++ b/dom/xslt/xslt/txExecutionState.cpp
@@ -12,27 +12,34 @@
 #include "txXSLTProcessor.h"
 #include "txLog.h"
 #include "txURIUtils.h"
 #include "txXMLParser.h"
 
 const int32_t txExecutionState::kMaxRecursionDepth = 20000;
 
 nsresult
-txLoadedDocumentsHash::init(txXPathNode* aSourceDocument)
+txLoadedDocumentsHash::init(const txXPathNode& aSource)
 {
-    mSourceDocument = aSourceDocument;
+    mSourceDocument = txXPathNodeUtils::getOwnerDocument(aSource);
 
     nsAutoString baseURI;
     nsresult rv = txXPathNodeUtils::getBaseURI(*mSourceDocument, baseURI);
     if (NS_WARN_IF(NS_FAILED(rv))) {
         return rv;
     }
 
-    PutEntry(baseURI)->mDocument = mSourceDocument;
+    // Technically the hash holds documents, but we allow any node that we're transforming
+    // from. In particular, the document() function uses this hash and it can return the
+    // source document, but if we're transforming from a document fragment (through
+    // txMozillaXSLTProcessor::SetSourceContentModel/txMozillaXSLTProcessor::DoTransform)
+    // or from another type of node (through txMozillaXSLTProcessor::TransformToDocument
+    // or txMozillaXSLTProcessor::TransformToFragment) it makes more sense to return the
+    // real root of the source tree, which is the node where the transform started.
+    PutEntry(baseURI)->mDocument = txXPathNativeNode::createXPathNode(txXPathNativeNode::getNode(aSource));
     return NS_OK;
 }
 
 txLoadedDocumentsHash::~txLoadedDocumentsHash()
 {
     if (mSourceDocument) {
         nsAutoString baseURI;
         nsresult rv = txXPathNodeUtils::getBaseURI(*mSourceDocument, baseURI);
@@ -111,17 +118,17 @@ txExecutionState::init(const txXPathNode
         createHandlerWith(mStylesheet->getOutputFormat(), &handler);
     NS_ENSURE_SUCCESS(rv, rv);
 
     mOutputHandler = handler;
     mResultHandler = handler;
     mOutputHandler->startDocument();
 
     // Set up loaded-documents-hash
-    rv = mLoadedDocuments.init(txXPathNodeUtils::getOwnerDocument(aNode));
+    rv = mLoadedDocuments.init(aNode);
     NS_ENSURE_SUCCESS(rv, rv);
 
     // Init members
     rv = mKeyHash.init();
     NS_ENSURE_SUCCESS(rv, rv);
 
     mRecycler = new txResultRecycler;
 
--- a/dom/xslt/xslt/txExecutionState.h
+++ b/dom/xslt/xslt/txExecutionState.h
@@ -51,26 +51,25 @@ public:
     nsAutoPtr<txXPathNode> mDocument;
     nsresult mLoadResult;
 };
 
 class txLoadedDocumentsHash : public nsTHashtable<txLoadedDocumentEntry>
 {
 public:
     txLoadedDocumentsHash()
-        : nsTHashtable<txLoadedDocumentEntry>(4),
-          mSourceDocument(nullptr)
+        : nsTHashtable<txLoadedDocumentEntry>(4)
     {
     }
     ~txLoadedDocumentsHash();
-    MOZ_MUST_USE nsresult init(txXPathNode* aSourceDocument);
+    MOZ_MUST_USE nsresult init(const txXPathNode& aSource);
 
 private:
     friend class txExecutionState;
-    txXPathNode* mSourceDocument;
+    nsAutoPtr<txXPathNode> mSourceDocument;
 };
 
 
 class txExecutionState : public txIMatchContext
 {
 public:
     txExecutionState(txStylesheet* aStylesheet, bool aDisableLoads);
     ~txExecutionState();
--- a/dom/xslt/xslt/txMozillaStylesheetCompiler.cpp
+++ b/dom/xslt/xslt/txMozillaStylesheetCompiler.cpp
@@ -625,18 +625,17 @@ txSyncCompileObserver::loadURI(const nsA
     nsCOMPtr<nsIPrincipal> referrerPrincipal =
       BasePrincipal::CreateCodebasePrincipal(referrerUri, OriginAttributes());
     NS_ENSURE_TRUE(referrerPrincipal, NS_ERROR_FAILURE);
 
     // This is probably called by js, a loadGroup for the channel doesn't
     // make sense.
     nsCOMPtr<nsINode> source;
     if (mProcessor) {
-      source =
-        do_QueryInterface(mProcessor->GetSourceContentModel());
+      source = mProcessor->GetSourceContentModel();
     }
     nsAutoSyncOperation sync(source ? source->OwnerDoc() : nullptr);
     nsCOMPtr<nsIDOMDocument> document;
 
     rv = nsSyncLoadService::LoadDocument(uri, nsIContentPolicy::TYPE_XSLT,
                                          referrerPrincipal,
                                          nsILoadInfo::SEC_REQUIRE_CORS_DATA_INHERITS,
                                          nullptr, false,
--- a/dom/xslt/xslt/txMozillaTextOutput.cpp
+++ b/dom/xslt/xslt/txMozillaTextOutput.cpp
@@ -116,17 +116,17 @@ txMozillaTextOutput::processingInstructi
 
 nsresult
 txMozillaTextOutput::startDocument()
 {
     return NS_OK;
 }
 
 nsresult
-txMozillaTextOutput::createResultDocument(nsIDOMDocument* aSourceDocument,
+txMozillaTextOutput::createResultDocument(nsIDocument* aSourceDocument,
                                           bool aLoadedAsData)
 {
     /*
      * Create an XHTML document to hold the text.
      *
      * <html>
      *   <head />
      *   <body>
@@ -143,21 +143,19 @@ txMozillaTextOutput::createResultDocumen
     // Create the document
     nsresult rv = NS_NewXMLDocument(getter_AddRefs(mDocument),
                                     aLoadedAsData);
     NS_ENSURE_SUCCESS(rv, rv);
     // This should really be handled by nsIDocument::BeginLoad
     MOZ_ASSERT(mDocument->GetReadyStateEnum() ==
                nsIDocument::READYSTATE_UNINITIALIZED, "Bad readyState");
     mDocument->SetReadyStateInternal(nsIDocument::READYSTATE_LOADING);
-    nsCOMPtr<nsIDocument> source = do_QueryInterface(aSourceDocument);
-    NS_ENSURE_STATE(source);
     bool hasHadScriptObject = false;
     nsIScriptGlobalObject* sgo =
-      source->GetScriptHandlingObject(hasHadScriptObject);
+      aSourceDocument->GetScriptHandlingObject(hasHadScriptObject);
     NS_ENSURE_STATE(sgo || !hasHadScriptObject);
 
     NS_ASSERTION(mDocument, "Need document");
 
     // Reset and set up document
     URIUtils::ResetWithSource(mDocument, aSourceDocument);
     // Only do this after resetting the document to ensure we have the
     // correct principal.
--- a/dom/xslt/xslt/txMozillaTextOutput.h
+++ b/dom/xslt/xslt/txMozillaTextOutput.h
@@ -6,33 +6,32 @@
 #ifndef TRANSFRMX_MOZILLA_TEXT_OUTPUT_H
 #define TRANSFRMX_MOZILLA_TEXT_OUTPUT_H
 
 #include "txXMLEventHandler.h"
 #include "nsCOMPtr.h"
 #include "nsWeakPtr.h"
 #include "txOutputFormat.h"
 
-class nsIDOMDocument;
 class nsIDOMDocumentFragment;
 class nsITransformObserver;
 class nsIDocument;
 class nsIContent;
 
 class txMozillaTextOutput : public txAOutputXMLEventHandler
 {
 public:
     explicit txMozillaTextOutput(nsITransformObserver* aObserver);
     explicit txMozillaTextOutput(nsIDOMDocumentFragment* aDest);
     virtual ~txMozillaTextOutput();
 
     TX_DECL_TXAXMLEVENTHANDLER
     TX_DECL_TXAOUTPUTXMLEVENTHANDLER
 
-    nsresult createResultDocument(nsIDOMDocument* aSourceDocument,
+    nsresult createResultDocument(nsIDocument* aSourceDocument,
                                   bool aLoadedAsData);
 
 private:
     nsresult createXHTMLElement(nsAtom* aName, nsIContent** aResult);
 
     nsCOMPtr<nsIContent> mTextParent;
     nsWeakPtr mObserver;
     nsCOMPtr<nsIDocument> mDocument;
--- a/dom/xslt/xslt/txMozillaXMLOutput.cpp
+++ b/dom/xslt/xslt/txMozillaXMLOutput.cpp
@@ -772,17 +772,17 @@ void txMozillaXMLOutput::processHTTPEqui
     // For now we only handle "refresh". There's a longer list in
     // HTMLContentSink::ProcessHeaderData
     if (aHeader == nsGkAtoms::refresh)
         LossyCopyUTF16toASCII(aValue, mRefreshString);
 }
 
 nsresult
 txMozillaXMLOutput::createResultDocument(const nsAString& aName, int32_t aNsID,
-                                         nsIDOMDocument* aSourceDocument,
+                                         nsIDocument* aSourceDocument,
                                          bool aLoadedAsData)
 {
     nsresult rv;
 
     // Create the document
     if (mOutputFormat.mMethod == eHTMLOutput) {
         rv = NS_NewHTMLDocument(getter_AddRefs(mDocument),
                                 aLoadedAsData);
@@ -795,21 +795,19 @@ txMozillaXMLOutput::createResultDocument
                                aLoadedAsData);
         NS_ENSURE_SUCCESS(rv, rv);
     }
     // This should really be handled by nsIDocument::BeginLoad
     MOZ_ASSERT(mDocument->GetReadyStateEnum() ==
                nsIDocument::READYSTATE_UNINITIALIZED, "Bad readyState");
     mDocument->SetReadyStateInternal(nsIDocument::READYSTATE_LOADING);
     mDocument->SetMayStartLayout(false);
-    nsCOMPtr<nsIDocument> source = do_QueryInterface(aSourceDocument);
-    NS_ENSURE_STATE(source);
     bool hasHadScriptObject = false;
     nsIScriptGlobalObject* sgo =
-      source->GetScriptHandlingObject(hasHadScriptObject);
+      aSourceDocument->GetScriptHandlingObject(hasHadScriptObject);
     NS_ENSURE_STATE(sgo || !hasHadScriptObject);
 
     mCurrentNode = mDocument;
     mNodeInfoManager = mDocument->NodeInfoManager();
 
     // Reset and set up the document
     URIUtils::ResetWithSource(mDocument, aSourceDocument);
 
--- a/dom/xslt/xslt/txMozillaXMLOutput.h
+++ b/dom/xslt/xslt/txMozillaXMLOutput.h
@@ -68,17 +68,17 @@ public:
     ~txMozillaXMLOutput();
 
     TX_DECL_TXAXMLEVENTHANDLER
     TX_DECL_TXAOUTPUTXMLEVENTHANDLER
 
     nsresult closePrevious(bool aFlushText);
 
     nsresult createResultDocument(const nsAString& aName, int32_t aNsID,
-                                  nsIDOMDocument* aSourceDocument,
+                                  nsIDocument* aSourceDocument,
                                   bool aLoadedAsData);
 
 private:
     nsresult createTxWrapper();
     nsresult startHTMLElement(nsIContent* aElement, bool aXHTML);
     nsresult endHTMLElement(nsIContent* aElement);
     void processHTTPEquiv(nsAtom* aHeader, const nsString& aValue);
     nsresult createHTMLElement(nsAtom* aName,
--- a/dom/xslt/xslt/txMozillaXSLTProcessor.cpp
+++ b/dom/xslt/xslt/txMozillaXSLTProcessor.cpp
@@ -45,29 +45,29 @@ static NS_DEFINE_CID(kXMLDocumentCID, NS
 
 /**
  * Output Handler Factories
  */
 class txToDocHandlerFactory : public txAOutputHandlerFactory
 {
 public:
     txToDocHandlerFactory(txExecutionState* aEs,
-                          nsIDOMDocument* aSourceDocument,
+                          nsIDocument* aSourceDocument,
                           nsITransformObserver* aObserver,
                           bool aDocumentIsData)
         : mEs(aEs), mSourceDocument(aSourceDocument), mObserver(aObserver),
           mDocumentIsData(aDocumentIsData)
     {
     }
 
     TX_DECL_TXAOUTPUTHANDLERFACTORY
 
 private:
     txExecutionState* mEs;
-    nsCOMPtr<nsIDOMDocument> mSourceDocument;
+    nsCOMPtr<nsIDocument> mSourceDocument;
     nsCOMPtr<nsITransformObserver> mObserver;
     bool mDocumentIsData;
 };
 
 class txToFragmentHandlerFactory : public txAOutputHandlerFactory
 {
 public:
     explicit txToFragmentHandlerFactory(nsIDOMDocumentFragment* aFragment)
@@ -372,25 +372,37 @@ txMozillaXSLTProcessor::~txMozillaXSLTPr
 NS_IMETHODIMP
 txMozillaXSLTProcessor::SetTransformObserver(nsITransformObserver* aObserver)
 {
     mObserver = aObserver;
     return NS_OK;
 }
 
 nsresult
-txMozillaXSLTProcessor::SetSourceContentModel(nsIDOMNode* aSourceDOM)
+txMozillaXSLTProcessor::SetSourceContentModel(nsIDocument* aDocument,
+                                              const nsTArray<nsCOMPtr<nsIContent>>& aSource)
 {
-    mSource = aSourceDOM;
-
     if (NS_FAILED(mTransformResult)) {
         notifyError();
         return NS_OK;
     }
 
+    mSource = aDocument->CreateDocumentFragment();
+
+    ErrorResult rv;
+    for (nsIContent* child : aSource) {
+        // XPath data model doesn't have DocumentType nodes.
+        if (child->NodeType() != nsIDOMNode::DOCUMENT_TYPE_NODE) {
+            mSource->AppendChild(*child, rv);
+            if (rv.Failed()) {
+                return rv.StealNSResult();
+            }
+        }
+    }
+
     if (mStylesheet) {
         return DoTransform();
     }
 
     return NS_OK;
 }
 
 NS_IMETHODIMP
@@ -547,18 +559,17 @@ public:
 
   explicit nsTransformBlockerEvent(txMozillaXSLTProcessor* processor)
     : mozilla::Runnable("nsTransformBlockerEvent")
     , mProcessor(processor)
   {}
 
   ~nsTransformBlockerEvent()
   {
-    nsCOMPtr<nsIDocument> document =
-        do_QueryInterface(mProcessor->GetSourceContentModel());
+    nsCOMPtr<nsIDocument> document = mProcessor->GetSourceContentModel()->OwnerDoc();
     document->UnblockOnload(true);
   }
 
   NS_IMETHOD Run() override
   {
     mProcessor->TransformToDoc(nullptr, false);
     return NS_OK;
   }
@@ -567,23 +578,19 @@ public:
 nsresult
 txMozillaXSLTProcessor::DoTransform()
 {
     NS_ENSURE_TRUE(mSource, NS_ERROR_UNEXPECTED);
     NS_ENSURE_TRUE(mStylesheet, NS_ERROR_UNEXPECTED);
     NS_ASSERTION(mObserver, "no observer");
     NS_ASSERTION(NS_IsMainThread(), "should only be on main thread");
 
-    nsresult rv;
-    nsCOMPtr<nsIDocument> document = do_QueryInterface(mSource, &rv);
-    NS_ENSURE_SUCCESS(rv, rv);
-
     nsCOMPtr<nsIRunnable> event = new nsTransformBlockerEvent(this);
-    document->BlockOnload();
-    rv = NS_DispatchToCurrentThread(event);
+    mSource->OwnerDoc()->BlockOnload();
+    nsresult rv = NS_DispatchToCurrentThread(event);
     if (NS_FAILED(rv)) {
         // XXX Maybe we should just display the source document in this case?
         //     Also, set up context information, see bug 204655.
         reportError(rv, nullptr, nullptr);
     }
 
     return rv;
 }
@@ -638,42 +645,36 @@ txMozillaXSLTProcessor::TransformToDocum
 
     if (!nsContentUtils::CanCallerAccess(aSource)) {
         return NS_ERROR_DOM_SECURITY_ERR;
     }
 
     nsresult rv = ensureStylesheet();
     NS_ENSURE_SUCCESS(rv, rv);
 
-    mSource = aSource;
+    mSource = do_QueryInterface(aSource);
 
     return TransformToDoc(aResult, true);
 }
 
 nsresult
 txMozillaXSLTProcessor::TransformToDoc(nsIDOMDocument **aResult,
                                        bool aCreateDataDocument)
 {
     nsAutoPtr<txXPathNode> sourceNode(txXPathNativeNode::createXPathNode(mSource));
     if (!sourceNode) {
         return NS_ERROR_OUT_OF_MEMORY;
     }
 
-    nsCOMPtr<nsIDOMDocument> sourceDOMDocument;
-    mSource->GetOwnerDocument(getter_AddRefs(sourceDOMDocument));
-    if (!sourceDOMDocument) {
-        sourceDOMDocument = do_QueryInterface(mSource);
-    }
-
     txExecutionState es(mStylesheet, IsLoadDisabled());
 
     // XXX Need to add error observers
 
     // If aResult is non-null, we're a data document
-    txToDocHandlerFactory handlerFactory(&es, sourceDOMDocument, mObserver,
+    txToDocHandlerFactory handlerFactory(&es, mSource->OwnerDoc(), mObserver,
                                          aCreateDataDocument);
     es.mOutputHandlerFactory = &handlerFactory;
 
     nsresult rv = es.init(*sourceNode, &mVariables);
 
     // Process root of XML source document
     if (NS_SUCCEEDED(rv)) {
         rv = txXSLTProcessor::execute(es);
--- a/dom/xslt/xslt/txMozillaXSLTProcessor.h
+++ b/dom/xslt/xslt/txMozillaXSLTProcessor.h
@@ -70,17 +70,18 @@ public:
     NS_DECL_NSIXSLTPROCESSOR
 
     // nsIXSLTProcessorPrivate interface
     NS_DECL_NSIXSLTPROCESSORPRIVATE
 
     // nsIDocumentTransformer interface
     NS_IMETHOD SetTransformObserver(nsITransformObserver* aObserver) override;
     NS_IMETHOD LoadStyleSheet(nsIURI* aUri, nsIDocument* aLoaderDocument) override;
-    NS_IMETHOD SetSourceContentModel(nsIDOMNode* aSource) override;
+    NS_IMETHOD SetSourceContentModel(nsIDocument* aDocument,
+                                     const nsTArray<nsCOMPtr<nsIContent>>& aSource) override;
     NS_IMETHOD CancelLoads() override {return NS_OK;}
     NS_IMETHOD AddXSLTParamNamespace(const nsString& aPrefix,
                                      const nsString& aNamespace) override;
     NS_IMETHOD AddXSLTParam(const nsString& aName,
                             const nsString& aNamespace,
                             const nsString& aSelect,
                             const nsString& aValue,
                             nsIDOMNode* aContext) override;
@@ -131,17 +132,17 @@ public:
 
     uint32_t Flags(mozilla::dom::SystemCallerGuarantee);
     void SetFlags(uint32_t aFlags, mozilla::dom::SystemCallerGuarantee);
 
     nsresult setStylesheet(txStylesheet* aStylesheet);
     void reportError(nsresult aResult, const char16_t *aErrorText,
                      const char16_t *aSourceText);
 
-    nsIDOMNode *GetSourceContentModel()
+    nsINode *GetSourceContentModel()
     {
         return mSource;
     }
 
     nsresult TransformToDoc(nsIDOMDocument **aResult,
                             bool aCreateDataDocument);
 
     bool IsLoadDisabled()
@@ -164,17 +165,17 @@ private:
     nsresult ensureStylesheet();
 
     nsCOMPtr<nsISupports> mOwner;
 
     RefPtr<txStylesheet> mStylesheet;
     nsIDocument* mStylesheetDocument; // weak
     nsCOMPtr<nsIContent> mEmbeddedStylesheetRoot;
 
-    nsCOMPtr<nsIDOMNode> mSource;
+    nsCOMPtr<nsINode> mSource;
     nsresult mTransformResult;
     nsresult mCompileResult;
     nsString mErrorText, mSourceText;
     nsCOMPtr<nsITransformObserver> mObserver;
     txOwningExpandedNameMap<txIGlobalParameter> mVariables;
     txNamespaceMap mParamNamespaceMap;
     RefPtr<txResultRecycler> mRecycler;
 
--- a/gfx/layers/ipc/SharedSurfacesChild.cpp
+++ b/gfx/layers/ipc/SharedSurfacesChild.cpp
@@ -15,16 +15,41 @@
 namespace mozilla {
 namespace layers {
 
 using namespace mozilla::gfx;
 
 class SharedSurfacesChild::ImageKeyData final
 {
 public:
+  ImageKeyData(WebRenderLayerManager* aManager,
+               const wr::ImageKey& aImageKey,
+               uint32_t aGenerationId)
+    : mManager(aManager)
+    , mImageKey(aImageKey)
+    , mGenerationId(aGenerationId)
+  { }
+
+  ImageKeyData(ImageKeyData&& aOther)
+    : mManager(Move(aOther.mManager))
+    , mImageKey(aOther.mImageKey)
+    , mGenerationId(aOther.mGenerationId)
+  { }
+
+  ImageKeyData& operator=(ImageKeyData&& aOther)
+  {
+    mManager = Move(aOther.mManager);
+    mImageKey = aOther.mImageKey;
+    mGenerationId = aOther.mGenerationId;
+    return *this;
+  }
+
+  ImageKeyData(const ImageKeyData&) = delete;
+  ImageKeyData& operator=(const ImageKeyData&) = delete;
+
   RefPtr<WebRenderLayerManager> mManager;
   wr::ImageKey mImageKey;
   uint32_t mGenerationId;
 };
 
 class SharedSurfacesChild::SharedUserData final
 {
 public:
@@ -42,17 +67,17 @@ public:
       } else {
         class DestroyRunnable final : public Runnable
         {
         public:
           DestroyRunnable(const wr::ExternalImageId& aId,
                           nsTArray<ImageKeyData>&& aKeys)
             : Runnable("SharedSurfacesChild::SharedUserData::DestroyRunnable")
             , mId(aId)
-            , mKeys(aKeys)
+            , mKeys(Move(aKeys))
           { }
 
           NS_IMETHOD Run() override
           {
             SharedSurfacesChild::Unshare(mId, mKeys);
             return NS_OK;
           }
 
@@ -120,17 +145,18 @@ public:
           aResources.AddExternalImage(mId, entry.mImageKey);
         }
         key = entry.mImageKey;
       }
     }
 
     if (!found) {
       key = aManager->WrBridge()->GetNextImageKey();
-      mKeys.AppendElement(ImageKeyData { aManager, key, aGenerationId });
+      ImageKeyData data(aManager, key, aGenerationId);
+      mKeys.AppendElement(Move(data));
       aResources.AddExternalImage(mId, key);
     }
 
     return key;
   }
 
 private:
   AutoTArray<ImageKeyData, 1> mKeys;
@@ -268,16 +294,21 @@ SharedSurfacesChild::Share(ImageContaine
 
 /* static */ void
 SharedSurfacesChild::Unshare(const wr::ExternalImageId& aId,
                              nsTArray<ImageKeyData>& aKeys)
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   for (const auto& entry : aKeys) {
+    if (entry.mManager->IsDestroyed()) {
+      continue;
+    }
+
+    entry.mManager->AddImageKeyForDiscard(entry.mImageKey);
     WebRenderBridgeChild* wrBridge = entry.mManager->WrBridge();
     if (wrBridge) {
       wrBridge->DeallocExternalImageId(aId);
     }
   }
 
   CompositorManagerChild* manager = CompositorManagerChild::GetInstance();
   if (MOZ_UNLIKELY(!manager || !manager->CanSend())) {
--- a/gfx/layers/wr/WebRenderUserData.cpp
+++ b/gfx/layers/wr/WebRenderUserData.cpp
@@ -141,36 +141,33 @@ WebRenderImageData::UpdateImageKey(Image
 
   // Reuse old key if generation is not updated.
   if (!aFallback && oldCounter == imageClient->GetLastUpdateGenerationCounter() && mKey) {
     return mKey;
   }
 
   // Delete old key, we are generating a new key.
   // TODO(nical): noooo... we need to reuse image keys.
-  if (mKey) {
-    mWRManager->AddImageKeyForDiscard(mKey.value());
-  }
+  ClearImageKey();
 
   key = WrBridge()->GetNextImageKey();
   aResources.AddExternalImage(mExternalImageId.value(), key);
   mKey = Some(key);
   mOwnsKey = true;
 
   return mKey;
 }
 
 void
 WebRenderImageData::SetKey(const wr::ImageKey& aKey)
 {
-  if (mKey) {
-    MOZ_ASSERT(mKey.value() != aKey);
-    mWRManager->AddImageKeyForDiscard(mKey.value());
-  }
+  MOZ_ASSERT_IF(mKey, mKey.value() != aKey);
+  ClearImageKey();
   mKey = Some(aKey);
+  mOwnsKey = true;
 }
 
 already_AddRefed<ImageClient>
 WebRenderImageData::GetImageClient()
 {
   RefPtr<ImageClient> imageClient = mImageClient;
   return imageClient.forget();
 }
deleted file mode 100644
--- a/js/src/gc/DeletePolicy.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef gc_DeletePolicy_h
-#define gc_DeletePolicy_h
-
-#include "js/TracingAPI.h"
-
-namespace js {
-namespace gc {
-
-struct ClearEdgesTracer : public JS::CallbackTracer
-{
-    ClearEdgesTracer();
-
-#ifdef DEBUG
-    TracerKind getTracerKind() const override { return TracerKind::ClearEdges; }
-#endif
-
-    template <typename T>
-    inline void clearEdge(T** thingp);
-
-    void onObjectEdge(JSObject** objp) override;
-    void onStringEdge(JSString** strp) override;
-    void onSymbolEdge(JS::Symbol** symp) override;
-    void onScriptEdge(JSScript** scriptp) override;
-    void onShapeEdge(js::Shape** shapep) override;
-    void onObjectGroupEdge(js::ObjectGroup** groupp) override;
-    void onBaseShapeEdge(js::BaseShape** basep) override;
-    void onJitCodeEdge(js::jit::JitCode** codep) override;
-    void onLazyScriptEdge(js::LazyScript** lazyp) override;
-    void onScopeEdge(js::Scope** scopep) override;
-    void onRegExpSharedEdge(js::RegExpShared** sharedp) override;
-    void onChild(const JS::GCCellPtr& thing) override;
-};
-
-#ifdef DEBUG
-inline bool
-IsClearEdgesTracer(JSTracer *trc)
-{
-    return trc->isCallbackTracer() &&
-           trc->asCallbackTracer()->getTracerKind() == JS::CallbackTracer::TracerKind::ClearEdges;
-}
-#endif
-
-} // namespace gc
-
-/*
- * Provides a delete policy that can be used for objects which have their
- * lifetime managed by the GC so they can be safely destroyed outside of GC.
- *
- * This is necessary for example when initializing such an object may fail after
- * the initial allocation. The partially-initialized object must be destroyed,
- * but it may not be safe to do so at the current time as the store buffer may
- * contain pointers into it.
- *
- * This policy traces GC pointers in the object and clears them, making sure to
- * trigger barriers while doing so. This will remove any store buffer pointers
- * into the object and make it safe to delete.
- */
-template <typename T>
-struct GCManagedDeletePolicy
-{
-    void operator()(const T* constPtr) {
-        if (constPtr) {
-            auto ptr = const_cast<T*>(constPtr);
-            gc::ClearEdgesTracer trc;
-            ptr->trace(&trc);
-            js_delete(ptr);
-        }
-    }
-};
-
-} // namespace js
-
-#endif // gc_DeletePolicy_h
--- a/js/src/gc/Zone.h
+++ b/js/src/gc/Zone.h
@@ -4,30 +4,31 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef gc_Zone_h
 #define gc_Zone_h
 
 #include "mozilla/Atomics.h"
 #include "mozilla/HashFunctions.h"
+#include "mozilla/MemoryReporting.h"
 
+#include "jscntxt.h"
+
+#include "ds/SplayTree.h"
 #include "gc/FindSCCs.h"
 #include "gc/GCRuntime.h"
 #include "js/GCHashTable.h"
+#include "js/TracingAPI.h"
 #include "vm/MallocProvider.h"
 #include "vm/RegExpShared.h"
-#include "vm/Runtime.h"
-
-struct JSContext;
+#include "vm/TypeInference.h"
 
 namespace js {
 
-class Debugger;
-
 namespace jit {
 class JitZone;
 } // namespace jit
 
 namespace gc {
 
 class GCSchedulingState;
 class GCSchedulingTunables;
@@ -981,11 +982,97 @@ ZoneAllocPolicy::pod_calloc(size_t numEl
 
 template <typename T>
 inline T*
 ZoneAllocPolicy::pod_realloc(T* p, size_t oldSize, size_t newSize)
 {
     return zone->pod_realloc<T>(p, oldSize, newSize);
 }
 
+/*
+ * Provides a delete policy that can be used for objects which have their
+ * lifetime managed by the GC so they can be safely destroyed outside of GC.
+ *
+ * This is necessary for example when initializing such an object may fail after
+ * the initial allocation. The partially-initialized object must be destroyed,
+ * but it may not be safe to do so at the current time as the store buffer may
+ * contain pointers into it.
+ *
+ * This policy traces GC pointers in the object and clears them, making sure to
+ * trigger barriers while doing so. This will remove any store buffer pointers
+ * into the object and make it safe to delete.
+ */
+template <typename T>
+struct GCManagedDeletePolicy
+{
+    struct ClearEdgesTracer : public JS::CallbackTracer
+    {
+        explicit ClearEdgesTracer(JSContext* cx) : CallbackTracer(cx, TraceWeakMapKeysValues) {}
+#ifdef DEBUG
+        TracerKind getTracerKind() const override { return TracerKind::ClearEdges; }
+#endif
+
+        template <typename S>
+        void clearEdge(S** thingp) {
+            InternalBarrierMethods<S*>::preBarrier(*thingp);
+            InternalBarrierMethods<S*>::postBarrier(thingp, *thingp, nullptr);
+            *thingp = nullptr;
+        }
+
+        void onObjectEdge(JSObject** objp) override { clearEdge(objp); }
+        void onStringEdge(JSString** strp) override { clearEdge(strp); }
+        void onSymbolEdge(JS::Symbol** symp) override { clearEdge(symp); }
+        void onScriptEdge(JSScript** scriptp) override { clearEdge(scriptp); }
+        void onShapeEdge(js::Shape** shapep) override { clearEdge(shapep); }
+        void onObjectGroupEdge(js::ObjectGroup** groupp) override { clearEdge(groupp); }
+        void onBaseShapeEdge(js::BaseShape** basep) override { clearEdge(basep); }
+        void onJitCodeEdge(js::jit::JitCode** codep) override { clearEdge(codep); }
+        void onLazyScriptEdge(js::LazyScript** lazyp) override { clearEdge(lazyp); }
+        void onScopeEdge(js::Scope** scopep) override { clearEdge(scopep); }
+        void onRegExpSharedEdge(js::RegExpShared** sharedp) override { clearEdge(sharedp); }
+        void onChild(const JS::GCCellPtr& thing) override { MOZ_CRASH(); }
+    };
+
+    void operator()(const T* constPtr) {
+        if (constPtr) {
+            auto ptr = const_cast<T*>(constPtr);
+            ClearEdgesTracer trc(TlsContext.get());
+            ptr->trace(&trc);
+            js_delete(ptr);
+        }
+    }
+};
+
+#ifdef DEBUG
+inline bool
+IsClearEdgesTracer(JSTracer *trc)
+{
+    return trc->isCallbackTracer() &&
+           trc->asCallbackTracer()->getTracerKind() == JS::CallbackTracer::TracerKind::ClearEdges;
+}
+#endif
+
 } // namespace js
 
+namespace JS {
+
+// Scope data that contain GCPtrs must use the correct DeletePolicy.
+//
+// This is defined here because vm/Scope.h cannot #include "vm/Runtime.h"
+
+template <>
+struct DeletePolicy<js::FunctionScope::Data>
+  : public js::GCManagedDeletePolicy<js::FunctionScope::Data>
+{ };
+
+template <>
+struct DeletePolicy<js::ModuleScope::Data>
+  : public js::GCManagedDeletePolicy<js::ModuleScope::Data>
+{ };
+
+template <>
+struct DeletePolicy<js::WasmInstanceScope::Data>
+  : public js::GCManagedDeletePolicy<js::WasmInstanceScope::Data>
+{ };
+
+} // namespace JS
+
 #endif // gc_Zone_h
--- a/js/src/jit/BaselineCompiler.cpp
+++ b/js/src/jit/BaselineCompiler.cpp
@@ -360,17 +360,17 @@ BaselineCompiler::emitPrologue()
     masm.checkStackAlignment();
 #endif
     emitProfilerEnterFrame();
 
     masm.push(BaselineFrameReg);
     masm.moveStackPtrTo(BaselineFrameReg);
     masm.subFromStackPtr(Imm32(BaselineFrame::Size()));
 
-    // Initialize BaselineFrame. For eval scripts, the scope chain
+    // Initialize BaselineFrame. For eval scripts, the env chain
     // is passed in R1, so we have to be careful not to clobber it.
 
     // Initialize BaselineFrame::flags.
     masm.store32(Imm32(0), frame.addressOfFlags());
 
     // Handle env chain pre-initialization (in case GC gets run
     // during stack check).  For global and eval scripts, the env
     // chain is in R1.  For function scripts, the env chain is in
@@ -648,18 +648,18 @@ bool
 BaselineCompiler::initEnvironmentChain()
 {
     CallVMPhase phase = POST_INITIALIZE;
     if (needsEarlyStackCheck())
         phase = CHECK_OVER_RECURSED;
 
     RootedFunction fun(cx, function());
     if (fun) {
-        // Use callee->environment as scope chain. Note that we do this also
-        // for needsSomeEnvironmentObject functions, so that the scope chain
+        // Use callee->environment as env chain. Note that we do this also
+        // for needsSomeEnvironmentObject functions, so that the env chain
         // slot is properly initialized if the call triggers GC.
         Register callee = R0.scratchReg();
         Register scope = R1.scratchReg();
         masm.loadFunctionFromCalleeToken(frame.addressOfCalleeToken(), callee);
         masm.loadPtr(Address(callee, JSFunction::offsetOfEnvironment()), scope);
         masm.storePtr(scope, frame.addressOfEnvironmentChain());
 
         if (fun->needsFunctionEnvironmentObjects()) {
@@ -2494,17 +2494,17 @@ BaselineCompiler::emit_JSOP_BINDGNAME()
             // exists on the global and is non-configurable, as then it cannot
             // be shadowed.
             if (!shape->configurable()) {
                 frame.push(ObjectValue(script->global()));
                 return true;
             }
         }
 
-        // Otherwise we have to use the dynamic scope chain.
+        // Otherwise we have to use the environment chain.
     }
 
     return emit_JSOP_BINDNAME();
 }
 
 typedef JSObject* (*BindVarFn)(JSContext*, HandleObject);
 static const VMFunction BindVarInfo = FunctionInfo<BindVarFn>(jit::BindVar, "BindVar");
 
@@ -3856,17 +3856,17 @@ static const VMFunction EnterWithInfo =
 bool
 BaselineCompiler::emit_JSOP_ENTERWITH()
 {
     WithScope& withScope = script->getScope(pc)->as<WithScope>();
 
     // Pop "with" object to R0.
     frame.popRegsAndSync(1);
 
-    // Call a stub to push the object onto the scope chain.
+    // Call a stub to push the object onto the environment chain.
     prepareVMCall();
     masm.loadBaselineFramePtr(BaselineFrameReg, R1.scratchReg());
 
     pushArg(ImmGCPtr(&withScope));
     pushArg(R0);
     pushArg(R1.scratchReg());
 
     return callVM(EnterWithInfo);
@@ -3874,17 +3874,17 @@ BaselineCompiler::emit_JSOP_ENTERWITH()
 
 typedef bool (*LeaveWithFn)(JSContext*, BaselineFrame*);
 static const VMFunction LeaveWithInfo =
     FunctionInfo<LeaveWithFn>(jit::LeaveWith, "LeaveWith");
 
 bool
 BaselineCompiler::emit_JSOP_LEAVEWITH()
 {
-    // Call a stub to pop the with object from the scope chain.
+    // Call a stub to pop the with object from the environment chain.
     prepareVMCall();
 
     masm.loadBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
     pushArg(R0.scratchReg());
 
     return callVM(LeaveWithInfo);
 }
 
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -6013,17 +6013,17 @@ enum TranscodeResult
     // Successful encoding / decoding.
     TranscodeResult_Ok = 0,
 
     // A warning message, is set to the message out-param.
     TranscodeResult_Failure = 0x100,
     TranscodeResult_Failure_BadBuildId =          TranscodeResult_Failure | 0x1,
     TranscodeResult_Failure_RunOnceNotSupported = TranscodeResult_Failure | 0x2,
     TranscodeResult_Failure_AsmJSNotSupported =   TranscodeResult_Failure | 0x3,
-    TranscodeResult_Failure_UnknownClassKind =    TranscodeResult_Failure | 0x4,
+    TranscodeResult_Failure_BadDecode =           TranscodeResult_Failure | 0x4,
     TranscodeResult_Failure_WrongCompileOption =  TranscodeResult_Failure | 0x5,
     TranscodeResult_Failure_NotInterpretedFun =   TranscodeResult_Failure | 0x6,
 
     // There is a pending exception on the context.
     TranscodeResult_Throw = 0x200
 };
 
 extern JS_PUBLIC_API(TranscodeResult)
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -13,17 +13,16 @@
 
 #include "js/CharacterEncoding.h"
 #include "js/GCVector.h"
 #include "js/Result.h"
 #include "js/Utility.h"
 #include "js/Vector.h"
 #include "threading/ProtectedData.h"
 #include "vm/ErrorReporting.h"
-#include "vm/MallocProvider.h"
 #include "vm/Runtime.h"
 
 #ifdef _MSC_VER
 #pragma warning(push)
 #pragma warning(disable:4100) /* Silence unreferenced formal parameter warnings */
 #endif
 
 struct DtoaState;
--- a/js/src/jsfun.cpp
+++ b/js/src/jsfun.cpp
@@ -677,16 +677,20 @@ js::XDRInterpretedFunction(XDRState<mode
         }
 
         bool singleton = firstword & HasSingletonType;
         if (!JSFunction::setTypeForScriptedFunction(cx, fun, singleton))
             return false;
         objp.set(fun);
     }
 
+    // Verify marker at end of function to detect buffer truncation.
+    if (!xdr->codeMarker(0x9E35CA1F))
+        return false;
+
     return true;
 }
 
 template bool
 js::XDRInterpretedFunction(XDRState<XDR_ENCODE>*, HandleScope, HandleScriptSource,
                            MutableHandleFunction);
 
 template bool
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -9042,34 +9042,8 @@ js::gc::detail::CellIsNotGray(const Cell
 
     Zone* sourceZone = rt->gc.marker.stackContainsCrossZonePointerTo(tc);
     if (sourceZone && sourceZone->wasGCStarted())
         return true;
 
     return false;
 }
 #endif
-
-js::gc::ClearEdgesTracer::ClearEdgesTracer()
-  : CallbackTracer(TlsContext.get(), TraceWeakMapKeysValues)
-{}
-
-template <typename S>
-inline void
-js::gc::ClearEdgesTracer::clearEdge(S** thingp)
-{
-    InternalBarrierMethods<S*>::preBarrier(*thingp);
-    InternalBarrierMethods<S*>::postBarrier(thingp, *thingp, nullptr);
-    *thingp = nullptr;
-}
-
-void js::gc::ClearEdgesTracer::onObjectEdge(JSObject** objp) { clearEdge(objp); }
-void js::gc::ClearEdgesTracer::onStringEdge(JSString** strp) { clearEdge(strp); }
-void js::gc::ClearEdgesTracer::onSymbolEdge(JS::Symbol** symp) { clearEdge(symp); }
-void js::gc::ClearEdgesTracer::onScriptEdge(JSScript** scriptp) { clearEdge(scriptp); }
-void js::gc::ClearEdgesTracer::onShapeEdge(js::Shape** shapep) { clearEdge(shapep); }
-void js::gc::ClearEdgesTracer::onObjectGroupEdge(js::ObjectGroup** groupp) { clearEdge(groupp); }
-void js::gc::ClearEdgesTracer::onBaseShapeEdge(js::BaseShape** basep) { clearEdge(basep); }
-void js::gc::ClearEdgesTracer::onJitCodeEdge(js::jit::JitCode** codep) { clearEdge(codep); }
-void js::gc::ClearEdgesTracer::onLazyScriptEdge(js::LazyScript** lazyp) { clearEdge(lazyp); }
-void js::gc::ClearEdgesTracer::onScopeEdge(js::Scope** scopep) { clearEdge(scopep); }
-void js::gc::ClearEdgesTracer::onRegExpSharedEdge(js::RegExpShared** sharedp) { clearEdge(sharedp); }
-void js::gc::ClearEdgesTracer::onChild(const JS::GCCellPtr& thing) { MOZ_CRASH(); }
--- a/js/src/jsscript.cpp
+++ b/js/src/jsscript.cpp
@@ -87,34 +87,29 @@ CheckScriptDataIntegrity(JSScript* scrip
 }
 
 template<XDRMode mode>
 bool
 js::XDRScriptConst(XDRState<mode>* xdr, MutableHandleValue vp)
 {
     JSContext* cx = xdr->cx();
 
-    /*
-     * A script constant can be an arbitrary primitive value as they are used
-     * to implement JSOP_LOOKUPSWITCH. But they cannot be objects, see
-     * bug 407186.
-     */
     enum ConstTag {
-        SCRIPT_INT     = 0,
-        SCRIPT_DOUBLE  = 1,
-        SCRIPT_ATOM    = 2,
-        SCRIPT_TRUE    = 3,
-        SCRIPT_FALSE   = 4,
-        SCRIPT_NULL    = 5,
-        SCRIPT_OBJECT  = 6,
-        SCRIPT_VOID    = 7,
-        SCRIPT_HOLE    = 8
+        SCRIPT_INT,
+        SCRIPT_DOUBLE,
+        SCRIPT_ATOM,
+        SCRIPT_TRUE,
+        SCRIPT_FALSE,
+        SCRIPT_NULL,
+        SCRIPT_OBJECT,
+        SCRIPT_VOID,
+        SCRIPT_HOLE
     };
 
-    uint32_t tag;
+    ConstTag tag;
     if (mode == XDR_ENCODE) {
         if (vp.isInt32()) {
             tag = SCRIPT_INT;
         } else if (vp.isDouble()) {
             tag = SCRIPT_DOUBLE;
         } else if (vp.isString()) {
             tag = SCRIPT_ATOM;
         } else if (vp.isTrue()) {
@@ -128,17 +123,17 @@ js::XDRScriptConst(XDRState<mode>* xdr, 
         } else if (vp.isMagic(JS_ELEMENTS_HOLE)) {
             tag = SCRIPT_HOLE;
         } else {
             MOZ_ASSERT(vp.isUndefined());
             tag = SCRIPT_VOID;
         }
     }
 
-    if (!xdr->codeUint32(&tag))
+    if (!xdr->codeEnum32(&tag))
         return false;
 
     switch (tag) {
       case SCRIPT_INT: {
         uint32_t i;
         if (mode == XDR_ENCODE)
             i = uint32_t(vp.toInt32());
         if (!xdr->codeUint32(&i))
@@ -194,16 +189,20 @@ js::XDRScriptConst(XDRState<mode>* xdr, 
       case SCRIPT_VOID:
         if (mode == XDR_DECODE)
             vp.set(UndefinedValue());
         break;
       case SCRIPT_HOLE:
         if (mode == XDR_DECODE)
             vp.setMagic(JS_ELEMENTS_HOLE);
         break;
+      default:
+        // Fail in debug, but only soft-fail in release
+        MOZ_ASSERT(false, "Bad XDR value kind");
+        return xdr->fail(JS::TranscodeResult_Failure_BadDecode);
     }
     return true;
 }
 
 template bool
 js::XDRScriptConst(XDRState<XDR_ENCODE>*, MutableHandleValue);
 
 template bool
@@ -787,21 +786,30 @@ js::XDRScript(XDRState<mode>* xdr, Handl
                 break;
               case ScopeKind::Module:
               case ScopeKind::WasmInstance:
                 MOZ_CRASH("NYI");
                 break;
               case ScopeKind::WasmFunction:
                 MOZ_CRASH("wasm functions cannot be nested in JSScripts");
                 break;
+              default:
+                // Fail in debug, but only soft-fail in release
+                MOZ_ASSERT(false, "Bad XDR scope kind");
+                return xdr->fail(JS::TranscodeResult_Failure_BadDecode);
             }
 
             if (mode == XDR_DECODE)
                 vector[i].init(scope);
         }
+
+        // Verify marker to detect data corruption after decoding scope data. A
+        // mismatch here indicates we will almost certainly crash in release.
+        if (!xdr->codeMarker(0x48922BAB))
+            return false;
     }
 
     /*
      * Here looping from 0-to-length to xdr objects is essential to ensure that
      * all references to enclosing blocks (via FindScopeIndex below) happen
      * after the enclosing block has been XDR'd.
      */
     for (i = 0; i != nobjects; ++i) {
@@ -877,22 +885,28 @@ js::XDRScript(XDRState<mode>* xdr, Handl
             RootedObject tmp(cx, *objp);
             if (!XDRObjectLiteral(xdr, &tmp))
                 return false;
             *objp = tmp;
             break;
           }
 
           default: {
-            MOZ_ASSERT(false, "Unknown class kind.");
-            return xdr->fail(JS::TranscodeResult_Failure_UnknownClassKind);
+            // Fail in debug, but only soft-fail in release
+            MOZ_ASSERT(false, "Bad XDR class kind");
+            return xdr->fail(JS::TranscodeResult_Failure_BadDecode);
           }
         }
     }
 
+    // Verify marker to detect data corruption after decoding object data. A
+    // mismatch here indicates we will almost certainly crash in release.
+    if (!xdr->codeMarker(0xF83B989A))
+        return false;
+
     if (ntrynotes != 0) {
         JSTryNote* tnfirst = script->trynotes()->vector;
         MOZ_ASSERT(script->trynotes()->length == ntrynotes);
         JSTryNote* tn = tnfirst + ntrynotes;
         do {
             --tn;
             if (!xdr->codeUint8(&tn->kind) ||
                 !xdr->codeUint32(&tn->stackDepth) ||
--- a/js/src/jsweakmap.h
+++ b/js/src/jsweakmap.h
@@ -9,17 +9,16 @@
 
 #include "mozilla/LinkedList.h"
 #include "mozilla/Move.h"
 
 #include "jscompartment.h"
 #include "jsfriendapi.h"
 #include "jsobj.h"
 
-#include "gc/DeletePolicy.h"
 #include "gc/StoreBuffer.h"
 #include "js/HashTable.h"
 
 namespace js {
 
 class GCMarker;
 class WeakMapBase;
 
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -1507,19 +1507,19 @@ ConvertTranscodeResultToJSException(JSCo
       case JS::TranscodeResult_Failure_RunOnceNotSupported:
         MOZ_ASSERT(!cx->isExceptionPending());
         JS_ReportErrorASCII(cx, "run-once script are not supported by XDR");
         return false;
       case JS::TranscodeResult_Failure_AsmJSNotSupported:
         MOZ_ASSERT(!cx->isExceptionPending());
         JS_ReportErrorASCII(cx, "Asm.js is not supported by XDR");
         return false;
-      case JS::TranscodeResult_Failure_UnknownClassKind:
+      case JS::TranscodeResult_Failure_BadDecode:
         MOZ_ASSERT(!cx->isExceptionPending());
-        JS_ReportErrorASCII(cx, "Unknown class kind, go fix it.");
+        JS_ReportErrorASCII(cx, "XDR data corruption");
         return false;
       case JS::TranscodeResult_Failure_WrongCompileOption:
         MOZ_ASSERT(!cx->isExceptionPending());
         JS_ReportErrorASCII(cx, "Compile options differs from Compile options of the encoding");
         return false;
       case JS::TranscodeResult_Failure_NotInterpretedFun:
         MOZ_ASSERT(!cx->isExceptionPending());
         JS_ReportErrorASCII(cx, "Only interepreted functions are supported by XDR");
--- a/js/src/vm/Scope.h
+++ b/js/src/vm/Scope.h
@@ -8,17 +8,16 @@
 #define vm_Scope_h
 
 #include "mozilla/Maybe.h"
 #include "mozilla/Variant.h"
 
 #include "jsobj.h"
 #include "jsopcode.h"
 
-#include "gc/DeletePolicy.h"
 #include "gc/Heap.h"
 #include "gc/Policy.h"
 #include "js/UbiNode.h"
 #include "js/UniquePtr.h"
 #include "vm/Xdr.h"
 
 namespace js {
 
@@ -1534,33 +1533,16 @@ DEFINE_SCOPE_DATA_GCPOLICY(js::FunctionS
 DEFINE_SCOPE_DATA_GCPOLICY(js::VarScope::Data);
 DEFINE_SCOPE_DATA_GCPOLICY(js::GlobalScope::Data);
 DEFINE_SCOPE_DATA_GCPOLICY(js::EvalScope::Data);
 DEFINE_SCOPE_DATA_GCPOLICY(js::ModuleScope::Data);
 DEFINE_SCOPE_DATA_GCPOLICY(js::WasmFunctionScope::Data);
 
 #undef DEFINE_SCOPE_DATA_GCPOLICY
 
-// Scope data that contain GCPtrs must use the correct DeletePolicy.
-
-template <>
-struct DeletePolicy<js::FunctionScope::Data>
-  : public js::GCManagedDeletePolicy<js::FunctionScope::Data>
-{};
-
-template <>
-struct DeletePolicy<js::ModuleScope::Data>
-  : public js::GCManagedDeletePolicy<js::ModuleScope::Data>
-{};
-
-template <>
-struct DeletePolicy<js::WasmInstanceScope::Data>
-  : public js::GCManagedDeletePolicy<js::WasmInstanceScope::Data>
-{ };
-
 namespace ubi {
 
 template <>
 class Concrete<js::Scope> : TracerConcrete<js::Scope>
 {
   protected:
     explicit Concrete(js::Scope* ptr) : TracerConcrete<js::Scope>(ptr) { }
 
--- a/js/src/vm/UnboxedObject.h
+++ b/js/src/vm/UnboxedObject.h
@@ -4,17 +4,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef vm_UnboxedObject_h
 #define vm_UnboxedObject_h
 
 #include "jsobj.h"
 
-#include "gc/DeletePolicy.h"
 #include "gc/Zone.h"
 #include "vm/Runtime.h"
 #include "vm/TypeInference.h"
 
 namespace js {
 
 // Memory required for an unboxed value of a given type. Returns zero for types
 // which can't be used for unboxed objects.
--- a/js/src/vm/Xdr.h
+++ b/js/src/vm/Xdr.h
@@ -274,23 +274,27 @@ class XDRState : public XDRCoderBase
     /*
      * Use SFINAE to refuse any specialization which is not an enum.  Uses of
      * this function do not have to specialize the type of the enumerated field
      * as C++ will extract the parameterized from the argument list.
      */
     template <typename T>
     bool codeEnum32(T* val, typename mozilla::EnableIf<mozilla::IsEnum<T>::value, T>::Type * = NULL)
     {
+        // Mix the enumeration value with a random magic number, such that a
+        // corruption with a low-ranged value (like 0) is less likely to cause a
+        // misinterpretation of the XDR content and instead cause a failure.
+        const uint32_t MAGIC = 0x21AB218C;
         uint32_t tmp;
         if (mode == XDR_ENCODE)
-            tmp = uint32_t(*val);
+            tmp = uint32_t(*val) ^ MAGIC;
         if (!codeUint32(&tmp))
             return false;
         if (mode == XDR_DECODE)
-            *val = T(tmp);
+            *val = T(tmp ^ MAGIC);
         return true;
     }
 
     bool codeDouble(double* dp) {
         union DoublePun {
             double d;
             uint64_t u;
         } pun;
@@ -298,16 +302,28 @@ class XDRState : public XDRCoderBase
             pun.d = *dp;
         if (!codeUint64(&pun.u))
             return false;
         if (mode == XDR_DECODE)
             *dp = pun.d;
         return true;
     }
 
+    bool codeMarker(uint32_t magic) {
+        uint32_t actual = magic;
+        if (!codeUint32(&actual))
+            return false;
+        if (actual != magic) {
+            // Fail in debug, but only soft-fail in release
+            MOZ_ASSERT(false, "Bad XDR marker");
+            return fail(JS::TranscodeResult_Failure_BadDecode);
+        }
+        return true;
+    }
+
     bool codeBytes(void* bytes, size_t len) {
         if (len == 0)
             return true;
         if (mode == XDR_ENCODE) {
             uint8_t* ptr = buf.write(len);
             if (!ptr)
                 return fail(JS::TranscodeResult_Throw);
             memcpy(ptr, bytes, len);
--- a/layout/generic/nsGridContainerFrame.cpp
+++ b/layout/generic/nsGridContainerFrame.cpp
@@ -6920,25 +6920,34 @@ nsGridContainerFrame::GetGridFrameWithCo
     // if any of our properties are missing, generate them
     bool reflowNeeded = (!gridFrame->HasProperty(GridColTrackInfo()) ||
                          !gridFrame->HasProperty(GridRowTrackInfo()) ||
                          !gridFrame->HasProperty(GridColumnLineInfo()) ||
                          !gridFrame->HasProperty(GridRowLineInfo()));
 
     if (reflowNeeded) {
       // Trigger a reflow that generates additional grid property data.
+      // Hold onto aFrame while we do this, in case reflow destroys it.
+      AutoWeakFrame weakFrameRef(aFrame);
+
       nsIPresShell* shell = gridFrame->PresShell();
       gridFrame->AddStateBits(NS_STATE_GRID_GENERATE_COMPUTED_VALUES);
       shell->FrameNeedsReflow(gridFrame,
                               nsIPresShell::eResize,
                               NS_FRAME_IS_DIRTY);
       shell->FlushPendingNotifications(FlushType::Layout);
 
-      // Since the reflow may have side effects, get the grid frame again.
-      gridFrame = GetGridContainerFrame(aFrame);
+      // Since the reflow may have side effects, get the grid frame
+      // again. But if the weakFrameRef is no longer valid, then we
+      // must bail out.
+      if (!weakFrameRef.IsAlive()) {
+        return nullptr;
+      }
+
+      gridFrame = GetGridContainerFrame(weakFrameRef.GetFrame());
 
       // Assert the grid properties are present
       MOZ_ASSERT(!gridFrame ||
                   gridFrame->HasProperty(GridColTrackInfo()));
       MOZ_ASSERT(!gridFrame ||
                   gridFrame->HasProperty(GridRowTrackInfo()));
       MOZ_ASSERT(!gridFrame ||
                   gridFrame->HasProperty(GridColumnLineInfo()));
--- a/mobile/android/base/java/org/mozilla/gecko/BrowserApp.java
+++ b/mobile/android/base/java/org/mozilla/gecko/BrowserApp.java
@@ -158,16 +158,17 @@ import org.mozilla.gecko.util.ContextUti
 import org.mozilla.gecko.util.DrawableUtil;
 import org.mozilla.gecko.util.EventCallback;
 import org.mozilla.gecko.util.GamepadUtils;
 import org.mozilla.gecko.util.GeckoBundle;
 import org.mozilla.gecko.util.HardwareUtils;
 import org.mozilla.gecko.util.IntentUtils;
 import org.mozilla.gecko.util.MenuUtils;
 import org.mozilla.gecko.util.PrefUtils;
+import org.mozilla.gecko.util.ShortcutUtils;
 import org.mozilla.gecko.util.StringUtils;
 import org.mozilla.gecko.util.ThreadUtils;
 import org.mozilla.gecko.util.WindowUtil;
 import org.mozilla.gecko.widget.ActionModePresenter;
 import org.mozilla.gecko.widget.AnchoredPopup;
 import org.mozilla.gecko.widget.AnimatedProgressBar;
 import org.mozilla.gecko.widget.GeckoActionProvider;
 import org.mozilla.gecko.widget.SplashScreen;
@@ -3644,17 +3645,18 @@ public class BrowserApp extends GeckoApp
         final boolean distSetAsHomepage = GeckoSharedPrefs.forProfile(this).getBoolean(GeckoPreferences.PREFS_SET_AS_HOMEPAGE, false);
         MenuUtils.safeSetVisible(aMenu, R.id.set_as_homepage, distSetAsHomepage);
 
         // NOTE: Use MenuUtils.safeSetEnabled because some actions might
         // be on the BrowserToolbar context menu.
         MenuUtils.safeSetEnabled(aMenu, R.id.page, !isAboutHome(tab));
         MenuUtils.safeSetEnabled(aMenu, R.id.subscribe, tab.hasFeeds());
         MenuUtils.safeSetEnabled(aMenu, R.id.add_search_engine, tab.hasOpenSearch());
-        MenuUtils.safeSetEnabled(aMenu, R.id.add_to_launcher, !isAboutHome(tab));
+        MenuUtils.safeSetEnabled(aMenu, R.id.add_to_launcher,
+            !isAboutHome(tab) && ShortcutUtils.isPinShortcutSupported());
         MenuUtils.safeSetEnabled(aMenu, R.id.set_as_homepage, !isAboutHome(tab));
         onPrepareOptionsMenuPinToTopSites(aMenu, tab);
 
         // This provider also applies to the quick share menu item.
         final GeckoActionProvider provider = ((GeckoMenuItem) share).getGeckoActionProvider();
         if (provider != null) {
             Intent shareIntent = provider.getIntent();
 
--- a/mobile/android/base/java/org/mozilla/gecko/Tab.java
+++ b/mobile/android/base/java/org/mozilla/gecko/Tab.java
@@ -20,16 +20,17 @@ import org.mozilla.gecko.icons.IconDescr
 import org.mozilla.gecko.icons.IconRequestBuilder;
 import org.mozilla.gecko.icons.IconResponse;
 import org.mozilla.gecko.icons.Icons;
 import org.mozilla.gecko.reader.ReaderModeUtils;
 import org.mozilla.gecko.reader.ReadingListHelper;
 import org.mozilla.gecko.toolbar.BrowserToolbar.TabEditingState;
 import org.mozilla.gecko.toolbar.PageActionLayout;
 import org.mozilla.gecko.util.GeckoBundle;
+import org.mozilla.gecko.util.ShortcutUtils;
 import org.mozilla.gecko.util.ThreadUtils;
 import org.mozilla.gecko.webapps.WebAppManifest;
 import org.mozilla.gecko.widget.SiteLogins;
 
 import android.content.ContentResolver;
 import android.content.Context;
 import android.graphics.Bitmap;
 import android.graphics.drawable.BitmapDrawable;
@@ -475,16 +476,20 @@ public class Tab {
     }
 
     public void setManifestUrl(String manifestUrl) {
         mManifestUrl = manifestUrl;
         updatePageAction();
     }
 
     public void updatePageAction() {
+        if (!ShortcutUtils.isPinShortcutSupported()) {
+            return;
+        }
+
         if (mManifestUrl != null) {
             showPwaPageAction();
 
         } else {
             clearPwaPageAction();
         }
     }
 
--- a/mobile/android/base/java/org/mozilla/gecko/delegates/BookmarkStateChangeDelegate.java
+++ b/mobile/android/base/java/org/mozilla/gecko/delegates/BookmarkStateChangeDelegate.java
@@ -26,16 +26,17 @@ import org.mozilla.gecko.Tabs;
 import org.mozilla.gecko.Telemetry;
 import org.mozilla.gecko.TelemetryContract;
 import org.mozilla.gecko.home.HomeConfig;
 import org.mozilla.gecko.promotion.SimpleHelperUI;
 import org.mozilla.gecko.prompts.Prompt;
 import org.mozilla.gecko.prompts.PromptListItem;
 import org.mozilla.gecko.util.DrawableUtil;
 import org.mozilla.gecko.util.GeckoBundle;
+import org.mozilla.gecko.util.ShortcutUtils;
 import org.mozilla.gecko.util.ThreadUtils;
 
 /**
  * Delegate to watch for bookmark state changes.
  *
  * This is responsible for showing snackbars and helper UIs related to the addition/removal
  * of bookmarks, or reader view bookmarks.
  */
@@ -187,19 +188,25 @@ public class BookmarkStateChangeDelegate
                                 GeckoApplication.createBrowserShortcut(title, url);
                             }
                         });
                     }
                 }
             }
         });
 
-        final PromptListItem[] items = new PromptListItem[2];
-        items[0] = new PromptListItem(res.getString(R.string.contextmenu_edit_bookmark));
-        items[1] = new PromptListItem(res.getString(R.string.contextmenu_add_page_shortcut));
+        final PromptListItem[] items;
+        if (ShortcutUtils.isPinShortcutSupported()) {
+            items = new PromptListItem[2];
+            items[0] = new PromptListItem(res.getString(R.string.contextmenu_edit_bookmark));
+            items[1] = new PromptListItem(res.getString(R.string.contextmenu_add_page_shortcut));
+        } else {
+            items = new PromptListItem[1];
+            items[0] = new PromptListItem(res.getString(R.string.contextmenu_edit_bookmark));
+        }
 
         ps.show("", "", items, ListView.CHOICE_MODE_NONE);
     }
 
     private void showReaderModeBookmarkAddedSnackbar() {
         final BrowserApp browserApp = getBrowserApp();
         if (browserApp == null) {
             return;
--- a/mobile/android/base/java/org/mozilla/gecko/toolbar/PageActionLayout.java
+++ b/mobile/android/base/java/org/mozilla/gecko/toolbar/PageActionLayout.java
@@ -6,21 +6,22 @@
 package org.mozilla.gecko.toolbar;
 
 import org.mozilla.gecko.EventDispatcher;
 import org.mozilla.gecko.GeckoSharedPrefs;
 import org.mozilla.gecko.R;
 import org.mozilla.gecko.Tab;
 import org.mozilla.gecko.Tabs;
 import org.mozilla.gecko.preferences.GeckoPreferences;
+import org.mozilla.gecko.util.BundleEventListener;
 import org.mozilla.gecko.util.DrawableUtil;
-import org.mozilla.gecko.util.ResourceDrawableUtils;
-import org.mozilla.gecko.util.BundleEventListener;
 import org.mozilla.gecko.util.EventCallback;
 import org.mozilla.gecko.util.GeckoBundle;
+import org.mozilla.gecko.util.ResourceDrawableUtils;
+import org.mozilla.gecko.util.ShortcutUtils;
 import org.mozilla.gecko.util.ThreadUtils;
 import org.mozilla.gecko.widget.GeckoPopupMenu;
 import org.mozilla.gecko.widget.themed.ThemedImageButton;
 import org.mozilla.gecko.widget.themed.ThemedLinearLayout;
 
 import android.content.Context;
 import android.content.SharedPreferences;
 import android.content.res.ColorStateList;
@@ -164,17 +165,17 @@ public class PageActionLayout extends Th
         // only show pwa at normal mode
         final Tab selectedTab = Tabs.getInstance().getSelectedTab();
         if (selectedTab.isPrivate()) {
             return;
         }
         if (UUID_PAGE_ACTION_PWA.equals(id)) {
             final SharedPreferences prefs = GeckoSharedPrefs.forApp(getContext());
             final boolean show = prefs.getBoolean(PREF_PWA_ONBOARDING, true);
-            if (show) {
+            if (show && ShortcutUtils.isPinShortcutSupported()) {
                 PwaOnboarding.show(getContext());
                 prefs.edit().putBoolean(PREF_PWA_ONBOARDING, false).apply();
             }
         }
     }
 
     private boolean isPwaAdded(String id) {
         for (PageAction pageAction : mPageActionList) {
--- a/mobile/android/base/java/org/mozilla/gecko/util/ShortcutUtils.java
+++ b/mobile/android/base/java/org/mozilla/gecko/util/ShortcutUtils.java
@@ -109,16 +109,39 @@ public class ShortcutUtils {
         //     .setIcon(Icon.createWithBitmap(getLauncherIcon(aIcon, GeckoAppShell.getPreferredIconSize())))
         //     .setIntent(shortcutIntent)
         //     .setShortLabel(aTitle != null ? aTitle : aURI)
         //     .build();
 
         // mgr.requestPinShortcut(info, null);
     }
 
+    public static boolean isPinShortcutSupported() {
+        if (Versions.feature26Plus) {
+            return isPinShortcutSupported26();
+        }
+        return true;
+    }
+
+    @TargetApi(26)
+    private static boolean isPinShortcutSupported26() {
+        final Context context = GeckoAppShell.getApplicationContext();
+        try {
+            final Class<?> mgrCls = Class.forName("android.content.pm.ShortcutManager");
+            final Object mgr = context.getSystemService(mgrCls);
+
+            final boolean supported = (boolean)
+                mgrCls.getDeclaredMethod("isRequestPinShortcutSupported")
+                .invoke(mgr);
+            return supported;
+        } catch (final Exception e) {
+            return false;
+        }
+    }
+
     private static Bitmap getLauncherIcon(Bitmap aSource, int size) {
         final float[] DEFAULT_LAUNCHER_ICON_HSV = { 32.0f, 1.0f, 1.0f };
         final int kOffset = 6;
         final int kRadius = 5;
 
         final int insetSize = aSource != null ? size * 2 / 3 : size;
 
         final Bitmap bitmap = Bitmap.createBitmap(size, size, Bitmap.Config.ARGB_8888);
new file mode 100644
--- /dev/null
+++ b/netwerk/sctp/android.patch
@@ -0,0 +1,80 @@
+exporting patch:
+# HG changeset patch
+# User Randell Jesup <rjesup@jesup.org>
+# Date 1425533209 18000
+#      Thu Mar 05 00:26:49 2015 -0500
+# Node ID 4c7148e103e122f2fae1736685210b70be452c49
+# Parent  190f209ef80df453961b357a4a31d80247afd72b
+Bug 1297418: rollup of sctp modifications for Android and a capitalization change rs=jesup
+
+diff --git a/netwerk/sctp/src/netinet/sctp_bsd_addr.c b/netwerk/sctp/src/netinet/sctp_bsd_addr.c
+--- a/netwerk/sctp/src/netinet/sctp_bsd_addr.c
++++ b/netwerk/sctp/src/netinet/sctp_bsd_addr.c
+@@ -43,19 +43,24 @@
+ #include <netinet/sctp_output.h>
+ #include <netinet/sctp_bsd_addr.h>
+ #include <netinet/sctp_uio.h>
+ #include <netinet/sctputil.h>
+ #include <netinet/sctp_timer.h>
+ #include <netinet/sctp_asconf.h>
+ #include <netinet/sctp_sysctl.h>
+ #include <netinet/sctp_indata.h>
++#if defined(ANDROID)
++#include <unistd.h>
++#include <ifaddrs-android-ext.h>
++#else
+ #if defined(__FreeBSD__)
+ #include <sys/unistd.h>
+ #endif
++#endif
+ 
+ /* Declare all of our malloc named types */
+ #ifndef __Panda__
+ MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
+ MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
+ MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
+ MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
+ MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
+diff --git a/netwerk/sctp/src/netinet/sctp_os_userspace.h b/netwerk/sctp/src/netinet/sctp_os_userspace.h
+--- a/netwerk/sctp/src/netinet/sctp_os_userspace.h
++++ b/netwerk/sctp/src/netinet/sctp_os_userspace.h
+@@ -40,18 +40,18 @@
+  */
+ 
+ #include <errno.h>
+ 
+ #if defined(__Userspace_os_Windows)
+ #include <winsock2.h>
+ #include <ws2tcpip.h>
+ #include <iphlpapi.h>
+-#include <Mswsock.h>
+-#include <Windows.h>
++#include <mswsock.h>
++#include <windows.h>
+ #include "user_environment.h"
+ typedef CRITICAL_SECTION userland_mutex_t;
+ #if WINVER < 0x0600
+ enum {
+ 	C_SIGNAL = 0,
+ 	C_BROADCAST = 1,
+ 	C_MAX_EVENTS = 2
+ };
+@@ -490,17 +490,17 @@ struct sx {int dummy;};
+ #include <user_ip_icmp.h>
+ #endif
+ /* #include <netinet/in_pcb.h> ported to userspace */
+ #include <user_inpcb.h>
+ 
+ /* for getifaddrs */
+ #include <sys/types.h>
+ #if !defined(__Userspace_os_Windows)
+-#if defined(INET) || defined(INET6)
++#if !defined(ANDROID) && (defined(INET) || defined(INET6))
+ #include <ifaddrs.h>
+ #endif
+ 
+ /* for ioctl */
+ #include <sys/ioctl.h>
+ 
+ /* for close, etc. */
+ #include <unistd.h>
--- a/netwerk/sctp/sctp_update.log
+++ b/netwerk/sctp/sctp_update.log
@@ -11,8 +11,9 @@ sctp updated to version 8165 from SVN on
 sctp updated to version 8176 from SVN on Wed Sep  5 18:02:08 EDT 2012
 sctp updated to version 8263 from SVN on Sun Sep 16 00:48:48 EDT 2012
 sctp updated to version 8279 from SVN on Thu Sep 20 18:19:24 EDT 2012
 sctp updated to version 8397 from SVN on Wed Jan  9 00:41:16 EST 2013
 sctp updated to version 8443 from SVN on Sun Mar 31 09:05:07 EDT 2013
 sctp updated to version 8815 from SVN on Tue Mar  4 08:50:51 EST 2014
 sctp updated to version 9168 from SVN on Tue Mar  3 12:11:40 EST 2015
 sctp updated to version 9209 from SVN on Tue Mar 24 18:11:59 EDT 2015
+sctp updated to version 0e076261b832121cf120ddc04aaff87ac3a34d30 from git on Tue Nov 28 15:20:51 EST 2017
--- a/netwerk/sctp/src/netinet/sctp.h
+++ b/netwerk/sctp/src/netinet/sctp.h
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * a) Redistributions of source code must retain the above copyright notice,
@@ -27,17 +29,17 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifdef __FreeBSD__
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/netinet/sctp.h 279859 2015-03-10 19:49:25Z tuexen $");
+__FBSDID("$FreeBSD: head/sys/netinet/sctp.h 323657 2017-09-16 21:26:06Z tuexen $");
 #endif
 
 #ifndef _NETINET_SCTP_H_
 #define _NETINET_SCTP_H_
 
 #if (defined(__APPLE__) || defined(__Userspace_os_Linux) || defined(__Userspace_os_Darwin))
 #include <stdint.h>
 #endif
@@ -197,16 +199,19 @@ struct sctp_paramhdr {
 #define SCTP_CMT_ON_OFF                 0x00001200
 #define SCTP_CMT_USE_DAC                0x00001201
 /* JRS - Pluggable Congestion Control Socket option */
 #define SCTP_PLUGGABLE_CC               0x00001202
 /* RS - Pluggable Stream Scheduling Socket option */
 #define SCTP_PLUGGABLE_SS		0x00001203
 #define SCTP_SS_VALUE			0x00001204
 #define SCTP_CC_OPTION			0x00001205 /* Options for CC modules */
+/* For I-DATA */
+#define SCTP_INTERLEAVING_SUPPORTED	0x00001206
+
 /* read only */
 #define SCTP_GET_SNDBUF_USE		0x00001101
 #define SCTP_GET_STAT_LOG		0x00001103
 #define SCTP_PCB_STATUS			0x00001104
 #define SCTP_GET_NONCE_VALUES           0x00001105
 
 
 /* Special hook for dynamically setting primary for all assoc's,
@@ -397,51 +402,55 @@ struct sctp_gen_error_cause {
 
 struct sctp_error_cause {
 	uint16_t code;
 	uint16_t length;
 	/* optional cause-specific info may follow */
 } SCTP_PACKED;
 
 struct sctp_error_invalid_stream {
-	struct sctp_error_cause cause;	/* code=SCTP_ERROR_INVALID_STREAM */
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_INVALID_STREAM */
 	uint16_t stream_id;	/* stream id of the DATA in error */
 	uint16_t reserved;
 } SCTP_PACKED;
 
 struct sctp_error_missing_param {
-	struct sctp_error_cause cause;	/* code=SCTP_ERROR_MISSING_PARAM */
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_MISSING_PARAM */
 	uint32_t num_missing_params;	/* number of missing parameters */
-	/* uint16_t param_type's follow */
+	uint16_t type[];
 } SCTP_PACKED;
 
 struct sctp_error_stale_cookie {
-	struct sctp_error_cause cause;	/* code=SCTP_ERROR_STALE_COOKIE */
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_STALE_COOKIE */
 	uint32_t stale_time;	/* time in usec of staleness */
 } SCTP_PACKED;
 
 struct sctp_error_out_of_resource {
-	struct sctp_error_cause cause;	/* code=SCTP_ERROR_OUT_OF_RESOURCES */
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_OUT_OF_RESOURCES */
 } SCTP_PACKED;
 
 struct sctp_error_unresolv_addr {
-	struct sctp_error_cause cause;	/* code=SCTP_ERROR_UNRESOLVABLE_ADDR */
-
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_UNRESOLVABLE_ADDR */
 } SCTP_PACKED;
 
 struct sctp_error_unrecognized_chunk {
-	struct sctp_error_cause cause;	/* code=SCTP_ERROR_UNRECOG_CHUNK */
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_UNRECOG_CHUNK */
 	struct sctp_chunkhdr ch;/* header from chunk in error */
 } SCTP_PACKED;
 
 struct sctp_error_no_user_data {
 	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_NO_USER_DATA */
 	uint32_t tsn;			/* TSN of the empty data chunk */
 } SCTP_PACKED;
 
+struct sctp_error_auth_invalid_hmac {
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_UNSUPPORTED_HMACID */
+	uint16_t hmac_id;
+} SCTP_PACKED;
+
 /*
  * Main SCTP chunk types we place these here so natd and f/w's in user land
  * can find them.
  */
 /************0x00 series ***********/
 #define SCTP_DATA		0x00
 #define SCTP_INITIATION		0x01
 #define SCTP_INITIATION_ACK	0x02
@@ -457,32 +466,33 @@ struct sctp_error_no_user_data {
 #define SCTP_ECN_ECHO		0x0c
 #define SCTP_ECN_CWR		0x0d
 #define SCTP_SHUTDOWN_COMPLETE	0x0e
 /* RFC4895 */
 #define SCTP_AUTHENTICATION     0x0f
 /* EY nr_sack chunk id*/
 #define SCTP_NR_SELECTIVE_ACK	0x10
 /************0x40 series ***********/
+#define SCTP_IDATA		0x40
 /************0x80 series ***********/
 /* RFC5061 */
 #define	SCTP_ASCONF_ACK		0x80
 /* draft-ietf-stewart-pktdrpsctp */
 #define SCTP_PACKET_DROPPED	0x81
 /* draft-ietf-stewart-strreset-xxx */
 #define SCTP_STREAM_RESET       0x82
 
 /* RFC4820                         */
 #define SCTP_PAD_CHUNK          0x84
 /************0xc0 series ***********/
 /* RFC3758 */
 #define SCTP_FORWARD_CUM_TSN	0xc0
 /* RFC5061 */
 #define SCTP_ASCONF		0xc1
-
+#define SCTP_IFORWARD_CUM_TSN	0xc2
 
 /* ABORT and SHUTDOWN COMPLETE FLAG */
 #define SCTP_HAD_NO_TCB		0x01
 
 /* Packet dropped flags */
 #define SCTP_FROM_MIDDLE_BOX	SCTP_HAD_NO_TCB
 #define SCTP_BADCRC		0x02
 #define SCTP_PACKET_TRUNCATED	0x04
@@ -556,17 +566,16 @@ struct sctp_error_no_user_data {
  */
 #define SCTP_PCB_FLAGS_DO_NOT_PMTUD      0x0000000000000001
 #define SCTP_PCB_FLAGS_EXT_RCVINFO       0x0000000000000002 /* deprecated */
 #define SCTP_PCB_FLAGS_DONOT_HEARTBEAT   0x0000000000000004
 #define SCTP_PCB_FLAGS_FRAG_INTERLEAVE   0x0000000000000008
 #define SCTP_PCB_FLAGS_INTERLEAVE_STRMS  0x0000000000000010
 #define SCTP_PCB_FLAGS_DO_ASCONF         0x0000000000000020
 #define SCTP_PCB_FLAGS_AUTO_ASCONF       0x0000000000000040
-#define SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE  0x0000000000000080
 /* socket options */
 #define SCTP_PCB_FLAGS_NODELAY           0x0000000000000100
 #define SCTP_PCB_FLAGS_AUTOCLOSE         0x0000000000000200
 #define SCTP_PCB_FLAGS_RECVDATAIOEVNT    0x0000000000000400 /* deprecated */
 #define SCTP_PCB_FLAGS_RECVASSOCEVNT     0x0000000000000800
 #define SCTP_PCB_FLAGS_RECVPADDREVNT     0x0000000000001000
 #define SCTP_PCB_FLAGS_RECVPEERERR       0x0000000000002000
 #define SCTP_PCB_FLAGS_RECVSENDFAILEVNT  0x0000000000004000 /* deprecated */
@@ -596,17 +605,17 @@ struct sctp_error_no_user_data {
 #define SCTP_MOBILITY_BASE               0x00000001
 #define SCTP_MOBILITY_FASTHANDOFF        0x00000002
 #define SCTP_MOBILITY_PRIM_DELETED       0x00000004
 
 
 #define SCTP_SMALLEST_PMTU 512	 /* smallest pmtu allowed when disabling PMTU discovery */
 
 #if defined(__Userspace_os_Windows)
-#pragma pack()
+#pragma pack(pop)
 #endif
 #undef SCTP_PACKED
 
 #include <netinet/sctp_uio.h>
 
 /* This dictates the size of the packet
  * collection buffer. This only applies
  * if SCTP_PACKET_LOGGING is enabled in
--- a/netwerk/sctp/src/netinet/sctp_asconf.c
+++ b/netwerk/sctp/src/netinet/sctp_asconf.c
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * a) Redistributions of source code must retain the above copyright notice,
@@ -27,17 +29,17 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifdef __FreeBSD__
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.c 277347 2015-01-18 20:53:20Z tuexen $");
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.c 324056 2017-09-27 13:05:23Z tuexen $");
 #endif
 
 #include <netinet/sctp_os.h>
 #include <netinet/sctp_var.h>
 #include <netinet/sctp_sysctl.h>
 #include <netinet/sctp_pcb.h>
 #include <netinet/sctp_header.h>
 #include <netinet/sctputil.h>
@@ -182,17 +184,17 @@ sctp_process_asconf_add_ip(struct sockad
 #ifdef INET
 	case SCTP_IPV4_ADDRESS:
 		if (param_length != sizeof(struct sctp_ipv4addr_param)) {
 			/* invalid param size */
 			return (NULL);
 		}
 		v4addr = (struct sctp_ipv4addr_param *)ph;
 		sin = &store.sin;
-		bzero(sin, sizeof(*sin));
+		memset(sin, 0, sizeof(*sin));
 		sin->sin_family = AF_INET;
 #ifdef HAVE_SIN_LEN
 		sin->sin_len = sizeof(struct sockaddr_in);
 #endif
 		sin->sin_port = stcb->rport;
 		sin->sin_addr.s_addr = v4addr->addr;
 		if ((sin->sin_addr.s_addr == INADDR_BROADCAST) ||
 		    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
@@ -207,17 +209,17 @@ sctp_process_asconf_add_ip(struct sockad
 #ifdef INET6
 	case SCTP_IPV6_ADDRESS:
 		if (param_length != sizeof(struct sctp_ipv6addr_param)) {
 			/* invalid param size */
 			return (NULL);
 		}
 		v6addr = (struct sctp_ipv6addr_param *)ph;
 		sin6 = &store.sin6;
-		bzero(sin6, sizeof(*sin6));
+		memset(sin6, 0, sizeof(*sin6));
 		sin6->sin6_family = AF_INET6;
 #ifdef HAVE_SIN6_LEN
 		sin6->sin6_len = sizeof(struct sockaddr_in6);
 #endif
 		sin6->sin6_port = stcb->rport;
 		memcpy((caddr_t)&sin6->sin6_addr, v6addr->addr,
 		    sizeof(struct in6_addr));
 		if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
@@ -243,18 +245,19 @@ sctp_process_asconf_add_ip(struct sockad
 		        "process_asconf_add_ip: using source addr ");
 		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, src);
 	}
 	/* add the address */
 	if (bad_address) {
 		m_reply = sctp_asconf_error_response(aph->correlation_id,
 		    SCTP_CAUSE_INVALID_PARAM, (uint8_t *) aph,
 		    aparam_length);
-	} else if (sctp_add_remote_addr(stcb, sa, &net, SCTP_DONOT_SETSCOPE,
-	                         SCTP_ADDR_DYNAMIC_ADDED) != 0) {
+	} else if (sctp_add_remote_addr(stcb, sa, &net, stcb->asoc.port,
+	                                SCTP_DONOT_SETSCOPE,
+	                                SCTP_ADDR_DYNAMIC_ADDED) != 0) {
 		SCTPDBG(SCTP_DEBUG_ASCONF1,
 			"process_asconf_add_ip: error adding address\n");
 		m_reply = sctp_asconf_error_response(aph->correlation_id,
 		    SCTP_CAUSE_RESOURCE_SHORTAGE, (uint8_t *) aph,
 		    aparam_length);
 	} else {
 		/* notify upper layer */
 		sctp_ulp_notify(SCTP_NOTIFY_ASCONF_ADD_IP, stcb, 0, sa, SCTP_SO_NOT_LOCKED);
@@ -336,17 +339,17 @@ sctp_process_asconf_delete_ip(struct soc
 #ifdef INET
 	case SCTP_IPV4_ADDRESS:
 		if (param_length != sizeof(struct sctp_ipv4addr_param)) {
 			/* invalid param size */
 			return (NULL);
 		}
 		v4addr = (struct sctp_ipv4addr_param *)ph;
 		sin = &store.sin;
-		bzero(sin, sizeof(*sin));
+		memset(sin, 0, sizeof(*sin));
 		sin->sin_family = AF_INET;
 #ifdef HAVE_SIN_LEN
 		sin->sin_len = sizeof(struct sockaddr_in);
 #endif
 		sin->sin_port = stcb->rport;
 		sin->sin_addr.s_addr = v4addr->addr;
 		if (sin->sin_addr.s_addr == INADDR_ANY)
 			zero_address = 1;
@@ -358,17 +361,17 @@ sctp_process_asconf_delete_ip(struct soc
 #ifdef INET6
 	case SCTP_IPV6_ADDRESS:
 		if (param_length != sizeof(struct sctp_ipv6addr_param)) {
 			/* invalid param size */
 			return (NULL);
 		}
 		v6addr = (struct sctp_ipv6addr_param *)ph;
 		sin6 = &store.sin6;
-		bzero(sin6, sizeof(*sin6));
+		memset(sin6, 0, sizeof(*sin6));
 		sin6->sin6_family = AF_INET6;
 #ifdef HAVE_SIN6_LEN
 		sin6->sin6_len = sizeof(struct sockaddr_in6);
 #endif
 		sin6->sin6_port = stcb->rport;
 		memcpy(&sin6->sin6_addr, v6addr->addr,
 		    sizeof(struct in6_addr));
 		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
@@ -471,17 +474,17 @@ sctp_process_asconf_set_primary(struct s
 #ifdef INET
 	case SCTP_IPV4_ADDRESS:
 		if (param_length != sizeof(struct sctp_ipv4addr_param)) {
 			/* invalid param size */
 			return (NULL);
 		}
 		v4addr = (struct sctp_ipv4addr_param *)ph;
 		sin = &store.sin;
-		bzero(sin, sizeof(*sin));
+		memset(sin, 0, sizeof(*sin));
 		sin->sin_family = AF_INET;
 #ifdef HAVE_SIN_LEN
 		sin->sin_len = sizeof(struct sockaddr_in);
 #endif
 		sin->sin_addr.s_addr = v4addr->addr;
 		if (sin->sin_addr.s_addr == INADDR_ANY)
 			zero_address = 1;
 		SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_set_primary: ");
@@ -491,17 +494,17 @@ sctp_process_asconf_set_primary(struct s
 #ifdef INET6
 	case SCTP_IPV6_ADDRESS:
 		if (param_length != sizeof(struct sctp_ipv6addr_param)) {
 			/* invalid param size */
 			return (NULL);
 		}
 		v6addr = (struct sctp_ipv6addr_param *)ph;
 		sin6 = &store.sin6;
-		bzero(sin6, sizeof(*sin6));
+		memset(sin6, 0, sizeof(*sin6));
 		sin6->sin6_family = AF_INET6;
 #ifdef HAVE_SIN6_LEN
 		sin6->sin6_len = sizeof(struct sockaddr_in6);
 #endif
 		memcpy((caddr_t)&sin6->sin6_addr, v6addr->addr,
 		    sizeof(struct in6_addr));
 		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
 			zero_address = 1;
@@ -555,17 +558,19 @@ sctp_process_asconf_set_primary(struct s
 		                                 SCTP_MOBILITY_BASE) ||
 		    sctp_is_mobility_feature_on(stcb->sctp_ep,
 		                                SCTP_MOBILITY_FASTHANDOFF)) &&
 		    sctp_is_mobility_feature_on(stcb->sctp_ep,
 		                                SCTP_MOBILITY_PRIM_DELETED) &&
 		    (stcb->asoc.primary_destination->dest_state &
 		     SCTP_ADDR_UNCONFIRMED) == 0) {
 
-			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER+SCTP_LOC_7);
+			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED,
+			                stcb->sctp_ep, stcb, NULL,
+			                SCTP_FROM_SCTP_ASCONF + SCTP_LOC_1);
 			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
 					SCTP_MOBILITY_FASTHANDOFF)) {
 				sctp_assoc_immediate_retrans(stcb,
 						stcb->asoc.primary_destination);
 			}
 			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
 					SCTP_MOBILITY_BASE)) {
 				sctp_move_chunks_from_net(stcb,
@@ -925,17 +930,17 @@ sctp_addr_match(struct sctp_paramhdr *ph
  */
 void
 sctp_asconf_cleanup(struct sctp_tcb *stcb, struct sctp_nets *net)
 {
 	/*
 	 * clear out any existing asconfs going out
 	 */
 	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net,
-			SCTP_FROM_SCTP_ASCONF+SCTP_LOC_2);
+			SCTP_FROM_SCTP_ASCONF + SCTP_LOC_2);
 	stcb->asoc.asconf_seq_out_acked = stcb->asoc.asconf_seq_out;
 	/* remove the old ASCONF on our outbound queue */
 	sctp_toss_old_asconf(stcb);
 }
 
 /*
  * cleanup any cached source addresses that may be topologically
  * incorrect after a new address has been added to this interface.
@@ -993,17 +998,17 @@ sctp_assoc_immediate_retrans(struct sctp
 
 	if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
 		SCTPDBG(SCTP_DEBUG_ASCONF1, "assoc_immediate_retrans: Deleted primary is ");
 		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
 		SCTPDBG(SCTP_DEBUG_ASCONF1, "Current Primary is ");
 		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.primary_destination->ro._l_addr.sa);
 		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb,
 				stcb->asoc.deleted_primary,
-				SCTP_FROM_SCTP_TIMER+SCTP_LOC_8);
+				SCTP_FROM_SCTP_ASCONF + SCTP_LOC_3);
 		stcb->asoc.num_send_timers_up--;
 		if (stcb->asoc.num_send_timers_up < 0) {
 			stcb->asoc.num_send_timers_up = 0;
 		}
 		SCTP_TCB_LOCK_ASSERT(stcb);
 		error = sctp_t3rxt_timer(stcb->sctp_ep, stcb,
 					stcb->asoc.deleted_primary);
 		if (error) {
@@ -1033,17 +1038,17 @@ sctp_asconf_queue_mgmt(struct sctp_tcb *
 
 void
 sctp_net_immediate_retrans(struct sctp_tcb *stcb, struct sctp_nets *net)
 {
 	struct sctp_tmit_chunk *chk;
 
 	SCTPDBG(SCTP_DEBUG_ASCONF1, "net_immediate_retrans: RTO is %d\n", net->RTO);
 	sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net,
-	    SCTP_FROM_SCTP_TIMER+SCTP_LOC_5);
+	                SCTP_FROM_SCTP_ASCONF + SCTP_LOC_4);
 	stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
 	net->error_count = 0;
 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
 		if (chk->whoTo == net) {
 			if (chk->sent < SCTP_DATAGRAM_RESEND) {
 				chk->sent = SCTP_DATAGRAM_RESEND;
 				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
 				sctp_flight_size_decrease(chk);
@@ -1109,17 +1114,18 @@ sctp_path_check_and_react(struct sctp_tc
 		}
 		/* Check if the nexthop is corresponding to the new address.
 		   If the new address is corresponding to the current nexthop,
 		   the path will be changed.
 		   If the new address is NOT corresponding to the current
 		   nexthop, the path will not be changed.
 		 */
 		SCTP_RTALLOC((sctp_route_t *)&net->ro,
-			     stcb->sctp_ep->def_vrf_id);
+			     stcb->sctp_ep->def_vrf_id,
+			     stcb->sctp_ep->fibnum);
 		if (net->ro.ro_rt == NULL)
 			continue;
 
 		changed = 0;
 		switch (net->ro._l_addr.sa.sa_family) {
 #ifdef INET
 		case AF_INET:
 			if (sctp_v4src_match_nexthop(newifa, (sctp_route_t *)&net->ro)) {
@@ -1329,35 +1335,42 @@ sctp_asconf_queue_mgmt(struct sctp_tcb *
  * advisable.
  */
 static int
 sctp_asconf_queue_add(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
 		      uint16_t type)
 {
 	uint32_t status;
 	int pending_delete_queued = 0;
+	int last;
 
 	/* see if peer supports ASCONF */
 	if (stcb->asoc.asconf_supported == 0) {
 		return (-1);
 	}
 
 	/*
 	 * if this is deleting the last address from the assoc, mark it as
 	 * pending.
 	 */
-	if ((type == SCTP_DEL_IP_ADDRESS) && !stcb->asoc.asconf_del_pending &&
-	    (sctp_local_addr_count(stcb) < 2)) {
-		/* set the pending delete info only */
-		stcb->asoc.asconf_del_pending = 1;
-		stcb->asoc.asconf_addr_del_pending = ifa;
-		atomic_add_int(&ifa->refcount, 1);
-		SCTPDBG(SCTP_DEBUG_ASCONF2,
-			"asconf_queue_add: mark delete last address pending\n");
-		return (-1);
+	if ((type == SCTP_DEL_IP_ADDRESS) && !stcb->asoc.asconf_del_pending) {
+		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+			last = (sctp_local_addr_count(stcb) == 0);
+		} else {
+			last = (sctp_local_addr_count(stcb) == 1);
+		}
+		if (last) {
+			/* set the pending delete info only */
+			stcb->asoc.asconf_del_pending = 1;
+			stcb->asoc.asconf_addr_del_pending = ifa;
+			atomic_add_int(&ifa->refcount, 1);
+			SCTPDBG(SCTP_DEBUG_ASCONF2,
+				"asconf_queue_add: mark delete last address pending\n");
+			return (-1);
+		}
 	}
 
 	/* queue an asconf parameter */
 	status = sctp_asconf_queue_mgmt(stcb, ifa, type);
 
 	/*
 	 * if this is an add, and there is a delete also pending (i.e. the
 	 * last local address is being changed), queue the pending delete too.
@@ -1632,17 +1645,17 @@ sctp_asconf_process_param_ack(struct sct
 /*
  * cleanup from a bad asconf ack parameter
  */
 static void
 sctp_asconf_ack_clear(struct sctp_tcb *stcb SCTP_UNUSED)
 {
 	/* assume peer doesn't really know how to do asconfs */
 	/* XXX we could free the pending queue here */
-	
+
 }
 
 void
 sctp_handle_asconf_ack(struct mbuf *m, int offset,
 		       struct sctp_asconf_ack_chunk *cp, struct sctp_tcb *stcb,
 		       struct sctp_nets *net, int *abort_no_unlock)
 {
 	struct sctp_association *asoc;
@@ -1675,32 +1688,38 @@ sctp_handle_asconf_ack(struct mbuf *m, i
 	 * serial numbers
 	 */
 
 	/*
 	 * if the serial number is the next expected, but I didn't send it,
 	 * abort the asoc, since someone probably just hijacked us...
 	 */
 	if (serial_num == (asoc->asconf_seq_out + 1)) {
+		struct mbuf *op_err;
+		char msg[SCTP_DIAG_INFO_LEN];
+
 		SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got unexpected next serial number! Aborting asoc!\n");
-		sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, SCTP_SO_NOT_LOCKED);
+		snprintf(msg, sizeof(msg), "Never sent serial number %8.8x",
+			 serial_num);
+		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
 		*abort_no_unlock = 1;
 		return;
 	}
 	if (serial_num != asoc->asconf_seq_out_acked + 1) {
 		/* got a duplicate/unexpected ASCONF-ACK */
 		SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got duplicate/unexpected serial number = %xh (expected = %xh)\n",
 			serial_num, asoc->asconf_seq_out_acked + 1);
 		return;
 	}
 
 	if (serial_num == asoc->asconf_seq_out - 1) {
 		/* stop our timer */
 		sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net,
-				SCTP_FROM_SCTP_ASCONF+SCTP_LOC_3);
+				SCTP_FROM_SCTP_ASCONF + SCTP_LOC_5);
 	}
 
 	/* process the ASCONF-ACK contents */
 	ack_length = ntohs(cp->ch.chunk_length) -
 	    sizeof(struct sctp_asconf_ack_chunk);
 	offset += sizeof(struct sctp_asconf_ack_chunk);
 	/* process through all parameters */
 	while (ack_length >= sizeof(struct sctp_asconf_paramhdr)) {
@@ -1985,17 +2004,18 @@ sctp_addr_mgmt_assoc(struct sctp_inpcb *
 			status = sctp_asconf_queue_add(stcb, ifa, type);
 
 			/*
 			 * if queued ok, and in the open state, send out the
 			 * ASCONF.  If in the non-open state, these will be
 			 * sent when the state goes open.
 			 */
 			if (status == 0 &&
-			    SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+			    ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+			     (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED))) {
 #ifdef SCTP_TIMER_BASED_ASCONF
 				sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
 				    stcb, stcb->asoc.primary_destination);
 #else
 				sctp_send_asconf(stcb, NULL, addr_locked);
 #endif
 			}
 		}
@@ -2237,17 +2257,18 @@ sctp_asconf_iterator_stcb(struct sctp_in
 		    stcb->asoc.asconf_supported == 1) {
 			/* queue an asconf for this addr */
 			status = sctp_asconf_queue_add(stcb, ifa, type);
 			/*
 			 * if queued ok, and in the open state, update the
 			 * count of queued params.  If in the non-open state,
 			 * these get sent when the assoc goes open.
 			 */
-			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
 				if (status >= 0) {
 					num_queued++;
 				}
 			}
 		}
 	}
 	/*
 	 * If we have queued params in the open state, send out an ASCONF.
@@ -2298,17 +2319,18 @@ sctp_set_primary_ip_address_sa(struct sc
 
 	/* queue an ASCONF:SET_PRIM_ADDR to be sent */
 	if (!sctp_asconf_queue_add(stcb, ifa, SCTP_SET_PRIM_ADDR)) {
 		/* set primary queuing succeeded */
 		SCTPDBG(SCTP_DEBUG_ASCONF1,
 			"set_primary_ip_address_sa: queued on tcb=%p, ",
 			(void *)stcb);
 		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
-		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
 #ifdef SCTP_TIMER_BASED_ASCONF
 			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
 					 stcb->sctp_ep, stcb,
 					 stcb->asoc.primary_destination);
 #else
 			sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED);
 #endif
 		}
@@ -2316,48 +2338,16 @@ sctp_set_primary_ip_address_sa(struct sc
 		SCTPDBG(SCTP_DEBUG_ASCONF1, "set_primary_ip_address_sa: failed to add to queue on tcb=%p, ",
 			(void *)stcb);
 		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
 		return (-1);
 	}
 	return (0);
 }
 
-void
-sctp_set_primary_ip_address(struct sctp_ifa *ifa)
-{
-	struct sctp_inpcb *inp;
-
-	/* go through all our PCB's */
-	LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
-		struct sctp_tcb *stcb;
-
-		/* process for all associations for this endpoint */
-		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
-			/* queue an ASCONF:SET_PRIM_ADDR to be sent */
-			if (!sctp_asconf_queue_add(stcb, ifa,
-						   SCTP_SET_PRIM_ADDR)) {
-				/* set primary queuing succeeded */
-				SCTPDBG(SCTP_DEBUG_ASCONF1, "set_primary_ip_address: queued on stcb=%p, ",
-					(void *)stcb);
-				SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &ifa->address.sa);
-				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
-#ifdef SCTP_TIMER_BASED_ASCONF
-					sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
-							 stcb->sctp_ep, stcb,
-							 stcb->asoc.primary_destination);
-#else
-					sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED);
-#endif
-				}
-			}
-		} /* for each stcb */
-	} /* for each inp */
-}
-
 int
 sctp_is_addr_pending(struct sctp_tcb *stcb, struct sctp_ifa *sctp_ifa)
 {
 	struct sctp_tmit_chunk *chk, *nchk;
 	unsigned int offset, asconf_limit;
 	struct sctp_asconf_chunk *acp;
 	struct sctp_asconf_paramhdr *aph;
 	uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
@@ -2610,17 +2600,17 @@ sctp_compose_asconf(struct sctp_tcb *stc
 		SCTPDBG(SCTP_DEBUG_ASCONF1,
 			"compose_asconf: couldn't get mbuf!\n");
 		sctp_m_freem(m_asconf_chk);
 		return (NULL);
 	}
 	SCTP_BUF_LEN(m_asconf_chk) = sizeof(struct sctp_asconf_chunk);
 	SCTP_BUF_LEN(m_asconf) = 0;
 	acp = mtod(m_asconf_chk, struct sctp_asconf_chunk *);
-	bzero(acp, sizeof(struct sctp_asconf_chunk));
+	memset(acp, 0, sizeof(struct sctp_asconf_chunk));
 	/* save pointers to lookup address and asconf params */
 	lookup_ptr = (caddr_t)(acp + 1);	/* after the header */
 	ptr = mtod(m_asconf, caddr_t);	/* beginning of cluster */
 
 	/* fill in chunk header info */
 	acp->ch.chunk_type = SCTP_ASCONF;
 	acp->ch.chunk_flags = 0;
 	acp->serial_number = htonl(stcb->asoc.asconf_seq_out);
@@ -2743,17 +2733,17 @@ sctp_compose_asconf(struct sctp_tcb *stc
 			SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(p_size);
 		} else {
 			/* uh oh... don't have any address?? */
 			SCTPDBG(SCTP_DEBUG_ASCONF1,
 				"compose_asconf: no lookup addr!\n");
 			/* XXX for now, we send a IPv4 address of 0.0.0.0 */
 			lookup->ph.param_type = htons(SCTP_IPV4_ADDRESS);
 			lookup->ph.param_length = htons(SCTP_SIZE32(sizeof(struct sctp_ipv4addr_param)));
-			bzero(lookup->addr, sizeof(struct in_addr));
+			memset(lookup->addr, 0, sizeof(struct in_addr));
 			SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(sizeof(struct sctp_ipv4addr_param));
 		}
 	}
 	/* chain it all together */
 	SCTP_BUF_NEXT(m_asconf_chk) = m_asconf;
 	*retlen = SCTP_BUF_LEN(m_asconf_chk) + SCTP_BUF_LEN(m_asconf);
 	acp->ch.chunk_length = htons(*retlen);
 
@@ -3245,16 +3235,17 @@ sctp_addr_mgmt_ep_sa(struct sctp_inpcb *
 					if (laddr->ifa == ifa) {
 						sctp_del_local_addr_ep(inp, ifa);
 					}
 				}
 			}
 		} else {
 			struct sctp_asconf_iterator *asc;
 			struct sctp_laddr *wi;
+			int ret;
 
 			SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
 			            sizeof(struct sctp_asconf_iterator),
 			            SCTP_M_ASC_IT);
 			if (asc == NULL) {
 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, ENOMEM);
 				return (ENOMEM);
 			}
@@ -3266,24 +3257,30 @@ sctp_addr_mgmt_ep_sa(struct sctp_inpcb *
 			}
 			LIST_INIT(&asc->list_of_work);
 			asc->cnt = 1;
 			SCTP_INCR_LADDR_COUNT();
 			wi->ifa = ifa;
 			wi->action = type;
 			atomic_add_int(&ifa->refcount, 1);
 			LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
-			(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
+			ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
 			                             sctp_asconf_iterator_stcb,
 			                             sctp_asconf_iterator_ep_end,
 			                             SCTP_PCB_ANY_FLAGS,
 			                             SCTP_PCB_ANY_FEATURES,
 			                             SCTP_ASOC_ANY_STATE,
 			                             (void *)asc, 0,
 			                             sctp_asconf_iterator_end, inp, 0);
+			if (ret) {
+				SCTP_PRINTF("Failed to initiate iterator for addr_mgmt_ep_sa\n");
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EFAULT);
+				                    sctp_asconf_iterator_end(asc, 0);
+				return (EFAULT);
+			}
 		}
 		return (0);
 	} else {
 		/* invalid address! */
 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EADDRNOTAVAIL);
 		return (EADDRNOTAVAIL);
 	}
 }
--- a/netwerk/sctp/src/netinet/sctp_asconf.h
+++ b/netwerk/sctp/src/netinet/sctp_asconf.h
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * a) Redistributions of source code must retain the above copyright notice,
@@ -27,17 +29,17 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifdef __FreeBSD__
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.h 237715 2012-06-28 16:01:08Z tuexen $");
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.h 324056 2017-09-27 13:05:23Z tuexen $");
 #endif
 
 #ifndef _NETINET_SCTP_ASCONF_H_
 #define _NETINET_SCTP_ASCONF_H_
 
 #if defined(_KERNEL) || defined(__Userspace__)
 
 /*
@@ -68,19 +70,16 @@ extern void sctp_asconf_iterator_stcb(st
 extern void sctp_asconf_iterator_end(void *ptr, uint32_t val);
 
 
 extern int32_t
 sctp_set_primary_ip_address_sa(struct sctp_tcb *,
     struct sockaddr *);
 
 extern void
-sctp_set_primary_ip_address(struct sctp_ifa *ifa);
-
-extern void
 sctp_check_address_list(struct sctp_tcb *, struct mbuf *, int, int,
     struct sockaddr *, uint16_t, uint16_t, uint16_t, uint16_t);
 
 extern void
 sctp_assoc_immediate_retrans(struct sctp_tcb *, struct sctp_nets *);
 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
 extern void
 sctp_net_immediate_retrans(struct sctp_tcb *, struct sctp_nets *);
--- a/netwerk/sctp/src/netinet/sctp_auth.c
+++ b/netwerk/sctp/src/netinet/sctp_auth.c
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * a) Redistributions of source code must retain the above copyright notice,
@@ -27,17 +29,17 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifdef __FreeBSD__
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.c 271673 2014-09-16 14:20:33Z tuexen $");
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.c 324971 2017-10-25 09:12:22Z tuexen $");
 #endif
 
 #include <netinet/sctp_os.h>
 #include <netinet/sctp.h>
 #include <netinet/sctp_header.h>
 #include <netinet/sctp_pcb.h>
 #include <netinet/sctp_var.h>
 #include <netinet/sctp_sysctl.h>
@@ -50,17 +52,17 @@
 #define SCTP_AUTH_DEBUG		(SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_AUTH1)
 #define SCTP_AUTH_DEBUG2	(SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_AUTH2)
 #endif /* SCTP_DEBUG */
 
 
 void
 sctp_clear_chunklist(sctp_auth_chklist_t *chklist)
 {
-	bzero(chklist, sizeof(*chklist));
+	memset(chklist, 0, sizeof(*chklist));
 	/* chklist->num_chunks = 0; */
 }
 
 sctp_auth_chklist_t *
 sctp_alloc_chunklist(void)
 {
 	sctp_auth_chklist_t *chklist;
 
@@ -89,17 +91,17 @@ sctp_copy_chunklist(sctp_auth_chklist_t 
 	if (list == NULL)
 		return (NULL);
 
 	/* get a new list */
 	new_list = sctp_alloc_chunklist();
 	if (new_list == NULL)
 		return (NULL);
 	/* copy it */
-	bcopy(list, new_list, sizeof(*new_list));
+	memcpy(new_list, list, sizeof(*new_list));
 
 	return (new_list);
 }
 
 
 /*
  * add a chunk to the required chunks list
  */
@@ -335,17 +337,17 @@ sctp_set_key(uint8_t *key, uint32_t keyl
 {
 	sctp_key_t *new_key;
 
 	new_key = sctp_alloc_key(keylen);
 	if (new_key == NULL) {
 		/* out of memory */
 		return (NULL);
 	}
-	bcopy(key, new_key->key, keylen);
+	memcpy(new_key->key, key, keylen);
 	return (new_key);
 }
 
 /*-
  * given two keys of variable size, compute which key is "larger/smaller"
  * returns:  1 if key1 > key2
  *          -1 if key1 < key2
  *           0 if key1 = key2
@@ -424,38 +426,38 @@ sctp_compute_hashkey(sctp_key_t *key1, s
 		/* all keys empty/null?! */
 		return (NULL);
 	}
 
 	/* concatenate the keys */
 	if (sctp_compare_key(key1, key2) <= 0) {
 		/* key is shared + key1 + key2 */
 		if (sctp_get_keylen(shared)) {
-			bcopy(shared->key, key_ptr, shared->keylen);
+			memcpy(key_ptr, shared->key, shared->keylen);
 			key_ptr += shared->keylen;
 		}
 		if (sctp_get_keylen(key1)) {
-			bcopy(key1->key, key_ptr, key1->keylen);
+			memcpy(key_ptr, key1->key, key1->keylen);
 			key_ptr += key1->keylen;
 		}
 		if (sctp_get_keylen(key2)) {
-			bcopy(key2->key, key_ptr, key2->keylen);
+			memcpy(key_ptr, key2->key, key2->keylen);
 		}
 	} else {
 		/* key is shared + key2 + key1 */
 		if (sctp_get_keylen(shared)) {
-			bcopy(shared->key, key_ptr, shared->keylen);
+			memcpy(key_ptr, shared->key, shared->keylen);
 			key_ptr += shared->keylen;
 		}
 		if (sctp_get_keylen(key2)) {
-			bcopy(key2->key, key_ptr, key2->keylen);
+			memcpy(key_ptr, key2->key, key2->keylen);
 			key_ptr += key2->keylen;
 		}
 		if (sctp_get_keylen(key1)) {
-			bcopy(key1->key, key_ptr, key1->keylen);
+			memcpy(key_ptr, key1->key, key1->keylen);
 		}
 	}
 	return (new_key);
 }
 
 
 sctp_sharedkey_t *
 sctp_alloc_sharedkey(void)
@@ -539,33 +541,33 @@ sctp_insert_sharedkey(struct sctp_keyhea
 		}
 		if (LIST_NEXT(skey, next) == NULL) {
 			/* belongs at the end of the list */
 			LIST_INSERT_AFTER(skey, new_skey, next);
 			return (0);
 		}
 	}
 	/* shouldn't reach here */
-	return (0);
+	return (EINVAL);
 }
 
 void
 sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t key_id)
 {
 	sctp_sharedkey_t *skey;
 
 	/* find the shared key */
 	skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, key_id);
 
 	/* bump the ref count */
 	if (skey) {
 		atomic_add_int(&skey->refcount, 1);
 		SCTPDBG(SCTP_DEBUG_AUTH2,
 			"%s: stcb %p key %u refcount acquire to %d\n",
-			__FUNCTION__, (void *)stcb, key_id, skey->refcount);
+			__func__, (void *)stcb, key_id, skey->refcount);
 	}
 }
 
 void
 sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t key_id, int so_locked
 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
 	SCTP_UNUSED
 #endif
@@ -573,30 +575,30 @@ sctp_auth_key_release(struct sctp_tcb *s
 {
 	sctp_sharedkey_t *skey;
 
 	/* find the shared key */
 	skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, key_id);
 
 	/* decrement the ref count */
 	if (skey) {
-		sctp_free_sharedkey(skey);
 		SCTPDBG(SCTP_DEBUG_AUTH2,
 			"%s: stcb %p key %u refcount release to %d\n",
-			__FUNCTION__, (void *)stcb, key_id, skey->refcount);
+			__func__, (void *)stcb, key_id, skey->refcount);
 
 		/* see if a notification should be generated */
-		if ((skey->refcount <= 1) && (skey->deactivated)) {
+		if ((skey->refcount <= 2) && (skey->deactivated)) {
 			/* notify ULP that key is no longer used */
 			sctp_ulp_notify(SCTP_NOTIFY_AUTH_FREE_KEY, stcb,
 					key_id, 0, so_locked);
 			SCTPDBG(SCTP_DEBUG_AUTH2,
 				"%s: stcb %p key %u no longer used, %d\n",
-				__FUNCTION__, (void *)stcb, key_id, skey->refcount);
+				__func__, (void *)stcb, key_id, skey->refcount);
 		}
+		sctp_free_sharedkey(skey);
 	}
 }
 
 static sctp_sharedkey_t *
 sctp_copy_sharedkey(const sctp_sharedkey_t *skey)
 {
 	sctp_sharedkey_t *new_skey;
 
@@ -619,18 +621,21 @@ sctp_copy_skeylist(const struct sctp_key
 	sctp_sharedkey_t *skey, *new_skey;
 	int count = 0;
 
 	if ((src == NULL) || (dest == NULL))
 		return (0);
 	LIST_FOREACH(skey, src, next) {
 		new_skey = sctp_copy_sharedkey(skey);
 		if (new_skey != NULL) {
-			(void)sctp_insert_sharedkey(dest, new_skey);
-			count++;
+			if (sctp_insert_sharedkey(dest, new_skey)) {
+				sctp_free_sharedkey(new_skey);
+			} else {
+				count++;
+			}
 		}
 	}
 	return (count);
 }
 
 
 sctp_hmaclist_t *
 sctp_alloc_hmaclist(uint16_t num_hmacs)
@@ -767,17 +772,17 @@ sctp_serialize_hmaclist(sctp_hmaclist_t 
 	int i;
 	uint16_t hmac_id;
 
 	if (list == NULL)
 		return (0);
 
 	for (i = 0; i < list->num_algo; i++) {
 		hmac_id = htons(list->hmac[i]);
-		bcopy(&hmac_id, ptr, sizeof(hmac_id));
+		memcpy(ptr, &hmac_id, sizeof(hmac_id));
 		ptr += sizeof(hmac_id);
 	}
 	return (list->num_algo * sizeof(hmac_id));
 }
 
 int
 sctp_verify_hmac_param (struct sctp_auth_hmac_algo *hmacs, uint32_t num_hmacs)
 {
@@ -798,17 +803,17 @@ sctp_alloc_authinfo(void)
 
 	SCTP_MALLOC(new_authinfo, sctp_authinfo_t *, sizeof(*new_authinfo),
 		    SCTP_M_AUTH_IF);
 
 	if (new_authinfo == NULL) {
 		/* out of memory */
 		return (NULL);
 	}
-	bzero(new_authinfo, sizeof(*new_authinfo));
+	memset(new_authinfo, 0, sizeof(*new_authinfo));
 	return (new_authinfo);
 }
 
 void
 sctp_free_authinfo(sctp_authinfo_t *authinfo)
 {
 	if (authinfo == NULL)
 		return;
@@ -969,20 +974,20 @@ sctp_hmac(uint16_t hmac_algo, uint8_t *k
 		sctp_hmac_init(hmac_algo, &ctx);
 		sctp_hmac_update(hmac_algo, &ctx, key, keylen);
 		sctp_hmac_final(hmac_algo, &ctx, temp);
 		/* set the hashed key as the key */
 		keylen = digestlen;
 		key = temp;
 	}
 	/* initialize the inner/outer pads with the key and "append" zeroes */
-	bzero(ipad, blocklen);
-	bzero(opad, blocklen);
-	bcopy(key, ipad, keylen);
-	bcopy(key, opad, keylen);
+	memset(ipad, 0, blocklen);
+	memset(opad, 0, blocklen);
+	memcpy(ipad, key, keylen);
+	memcpy(opad, key, keylen);
 
 	/* XOR the key with ipad and opad values */
 	for (i = 0; i < blocklen; i++) {
 		ipad[i] ^= 0x36;
 		opad[i] ^= 0x5c;
 	}
 
 	/* perform inner hash */
@@ -1029,20 +1034,20 @@ sctp_hmac_m(uint16_t hmac_algo, uint8_t 
 		sctp_hmac_init(hmac_algo, &ctx);
 		sctp_hmac_update(hmac_algo, &ctx, key, keylen);
 		sctp_hmac_final(hmac_algo, &ctx, temp);
 		/* set the hashed key as the key */
 		keylen = digestlen;
 		key = temp;
 	}
 	/* initialize the inner/outer pads with the key and "append" zeroes */
-	bzero(ipad, blocklen);
-	bzero(opad, blocklen);
-	bcopy(key, ipad, keylen);
-	bcopy(key, opad, keylen);
+	memset(ipad, 0, blocklen);
+	memset(opad, 0, blocklen);
+	memcpy(ipad, key, keylen);
+	memcpy(opad, key, keylen);
 
 	/* XOR the key with ipad and opad values */
 	for (i = 0; i < blocklen; i++) {
 		ipad[i] ^= 0x36;
 		opad[i] ^= 0x5c;
 	}
 
 	/* perform inner hash */
@@ -1140,17 +1145,17 @@ sctp_compute_hmac(uint16_t hmac_algo, sc
 	/* hash the key if it is longer than the hash block size */
 	blocklen = sctp_get_hmac_block_len(hmac_algo);
 	if (key->keylen > blocklen) {
 		sctp_hmac_init(hmac_algo, &ctx);
 		sctp_hmac_update(hmac_algo, &ctx, key->key, key->keylen);
 		sctp_hmac_final(hmac_algo, &ctx, temp);
 		/* save the hashed key as the new key */
 		key->keylen = digestlen;
-		bcopy(temp, key->key, key->keylen);
+		memcpy(key->key, temp, key->keylen);
 	}
 	return (sctp_hmac(hmac_algo, key->key, key->keylen, text, textlen,
 	    digest));
 }
 
 /* mbuf version */
 uint32_t
 sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t *key, struct mbuf *m,
@@ -1174,17 +1179,17 @@ sctp_compute_hmac_m(uint16_t hmac_algo, 
 	/* hash the key if it is longer than the hash block size */
 	blocklen = sctp_get_hmac_block_len(hmac_algo);
 	if (key->keylen > blocklen) {
 		sctp_hmac_init(hmac_algo, &ctx);
 		sctp_hmac_update(hmac_algo, &ctx, key->key, key->keylen);
 		sctp_hmac_final(hmac_algo, &ctx, temp);
 		/* save the hashed key as the new key */
 		key->keylen = digestlen;
-		bcopy(temp, key->key, key->keylen);
+		memcpy(key->key, temp, key->keylen);
 	}
 	return (sctp_hmac_m(hmac_algo, key->key, key->keylen, m, m_offset, digest, 0));
 }
 
 int
 sctp_auth_is_supported_hmac(sctp_hmaclist_t *list, uint16_t id)
 {
 	int i;
@@ -1452,30 +1457,30 @@ sctp_auth_get_cookie_params(struct sctp_
 
 		if ((plen == 0) || (offset + plen > length))
 			break;
 
 		if (ptype == SCTP_RANDOM) {
 			if (plen > sizeof(random_store))
 				break;
 			phdr = sctp_get_next_param(m, offset,
-			    (struct sctp_paramhdr *)random_store, min(plen, sizeof(random_store)));
+			    (struct sctp_paramhdr *)random_store, plen);
 			if (phdr == NULL)
 				return;
 			/* save the random and length for the key */
 			p_random = (struct sctp_auth_random *)phdr;
 			random_len = plen - sizeof(*p_random);
 		} else if (ptype == SCTP_HMAC_LIST) {
 			uint16_t num_hmacs;
 			uint16_t i;
 
 			if (plen > sizeof(hmacs_store))
 				break;
 			phdr = sctp_get_next_param(m, offset,
-			    (struct sctp_paramhdr *)hmacs_store, min(plen,sizeof(hmacs_store)));
+			    (struct sctp_paramhdr *)hmacs_store, plen);
 			if (phdr == NULL)
 				return;
 			/* save the hmacs list and num for the key */
 			hmacs = (struct sctp_auth_hmac_algo *)phdr;
 			hmacs_len = plen - sizeof(*hmacs);
 			num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]);
 			if (stcb->asoc.local_hmacs != NULL)
 				sctp_free_hmaclist(stcb->asoc.local_hmacs);
@@ -1487,17 +1492,17 @@ sctp_auth_get_cookie_params(struct sctp_
 				}
 			}
 		} else if (ptype == SCTP_CHUNK_LIST) {
 			int i;
 
 			if (plen > sizeof(chunks_store))
 				break;
 			phdr = sctp_get_next_param(m, offset,
-			    (struct sctp_paramhdr *)chunks_store, min(plen,sizeof(chunks_store)));
+			    (struct sctp_paramhdr *)chunks_store, plen);
 			if (phdr == NULL)
 				return;
 			chunks = (struct sctp_auth_chunk_list *)phdr;
 			num_chunks = plen - sizeof(*chunks);
 			/* save chunks list and num for the key */
 			if (stcb->asoc.local_auth_chunks != NULL)
 				sctp_clear_chunklist(stcb->asoc.local_auth_chunks);
 			else
@@ -1519,28 +1524,28 @@ sctp_auth_get_cookie_params(struct sctp_
 	if (chunks != NULL) {
 		keylen += sizeof(*chunks) + num_chunks;
 	}
 	new_key = sctp_alloc_key(keylen);
 	if (new_key != NULL) {
 	    /* copy in the RANDOM */
 	    if (p_random != NULL) {
 		keylen = sizeof(*p_random) + random_len;
-		bcopy(p_random, new_key->key, keylen);
+		memcpy(new_key->key, p_random, keylen);
 	    }
 	    /* append in the AUTH chunks */
 	    if (chunks != NULL) {
-		bcopy(chunks, new_key->key + keylen,
-		      sizeof(*chunks) + num_chunks);
+		memcpy(new_key->key + keylen, chunks,
+		       sizeof(*chunks) + num_chunks);
 		keylen += sizeof(*chunks) + num_chunks;
 	    }
 	    /* append in the HMACs */
 	    if (hmacs != NULL) {
-		bcopy(hmacs, new_key->key + keylen,
-		      sizeof(*hmacs) + hmacs_len);
+		memcpy(new_key->key + keylen, hmacs,
+		       sizeof(*hmacs) + hmacs_len);
 	    }
 	}
 	if (stcb->asoc.authinfo.random != NULL)
 		sctp_free_key(stcb->asoc.authinfo.random);
 	stcb->asoc.authinfo.random = new_key;
 	stcb->asoc.authinfo.random_len = random_len;
 	sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid);
 	sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid);
@@ -1568,17 +1573,17 @@ sctp_fill_hmac_digest_m(struct mbuf *m, 
 	sctp_sharedkey_t *skey;
 	sctp_key_t *key;
 
 	if ((stcb == NULL) || (auth == NULL))
 		return;
 
 	/* zero the digest + chunk padding */
 	digestlen = sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
-	bzero(auth->hmac, SCTP_SIZE32(digestlen));
+	memset(auth->hmac, 0, SCTP_SIZE32(digestlen));
 
 	/* is the desired key cached? */
 	if ((keyid != stcb->asoc.authinfo.assoc_keyid) ||
 	    (stcb->asoc.authinfo.assoc_key == NULL)) {
 		if (stcb->asoc.authinfo.assoc_key != NULL) {
 			/* free the old cached key */
 			sctp_free_key(stcb->asoc.authinfo.assoc_key);
 		}
@@ -1607,17 +1612,17 @@ sctp_fill_hmac_digest_m(struct mbuf *m, 
 
 	/* compute and fill in the digest */
 	(void)sctp_compute_hmac_m(stcb->asoc.peer_hmac_id, stcb->asoc.authinfo.assoc_key,
 				  m, auth_offset, auth->hmac);
 }
 
 
 static void
-sctp_bzero_m(struct mbuf *m, uint32_t m_offset, uint32_t size)
+sctp_zero_m(struct mbuf *m, uint32_t m_offset, uint32_t size)
 {
 	struct mbuf *m_tmp;
 	uint8_t *data;
 
 	/* sanity check */
 	if (m == NULL)
 		return;
 
@@ -1625,21 +1630,21 @@ sctp_bzero_m(struct mbuf *m, uint32_t m_
 	m_tmp = m;
 	while ((m_tmp != NULL) && (m_offset >= (uint32_t) SCTP_BUF_LEN(m_tmp))) {
 		m_offset -= SCTP_BUF_LEN(m_tmp);
 		m_tmp = SCTP_BUF_NEXT(m_tmp);
 	}
 	/* now use the rest of the mbuf chain */
 	while ((m_tmp != NULL) && (size > 0)) {
 		data = mtod(m_tmp, uint8_t *) + m_offset;
-		if (size > (uint32_t) SCTP_BUF_LEN(m_tmp)) {
-			bzero(data, SCTP_BUF_LEN(m_tmp));
-			size -= SCTP_BUF_LEN(m_tmp);
+		if (size > (uint32_t)(SCTP_BUF_LEN(m_tmp) - m_offset)) {
+			memset(data, 0, SCTP_BUF_LEN(m_tmp) - m_offset);
+			size -= SCTP_BUF_LEN(m_tmp) - m_offset;
 		} else {
-			bzero(data, size);
+			memset(data, 0, size);
 			size = 0;
 		}
 		/* clear the offset since it's only for the first mbuf */
 		m_offset = 0;
 		m_tmp = SCTP_BUF_NEXT(m_tmp);
 	}
 }
 
@@ -1673,41 +1678,40 @@ sctp_handle_auth(struct sctp_tcb *stcb, 
 	shared_key_id = ntohs(auth->shared_key_id);
 	hmac_id = ntohs(auth->hmac_id);
 	SCTPDBG(SCTP_DEBUG_AUTH1,
 		"SCTP AUTH Chunk: shared key %u, HMAC id %u\n",
 		shared_key_id, hmac_id);
 
 	/* is the indicated HMAC supported? */
 	if (!sctp_auth_is_supported_hmac(stcb->asoc.local_hmacs, hmac_id)) {
-		struct mbuf *m_err;
-		struct sctp_auth_invalid_hmac *err;
+		struct mbuf *op_err;
+		struct sctp_error_auth_invalid_hmac *cause;
 
 		SCTP_STAT_INCR(sctps_recvivalhmacid);
 		SCTPDBG(SCTP_DEBUG_AUTH1,
 			"SCTP Auth: unsupported HMAC id %u\n",
 			hmac_id);
 		/*
 		 * report this in an Error Chunk: Unsupported HMAC
 		 * Identifier
 		 */
-		m_err = sctp_get_mbuf_for_msg(sizeof(*err), 0, M_NOWAIT,
-					      1, MT_HEADER);
-		if (m_err != NULL) {
+		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_auth_invalid_hmac),
+		                               0, M_NOWAIT, 1, MT_HEADER);
+		if (op_err != NULL) {
 			/* pre-reserve some space */
-			SCTP_BUF_RESV_UF(m_err, sizeof(struct sctp_chunkhdr));
+			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
 			/* fill in the error */
-			err = mtod(m_err, struct sctp_auth_invalid_hmac *);
-			bzero(err, sizeof(*err));
-			err->ph.param_type = htons(SCTP_CAUSE_UNSUPPORTED_HMACID);
-			err->ph.param_length = htons(sizeof(*err));
-			err->hmac_id = ntohs(hmac_id);
-			SCTP_BUF_LEN(m_err) = sizeof(*err);
+			cause = mtod(op_err, struct sctp_error_auth_invalid_hmac *);
+			cause->cause.code = htons(SCTP_CAUSE_UNSUPPORTED_HMACID);
+			cause->cause.length = htons(sizeof(struct sctp_error_auth_invalid_hmac));
+			cause->hmac_id = ntohs(hmac_id);
+			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_auth_invalid_hmac);
 			/* queue it */
-			sctp_queue_op_err(stcb, m_err);
+			sctp_queue_op_err(stcb, op_err);
 		}
 		return (-1);
 	}
 	/* get the indicated shared key, if available */
 	if ((stcb->asoc.authinfo.recv_key == NULL) ||
 	    (stcb->asoc.authinfo.recv_keyid != shared_key_id)) {
 		/* find the shared key on the assoc first */
 		skey = sctp_find_sharedkey(&stcb->asoc.shared_keys,
@@ -1747,18 +1751,18 @@ sctp_handle_auth(struct sctp_tcb *stcb, 
 	if (chunklen < (sizeof(*auth) + digestlen)) {
 		/* invalid digest length */
 		SCTP_STAT_INCR(sctps_recvauthfailed);
 		SCTPDBG(SCTP_DEBUG_AUTH1,
 			"SCTP Auth: chunk too short for HMAC\n");
 		return (-1);
 	}
 	/* save a copy of the digest, zero the pseudo header, and validate */
-	bcopy(auth->hmac, digest, digestlen);
-	sctp_bzero_m(m, offset + sizeof(*auth), SCTP_SIZE32(digestlen));
+	memcpy(digest, auth->hmac, digestlen);
+	sctp_zero_m(m, offset + sizeof(*auth), SCTP_SIZE32(digestlen));
 	(void)sctp_compute_hmac_m(hmac_id, stcb->asoc.authinfo.recv_key,
 	    m, offset, computed_digest);
 
 	/* compare the computed digest with the one in the AUTH chunk */
 	if (memcmp(digest, computed_digest, digestlen) != 0) {
 		SCTP_STAT_INCR(sctps_recvauthfailed);
 		SCTPDBG(SCTP_DEBUG_AUTH1,
 			"SCTP Auth: HMAC digest check failed\n");
@@ -1818,61 +1822,65 @@ sctp_notify_authentication(struct sctp_t
 	/* append to socket */
 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
 	    0, 0, stcb->asoc.context, 0, 0, 0, m_notify);
 	if (control == NULL) {
 		/* no memory */
 		sctp_m_freem(m_notify);
 		return;
 	}
+	control->length = SCTP_BUF_LEN(m_notify);
 	control->spec_flags = M_NOTIFICATION;
-	control->length = SCTP_BUF_LEN(m_notify);
 	/* not that we need this */
 	control->tail_mbuf = m_notify;
 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
 }
 
 
 /*-
  * validates the AUTHentication related parameters in an INIT/INIT-ACK
  * Note: currently only used for INIT as INIT-ACK is handled inline
  * with sctp_load_addresses_from_init()
  */
 int
 sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit)
 {
-	struct sctp_paramhdr *phdr, parm_buf;
+	struct sctp_paramhdr *phdr, param_buf;
 	uint16_t ptype, plen;
 	int peer_supports_asconf = 0;
 	int peer_supports_auth = 0;
 	int got_random = 0, got_hmacs = 0, got_chklist = 0;
 	uint8_t saw_asconf = 0;
 	uint8_t saw_asconf_ack = 0;
 
 	/* go through each of the params. */
-	phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
+	phdr = sctp_get_next_param(m, offset, &param_buf, sizeof(param_buf));
 	while (phdr) {
 		ptype = ntohs(phdr->param_type);
 		plen = ntohs(phdr->param_length);
 
 		if (offset + plen > limit) {
 			break;
 		}
 		if (plen < sizeof(struct sctp_paramhdr)) {
 			break;
 		}
 		if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
 			/* A supported extension chunk */
 			struct sctp_supported_chunk_types_param *pr_supported;
-			uint8_t local_store[SCTP_PARAM_BUFFER_SIZE];
+			uint8_t local_store[SCTP_SMALL_CHUNK_STORE];
 			int num_ent, i;
 
+			if (plen > sizeof(local_store)) {
+				break;
+			}
 			phdr = sctp_get_next_param(m, offset,
-			    (struct sctp_paramhdr *)&local_store, min(plen,sizeof(local_store)));
+			                           (struct sctp_paramhdr *)&local_store,
+			                           plen);
 			if (phdr == NULL) {
 				return (-1);
 			}
 			pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
 			num_ent = plen - sizeof(struct sctp_paramhdr);
 			for (i = 0; i < num_ent; i++) {
 				switch (pr_supported->chunk_types[i]) {
 				case SCTP_ASCONF:
@@ -1880,56 +1888,61 @@ sctp_validate_init_auth_params(struct mb
 					peer_supports_asconf = 1;
 					break;
 				default:
 					/* one we don't care about */
 					break;
 				}
 			}
 		} else if (ptype == SCTP_RANDOM) {
-			got_random = 1;
 			/* enforce the random length */
 			if (plen != (sizeof(struct sctp_auth_random) +
 				     SCTP_AUTH_RANDOM_SIZE_REQUIRED)) {
 				SCTPDBG(SCTP_DEBUG_AUTH1,
 					"SCTP: invalid RANDOM len\n");
 				return (-1);
 			}
+			got_random = 1;
 		} else if (ptype == SCTP_HMAC_LIST) {
+			struct sctp_auth_hmac_algo *hmacs;
 			uint8_t store[SCTP_PARAM_BUFFER_SIZE];
-			struct sctp_auth_hmac_algo *hmacs;
 			int num_hmacs;
 
-			if (plen > sizeof(store))
+			if (plen > sizeof(store)) {
 				break;
+			}
 			phdr = sctp_get_next_param(m, offset,
-			    (struct sctp_paramhdr *)store, min(plen,sizeof(store)));
-			if (phdr == NULL)
+			                           (struct sctp_paramhdr *)store,
+			                           plen);
+			if (phdr == NULL) {
 				return (-1);
+			}
 			hmacs = (struct sctp_auth_hmac_algo *)phdr;
-			num_hmacs = (plen - sizeof(*hmacs)) /
-			    sizeof(hmacs->hmac_ids[0]);
+			num_hmacs = (plen - sizeof(*hmacs)) / sizeof(hmacs->hmac_ids[0]);
 			/* validate the hmac list */
 			if (sctp_verify_hmac_param(hmacs, num_hmacs)) {
 				SCTPDBG(SCTP_DEBUG_AUTH1,
 					"SCTP: invalid HMAC param\n");
 				return (-1);
 			}
 			got_hmacs = 1;
 		} else if (ptype == SCTP_CHUNK_LIST) {
-			int i, num_chunks;
+			struct sctp_auth_chunk_list *chunks;
 			uint8_t chunks_store[SCTP_SMALL_CHUNK_STORE];
-			/* did the peer send a non-empty chunk list? */
-			struct sctp_auth_chunk_list *chunks = NULL;
+			int i, num_chunks;
+
+			if (plen > sizeof(chunks_store)) {
+				break;
+			}
 			phdr = sctp_get_next_param(m, offset,
 						   (struct sctp_paramhdr *)chunks_store,
-						   min(plen,sizeof(chunks_store)));
-			if (phdr == NULL)
+						   plen);
+			if (phdr == NULL) {
 				return (-1);
-
+			}
 			/*-
 			 * Flip through the list and mark that the
 			 * peer supports asconf/asconf_ack.
 			 */
 			chunks = (struct sctp_auth_chunk_list *)phdr;
 			num_chunks = plen - sizeof(*chunks);
 			for (i = 0; i < num_chunks; i++) {
 				/* record asconf/asconf-ack if listed */
@@ -1942,18 +1955,18 @@ sctp_validate_init_auth_params(struct mb
 			if (num_chunks)
 				got_chklist = 1;
 		}
 
 		offset += SCTP_SIZE32(plen);
 		if (offset >= limit) {
 			break;
 		}
-		phdr = sctp_get_next_param(m, offset, &parm_buf,
-		    sizeof(parm_buf));
+		phdr = sctp_get_next_param(m, offset, &param_buf,
+		    sizeof(param_buf));
 	}
 	/* validate authentication required parameters */
 	if (got_random && got_hmacs) {
 		peer_supports_auth = 1;
 	} else {
 		peer_supports_auth = 0;
 	}
 	if (!peer_supports_auth && got_chklist) {
--- a/netwerk/sctp/src/netinet/sctp_auth.h
+++ b/netwerk/sctp/src/netinet/sctp_auth.h
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * a) Redistributions of source code must retain the above copyright notice,
@@ -27,17 +29,17 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifdef __FreeBSD__
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.h 271673 2014-09-16 14:20:33Z tuexen $");
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.h 310590 2016-12-26 11:06:41Z tuexen $");
 #endif
 
 #ifndef _NETINET_SCTP_AUTH_H_
 #define _NETINET_SCTP_AUTH_H_
 
 #include <netinet/sctp_os.h>
 
 /* digest lengths */
--- a/netwerk/sctp/src/netinet/sctp_bsd_addr.c
+++ b/netwerk/sctp/src/netinet/sctp_bsd_addr.c
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * a) Redistributions of source code must retain the above copyright notice,
@@ -27,17 +29,17 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifdef __FreeBSD__
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.c 276914 2015-01-10 20:49:57Z tuexen $");
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.c 310590 2016-12-26 11:06:41Z tuexen $");
 #endif
 
 #include <netinet/sctp_os.h>
 #include <netinet/sctp_var.h>
 #include <netinet/sctp_pcb.h>
 #include <netinet/sctp_header.h>
 #include <netinet/sctputil.h>
 #include <netinet/sctp_output.h>
@@ -124,16 +126,19 @@ sctp_wakeup_iterator(void)
 
 #if defined(__Userspace__)
 static void *
 #else
 static void
 #endif
 sctp_iterator_thread(void *v SCTP_UNUSED)
 {
+#if defined(__Userspace__)
+	sctp_userspace_set_threadname("SCTP iterator");
+#endif
 	SCTP_IPI_ITERATOR_WQ_LOCK();
 	/* In FreeBSD this thread never terminates. */
 #if defined(__FreeBSD__)
 	for (;;) {
 #else
 	while ((sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) == 0) {
 #endif
 #if !defined(__Userspace__)
@@ -196,21 +201,17 @@ sctp_startup_iterator(void)
 	             (void *)NULL,
 	             &sctp_it_ctl.thread_proc,
 	             RFPROC,
 	             SCTP_KTHREAD_PAGES,
 	             SCTP_KTRHEAD_NAME);
 #elif defined(__APPLE__)
 	kernel_thread_start((thread_continue_t)sctp_iterator_thread, NULL, &sctp_it_ctl.thread_proc);
 #elif defined(__Userspace__)
-#if defined(__Userspace_os_Windows)
-	if ((sctp_it_ctl.thread_proc = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)&sctp_iterator_thread, NULL, 0, NULL)) == NULL) {
-#else
-	if (pthread_create(&sctp_it_ctl.thread_proc, NULL, &sctp_iterator_thread, NULL)) {
-#endif
+	if (sctp_userspace_thread_create(&sctp_it_ctl.thread_proc, &sctp_iterator_thread)) {
 		SCTP_PRINTF("ERROR: Creating sctp_iterator_thread failed.\n");
 	}
 #endif
 }
 
 #ifdef INET6
 
 #if defined(__Userspace__)
@@ -307,21 +308,28 @@ sctp_is_desired_interface_type(struct if
 int
 sctp_is_vmware_interface(struct ifnet *ifn)
 {
 	return (strncmp(ifnet_name(ifn), "vmnet", 5) == 0);
 }
 #endif
 
 #if defined(__Userspace_os_Windows)
+#ifdef MALLOC
+#undef MALLOC
+#define MALLOC(x) HeapAlloc(GetProcessHeap(), 0, (x))
+#endif
+#ifdef FREE
+#undef FREE
+#define FREE(x) HeapFree(GetProcessHeap(), 0, (x))
+#endif
 static void
 sctp_init_ifns_for_vrf(int vrfid)
 {
 #if defined(INET) || defined(INET6)
-	struct ifaddrs *ifa;
 	struct sctp_ifa *sctp_ifa;
 	DWORD Err, AdapterAddrsSize;
 	PIP_ADAPTER_ADDRESSES pAdapterAddrs, pAdapt;
 	PIP_ADAPTER_UNICAST_ADDRESS pUnicast;
 #endif
 
 #ifdef INET
 	AdapterAddrsSize = 0;
@@ -337,48 +345,42 @@ sctp_init_ifns_for_vrf(int vrfid)
 	/* Allocate memory from sizing information */
 	if ((pAdapterAddrs = (PIP_ADAPTER_ADDRESSES) GlobalAlloc(GPTR, AdapterAddrsSize)) == NULL) {
 		SCTP_PRINTF("Memory allocation error!\n");
 		return;
 	}
 	/* Get actual adapter information */
 	if ((Err = GetAdaptersAddresses(AF_INET, 0, NULL, pAdapterAddrs, &AdapterAddrsSize)) != ERROR_SUCCESS) {
 		SCTP_PRINTF("GetAdaptersV4Addresses() failed with error code %d\n", Err);
+		FREE(pAdapterAddrs);
 		return;
 	}
 	/* Enumerate through each returned adapter and save its information */
 	for (pAdapt = pAdapterAddrs; pAdapt; pAdapt = pAdapt->Next) {
 		if (pAdapt->IfType == IF_TYPE_IEEE80211 || pAdapt->IfType == IF_TYPE_ETHERNET_CSMACD) {
 			for (pUnicast = pAdapt->FirstUnicastAddress; pUnicast; pUnicast = pUnicast->Next) {
 				if (IN4_ISLINKLOCAL_ADDRESS(&(((struct sockaddr_in *)(pUnicast->Address.lpSockaddr))->sin_addr))) {
 					continue;
 				}
-				ifa = (struct ifaddrs*)malloc(sizeof(struct ifaddrs));
-				ifa->ifa_name = _strdup(pAdapt->AdapterName);
-				ifa->ifa_flags = pAdapt->Flags;
-				ifa->ifa_addr = (struct sockaddr *)malloc(sizeof(struct sockaddr_in));
-				memcpy(ifa->ifa_addr, pUnicast->Address.lpSockaddr, sizeof(struct sockaddr_in));
-
 				sctp_ifa = sctp_add_addr_to_vrf(0,
-				                                ifa,
+				                                NULL,
 				                                pAdapt->IfIndex,
 				                                (pAdapt->IfType == IF_TYPE_IEEE80211)?MIB_IF_TYPE_ETHERNET:pAdapt->IfType,
-				                                ifa->ifa_name,
-				                                (void *)ifa,
-				                                ifa->ifa_addr,
-				                                ifa->ifa_flags,
+				                                pAdapt->AdapterName,
+				                                NULL,
+				                                pUnicast->Address.lpSockaddr,
+				                                pAdapt->Flags,
 				                                0);
 				if (sctp_ifa) {
 					sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
 				}
 			}
 		}
 	}
-	if (pAdapterAddrs)
-		GlobalFree(pAdapterAddrs);
+	FREE(pAdapterAddrs);
 #endif
 #ifdef INET6
 	AdapterAddrsSize = 0;
 
 	if ((Err = GetAdaptersAddresses(AF_INET6, 0, NULL, NULL, &AdapterAddrsSize)) != 0) {
 		if ((Err != ERROR_BUFFER_OVERFLOW) && (Err != ERROR_INSUFFICIENT_BUFFER)) {
 			SCTP_PRINTF("GetAdaptersV6Addresses() sizing failed with error code %d\n", Err);
 			SCTP_PRINTF("err = %d; AdapterAddrsSize = %d\n", Err, AdapterAddrsSize);
@@ -388,61 +390,56 @@ sctp_init_ifns_for_vrf(int vrfid)
 	/* Allocate memory from sizing information */
 	if ((pAdapterAddrs = (PIP_ADAPTER_ADDRESSES) GlobalAlloc(GPTR, AdapterAddrsSize)) == NULL) {
 		SCTP_PRINTF("Memory allocation error!\n");
 		return;
 	}
 	/* Get actual adapter information */
 	if ((Err = GetAdaptersAddresses(AF_INET6, 0, NULL, pAdapterAddrs, &AdapterAddrsSize)) != ERROR_SUCCESS) {
 		SCTP_PRINTF("GetAdaptersV6Addresses() failed with error code %d\n", Err);
+		FREE(pAdapterAddrs);
 		return;
 	}
 	/* Enumerate through each returned adapter and save its information */
 	for (pAdapt = pAdapterAddrs; pAdapt; pAdapt = pAdapt->Next) {
 		if (pAdapt->IfType == IF_TYPE_IEEE80211 || pAdapt->IfType == IF_TYPE_ETHERNET_CSMACD) {
 			for (pUnicast = pAdapt->FirstUnicastAddress; pUnicast; pUnicast = pUnicast->Next) {
-				ifa = (struct ifaddrs*)malloc(sizeof(struct ifaddrs));
-				ifa->ifa_name = _strdup(pAdapt->AdapterName);
-				ifa->ifa_flags = pAdapt->Flags;
-				ifa->ifa_addr = (struct sockaddr *)malloc(sizeof(struct sockaddr_in6));
-				memcpy(ifa->ifa_addr, pUnicast->Address.lpSockaddr, sizeof(struct sockaddr_in6));
 				sctp_ifa = sctp_add_addr_to_vrf(0,
-				                                ifa,
+				                                NULL,
 				                                pAdapt->Ipv6IfIndex,
 				                                (pAdapt->IfType == IF_TYPE_IEEE80211)?MIB_IF_TYPE_ETHERNET:pAdapt->IfType,
-				                                ifa->ifa_name,
-				                                (void *)ifa,
-				                                ifa->ifa_addr,
-				                                ifa->ifa_flags,
+				                                pAdapt->AdapterName,
+				                                NULL,
+				                                pUnicast->Address.lpSockaddr,
+				                                pAdapt->Flags,
 				                                0);
 				if (sctp_ifa) {
 					sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
 				}
 			}
 		}
 	}
-	if (pAdapterAddrs)
-		GlobalFree(pAdapterAddrs);
+	FREE(pAdapterAddrs);
 #endif
 }
 #elif defined(__Userspace__)
 static void
 sctp_init_ifns_for_vrf(int vrfid)
 {
 #if defined(INET) || defined(INET6)
 	int rc;
-	struct ifaddrs *ifa = NULL;
+	struct ifaddrs *ifa, *ifas;
 	struct sctp_ifa *sctp_ifa;
 	uint32_t ifa_flags;
 
-	rc = getifaddrs(&g_interfaces);
+	rc = getifaddrs(&ifas);
 	if (rc != 0) {
 		return;
 	}
-	for (ifa = g_interfaces; ifa; ifa = ifa->ifa_next) {
+	for (ifa = ifas; ifa; ifa = ifa->ifa_next) {
 		if (ifa->ifa_addr == NULL) {
 			continue;
 		}
 #if !defined(INET)
 		if (ifa->ifa_addr->sa_family != AF_INET6) {
 			/* non inet6 skip */
 			continue;
 		}
@@ -467,28 +464,29 @@ sctp_init_ifns_for_vrf(int vrfid)
 #if defined(INET)
 		if (ifa->ifa_addr->sa_family == AF_INET &&
 		    ((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
 			continue;
 		}
 #endif
 		ifa_flags = 0;
 		sctp_ifa = sctp_add_addr_to_vrf(vrfid,
-		                                ifa,
+		                                NULL,
 		                                if_nametoindex(ifa->ifa_name),
 		                                0,
 		                                ifa->ifa_name,
-		                                (void *)ifa,
+		                                NULL,
 		                                ifa->ifa_addr,
 		                                ifa_flags,
 		                                0);
 		if (sctp_ifa) {
 			sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
 		}
 	}
+	freeifaddrs(ifas);
 #endif
 }
 #endif
 
 #if defined(__APPLE__)
 static void
 sctp_init_ifns_for_vrf(int vrfid)
 {
@@ -544,21 +542,21 @@ sctp_init_ifns_for_vrf(int vrfid)
 			if (ifa->ifa_addr->sa_family == AF_INET6) {
 				ifa6 = (struct in6_ifaddr *)ifa;
 				ifa_flags = ifa6->ia6_flags;
 			} else {
 				ifa_flags = 0;
 			}
 			snprintf(name, SCTP_IFNAMSIZ, "%s%d", ifnet_name(ifn), ifnet_unit(ifn));
 			sctp_ifa = sctp_add_addr_to_vrf(vrfid,
-			                                (void *)ifn,
+			                                (void *)ifn, /* XXX */
 			                                ifnet_index(ifn),
 			                                ifnet_type(ifn),
 			                                name,
-			                                (void *)ifa,
+			                                (void *)ifa, /* XXX */
 			                                ifa->ifa_addr,
 			                                ifa_flags,
 			                                0);
 			if (sctp_ifa) {
 				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
 			}
 		}
 		ifnet_free_address_list(ifaddrlist);
@@ -580,27 +578,27 @@ sctp_init_ifns_for_vrf(int vrfid)
 	struct ifaddr *ifa;
 	struct sctp_ifa *sctp_ifa;
 	uint32_t ifa_flags;
 #ifdef INET6
 	struct in6_ifaddr *ifa6;
 #endif
 
 	IFNET_RLOCK();
-	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
+	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_link) {
 		if (sctp_is_desired_interface_type(ifn) == 0) {
 			/* non desired type */
 			continue;
 		}
 #if (__FreeBSD_version >= 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000
 		IF_ADDR_RLOCK(ifn);
 #else
 		IF_ADDR_LOCK(ifn);
 #endif
-		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+		TAILQ_FOREACH(ifa, &ifn->if_addrhead, ifa_link) {
 			if (ifa->ifa_addr == NULL) {
 				continue;
 			}
 			switch (ifa->ifa_addr->sa_family) {
 #ifdef INET
 			case AF_INET:
 				if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
 					continue;
@@ -675,20 +673,24 @@ sctp_init_vrf_list(int vrfid)
 
 void
 sctp_addr_change(struct ifaddr *ifa, int cmd)
 {
 #if defined(__Userspace__)
         return;
 #else
 	uint32_t ifa_flags = 0;
+
+	if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
+		return;
+	}
 	/* BSD only has one VRF, if this changes
 	 * we will need to hook in the right
 	 * things here to get the id to pass to
-	 * the address managment routine.
+	 * the address management routine.
 	 */
 	if (SCTP_BASE_VAR(first_time) == 0) {
 		/* Special test to see if my ::1 will showup with this */
 		SCTP_BASE_VAR(first_time) = 1;
 		sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
 	}
 
 	if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
@@ -753,21 +755,21 @@ sctp_addr_change(struct ifaddr *ifa, int
 #if defined(__FreeBSD__)
 void
 sctp_add_or_del_interfaces(int (*pred)(struct ifnet *), int add)
 {
 	struct ifnet *ifn;
 	struct ifaddr *ifa;
 
 	IFNET_RLOCK();
-	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_list) {
+	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_link) {
 		if (!(*pred)(ifn)) {
 			continue;
 		}
-		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
+		TAILQ_FOREACH(ifa, &ifn->if_addrhead, ifa_link) {
 			sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE);
 		}
 	}
 	IFNET_RUNLOCK();
 }
 #endif
 #if defined(__APPLE__)
 void
@@ -824,20 +826,16 @@ sctp_get_mbuf_for_msg(unsigned int space
 	if (allonebuf == 0)
 		mbuf_threshold = SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count);
 	else
 		mbuf_threshold = 1;
 
 
 	if ((int)space_needed > (((mbuf_threshold - 1) * MLEN) + MHLEN)) {
 		MCLGET(m, how);
-		if (m == NULL) {
-			return (NULL);
-		}
-
 		if (SCTP_BUF_IS_EXTENDED(m) == 0) {
 			sctp_m_freem(m);
 			return (NULL);
 		}
 	}
 	SCTP_BUF_LEN(m) = 0;
 	SCTP_BUF_NEXT(m) = SCTP_BUF_NEXT_PKT(m) = NULL;
 
--- a/netwerk/sctp/src/netinet/sctp_bsd_addr.h
+++ b/netwerk/sctp/src/netinet/sctp_bsd_addr.h
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * a) Redistributions of source code must retain the above copyright notice,
@@ -27,17 +29,17 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifdef __FreeBSD__
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.h 237540 2012-06-24 21:25:54Z tuexen $");
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.h 310590 2016-12-26 11:06:41Z tuexen $");
 #endif
 
 #ifndef _NETINET_SCTP_BSD_ADDR_H_
 #define _NETINET_SCTP_BSD_ADDR_H_
 
 #include <netinet/sctp_pcb.h>
 
 #if defined(_KERNEL) || defined(__Userspace__)
--- a/netwerk/sctp/src/netinet/sctp_callout.c
+++ b/netwerk/sctp/src/netinet/sctp_callout.c
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * a) Redistributions of source code must retain the above copyright notice,
@@ -51,32 +53,41 @@
 #include <netinet/sctp_callout.h>
 #include <netinet/sctp_pcb.h>
 #endif
 
 /*
  * Callout/Timer routines for OS that doesn't have them
  */
 #if defined(__APPLE__) || defined(__Userspace__)
-int ticks = 0;
+static int ticks = 0;
 #else
 extern int ticks;
 #endif
 
+int sctp_get_tick_count(void) {
+	int ret;
+
+	SCTP_TIMERQ_LOCK();
+	ret = ticks;
+	SCTP_TIMERQ_UNLOCK();
+	return ret;
+}
+
 /*
  * SCTP_TIMERQ_LOCK protects:
  * - SCTP_BASE_INFO(callqueue)
  * - sctp_os_timer_next: next timer to check
  */
 static sctp_os_timer_t *sctp_os_timer_next = NULL;
 
 void
 sctp_os_timer_init(sctp_os_timer_t *c)
 {
-	bzero(c, sizeof(*c));
+	memset(c, 0, sizeof(*c));
 }
 
 void
 sctp_os_timer_start(sctp_os_timer_t *c, int to_ticks, void (*ftn) (void *),
                     void *arg)
 {
 	/* paranoia */
 	if ((c == NULL) || (ftn == NULL))
@@ -173,16 +184,17 @@ sctp_timeout(void *arg SCTP_UNUSED)
 #endif
 
 #if defined(__Userspace__)
 #define TIMEOUT_INTERVAL 10
 
 void *
 user_sctp_timer_iterate(void *arg)
 {
+	sctp_userspace_set_threadname("SCTP timer");
 	for (;;) {
 #if defined (__Userspace_os_Windows)
 		Sleep(TIMEOUT_INTERVAL);
 #else
 		struct timeval timeout;
 
 		timeout.tv_sec  = 0;
 		timeout.tv_usec = 1000 * TIMEOUT_INTERVAL;
@@ -198,23 +210,17 @@ user_sctp_timer_iterate(void *arg)
 
 void
 sctp_start_timer(void)
 {
 	/*
 	 * No need to do SCTP_TIMERQ_LOCK_INIT();
 	 * here, it is being done in sctp_pcb_init()
 	 */
-#if defined (__Userspace_os_Windows)
-	if ((SCTP_BASE_VAR(timer_thread) = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)user_sctp_timer_iterate, NULL, 0, NULL)) == NULL) {
-		SCTP_PRINTF("ERROR; Creating ithread failed\n");
-	}
-#else
 	int rc;
 
-	rc = pthread_create(&SCTP_BASE_VAR(timer_thread), NULL, user_sctp_timer_iterate, NULL);
+	rc = sctp_userspace_thread_create(&SCTP_BASE_VAR(timer_thread), user_sctp_timer_iterate);
 	if (rc) {
-		SCTP_PRINTF("ERROR; return code from pthread_create() is %d\n", rc);
+		SCTP_PRINTF("ERROR; return code from sctp_thread_create() is %d\n", rc);
 	}
-#endif
 }
 
 #endif
--- a/netwerk/sctp/src/netinet/sctp_callout.h
+++ b/netwerk/sctp/src/netinet/sctp_callout.h
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -52,24 +54,29 @@
 
 #if defined(__Userspace__)
 #if defined(__Userspace_os_Windows)
 #define SCTP_TIMERQ_LOCK()          EnterCriticalSection(&SCTP_BASE_VAR(timer_mtx))
 #define SCTP_TIMERQ_UNLOCK()        LeaveCriticalSection(&SCTP_BASE_VAR(timer_mtx))
 #define SCTP_TIMERQ_LOCK_INIT()     InitializeCriticalSection(&SCTP_BASE_VAR(timer_mtx))
 #define SCTP_TIMERQ_LOCK_DESTROY()  DeleteCriticalSection(&SCTP_BASE_VAR(timer_mtx))
 #else
+#ifdef INVARIANTS
+#define SCTP_TIMERQ_LOCK()          KASSERT(pthread_mutex_lock(&SCTP_BASE_VAR(timer_mtx)) == 0, ("%s: timer_mtx already locked", __func__))
+#define SCTP_TIMERQ_UNLOCK()        KASSERT(pthread_mutex_unlock(&SCTP_BASE_VAR(timer_mtx)) == 0, ("%s: timer_mtx not locked", __func__))
+#else
 #define SCTP_TIMERQ_LOCK()          (void)pthread_mutex_lock(&SCTP_BASE_VAR(timer_mtx))
 #define SCTP_TIMERQ_UNLOCK()        (void)pthread_mutex_unlock(&SCTP_BASE_VAR(timer_mtx))
-#define SCTP_TIMERQ_LOCK_INIT()     (void)pthread_mutex_init(&SCTP_BASE_VAR(timer_mtx), NULL)
+#endif
+#define SCTP_TIMERQ_LOCK_INIT()     (void)pthread_mutex_init(&SCTP_BASE_VAR(timer_mtx), &SCTP_BASE_VAR(mtx_attr))
 #define SCTP_TIMERQ_LOCK_DESTROY()  (void)pthread_mutex_destroy(&SCTP_BASE_VAR(timer_mtx))
 #endif
+#endif
 
-extern int ticks;
-#endif
+int sctp_get_tick_count(void);
 
 TAILQ_HEAD(calloutlist, sctp_callout);
 
 struct sctp_callout {
 	TAILQ_ENTRY(sctp_callout) tqe;
 	int c_time;		/* ticks to the event */
 	void *c_arg;		/* function argument */
 	void (*c_func)(void *);	/* function to call */
--- a/netwerk/sctp/src/netinet/sctp_cc_functions.c
+++ b/netwerk/sctp/src/netinet/sctp_cc_functions.c
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * a) Redistributions of source code must retain the above copyright notice,
@@ -27,17 +29,17 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifdef __FreeBSD__
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/netinet/sctp_cc_functions.c 279859 2015-03-10 19:49:25Z tuexen $");
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_cc_functions.c 310590 2016-12-26 11:06:41Z tuexen $");
 #endif
 
 #include <netinet/sctp_os.h>
 #include <netinet/sctp_var.h>
 #include <netinet/sctp_sysctl.h>
 #include <netinet/sctp_pcb.h>
 #include <netinet/sctp_header.h>
 #include <netinet/sctputil.h>
@@ -95,17 +97,17 @@ sctp_set_initial_cc_param(struct sctp_tc
 		net->cwnd /= assoc->numnets;
 		if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
 			net->cwnd = net->mtu - sizeof(struct sctphdr);
 		}
 	}
 	sctp_enforce_cwnd_limit(assoc, net);
 	net->ssthresh = assoc->peers_rwnd;
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
-	SDT_PROBE(sctp, cwnd, net, init,
+	SDT_PROBE5(sctp, cwnd, net, init,
 	          stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
 	          0, net->cwnd);
 #endif
 	if (SCTP_BASE_SYSCTL(sctp_logging_level) &
 	    (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
 		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
 	}
 }
@@ -190,66 +192,67 @@ sctp_cwnd_update_after_fr(struct sctp_tc
 					net->ssthresh = net->cwnd / 2;
 					if (net->ssthresh < (net->mtu * 2)) {
 						net->ssthresh = 2 * net->mtu;
 					}
 				}
 				net->cwnd = net->ssthresh;
 				sctp_enforce_cwnd_limit(asoc, net);
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
-				SDT_PROBE(sctp, cwnd, net, fr,
+				SDT_PROBE5(sctp, cwnd, net, fr,
 					  stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
 					  old_cwnd, net->cwnd);
 #endif
 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
 						SCTP_CWND_LOG_FROM_FR);
 				}
 				lchk = TAILQ_FIRST(&asoc->send_queue);
 
 				net->partial_bytes_acked = 0;
 				/* Turn on fast recovery window */
 				asoc->fast_retran_loss_recovery = 1;
 				if (lchk == NULL) {
 					/* Mark end of the window */
 					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
 				} else {
-					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+					asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
 				}
 
 				/*
 				 * CMT fast recovery -- per destination
 				 * recovery variable.
 				 */
 				net->fast_retran_loss_recovery = 1;
 
 				if (lchk == NULL) {
 					/* Mark end of the window */
 					net->fast_recovery_tsn = asoc->sending_seq - 1;
 				} else {
-					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+					net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
 				}
 
 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
-						stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA+SCTP_LOC_32 );
+						stcb->sctp_ep, stcb, net,
+				                SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_1);
 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
 						 stcb->sctp_ep, stcb, net);
 			}
 		} else if (net->net_ack > 0) {
 			/*
 			 * Mark a peg that we WOULD have done a cwnd
 			 * reduction but RFC2582 prevented this action.
 			 */
 			SCTP_STAT_INCR(sctps_fastretransinrtt);
 		}
 	}
 }
 
 /* Defines for instantaneous bw decisions */
-#define SCTP_INST_LOOSING 1 /* Loosing to other flows */
+#define SCTP_INST_LOOSING 1 /* Losing to other flows */
 #define SCTP_INST_NEUTRAL 2 /* Neutral, no indication */
 #define SCTP_INST_GAINING 3 /* Gaining, step down possible */
 
 
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 static int
 cc_bw_same(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw,
 	   uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind)
@@ -270,17 +273,17 @@ cc_bw_same(struct sctp_tcb *stcb SCTP_UN
 		/*
 		 * rtt increased
 		 * we don't update bw.. so we don't
 		 * update the rtt either.
 		 */
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 		/* Probe point 5 */
 		probepoint |=  ((5 << 16) | 1);
-		SDT_PROBE(sctp, cwnd, net, rttvar,
+		SDT_PROBE5(sctp, cwnd, net, rttvar,
 			  vtag,
 			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
 			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 			  net->flight_size,
 			  probepoint);
 #endif
 		if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
 			if (net->cc_mod.rtcc.last_step_state == 5)
@@ -293,17 +296,17 @@ cc_bw_same(struct sctp_tcb *stcb SCTP_UN
 			     ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
 				/* Try a step down */
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 				oth = net->cc_mod.rtcc.vol_reduce;
 				oth <<= 16;
 				oth |= net->cc_mod.rtcc.step_cnt;
 				oth <<= 16;
 				oth |= net->cc_mod.rtcc.last_step_state;
-				SDT_PROBE(sctp, cwnd, net, rttstep,
+				SDT_PROBE5(sctp, cwnd, net, rttstep,
 					  vtag,
 					  ((net->cc_mod.rtcc.lbw << 32) | nbw),
 					  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 					  oth,
 					  probepoint);
 #endif
 				if (net->cwnd > (4 * net->mtu)) {
 					net->cwnd -= net->mtu;
@@ -319,31 +322,31 @@ cc_bw_same(struct sctp_tcb *stcb SCTP_UN
 		/*
 		 * rtt decreased, there could be more room.
 		 * we update both the bw and the rtt here to
 		 * lock this in as a good step down.
 		 */
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 		/* Probe point 6 */
 		probepoint |=  ((6 << 16) | 0);
-		SDT_PROBE(sctp, cwnd, net, rttvar,
+		SDT_PROBE5(sctp, cwnd, net, rttvar,
 			  vtag,
 			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
 			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 			  net->flight_size,
 			  probepoint);
 #endif
 		if (net->cc_mod.rtcc.steady_step) {
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 			oth = net->cc_mod.rtcc.vol_reduce;
 			oth <<= 16;
 			oth |= net->cc_mod.rtcc.step_cnt;
 			oth <<= 16;
 			oth |= net->cc_mod.rtcc.last_step_state;
-			SDT_PROBE(sctp, cwnd, net, rttstep,
+			SDT_PROBE5(sctp, cwnd, net, rttstep,
 				  vtag,
 				  ((net->cc_mod.rtcc.lbw << 32) | nbw),
 				  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 				  oth,
 				  probepoint);
 #endif
 			if ((net->cc_mod.rtcc.last_step_state == 5) &&
 			    (net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step)) {
@@ -365,17 +368,17 @@ cc_bw_same(struct sctp_tcb *stcb SCTP_UN
 		else
 			return (0);
 	}
 	/* Ok bw and rtt remained the same .. no update to any
 	 */
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 	/* Probe point 7 */
 	probepoint |=  ((7 << 16) | net->cc_mod.rtcc.ret_from_eq);
-	SDT_PROBE(sctp, cwnd, net, rttvar,
+	SDT_PROBE5(sctp, cwnd, net, rttvar,
 		  vtag,
 		  ((net->cc_mod.rtcc.lbw << 32) | nbw),
 		  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 		  net->flight_size,
 		  probepoint);
 #endif
 	if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
 		if (net->cc_mod.rtcc.last_step_state == 5)
@@ -426,49 +429,49 @@ cc_bw_decrease(struct sctp_tcb *stcb SCT
 		/* rtt increased */
 		/* Did we add more */
 		if ((net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) &&
 		    (inst_ind != SCTP_INST_LOOSING)) {
 			/* We caused it maybe.. back off? */
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 			/* PROBE POINT 1 */
 			probepoint |=  ((1 << 16) | 1);
-			SDT_PROBE(sctp, cwnd, net, rttvar,
+			SDT_PROBE5(sctp, cwnd, net, rttvar,
 				  vtag,
 				  ((net->cc_mod.rtcc.lbw << 32) | nbw),
 				  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 				  net->flight_size,
 				  probepoint);
 #endif
 			if (net->cc_mod.rtcc.ret_from_eq) {
 				/* Switch over to CA if we are less aggressive */
 				net->ssthresh = net->cwnd-1;
 				net->partial_bytes_acked = 0;
 			}
 			return (1);
 		}
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 		/* Probe point 2 */
 		probepoint |=  ((2 << 16) | 0);
-		SDT_PROBE(sctp, cwnd, net, rttvar,
+		SDT_PROBE5(sctp, cwnd, net, rttvar,
 			  vtag,
 			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
 			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 			  net->flight_size,
 			  probepoint);
 #endif
 		/* Someone else - fight for more? */
 		if (net->cc_mod.rtcc.steady_step) {
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 			oth = net->cc_mod.rtcc.vol_reduce;
 			oth <<= 16;
 			oth |= net->cc_mod.rtcc.step_cnt;
 			oth <<= 16;
 			oth |= net->cc_mod.rtcc.last_step_state;
-			SDT_PROBE(sctp, cwnd, net, rttstep,
+			SDT_PROBE5(sctp, cwnd, net, rttstep,
 				  vtag,
 				  ((net->cc_mod.rtcc.lbw << 32) | nbw),
 				  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 				  oth,
 				  probepoint);
 #endif
 			/* Did we voluntarily give up some? if so take
 			 * one back please
@@ -483,31 +486,31 @@ cc_bw_decrease(struct sctp_tcb *stcb SCT
 			net->cc_mod.rtcc.step_cnt = 0;
 		}
 		goto out_decision;
 	} else  if (net->rtt  < net->cc_mod.rtcc.lbw_rtt-rtt_offset) {
 		/* bw & rtt decreased */
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 		/* Probe point 3 */
 		probepoint |=  ((3 << 16) | 0);
-		SDT_PROBE(sctp, cwnd, net, rttvar,
+		SDT_PROBE5(sctp, cwnd, net, rttvar,
 			  vtag,
 			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
 			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 			  net->flight_size,
 			  probepoint);
 #endif
 		if (net->cc_mod.rtcc.steady_step) {
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 			oth = net->cc_mod.rtcc.vol_reduce;
 			oth <<= 16;
 			oth |= net->cc_mod.rtcc.step_cnt;
 			oth <<= 16;
 			oth |= net->cc_mod.rtcc.last_step_state;
-			SDT_PROBE(sctp, cwnd, net, rttstep,
+			SDT_PROBE5(sctp, cwnd, net, rttstep,
 				  vtag,
 				  ((net->cc_mod.rtcc.lbw << 32) | nbw),
 				  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 				  oth,
 				  probepoint);
 #endif
 			if ((net->cc_mod.rtcc.vol_reduce) &&
 			    (inst_ind != SCTP_INST_GAINING)) {
@@ -519,31 +522,31 @@ cc_bw_decrease(struct sctp_tcb *stcb SCT
 			net->cc_mod.rtcc.step_cnt = 0;
 		}
 		goto out_decision;
 	}
 	/* The bw decreased but rtt stayed the same */
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 	/* Probe point 4 */
 	probepoint |=  ((4 << 16) | 0);
-	SDT_PROBE(sctp, cwnd, net, rttvar,
+	SDT_PROBE5(sctp, cwnd, net, rttvar,
 		  vtag,
 		  ((net->cc_mod.rtcc.lbw << 32) | nbw),
 		  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 		  net->flight_size,
 		  probepoint);
 #endif
 	if (net->cc_mod.rtcc.steady_step) {
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 		oth = net->cc_mod.rtcc.vol_reduce;
 		oth <<= 16;
 		oth |= net->cc_mod.rtcc.step_cnt;
 		oth <<= 16;
 		oth |= net->cc_mod.rtcc.last_step_state;
-		SDT_PROBE(sctp, cwnd, net, rttstep,
+		SDT_PROBE5(sctp, cwnd, net, rttstep,
 			  vtag,
 			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
 			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 			  oth,
 			  probepoint);
 #endif
 		if ((net->cc_mod.rtcc.vol_reduce) &&
 		    (inst_ind != SCTP_INST_GAINING)) {
@@ -581,48 +584,48 @@ cc_bw_increase(struct sctp_tcb *stcb SCT
 	 * return 0, since all actions in
 	 * our table say to do the normal CC
 	 * update. Note that we pay no attention to
 	 * the inst_ind since our overall sum is increasing.
 	 */
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 	/* PROBE POINT 0 */
 	probepoint = (((uint64_t)net->cwnd) << 32);
-	SDT_PROBE(sctp, cwnd, net, rttvar,
+	SDT_PROBE5(sctp, cwnd, net, rttvar,
 		  vtag,
 		  ((net->cc_mod.rtcc.lbw << 32) | nbw),
 		  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 		  net->flight_size,
 		  probepoint);
 #endif
 	if (net->cc_mod.rtcc.steady_step) {
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 		oth = net->cc_mod.rtcc.vol_reduce;
 		oth <<= 16;
 		oth |= net->cc_mod.rtcc.step_cnt;
 		oth <<= 16;
 		oth |= net->cc_mod.rtcc.last_step_state;
-		SDT_PROBE(sctp, cwnd, net, rttstep,
+		SDT_PROBE5(sctp, cwnd, net, rttstep,
 			  vtag,
 			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
 			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 			  oth,
 			  probepoint);
 #endif
 		net->cc_mod.rtcc.last_step_state = 0;
 		net->cc_mod.rtcc.step_cnt = 0;
 		net->cc_mod.rtcc.vol_reduce = 0;
 	}
 	net->cc_mod.rtcc.lbw = nbw;
 	net->cc_mod.rtcc.lbw_rtt = net->rtt;
 	net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
 	return (0);
 }
 
-/* RTCC Algoritm to limit growth of cwnd, return
+/* RTCC Algorithm to limit growth of cwnd, return
  * true if you want to NOT allow cwnd growth
  */
 static int
 cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw)
 {
 	uint64_t bw_offset, rtt_offset;
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 	uint64_t probepoint, rtt, vtag;
@@ -706,17 +709,17 @@ cc_bw_limit(struct sctp_tcb *stcb, struc
 			inst_ind = net->cc_mod.rtcc.last_inst_ind;
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 			inst_bw = bytes_for_this_rtt;
 			/* Can't determine do not change */
 			probepoint |=  ((0xd << 16) | inst_ind);
 #endif
 		}
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
-		SDT_PROBE(sctp, cwnd, net, rttvar,
+		SDT_PROBE5(sctp, cwnd, net, rttvar,
 			  vtag,
 			  ((nbw << 32) | inst_bw),
 			  ((net->cc_mod.rtcc.lbw_rtt << 32) | rtt),
 			  net->flight_size,
 			  probepoint);
 #endif
 	} else {
 		/* No rtt measurement, use last one */
@@ -880,17 +883,17 @@ sctp_cwnd_update_after_sack_common(struc
 				uint64_t vtag, probepoint;
 
 				probepoint = (((uint64_t)net->cwnd) << 32);
 				probepoint |=  ((0xa << 16) | 0);
 				vtag = (net->rtt << 32) |
 					(((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
 					(stcb->rport);
 
-				SDT_PROBE(sctp, cwnd, net, rttvar,
+				SDT_PROBE5(sctp, cwnd, net, rttvar,
 					  vtag,
 					  nbw,
 					  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 					  net->flight_size,
 					  probepoint);
 #endif
 				net->cc_mod.rtcc.lbw = nbw;
 				net->cc_mod.rtcc.lbw_rtt = net->rtt;
@@ -980,17 +983,17 @@ sctp_cwnd_update_after_sack_common(struc
 					}
 					net->cwnd += incr;
 					sctp_enforce_cwnd_limit(asoc, net);
 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 						sctp_log_cwnd(stcb, net, incr,
 						              SCTP_CWND_LOG_FROM_SS);
 					}
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
-					SDT_PROBE(sctp, cwnd, net, ack,
+					SDT_PROBE5(sctp, cwnd, net, ack,
 					          stcb->asoc.my_vtag,
 					          ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
 					          net,
 					          old_cwnd, net->cwnd);
 #endif
 				} else {
 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
 						sctp_log_cwnd(stcb, net, net->net_ack,
@@ -1044,17 +1047,17 @@ sctp_cwnd_update_after_sack_common(struc
 						break;
 					default:
 						incr = net->mtu;
 						break;
 					}
 					net->cwnd += incr;
 					sctp_enforce_cwnd_limit(asoc, net);
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
-					SDT_PROBE(sctp, cwnd, net, ack,
+					SDT_PROBE5(sctp, cwnd, net, ack,
 						  stcb->asoc.my_vtag,
 						  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
 						  net,
 						  old_cwnd, net->cwnd);
 #endif
 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 						sctp_log_cwnd(stcb, net, net->mtu,
 							      SCTP_CWND_LOG_FROM_CA);
@@ -1085,17 +1088,17 @@ sctp_cwnd_update_exit_pf_common(struct s
 {
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 	int old_cwnd;
 
 	old_cwnd = net->cwnd;
 #endif
 	net->cwnd = net->mtu;
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
-	SDT_PROBE(sctp, cwnd, net, ack,
+	SDT_PROBE5(sctp, cwnd, net, ack,
 	          stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
 	          old_cwnd, net->cwnd);
 #endif
 	SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
 	        (void *)net, net->cwnd);
 }
 
 
@@ -1158,17 +1161,17 @@ sctp_cwnd_update_after_timeout(struct sc
 			net->ssthresh = net->mtu;
 		}
 	} else {
 		net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
 	}
 	net->cwnd = net->mtu;
 	net->partial_bytes_acked = 0;
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
-	SDT_PROBE(sctp, cwnd, net, to,
+	SDT_PROBE5(sctp, cwnd, net, to,
 		  stcb->asoc.my_vtag,
 		  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
 		  net,
 		  old_cwnd, net->cwnd);
 #endif
 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
 	}
@@ -1193,17 +1196,17 @@ sctp_cwnd_update_after_ecn_echo_common(s
 				net->cwnd /= 2;
 			}
 			/* Drop to CA */
 			net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu);
 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
 			}
 		} else {
-			/* Further tuning down required over the drastic orginal cut */
+			/* Further tuning down required over the drastic original cut */
 			net->ssthresh -= (net->mtu * num_pkt_lost);
 			net->cwnd -= (net->mtu * num_pkt_lost);
 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
 			}
 
 		}
 		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
@@ -1213,17 +1216,17 @@ sctp_cwnd_update_after_ecn_echo_common(s
 			net->ssthresh = net->cwnd / 2;
 			if (net->ssthresh < net->mtu) {
 				net->ssthresh = net->mtu;
 				/* here back off the timer as well, to slow us down */
 				net->RTO <<= 1;
 			}
 			net->cwnd = net->ssthresh;
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
-			SDT_PROBE(sctp, cwnd, net, ecn,
+			SDT_PROBE5(sctp, cwnd, net, ecn,
 				  stcb->asoc.my_vtag,
 				  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
 				  net,
 				  old_cwnd, net->cwnd);
 #endif
 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
 			}
@@ -1334,17 +1337,17 @@ sctp_cwnd_update_after_packet_dropped(st
 	if (net->cwnd < net->mtu) {
 		/* We always have 1 MTU */
 		net->cwnd = net->mtu;
 	}
 	sctp_enforce_cwnd_limit(&stcb->asoc, net);
 	if (net->cwnd - old_cwnd != 0) {
 		/* log only changes */
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
-		SDT_PROBE(sctp, cwnd, net, pd,
+		SDT_PROBE5(sctp, cwnd, net, pd,
 			  stcb->asoc.my_vtag,
 			  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
 			  net,
 			  old_cwnd, net->cwnd);
 #endif
 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
 				SCTP_CWND_LOG_FROM_SAT);
@@ -1359,48 +1362,48 @@ sctp_cwnd_update_after_output(struct sct
 	int old_cwnd = net->cwnd;
 
 	if (net->ssthresh < net->cwnd)
 		net->ssthresh = net->cwnd;
 	if (burst_limit) {
 		net->cwnd = (net->flight_size + (burst_limit * net->mtu));
 		sctp_enforce_cwnd_limit(&stcb->asoc, net);
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
-		SDT_PROBE(sctp, cwnd, net, bl,
+		SDT_PROBE5(sctp, cwnd, net, bl,
 			  stcb->asoc.my_vtag,
 			  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
 			  net,
 			  old_cwnd, net->cwnd);
 #endif
 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
 		}
 	}
 }
 
 static void
 sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
 			    struct sctp_association *asoc,
 			    int accum_moved, int reneged_all, int will_exit)
 {
-	/* Passing a zero argument in last disables the rtcc algoritm */
+	/* Passing a zero argument in last disables the rtcc algorithm */
 	sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0);
 }
 
 static void
 sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
 	int in_window, int num_pkt_lost)
 {
-	/* Passing a zero argument in last disables the rtcc algoritm */
+	/* Passing a zero argument in last disables the rtcc algorithm */
 	sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0);
 }
 
 /* Here starts the RTCCVAR type CC invented by RRS which
  * is a slight mod to RFC2581. We reuse a common routine or
- * two since these algoritms are so close and need to
+ * two since these algorithms are so close and need to
  * remain the same.
  */
 static void
 sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
 				     int in_window, int num_pkt_lost)
 {
 	sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1);
 }
@@ -1437,17 +1440,17 @@ sctp_cwnd_new_rtcc_transmission_begins(s
 	if (net->cc_mod.rtcc.lbw) {
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 		/* Clear the old bw.. we went to 0 in-flight */
 		vtag = (net->rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
 			(stcb->rport);
 		probepoint = (((uint64_t)net->cwnd) << 32);
 		/* Probe point 8 */
 		probepoint |=  ((8 << 16) | 0);
-		SDT_PROBE(sctp, cwnd, net, rttvar,
+		SDT_PROBE5(sctp, cwnd, net, rttvar,
 			  vtag,
 			  ((net->cc_mod.rtcc.lbw << 32) | 0),
 			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
 			  net->flight_size,
 			  probepoint);
 #endif
 		net->cc_mod.rtcc.lbw_rtt = 0;
 		net->cc_mod.rtcc.cwnd_at_bw_set = 0;
@@ -1498,17 +1501,17 @@ sctp_set_rtcc_initial_cc_param(struct sc
 	sctp_set_initial_cc_param(stcb, net);
 	stcb->asoc.use_precise_time = 1;
 #if defined(__FreeBSD__) && __FreeBSD_version >= 803000
 	probepoint = (((uint64_t)net->cwnd) << 32);
 	probepoint |=  ((9 << 16) | 0);
 	vtag = (net->rtt << 32) |
 		(((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
 		(stcb->rport);
-	SDT_PROBE(sctp, cwnd, net, rttvar,
+	SDT_PROBE5(sctp, cwnd, net, rttvar,
 		  vtag,
 		  0,
 		  0,
 		  0,
 		  probepoint);
 #endif
 	net->cc_mod.rtcc.lbw_rtt = 0;
 	net->cc_mod.rtcc.cwnd_at_bw_set = 0;
@@ -1595,39 +1598,39 @@ sctp_cwnd_update_rtcc_packet_transmitted
 	}
 }
 
 static void
 sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb,
 				 struct sctp_association *asoc,
 				 int accum_moved, int reneged_all, int will_exit)
 {
-	/* Passing a one argument at the last enables the rtcc algoritm */
+	/* Passing a one argument at the last enables the rtcc algorithm */
 	sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1);
 }
 
 static void
 sctp_rtt_rtcc_calculated(struct sctp_tcb *stcb SCTP_UNUSED,
                          struct sctp_nets *net,
                          struct timeval *now SCTP_UNUSED)
 {
 	net->cc_mod.rtcc.rtt_set_this_sack = 1;
 }
 
 /* Here starts Sally Floyds HS-TCP */
 
 struct sctp_hs_raise_drop {
 	int32_t cwnd;
-	int32_t increase;
-	int32_t drop_percent;
+	int8_t increase;
+	int8_t drop_percent;
 };
 
 #define SCTP_HS_TABLE_SIZE 73
 
-struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
+static const struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
 	{38, 1, 50},		/* 0   */
 	{118, 2, 44},		/* 1   */
 	{221, 3, 41},		/* 2   */
 	{347, 4, 38},		/* 3   */
 	{495, 5, 37},		/* 4   */
 	{663, 6, 35},		/* 5   */
 	{851, 7, 34},		/* 6   */
 	{1058, 8, 33},		/* 7   */
@@ -1717,17 +1720,17 @@ sctp_hs_cwnd_increase(struct sctp_tcb *s
 	} else {
 		for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
 			if (cur_val < sctp_cwnd_adjust[i].cwnd) {
 				indx = i;
 				break;
 			}
 		}
 		net->last_hs_used = indx;
-		incr = ((sctp_cwnd_adjust[indx].increase) << 10);
+		incr = (((int32_t)sctp_cwnd_adjust[indx].increase) << 10);
 		net->cwnd += incr;
 	}
 	sctp_enforce_cwnd_limit(&stcb->asoc, net);
 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SS);
 	}
 }
 
@@ -1743,17 +1746,17 @@ sctp_hs_cwnd_decrease(struct sctp_tcb *s
 		net->ssthresh = net->cwnd / 2;
 		if (net->ssthresh < (net->mtu * 2)) {
 			net->ssthresh = 2 * net->mtu;
 		}
 		net->cwnd = net->ssthresh;
 	} else {
 		/* drop by the proper amount */
 		net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
-		    sctp_cwnd_adjust[net->last_hs_used].drop_percent);
+		    (int32_t)sctp_cwnd_adjust[net->last_hs_used].drop_percent);
 		net->cwnd = net->ssthresh;
 		/* now where are we */
 		indx = net->last_hs_used;
 		cur_val = net->cwnd >> 10;
 		/* reset where we are in the table */
 		if (cur_val < sctp_cwnd_adjust[0].cwnd) {
 			/* feel out of hs */
 			net->last_hs_used = 0;
@@ -1800,34 +1803,35 @@ sctp_hs_cwnd_update_after_fr(struct sctp
 
 				net->partial_bytes_acked = 0;
 				/* Turn on fast recovery window */
 				asoc->fast_retran_loss_recovery = 1;
 				if (lchk == NULL) {
 					/* Mark end of the window */
 					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
 				} else {
-					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+					asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
 				}
 
 				/*
 				 * CMT fast recovery -- per destination
 				 * recovery variable.
 				 */
 				net->fast_retran_loss_recovery = 1;
 
 				if (lchk == NULL) {
 					/* Mark end of the window */
 					net->fast_recovery_tsn = asoc->sending_seq - 1;
 				} else {
-					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+					net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
 				}
 
 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
-						stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA+SCTP_LOC_32);
+						stcb->sctp_ep, stcb, net,
+				                SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_2);
 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
 						 stcb->sctp_ep, stcb, net);
 			}
 		} else if (net->net_ack > 0) {
 			/*
 			 * Mark a peg that we WOULD have done a cwnd
 			 * reduction but RFC2582 prevented this action.
 			 */
@@ -2326,34 +2330,35 @@ sctp_htcp_cwnd_update_after_fr(struct sc
 
 				net->partial_bytes_acked = 0;
 				/* Turn on fast recovery window */
 				asoc->fast_retran_loss_recovery = 1;
 				if (lchk == NULL) {
 					/* Mark end of the window */
 					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
 				} else {
-					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+					asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
 				}
 
 				/*
 				 * CMT fast recovery -- per destination
 				 * recovery variable.
 				 */
 				net->fast_retran_loss_recovery = 1;
 
 				if (lchk == NULL) {
 					/* Mark end of the window */
 					net->fast_recovery_tsn = asoc->sending_seq - 1;
 				} else {
-					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
+					net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
 				}
 
 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
-						stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA+SCTP_LOC_32);
+						stcb->sctp_ep, stcb, net,
+				                SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_3);
 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
 						 stcb->sctp_ep, stcb, net);
 			}
 		} else if (net->net_ack > 0) {
 			/*
 			 * Mark a peg that we WOULD have done a cwnd
 			 * reduction but RFC2582 prevented this action.
 			 */
@@ -2398,17 +2403,17 @@ sctp_htcp_cwnd_update_after_ecn_echo(str
 		net->cwnd = net->ssthresh;
 		sctp_enforce_cwnd_limit(&stcb->asoc, net);
 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
 			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
 		}
 	}
 }
 
-struct sctp_cc_functions sctp_cc_functions[] = {
+const struct sctp_cc_functions sctp_cc_functions[] = {
 {
 #if defined(__Windows__) || defined(__Userspace_os_Windows)
 	sctp_set_initial_cc_param,
 	sctp_cwnd_update_after_sack,
 	sctp_cwnd_update_exit_pf_common,
 	sctp_cwnd_update_after_fr,
 	sctp_cwnd_update_after_timeout,
 	sctp_cwnd_update_after_ecn_echo,
--- a/netwerk/sctp/src/netinet/sctp_constants.h
+++ b/netwerk/sctp/src/netinet/sctp_constants.h
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * a) Redistributions of source code must retain the above copyright notice,
@@ -27,17 +29,17 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifdef __FreeBSD__
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/netinet/sctp_constants.h 271204 2014-09-06 19:12:14Z tuexen $");
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_constants.h 324615 2017-10-14 10:02:59Z tuexen $");
 #endif
 
 #ifndef _NETINET_SCTP_CONSTANTS_H_
 #define _NETINET_SCTP_CONSTANTS_H_
 
 #if defined(__Userspace_os_Windows)
 extern void getwintimeofday(struct timeval *tv);
 #endif
@@ -66,16 +68,20 @@ extern void getwintimeofday(struct timev
  */
 #define SCTP_ADDRESS_LIMIT 1080
 
 /* We need at least 2k of space for us, inits
  * larger than that lets abort.
  */
 #define SCTP_LARGEST_INIT_ACCEPTED (65535 - 2048)
 
+/* Largest length of a chunk */
+#define SCTP_MAX_CHUNK_LENGTH 0xffff
+/* Largest length of an error cause */
+#define SCTP_MAX_CAUSE_LENGTH 0xffff
 /* Number of addresses where we just skip the counting */
 #define SCTP_COUNT_LIMIT 40
 
 #define SCTP_ZERO_COPY_TICK_DELAY (((100 * hz) + 999) / 1000)
 #define SCTP_ZERO_COPY_SENDQ_TICK_DELAY (((100 * hz) + 999) / 1000)
 
 /* Number of ticks to delay before running
  * iterator on an address change.
@@ -93,20 +99,16 @@ extern void getwintimeofday(struct timev
 #define SCTP_MCORE_NAME "sctp_core_worker"
 
 
 /* If you support Multi-VRF how big to
  * make the initial array of VRF's to.
  */
 #define SCTP_DEFAULT_VRF_SIZE 4
 
-/* constants for rto calc */
-#define sctp_align_safe_nocopy 0
-#define sctp_align_unsafe_makecopy 1
-
 /* JRS - Values defined for the HTCP algorithm */
 #define ALPHA_BASE	(1<<7)  /* 1.0 with shift << 7 */
 #define BETA_MIN	(1<<6)  /* 0.5 with shift << 7 */
 #define BETA_MAX	102	/* 0.8 with shift << 7 */
 
 /* Places that CWND log can happen from */
 #define SCTP_CWND_LOG_FROM_FR	1
 #define SCTP_CWND_LOG_FROM_RTX	2
@@ -271,17 +273,17 @@ extern void getwintimeofday(struct timev
 #define SCTP_MAX_NUM_OF_ASOC	40000
 /* how many addresses per assoc remote and local */
 #define SCTP_SCALE_FOR_ADDR	2
 
 /* default MULTIPLE_ASCONF mode enable(1)/disable(0) value (sysctl) */
 #define SCTP_DEFAULT_MULTIPLE_ASCONFS	0
 
 /*
- * Theshold for rwnd updates, we have to read (sb_hiwat >>
+ * Threshold for rwnd updates, we have to read (sb_hiwat >>
  * SCTP_RWND_HIWAT_SHIFT) before we will look to see if we need to send a
  * window update sack. When we look, we compare the last rwnd we sent vs the
  * current rwnd. It too must be greater than this value. Using 3 divdes the
  * hiwat by 8, so for 200k rwnd we need to read 24k. For a 64k rwnd we need
  * to read 8k. This seems about right.. I hope :-D.. we do set a
  * min of a MTU on it so if the rwnd is real small we will insist
  * on a full MTU of 1500 bytes.
  */
@@ -341,16 +343,17 @@ extern void getwintimeofday(struct timev
 #define SCTP_DEF_FRMAX_BURST 4
 
 /* RTO calculation flag to say if it
  * is safe to determine local lan or not.
  */
 #define SCTP_RTT_FROM_NON_DATA 0
 #define SCTP_RTT_FROM_DATA     1
 
+#define PR_SCTP_UNORDERED_FLAG 0x0001
 
 /* IP hdr (20/40) + 12+2+2 (enet) + sctp common 12 */
 #define SCTP_FIRST_MBUF_RESV 68
 /* Packet transmit states in the sent field */
 #define SCTP_DATAGRAM_UNSENT		0
 #define SCTP_DATAGRAM_SENT		1
 #define SCTP_DATAGRAM_RESEND1		2	/* not used (in code, but may
 						 * hit this value) */
@@ -382,18 +385,18 @@ extern void getwintimeofday(struct timev
 #define SCTP_OUTPUT_FROM_CLOSING        16
 #define SCTP_OUTPUT_FROM_SOCKOPT        17
 
 /* SCTP chunk types are moved sctp.h for application (NAT, FW) use */
 
 /* align to 32-bit sizes */
 #define SCTP_SIZE32(x)	((((x) + 3) >> 2) << 2)
 
-#define IS_SCTP_CONTROL(a) ((a)->chunk_type != SCTP_DATA)
-#define IS_SCTP_DATA(a) ((a)->chunk_type == SCTP_DATA)
+#define IS_SCTP_CONTROL(a) (((a)->chunk_type != SCTP_DATA) && ((a)->chunk_type != SCTP_IDATA))
+#define IS_SCTP_DATA(a) (((a)->chunk_type == SCTP_DATA) || ((a)->chunk_type == SCTP_IDATA))
 
 
 /* SCTP parameter types */
 /*************0x0000 series*************/
 #define SCTP_HEARTBEAT_INFO		0x0001
 #if defined(__Userspace__)
 #define SCTP_CONN_ADDRESS               0x0004
 #endif
@@ -461,17 +464,17 @@ extern void getwintimeofday(struct timev
 #define SCTP_FLEXIBLE_ADDRESS	0x20
 #define SCTP_NO_HEARTBEAT	0x40
 
 /* mask to get sticky */
 #define SCTP_STICKY_OPTIONS_MASK	0x0c
 
 
 /*
- * SCTP states for internal state machine XXX (should match "user" values)
+ * SCTP states for internal state machine
  */
 #define SCTP_STATE_EMPTY		0x0000
 #define SCTP_STATE_INUSE		0x0001
 #define SCTP_STATE_COOKIE_WAIT		0x0002
 #define SCTP_STATE_COOKIE_ECHOED	0x0004
 #define SCTP_STATE_OPEN			0x0008
 #define SCTP_STATE_SHUTDOWN_SENT	0x0010
 #define SCTP_STATE_SHUTDOWN_RECEIVED	0x0020
@@ -512,17 +515,17 @@ extern void getwintimeofday(struct timev
 #define SCTP_DEFAULT_MBUFS_IN_CHAIN 5
 
 /* How long a cookie lives in milli-seconds */
 #define SCTP_DEFAULT_COOKIE_LIFE	60000
 
 /* Maximum the mapping array will  grow to (TSN mapping array) */
 #define SCTP_MAPPING_ARRAY	512
 
-/* size of the inital malloc on the mapping array */
+/* size of the initial malloc on the mapping array */
 #define SCTP_INITIAL_MAPPING_ARRAY  16
 /* how much we grow the mapping array each call */
 #define SCTP_MAPPING_ARRAY_INCR     32
 
 /*
  * Here we define the timer types used by the implementation as arguments in
  * the set/get timer type calls.
  */
@@ -553,21 +556,19 @@ extern void getwintimeofday(struct timev
 #define SCTP_TIMER_TYPE_ASCONF		10
 #define SCTP_TIMER_TYPE_SHUTDOWNGUARD	11
 #define SCTP_TIMER_TYPE_AUTOCLOSE	12
 #define SCTP_TIMER_TYPE_EVENTWAKE	13
 #define SCTP_TIMER_TYPE_STRRESET        14
 #define SCTP_TIMER_TYPE_INPKILL         15
 #define SCTP_TIMER_TYPE_ASOCKILL        16
 #define SCTP_TIMER_TYPE_ADDR_WQ         17
-#define SCTP_TIMER_TYPE_ZERO_COPY       18
-#define SCTP_TIMER_TYPE_ZCOPY_SENDQ     19
-#define SCTP_TIMER_TYPE_PRIM_DELETED    20
+#define SCTP_TIMER_TYPE_PRIM_DELETED    18
 /* add new timers here - and increment LAST */
-#define SCTP_TIMER_TYPE_LAST            21
+#define SCTP_TIMER_TYPE_LAST            19
 
 #define SCTP_IS_TIMER_TYPE_VALID(t)	(((t) > SCTP_TIMER_TYPE_NONE) && \
 					 ((t) < SCTP_TIMER_TYPE_LAST))
 
 
 #if defined(__APPLE__)
 /* Number of ticks to run the main timer at in msec */
 #define SCTP_MAIN_TIMER_DEFAULT		10
@@ -623,20 +624,16 @@ extern void getwintimeofday(struct timev
 #define SCTP_SEND_SEC	1
 
 /* recv timer def = 200ms  */
 #define SCTP_RECV_MSEC	200
 
 /* 30 seconds + RTO (in ms) */
 #define SCTP_HB_DEFAULT_MSEC	30000
 
-/* Max time I will wait for Shutdown to complete */
-#define SCTP_DEF_MAX_SHUTDOWN_SEC 180
-
-
 /*
  * This is how long a secret lives, NOT how long a cookie lives how many
  * ticks the current secret will live.
  */
 #define SCTP_DEFAULT_SECRET_LIFE_SEC 3600
 
 #define SCTP_RTO_UPPER_BOUND	(60000)	/* 60 sec in ms */
 #define SCTP_RTO_LOWER_BOUND	(1000)	/* 1 sec is ms */
@@ -649,17 +646,17 @@ extern void getwintimeofday(struct timev
 #define SCTP_DEF_MAX_INIT		8
 #define SCTP_DEF_MAX_SEND		10
 #define SCTP_DEF_MAX_PATH_RTX		5
 #define SCTP_DEF_PATH_PF_THRESHOLD	SCTP_DEF_MAX_PATH_RTX
 
 #define SCTP_DEF_PMTU_RAISE_SEC	600	/* 10 min between raise attempts */
 
 
-/* How many streams I request initally by default */
+/* How many streams I request initially by default */
 #define SCTP_OSTREAM_INITIAL 10
 #define SCTP_ISTREAM_INITIAL 2048
 
 /*
  * How many smallest_mtu's need to increase before a window update sack is
  * sent (should be a power of 2).
  */
 /* Send window update (incr * this > hiwat). Should be a power of 2 */
@@ -772,36 +769,37 @@ extern void getwintimeofday(struct timev
  * copied down where we will start to split the message.
  * So, with our default, we split only if the piece we
  * want to take will fill up a full MTU (assuming
  * a 1500 byte MTU).
  */
 #define SCTP_DEFAULT_SPLIT_POINT_MIN 2904
 
 /* Maximum length of diagnostic information in error causes */
-#define SCTP_DIAG_INFO_LEN 64
+#define SCTP_DIAG_INFO_LEN 128
 
 /* ABORT CODES and other tell-tale location
  * codes are generated by adding the below
  * to the instance id.
  */
 
 /* File defines */
-#define SCTP_FROM_SCTP_INPUT   0x10000000
-#define SCTP_FROM_SCTP_PCB     0x20000000
-#define SCTP_FROM_SCTP_INDATA  0x30000000
-#define SCTP_FROM_SCTP_TIMER   0x40000000
-#define SCTP_FROM_SCTP_USRREQ  0x50000000
-#define SCTP_FROM_SCTPUTIL     0x60000000
-#define SCTP_FROM_SCTP6_USRREQ 0x70000000
-#define SCTP_FROM_SCTP_ASCONF  0x80000000
-#define SCTP_FROM_SCTP_OUTPUT  0x90000000
-#define SCTP_FROM_SCTP_PEELOFF 0xa0000000
-#define SCTP_FROM_SCTP_PANDA   0xb0000000
-#define SCTP_FROM_SCTP_SYSCTL  0xc0000000
+#define SCTP_FROM_SCTP_INPUT        0x10000000
+#define SCTP_FROM_SCTP_PCB          0x20000000
+#define SCTP_FROM_SCTP_INDATA       0x30000000
+#define SCTP_FROM_SCTP_TIMER        0x40000000
+#define SCTP_FROM_SCTP_USRREQ       0x50000000
+#define SCTP_FROM_SCTPUTIL          0x60000000
+#define SCTP_FROM_SCTP6_USRREQ      0x70000000
+#define SCTP_FROM_SCTP_ASCONF       0x80000000
+#define SCTP_FROM_SCTP_OUTPUT       0x90000000
+#define SCTP_FROM_SCTP_PEELOFF      0xa0000000
+#define SCTP_FROM_SCTP_PANDA        0xb0000000
+#define SCTP_FROM_SCTP_SYSCTL       0xc0000000
+#define SCTP_FROM_SCTP_CC_FUNCTIONS 0xd0000000
 
 /* Location ID's */
 #define SCTP_LOC_1  0x00000001
 #define SCTP_LOC_2  0x00000002
 #define SCTP_LOC_3  0x00000003
 #define SCTP_LOC_4  0x00000004
 #define SCTP_LOC_5  0x00000005
 #define SCTP_LOC_6  0x00000006
@@ -827,16 +825,18 @@ extern void getwintimeofday(struct timev
 #define SCTP_LOC_26 0x0000001a
 #define SCTP_LOC_27 0x0000001b
 #define SCTP_LOC_28 0x0000001c
 #define SCTP_LOC_29 0x0000001d
 #define SCTP_LOC_30 0x0000001e
 #define SCTP_LOC_31 0x0000001f
 #define SCTP_LOC_32 0x00000020
 #define SCTP_LOC_33 0x00000021
+#define SCTP_LOC_34 0x00000022
+#define SCTP_LOC_35 0x00000023
 
 
 /* Free assoc codes */
 #define SCTP_NORMAL_PROC      0
 #define SCTP_PCBFREE_NOFORCE  1
 #define SCTP_PCBFREE_FORCE    2
 
 /* From codes for adding addresses */
@@ -903,22 +903,30 @@ extern void getwintimeofday(struct timev
 #ifndef IPPROTO_SCTP
 #define IPPROTO_SCTP 132	/* the Official IANA number :-) */
 #endif				/* !IPPROTO_SCTP */
 
 #define SCTP_MAX_DATA_BUNDLING		256
 
 /* modular comparison */
 /* See RFC 1982 for details. */
-#define SCTP_SSN_GT(a, b) (((a < b) && ((uint16_t)(b - a) > (1U<<15))) || \
-                           ((a > b) && ((uint16_t)(a - b) < (1U<<15))))
-#define SCTP_SSN_GE(a, b) (SCTP_SSN_GT(a, b) || (a == b))
-#define SCTP_TSN_GT(a, b) (((a < b) && ((uint32_t)(b - a) > (1U<<31))) || \
-                           ((a > b) && ((uint32_t)(a - b) < (1U<<31))))
-#define SCTP_TSN_GE(a, b) (SCTP_TSN_GT(a, b) || (a == b))
+#define SCTP_UINT16_GT(a, b) (((a < b) && ((uint16_t)(b - a) > (1U<<15))) || \
+                              ((a > b) && ((uint16_t)(a - b) < (1U<<15))))
+#define SCTP_UINT16_GE(a, b) (SCTP_UINT16_GT(a, b) || (a == b))
+#define SCTP_UINT32_GT(a, b) (((a < b) && ((uint32_t)(b - a) > (1U<<31))) || \
+                              ((a > b) && ((uint32_t)(a - b) < (1U<<31))))
+#define SCTP_UINT32_GE(a, b) (SCTP_UINT32_GT(a, b) || (a == b))
+
+#define SCTP_SSN_GT(a, b) SCTP_UINT16_GT(a, b)
+#define SCTP_SSN_GE(a, b) SCTP_UINT16_GE(a, b)
+#define SCTP_TSN_GT(a, b) SCTP_UINT32_GT(a, b)
+#define SCTP_TSN_GE(a, b) SCTP_UINT32_GE(a, b)
+#define SCTP_MID_GT(i, a, b) (((i) == 1) ? SCTP_UINT32_GT(a, b) : SCTP_UINT16_GT((uint16_t)a, (uint16_t)b))
+#define SCTP_MID_GE(i, a, b) (((i) == 1) ? SCTP_UINT32_GE(a, b) : SCTP_UINT16_GE((uint16_t)a, (uint16_t)b))
+#define SCTP_MID_EQ(i, a, b) (((i) == 1) ? a == b : (uint16_t)a == (uint16_t)b)
 
 /* Mapping array manipulation routines */
 #define SCTP_IS_TSN_PRESENT(arry, gap) ((arry[(gap >> 3)] >> (gap & 0x07)) & 0x01)
 #define SCTP_SET_TSN_PRESENT(arry, gap) (arry[(gap >> 3)] |= (0x01 << ((gap & 0x07))))
 #define SCTP_UNSET_TSN_PRESENT(arry, gap) (arry[(gap >> 3)] &= ((~(0x01 << ((gap & 0x07)))) & 0xff))
 #define SCTP_CALC_TSN_TO_GAP(gap, tsn, mapping_tsn) do { \
 	                if (tsn >= mapping_tsn) { \
 						gap = tsn - mapping_tsn; \
@@ -931,17 +939,17 @@ extern void getwintimeofday(struct timev
 #define SCTP_RETRAN_DONE -1
 #define SCTP_RETRAN_EXIT -2
 
 /*
  * This value defines the number of vtag block time wait entry's per list
  * element.  Each entry will take 2 4 byte ints (and of course the overhead
  * of the next pointer as well). Using 15 as an example will yield * ((8 *
  * 15) + 8) or 128 bytes of overhead for each timewait block that gets
- * initialized. Increasing it to 31 would yeild 256 bytes per block.
+ * initialized. Increasing it to 31 would yield 256 bytes per block.
  */
 #define SCTP_NUMBER_IN_VTAG_BLOCK 15
 /*
  * If we use the STACK option, we have an array of this size head pointers.
  * This array is mod'd the with the size to find which bucket and then all
  * entries must be searched to see if the tag is in timed wait. If so we
  * reject it.
  */
@@ -979,38 +987,32 @@ extern void getwintimeofday(struct timev
 /*-
  * defines for socket lock states.
  * Used by __APPLE__ and SCTP_SO_LOCK_TESTING
  */
 #define SCTP_SO_LOCKED		1
 #define SCTP_SO_NOT_LOCKED	0
 
 
-#define SCTP_HOLDS_LOCK 1
-#define SCTP_NOT_LOCKED 0
-
 /*-
  * For address locks, do we hold the lock?
  */
 #define SCTP_ADDR_LOCKED 1
 #define SCTP_ADDR_NOT_LOCKED 0
 
 #define IN4_ISPRIVATE_ADDRESS(a) \
    ((((uint8_t *)&(a)->s_addr)[0] == 10) || \
     ((((uint8_t *)&(a)->s_addr)[0] == 172) && \
      (((uint8_t *)&(a)->s_addr)[1] >= 16) && \
      (((uint8_t *)&(a)->s_addr)[1] <= 32)) || \
     ((((uint8_t *)&(a)->s_addr)[0] == 192) && \
      (((uint8_t *)&(a)->s_addr)[1] == 168)))
 
 #define IN4_ISLOOPBACK_ADDRESS(a) \
-    ((((uint8_t *)&(a)->s_addr)[0] == 127) && \
-     (((uint8_t *)&(a)->s_addr)[1] == 0) && \
-     (((uint8_t *)&(a)->s_addr)[2] == 0) && \
-     (((uint8_t *)&(a)->s_addr)[3] == 1))
+    (((uint8_t *)&(a)->s_addr)[0] == 127)
 
 #define IN4_ISLINKLOCAL_ADDRESS(a) \
     ((((uint8_t *)&(a)->s_addr)[0] == 169) && \
      (((uint8_t *)&(a)->s_addr)[1] == 254))
 
 #if defined(__Userspace__)
 #if defined(__Userspace_os_Windows)
 #define SCTP_GETTIME_TIMEVAL(x)	getwintimeofday(x)
--- a/netwerk/sctp/src/netinet/sctp_crc32.c
+++ b/netwerk/sctp/src/netinet/sctp_crc32.c
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * a) Redistributions of source code must retain the above copyright notice,
@@ -27,17 +29,17 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifdef __FreeBSD__
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/netinet/sctp_crc32.c 235828 2012-05-23 11:26:28Z tuexen $");
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_crc32.c 310590 2016-12-26 11:06:41Z tuexen $");
 #endif
 
 #include <netinet/sctp_os.h>
 #include <netinet/sctp.h>
 #include <netinet/sctp_crc32.h>
 #include <netinet/sctp_pcb.h>
 
 
@@ -89,17 +91,17 @@
  * Reflected Bits = ....................... TRUE
  * Table Generation Offset = .............. 32 bits
  * Number of Slices = ..................... 8 slices
  * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
  * Directory Name = ....................... .\
  * File Name = ............................ 8x256_tables.c
  */
 
-static uint32_t sctp_crc_tableil8_o32[256] =
+static const uint32_t sctp_crc_tableil8_o32[256] =
 {
 	0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
 	0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
 	0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
 	0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
 	0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
 	0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
 	0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
@@ -145,17 +147,17 @@ static uint32_t sctp_crc_tableil8_o32[25
  * Reflected Bits = ....................... TRUE
  * Table Generation Offset = .............. 32 bits
  * Number of Slices = ..................... 8 slices
  * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
  * Directory Name = ....................... .\
  * File Name = ............................ 8x256_tables.c
  */
 
-static uint32_t sctp_crc_tableil8_o40[256] =
+static const uint32_t sctp_crc_tableil8_o40[256] =
 {
 	0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945,
 	0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD,
 	0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4,
 	0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C,
 	0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47,
 	0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF,
 	0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6,
@@ -201,17 +203,17 @@ static uint32_t sctp_crc_tableil8_o40[25
  * Reflected Bits = ....................... TRUE
  * Table Generation Offset = .............. 32 bits
  * Number of Slices = ..................... 8 slices
  * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
  * Directory Name = ....................... .\
  * File Name = ............................ 8x256_tables.c
  */
 
-static uint32_t sctp_crc_tableil8_o48[256] =
+static const uint32_t sctp_crc_tableil8_o48[256] =
 {
 	0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469,
 	0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC,
 	0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3,
 	0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726,
 	0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D,
 	0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8,
 	0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7,
@@ -257,17 +259,17 @@ static uint32_t sctp_crc_tableil8_o48[25
  * Reflected Bits = ....................... TRUE
  * Table Generation Offset = .............. 32 bits
  * Number of Slices = ..................... 8 slices
  * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
  * Directory Name = ....................... .\
  * File Name = ............................ 8x256_tables.c
  */
 
-static uint32_t sctp_crc_tableil8_o56[256] =
+static const uint32_t sctp_crc_tableil8_o56[256] =
 {
 	0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA,
 	0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C,
 	0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7,
 	0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11,
 	0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41,
 	0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7,
 	0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C,
@@ -313,17 +315,17 @@ static uint32_t sctp_crc_tableil8_o56[25
  * Reflected Bits = ....................... TRUE
  * Table Generation Offset = .............. 32 bits
  * Number of Slices = ..................... 8 slices
  * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
  * Directory Name = ....................... .\
  * File Name = ............................ 8x256_tables.c
  */
 
-static uint32_t sctp_crc_tableil8_o64[256] =
+static const uint32_t sctp_crc_tableil8_o64[256] =
 {
 	0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44,
 	0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5,
 	0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97,
 	0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406,
 	0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13,
 	0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082,
 	0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0,
@@ -369,17 +371,17 @@ static uint32_t sctp_crc_tableil8_o64[25
  * Reflected Bits = ....................... TRUE
  * Table Generation Offset = .............. 32 bits
  * Number of Slices = ..................... 8 slices
  * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
  * Directory Name = ....................... .\
  * File Name = ............................ 8x256_tables.c
  */
 
-uint32_t sctp_crc_tableil8_o72[256] =
+static const uint32_t sctp_crc_tableil8_o72[256] =
 {
 	0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD,
 	0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2,
 	0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93,
 	0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C,
 	0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20,
 	0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F,
 	0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E,
@@ -425,17 +427,17 @@ uint32_t sctp_crc_tableil8_o72[256] =
  * Reflected Bits = ....................... TRUE
  * Table Generation Offset = .............. 32 bits
  * Number of Slices = ..................... 8 slices
  * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
  * Directory Name = ....................... .\
  * File Name = ............................ 8x256_tables.c
  */
 
-static uint32_t sctp_crc_tableil8_o80[256] =
+static const uint32_t sctp_crc_tableil8_o80[256] =
 {
 	0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089,
 	0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA,
 	0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F,
 	0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C,
 	0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334,
 	0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67,
 	0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992,
@@ -481,17 +483,17 @@ static uint32_t sctp_crc_tableil8_o80[25
  * Reflected Bits = ....................... TRUE
  * Table Generation Offset = .............. 32 bits
  * Number of Slices = ..................... 8 slices
  * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
  * Directory Name = ....................... .\
  * File Name = ............................ 8x256_tables.c
  */
 
-static uint32_t sctp_crc_tableil8_o88[256] =
+static const uint32_t sctp_crc_tableil8_o88[256] =
 {
 	0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504,
 	0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE,
 	0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0,
 	0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A,
 	0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D,
 	0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447,
 	0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929,
@@ -608,17 +610,17 @@ multitable_crc32c(uint32_t crc32c,
 
 	if (length == 0) {
 		return (crc32c);
 	}
 	to_even_word = (4 - (((uintptr_t) buffer) & 0x3));
 	return (sctp_crc32c_sb8_64_bit(crc32c, buffer, length, to_even_word));
 }
 
-static uint32_t sctp_crc_c[256] = {
+static const uint32_t sctp_crc_c[256] = {
 	0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
 	0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
 	0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
 	0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
 	0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
 	0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
 	0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
 	0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
@@ -691,31 +693,38 @@ singletable_crc32c(uint32_t crc32c,
 	unsigned int i;
 
 	for (i = 0; i < length; i++) {
 		SCTP_CRC32C(crc32c, buffer[i]);
 	}
 	return (crc32c);
 }
 
-
+#if defined(__Userspace__)
+uint32_t
+#else
 static uint32_t
+#endif
 calculate_crc32c(uint32_t crc32c,
                  const unsigned char *buffer,
                  unsigned int length)
 {
 	if (length < 4) {
 		return (singletable_crc32c(crc32c, buffer, length));
 	} else {
 		return (multitable_crc32c(crc32c, buffer, length));
 	}
 }
 #endif /* FreeBSD < 80000 || other OS */
 
+#if defined(__Userspace__)
+uint32_t
+#else
 static uint32_t
+#endif
 sctp_finalize_crc32c(uint32_t crc32c)
 {
 	uint32_t result;
 
 #if BYTE_ORDER == BIG_ENDIAN
 	uint8_t byte0, byte1, byte2, byte3;
 
 #endif
--- a/netwerk/sctp/src/netinet/sctp_crc32.h
+++ b/netwerk/sctp/src/netinet/sctp_crc32.h
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * a) Redistributions of source code must retain the above copyright notice,
@@ -27,28 +29,30 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifdef __FreeBSD__
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/netinet/sctp_crc32.h 235828 2012-05-23 11:26:28Z tuexen $");
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_crc32.h 309607 2016-12-06 10:21:25Z tuexen $");
 #endif
 
 #ifndef _NETINET_SCTP_CRC32_H_
 #define _NETINET_SCTP_CRC32_H_
 
 #if defined(_KERNEL)
 #if !defined(SCTP_WITH_NO_CSUM)
 uint32_t sctp_calculate_cksum(struct mbuf *, uint32_t);
 #endif
 #if defined(__FreeBSD__)
 void sctp_delayed_cksum(struct mbuf *, uint32_t offset);
 #endif
 #endif				/* _KERNEL */
 #if defined(__Userspace__)
 #if !defined(SCTP_WITH_NO_CSUM)
+uint32_t calculate_crc32c(uint32_t, const unsigned char *, unsigned int);
+uint32_t sctp_finalize_crc32c(uint32_t);
 uint32_t sctp_calculate_cksum(struct mbuf *, uint32_t);
 #endif
 #endif
 #endif				/* __crc32c_h__ */
--- a/netwerk/sctp/src/netinet/sctp_header.h
+++ b/netwerk/sctp/src/netinet/sctp_header.h
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * a) Redistributions of source code must retain the above copyright notice,
@@ -27,17 +29,17 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifdef __FreeBSD__
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/netinet/sctp_header.h 273168 2014-10-16 15:36:04Z tuexen $");
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_header.h 309682 2016-12-07 19:30:59Z tuexen $");
 #endif
 
 #ifndef _NETINET_SCTP_HEADER_H_
 #define _NETINET_SCTP_HEADER_H_
 
 #if defined(__Windows__) && !defined(__Userspace_os_Windows)
 #include <packon.h>
 #endif
@@ -148,27 +150,44 @@ struct sctp_supported_chunk_types_param 
 } SCTP_PACKED;
 
 
 /*
  * Structures for DATA chunks
  */
 struct sctp_data {
 	uint32_t tsn;
-	uint16_t stream_id;
-	uint16_t stream_sequence;
-	uint32_t protocol_id;
+	uint16_t sid;
+	uint16_t ssn;
+	uint32_t ppid;
 	/* user data follows */
 } SCTP_PACKED;
 
 struct sctp_data_chunk {
 	struct sctp_chunkhdr ch;
 	struct sctp_data dp;
 } SCTP_PACKED;
 
+struct sctp_idata {
+	uint32_t tsn;
+	uint16_t sid;
+	uint16_t reserved;	/* Where does the SSN go? */
+	uint32_t mid;
+	union {
+		uint32_t ppid;
+		uint32_t fsn;	/* Fragment Sequence Number */
+	} ppid_fsn;
+	/* user data follows */
+} SCTP_PACKED;
+
+struct sctp_idata_chunk {
+	struct sctp_chunkhdr ch;
+	struct sctp_idata dp;
+} SCTP_PACKED;
+
 /*
  * Structures for the control chunks
  */
 
 /* Initiate (INIT)/Initiate Ack (INIT ACK) */
 struct sctp_init {
 	uint32_t initiate_tag;	/* initiate tag */
 	uint32_t a_rwnd;	/* a_rwnd */
@@ -215,44 +234,16 @@ struct sctp_state_cookie {	/* this is ou
 	uint8_t loopback_scope;	/* loopback scope information */
 	uint8_t reserved[SCTP_RESERVE_SPACE];    /* Align to 64 bits */
 	/*
 	 * at the end is tacked on the INIT chunk and the INIT-ACK chunk
 	 * (minus the cookie).
 	 */
 } SCTP_PACKED;
 
-
-/* Used for NAT state error cause */
-struct sctp_missing_nat_state {
-	uint16_t cause;
-	uint16_t length;
-        uint8_t data[];
-} SCTP_PACKED;
-
-
-struct sctp_inv_mandatory_param {
-	uint16_t cause;
-	uint16_t length;
-	uint32_t num_param;
-	uint16_t param;
-	/*
-	 * We include this to 0 it since only a missing cookie will cause
-	 * this error.
-	 */
-	uint16_t resv;
-} SCTP_PACKED;
-
-struct sctp_unresolv_addr {
-	uint16_t cause;
-	uint16_t length;
-	uint16_t addr_type;
-	uint16_t reserved;	/* Only one invalid addr type */
-} SCTP_PACKED;
-
 /* state cookie parameter */
 struct sctp_state_cookie_param {
 	struct sctp_paramhdr ph;
 	struct sctp_state_cookie cookie;
 } SCTP_PACKED;
 
 struct sctp_init_chunk {
 	struct sctp_chunkhdr ch;
@@ -383,38 +374,21 @@ struct sctp_cwr_chunk {
 	uint32_t tsn;
 } SCTP_PACKED;
 
 /* Shutdown Complete (SHUTDOWN COMPLETE) */
 struct sctp_shutdown_complete_chunk {
 	struct sctp_chunkhdr ch;
 } SCTP_PACKED;
 
-/* Oper error holding a stale cookie */
-struct sctp_stale_cookie_msg {
-	struct sctp_paramhdr ph;/* really an error cause */
-	uint32_t time_usec;
-} SCTP_PACKED;
-
 struct sctp_adaptation_layer_indication {
 	struct sctp_paramhdr ph;
 	uint32_t indication;
 } SCTP_PACKED;
 
-struct sctp_cookie_while_shutting_down {
-	struct sctphdr sh;
-	struct sctp_chunkhdr ch;
-	struct sctp_paramhdr ph;/* really an error cause */
-} SCTP_PACKED;
-
-struct sctp_shutdown_complete_msg {
-	struct sctphdr sh;
-	struct sctp_shutdown_complete_chunk shut_cmp;
-} SCTP_PACKED;
-
 /*
  * draft-ietf-tsvwg-addip-sctp
  */
 /* Address/Stream Configuration Change (ASCONF) */
 struct sctp_asconf_chunk {
 	struct sctp_chunkhdr ch;
 	uint32_t serial_number;
 	/* lookup address parameter (mandatory) */
@@ -432,20 +406,26 @@ struct sctp_asconf_ack_chunk {
 /* Forward Cumulative TSN (FORWARD TSN) */
 struct sctp_forward_tsn_chunk {
 	struct sctp_chunkhdr ch;
 	uint32_t new_cumulative_tsn;
 	/* stream/sequence pairs (sctp_strseq) follow */
 } SCTP_PACKED;
 
 struct sctp_strseq {
-	uint16_t stream;
-	uint16_t sequence;
+	uint16_t sid;
+	uint16_t ssn;
 } SCTP_PACKED;
 
+struct sctp_strseq_mid {
+	uint16_t sid;
+	uint16_t flags;
+	uint32_t mid;
+};
+
 struct sctp_forward_tsn_msg {
 	struct sctphdr sh;
 	struct sctp_forward_tsn_chunk msg;
 } SCTP_PACKED;
 
 /* should be a multiple of 4 - 1 aka 3/7/11 etc. */
 
 #define SCTP_NUM_DB_TO_VERIFY 31
@@ -567,22 +547,16 @@ struct sctp_auth_hmac_algo {
 
 struct sctp_auth_chunk {
 	struct sctp_chunkhdr ch;
 	uint16_t shared_key_id;
 	uint16_t hmac_id;
 	uint8_t hmac[];
 } SCTP_PACKED;
 
-struct sctp_auth_invalid_hmac {
-	struct sctp_paramhdr ph;
-	uint16_t hmac_id;
-	uint16_t padding;
-} SCTP_PACKED;
-
 /*
  * we pre-reserve enough room for a ECNE or CWR AND a SACK with no missing
  * pieces. If ENCE is missing we could have a couple of blocks. This way we
  * optimize so we MOST likely can bundle a SACK/ECN with the smallest size
  * data chunk I will split into. We could increase throughput slightly by
  * taking out these two but the  24-sack/8-CWR i.e. 32 bytes I pre-reserve I
  * feel is worth it for now.
  */
@@ -626,12 +600,12 @@ struct sctp_auth_invalid_hmac {
 
 #define SCTP_MIN_V4_OVERHEAD (sizeof(struct ip) + \
 			      sizeof(struct sctphdr))
 
 #if defined(__Windows__)
 #include <packoff.h>
 #endif
 #if defined(__Userspace_os_Windows)
-#pragma pack ()
+#pragma pack(pop)
 #endif
 #undef SCTP_PACKED
 #endif				/* !__sctp_header_h__ */
--- a/netwerk/sctp/src/netinet/sctp_indata.c
+++ b/netwerk/sctp/src/netinet/sctp_indata.c
@@ -1,9 +1,11 @@
 /*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * a) Redistributions of source code must retain the above copyright notice,
@@ -27,41 +29,56 @@
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifdef __FreeBSD__
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 280440 2015-03-24 15:05:36Z tuexen $");
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 325434 2017-11-05 11:59:33Z tuexen $");
 #endif
 
 #include <netinet/sctp_os.h>
+#ifdef __FreeBSD__
+#include <sys/proc.h>
+#endif
 #include <netinet/sctp_var.h>
 #include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_header.h>
 #include <netinet/sctp_pcb.h>
-#include <netinet/sctp_header.h>
 #include <netinet/sctputil.h>
 #include <netinet/sctp_output.h>
-#include <netinet/sctp_input.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_asconf.h>
 #include <netinet/sctp_indata.h>
-#include <netinet/sctp_uio.h>
-#include <netinet/sctp_timer.h>
-
-
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_crc32.h>
+#ifdef __FreeBSD__
+#include <netinet/sctp_lock_bsd.h>
+#endif
 /*
  * NOTES: On the outbound side of things I need to check the sack timer to
  * see if I should generate a sack into the chunk queue (if I have data to
  * send that is and will be sending it .. for bundling.
  *
  * The callback in sctp_usrreq.c will get called when the socket is read from.
  * This will cause sctp_service_queues() to get called on the top entry in
  * the list.
  */
+static uint32_t
+sctp_add_chk_to_control(struct sctp_queued_to_read *control,
+			struct sctp_stream_in *strm,
+			struct sctp_tcb *stcb,
+			struct sctp_association *asoc,
+			struct sctp_tmit_chunk *chk, int lock_held);
+
 
 void
 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
 {
 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
 }
 
 /* Calculate what the rwnd would be */
@@ -71,38 +88,41 @@ sctp_calc_rwnd(struct sctp_tcb *stcb, st
 	uint32_t calc = 0;
 
 	/*
 	 * This is really set wrong with respect to a 1-2-m socket. Since
 	 * the sb_cc is the count that everyone as put up. When we re-write
 	 * sctp_soreceive then we will fix this so that ONLY this
 	 * associations data is taken into account.
 	 */
-	if (stcb->sctp_socket == NULL)
+	if (stcb->sctp_socket == NULL) {
 		return (calc);
-
+	}
+
+	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
+	        ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
+	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
+	        ("size_on_all_streams is %u", asoc->size_on_all_streams));
 	if (stcb->asoc.sb_cc == 0 &&
-	    asoc->size_on_reasm_queue == 0 &&
-	    asoc->size_on_all_streams == 0) {
+	    asoc->cnt_on_reasm_queue == 0 &&
+	    asoc->cnt_on_all_streams == 0) {
 		/* Full rwnd granted */
 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
 		return (calc);
 	}
 	/* get actual space */
 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
-
 	/*
 	 * take out what has NOT been put on socket queue and we yet hold
 	 * for putting up.
 	 */
 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
 	                                         asoc->cnt_on_reasm_queue * MSIZE));
 	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
 	                                         asoc->cnt_on_all_streams * MSIZE));
-
 	if (calc == 0) {
 		/* out of space */
 		return (calc);
 	}
 
 	/* what is the overhead of all these rwnd's */
 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
 	/* If the window gets too small due to ctrl-stuff, reduce it
@@ -118,93 +138,46 @@ sctp_calc_rwnd(struct sctp_tcb *stcb, st
 
 /*
  * Build out our readq entry based on the incoming packet.
  */
 struct sctp_queued_to_read *
 sctp_build_readq_entry(struct sctp_tcb *stcb,
     struct sctp_nets *net,
     uint32_t tsn, uint32_t ppid,
-    uint32_t context, uint16_t stream_no,
-    uint16_t stream_seq, uint8_t flags,
+    uint32_t context, uint16_t sid,
+    uint32_t mid, uint8_t flags,
     struct mbuf *dm)
 {
 	struct sctp_queued_to_read *read_queue_e = NULL;
 
 	sctp_alloc_a_readq(stcb, read_queue_e);
 	if (read_queue_e == NULL) {
 		goto failed_build;
 	}
-	read_queue_e->sinfo_stream = stream_no;
-	read_queue_e->sinfo_ssn = stream_seq;
+	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
+	read_queue_e->sinfo_stream = sid;
 	read_queue_e->sinfo_flags = (flags << 8);
 	read_queue_e->sinfo_ppid = ppid;
 	read_queue_e->sinfo_context = context;
-	read_queue_e->sinfo_timetolive = 0;
 	read_queue_e->sinfo_tsn = tsn;
 	read_queue_e->sinfo_cumtsn = tsn;
 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
+	read_queue_e->mid = mid;
+	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
+	TAILQ_INIT(&read_queue_e->reasm);
 	read_queue_e->whoFrom = net;
-	read_queue_e->length = 0;
 	atomic_add_int(&net->ref_count, 1);
 	read_queue_e->data = dm;
-	read_queue_e->spec_flags = 0;
-	read_queue_e->tail_mbuf = NULL;
-	read_queue_e->aux_data = NULL;
 	read_queue_e->stcb = stcb;
 	read_queue_e->port_from = stcb->rport;
-	read_queue_e->do_not_ref_stcb = 0;
-	read_queue_e->end_added = 0;
-	read_queue_e->some_taken = 0;
-	read_queue_e->pdapi_aborted = 0;
 failed_build:
 	return (read_queue_e);
 }
 
-
-/*
- * Build out our readq entry based on the incoming packet.
- */
-static struct sctp_queued_to_read *
-sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
-    struct sctp_tmit_chunk *chk)
-{
-	struct sctp_queued_to_read *read_queue_e = NULL;
-
-	sctp_alloc_a_readq(stcb, read_queue_e);
-	if (read_queue_e == NULL) {
-		goto failed_build;
-	}
-	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
-	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
-	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
-	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
-	read_queue_e->sinfo_context = stcb->asoc.context;
-	read_queue_e->sinfo_timetolive = 0;
-	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
-	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
-	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
-	read_queue_e->whoFrom = chk->whoTo;
-	read_queue_e->aux_data = NULL;
-	read_queue_e->length = 0;
-	atomic_add_int(&chk->whoTo->ref_count, 1);
-	read_queue_e->data = chk->data;
-	read_queue_e->tail_mbuf = NULL;
-	read_queue_e->stcb = stcb;
-	read_queue_e->port_from = stcb->rport;
-	read_queue_e->spec_flags = 0;
-	read_queue_e->do_not_ref_stcb = 0;
-	read_queue_e->end_added = 0;
-	read_queue_e->some_taken = 0;
-	read_queue_e->pdapi_aborted = 0;
-failed_build:
-	return (read_queue_e);
-}
-
-
 struct mbuf *
 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
 {
 	struct sctp_extrcvinfo *seinfo;
 	struct sctp_sndrcvinfo *outinfo;
 	struct sctp_rcvinfo *rcvinfo;
 	struct sctp_nxtinfo *nxtinfo;
 #if defined(__Userspace_os_Windows)
@@ -225,19 +198,19 @@ sctp_build_ctl_nchunk(struct sctp_inpcb 
 	}
 
 	len = 0;
 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
 	}
 	seinfo = (struct sctp_extrcvinfo *)sinfo;
 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
-	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
+	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
 		provide_nxt = 1;
-		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
+		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
 	} else {
 		provide_nxt = 0;
 	}
 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
 			use_extended = 1;
 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
 		} else {
@@ -286,30 +259,30 @@ sctp_build_ctl_nchunk(struct sctp_inpcb 
 #endif
 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
 	}
 	if (provide_nxt) {
 		cmh->cmsg_level = IPPROTO_SCTP;
 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
 		cmh->cmsg_type = SCTP_NXTINFO;
 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
-		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
+		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
 		nxtinfo->nxt_flags = 0;
-		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
+		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
 		}
-		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
+		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
 		}
-		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
+		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
 		}
-		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
-		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
-		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
+		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
+		nxtinfo->nxt_length = seinfo->serinfo_next_length;
+		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
 #if defined(__Userspace_os_Windows)
 		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
 #else
 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
 #endif
 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
 	}
 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
@@ -331,37 +304,42 @@ sctp_build_ctl_nchunk(struct sctp_inpcb 
 }
 
 
 static void
 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
 {
 	uint32_t gap, i, cumackp1;
 	int fnd = 0;
-
+	int in_r=0, in_nr=0;
 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
 		return;
 	}
 	cumackp1 = asoc->cumulative_tsn + 1;
 	if (SCTP_TSN_GT(cumackp1, tsn)) {
 		/* this tsn is behind the cum ack and thus we don't
 		 * need to worry about it being moved from one to the other.
 		 */
 		return;
 	}
 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
-	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
+	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
+	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
+	if ((in_r == 0) && (in_nr == 0)) {
+#ifdef INVARIANTS
+		panic("Things are really messed up now");
+#else
 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
 		sctp_print_mapping_array(asoc);
-#ifdef INVARIANTS
-		panic("Things are really messed up now!!");
 #endif
 	}
-	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
-	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+	if (in_nr == 0)
+		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+	if (in_r)
+		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
 		asoc->highest_tsn_inside_nr_map = tsn;
 	}
 	if (tsn == asoc->highest_tsn_inside_map) {
 		/* We must back down to see what the new highest is */
 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
@@ -371,949 +349,1393 @@ sctp_mark_non_revokable(struct sctp_asso
 			}
 		}
 		if (!fnd) {
 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
 		}
 	}
 }
 
-
-/*
- * We are delivering currently from the reassembly queue. We must continue to
- * deliver until we either: 1) run out of space. 2) run out of sequential
- * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
- */
-static void
-sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
+static int
+sctp_place_control_in_stream(struct sctp_stream_in *strm,
+			     struct sctp_association *asoc,
+			     struct sctp_queued_to_read *control)
 {
-	struct sctp_tmit_chunk *chk, *nchk;
-	uint16_t nxt_todel;
-	uint16_t stream_no;
-	int end = 0;
-	int cntDel;
-	struct sctp_queued_to_read *control, *ctl, *nctl;
-
-	if (stcb == NULL)
-		return;
-
-	cntDel = stream_no = 0;
-	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
-	     (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
-	     (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
-		/* socket above is long gone or going.. */
-	abandon:
-		asoc->fragmented_delivery_inprogress = 0;
-		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
-			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
-			asoc->size_on_reasm_queue -= chk->send_size;
-			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
-			/*
-			 * Lose the data pointer, since its in the socket
-			 * buffer
-			 */
-			if (chk->data) {
-				sctp_m_freem(chk->data);
-				chk->data = NULL;
+	struct sctp_queued_to_read *at;
+	struct sctp_readhead *q;
+	uint8_t flags, unordered;
+
+	flags = (control->sinfo_flags >> 8);
+	unordered = flags & SCTP_DATA_UNORDERED;
+	if (unordered) {
+		q = &strm->uno_inqueue;
+		if (asoc->idata_supported == 0) {
+			if (!TAILQ_EMPTY(q)) {
+				/* Only one stream can be here in old style  -- abort */
+				return (-1);
 			}
-			/* Now free the address and data */
-			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
-			/*sa_ignore FREED_MEMORY*/
-		}
-		return;
-	}
-	SCTP_TCB_LOCK_ASSERT(stcb);
-	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
-		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
-			/* Can't deliver more :< */
-			return;
-		}
-		stream_no = chk->rec.data.stream_number;
-		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
-		if (nxt_todel != chk->rec.data.stream_seq &&
-		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
-			/*
-			 * Not the next sequence to deliver in its stream OR
-			 * unordered
-			 */
-			return;
-		}
-		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
-
-			control = sctp_build_readq_entry_chk(stcb, chk);
-			if (control == NULL) {
-				/* out of memory? */
-				return;
-			}
-			/* save it off for our future deliveries */
-			stcb->asoc.control_pdapi = control;
-			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
-				end = 1;
-			else
-				end = 0;
-			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
-			sctp_add_to_readq(stcb->sctp_ep,
-			                  stcb, control, &stcb->sctp_socket->so_rcv, end,
-			                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
-			cntDel++;
+			TAILQ_INSERT_TAIL(q, control, next_instrm);
+			control->on_strm_q = SCTP_ON_UNORDERED;
+			return (0);
+		}
+	} else {
+		q = &strm->inqueue;
+	}
+	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+		control->end_added = 1;
+		control->first_frag_seen = 1;
+		control->last_frag_seen = 1;
+	}
+	if (TAILQ_EMPTY(q)) {
+		/* Empty queue */
+		TAILQ_INSERT_HEAD(q, control, next_instrm);
+		if (unordered) {
+			control->on_strm_q = SCTP_ON_UNORDERED;
 		} else {
-			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
-				end = 1;
-			else
-				end = 0;
-			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
-			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
-			    stcb->asoc.control_pdapi,
-			    chk->data, end, chk->rec.data.TSN_seq,
-			    &stcb->sctp_socket->so_rcv)) {
+			control->on_strm_q = SCTP_ON_ORDERED;
+		}
+		return (0);
+	} else {
+		TAILQ_FOREACH(at, q, next_instrm) {
+			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
 				/*
-				 * something is very wrong, either
-				 * control_pdapi is NULL, or the tail_mbuf
-				 * is corrupt, or there is a EOM already on
-				 * the mbuf chain.
+				 * one in queue is bigger than the
+				 * new one, insert before this one
 				 */
-				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
-					goto abandon;
+				TAILQ_INSERT_BEFORE(at, control, next_instrm);
+				if (unordered) {
+					control->on_strm_q = SCTP_ON_UNORDERED;
 				} else {
-#ifdef INVARIANTS
-					if ((stcb->asoc.control_pdapi == NULL)  || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
-						panic("This should not happen control_pdapi NULL?");
+					control->on_strm_q = SCTP_ON_ORDERED ;
+				}
+				break;
+			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
+				/*
+				 * Gak, He sent me a duplicate msg
+				 * id number?? return -1 to abort.
+				 */
+				return (-1);
+			} else {
+				if (TAILQ_NEXT(at, next_instrm) == NULL) {
+					/*
+					 * We are at the end, insert
+					 * it after this one
+					 */
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+						sctp_log_strm_del(control, at,
+								  SCTP_STR_LOG_FROM_INSERT_TL);
 					}
-					/* if we did not panic, it was a EOM */
-					panic("Bad chunking ??");
-#else
-					if ((stcb->asoc.control_pdapi == NULL)  || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
-					  SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
+					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
+					if (unordered) {
+						control->on_strm_q = SCTP_ON_UNORDERED ;
+					} else {
+						control->on_strm_q = SCTP_ON_ORDERED ;
 					}
-					SCTP_PRINTF("Bad chunking ??\n");
-					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
-
-#endif
-					goto abandon;
+					break;
 				}
 			}
-			cntDel++;
-		}
-		/* pull it we did it */
-		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
-		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
-			asoc->fragmented_delivery_inprogress = 0;
-			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
-				asoc->strmin[stream_no].last_sequence_delivered++;
-			}
-			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
-				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
-			}
-		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
-			/*
-			 * turn the flag back on since we just  delivered
-			 * yet another one.
-			 */
-			asoc->fragmented_delivery_inprogress = 1;
-		}
-		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
-		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
-		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
-		asoc->last_strm_no_delivered = chk->rec.data.stream_number;
-
-		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
-		asoc->size_on_reasm_queue -= chk->send_size;
-		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
-		/* free up the chk */
+		}
+	}
+	return (0);
+}
+
+static void
+sctp_abort_in_reasm(struct sctp_tcb *stcb,
+                    struct sctp_queued_to_read *control,
+                    struct sctp_tmit_chunk *chk,
+                    int *abort_flag, int opspot)
+{
+	char msg[SCTP_DIAG_INFO_LEN];
+	struct mbuf *oper;
+
+	if (stcb->asoc.idata_supported) {
+		snprintf(msg, sizeof(msg),
+			 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
+			 opspot,
+			 control->fsn_included,
+			 chk->rec.data.tsn,
+			 chk->rec.data.sid,
+			 chk->rec.data.fsn, chk->rec.data.mid);
+	} else {
+		snprintf(msg, sizeof(msg),
+			 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
+			 opspot,
+			 control->fsn_included,
+			 chk->rec.data.tsn,
+			 chk->rec.data.sid,
+			 chk->rec.data.fsn,
+			 (uint16_t)chk->rec.data.mid);
+	}
+	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+	sctp_m_freem(chk->data);
+	chk->data = NULL;
+	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
+	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+	*abort_flag = 1;
+}
+
+static void
+sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
+{
+	/* 
+	 * The control could not be placed and must be cleaned.
+	 */
+	struct sctp_tmit_chunk *chk, *nchk;
+	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
+		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
+		if (chk->data)
+			sctp_m_freem(chk->data);
 		chk->data = NULL;
 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
-
-		if (asoc->fragmented_delivery_inprogress == 0) {
-			/*
-			 * Now lets see if we can deliver the next one on
-			 * the stream
-			 */
-			struct sctp_stream_in *strm;
-
-			strm = &asoc->strmin[stream_no];
-			nxt_todel = strm->last_sequence_delivered + 1;
-			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
-				/* Deliver more if we can. */
-				if (nxt_todel == ctl->sinfo_ssn) {
-					TAILQ_REMOVE(&strm->inqueue, ctl, next);
-					asoc->size_on_all_streams -= ctl->length;
-					sctp_ucount_decr(asoc->cnt_on_all_streams);
-					strm->last_sequence_delivered++;
-					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
-					sctp_add_to_readq(stcb->sctp_ep, stcb,
-					                  ctl,
-					                  &stcb->sctp_socket->so_rcv, 1,
-					                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
-				} else {
-					break;
-				}
-				nxt_todel = strm->last_sequence_delivered + 1;
-			}
-			break;
-		}
-	}
+	}
+	sctp_free_a_readq(stcb, control);	
 }
 
 /*
  * Queue the chunk either right into the socket buffer if it is the next one
  * to go OR put it in the correct place in the delivery queue.  If we do
- * append to the so_buf, keep doing so until we are out of order. One big
- * question still remains, what to do when the socket buffer is FULL??
+ * append to the so_buf, keep doing so until we are out of order as
+ * long as the control's entered are non-fragmented.
  */
 static void
-sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
-    struct sctp_queued_to_read *control, int *abort_flag)
+sctp_queue_data_to_stream(struct sctp_tcb *stcb,
+    struct sctp_association *asoc,
+    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
 {
 	/*
 	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
 	 * all the data in one stream this could happen quite rapidly. One
 	 * could use the TSN to keep track of things, but this scheme breaks
-	 * down in the other type of stream useage that could occur. Send a
+	 * down in the other type of stream usage that could occur. Send a
 	 * single msg to stream 0, send 4Billion messages to stream 1, now
 	 * send a message to stream 0. You have a situation where the TSN
 	 * has wrapped but not in the stream. Is this worth worrying about
 	 * or should we just change our queue sort at the bottom to be by
 	 * TSN.
 	 *
 	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
 	 * assignment this could happen... and I don't see how this would be
 	 * a violation. So for now I am undecided an will leave the sort by
 	 * SSN alone. Maybe a hybred approach is the answer
 	 *
 	 */
-	struct sctp_stream_in *strm;
 	struct sctp_queued_to_read *at;
 	int queue_needed;
-	uint16_t nxt_todel;
+	uint32_t nxt_todel;
 	struct mbuf *op_err;
+	struct sctp_stream_in *strm;
 	char msg[SCTP_DIAG_INFO_LEN];
 
-	queue_needed = 1;
-	asoc->size_on_all_streams += control->length;
-	sctp_ucount_incr(asoc->cnt_on_all_streams);
 	strm = &asoc->strmin[control->sinfo_stream];
-	nxt_todel = strm->last_sequence_delivered + 1;
 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
 	}
-	SCTPDBG(SCTP_DEBUG_INDATA1,
-		"queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
-		(uint32_t) control->sinfo_stream,
-		(uint32_t) strm->last_sequence_delivered,
-		(uint32_t) nxt_todel);
-	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
+	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
 		/* The incoming sseq is behind where we last delivered? */
-		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
-			control->sinfo_ssn, strm->last_sequence_delivered);
-	protocol_error:
+		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
+			strm->last_mid_delivered, control->mid);
 		/*
 		 * throw it in the stream so it gets cleaned up in
 		 * association destruction
 		 */
-		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
-		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-		         strm->last_sequence_delivered, control->sinfo_tsn,
-			 control->sinfo_stream, control->sinfo_ssn);
+		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
+		if (asoc->idata_supported) {
+			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
+			         strm->last_mid_delivered, control->sinfo_tsn,
+			         control->sinfo_stream, control->mid);
+		} else {
+			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
+			         (uint16_t)strm->last_mid_delivered,
+			         control->sinfo_tsn,
+			         control->sinfo_stream,
+			         (uint16_t)control->mid);
+		}
 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_1;
+		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
 		*abort_flag = 1;
 		return;
 
 	}
-	if (nxt_todel == control->sinfo_ssn) {
+	queue_needed = 1;
+	asoc->size_on_all_streams += control->length;
+	sctp_ucount_incr(asoc->cnt_on_all_streams);
+	nxt_todel = strm->last_mid_delivered + 1;
+	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		struct socket *so;
+
+		so = SCTP_INP_SO(stcb->sctp_ep);
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+			SCTP_SOCKET_UNLOCK(so, 1);
+			return;
+		}
+#endif
 		/* can be delivered right away? */
 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
 		}
-		/* EY it wont be queued if it could be delivered directly*/
+		/* EY it wont be queued if it could be delivered directly */
 		queue_needed = 0;
-		asoc->size_on_all_streams -= control->length;
+		if (asoc->size_on_all_streams >= control->length) {
+			asoc->size_on_all_streams -= control->length;
+		} else {
+#ifdef INVARIANTS
+			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+			asoc->size_on_all_streams = 0;
+#endif
+		}
 		sctp_ucount_decr(asoc->cnt_on_all_streams);
-		strm->last_sequence_delivered++;
-
+		strm->last_mid_delivered++;
 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
 		sctp_add_to_readq(stcb->sctp_ep, stcb,
 		                  control,
 		                  &stcb->sctp_socket->so_rcv, 1,
-		                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
-		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
+		                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
+		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
 			/* all delivered */
-			nxt_todel = strm->last_sequence_delivered + 1;
-			if (nxt_todel == control->sinfo_ssn) {
-				TAILQ_REMOVE(&strm->inqueue, control, next);
-				asoc->size_on_all_streams -= control->length;
-				sctp_ucount_decr(asoc->cnt_on_all_streams);
-				strm->last_sequence_delivered++;
+			nxt_todel = strm->last_mid_delivered + 1;
+			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
+			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
+				if (control->on_strm_q == SCTP_ON_ORDERED) {
+					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+					if (asoc->size_on_all_streams >= control->length) {
+						asoc->size_on_all_streams -= control->length;
+					} else {
+#ifdef INVARIANTS
+						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+						asoc->size_on_all_streams = 0;
+#endif
+					}
+					sctp_ucount_decr(asoc->cnt_on_all_streams);
+#ifdef INVARIANTS
+				} else {
+					panic("Huh control: %p is on_strm_q: %d",
+					      control, control->on_strm_q);
+#endif
+				}
+				control->on_strm_q = 0;
+				strm->last_mid_delivered++;
 				/*
 				 * We ignore the return of deliver_data here
 				 * since we always can hold the chunk on the
 				 * d-queue. And we have a finite number that
 				 * can be delivered from the strq.
 				 */
 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
 					sctp_log_strm_del(control, NULL,
 							  SCTP_STR_LOG_FROM_IMMED_DEL);
 				}
 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
 				sctp_add_to_readq(stcb->sctp_ep, stcb,
 				                  control,
 				                  &stcb->sctp_socket->so_rcv, 1,
 				                  SCTP_READ_LOCK_NOT_HELD,
-				                  SCTP_SO_NOT_LOCKED);
+				                  SCTP_SO_LOCKED);
 				continue;
+			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
+				*need_reasm = 1;
 			}
 			break;
 		}
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
 	}
 	if (queue_needed) {
 		/*
 		 * Ok, we did not deliver this guy, find the correct place
 		 * to put it on the queue.
 		 */
-		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
-			goto protocol_error;
-		}
-		if (TAILQ_EMPTY(&strm->inqueue)) {
-			/* Empty queue */
-			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
-				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
-			}
-			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
-		} else {
-			TAILQ_FOREACH(at, &strm->inqueue, next) {
-				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
-					/*
-					 * one in queue is bigger than the
-					 * new one, insert before this one
-					 */
-					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
-						sctp_log_strm_del(control, at,
-								  SCTP_STR_LOG_FROM_INSERT_MD);
-					}
-					TAILQ_INSERT_BEFORE(at, control, next);
-					break;
-				} else if (at->sinfo_ssn == control->sinfo_ssn) {
-					/*
-					 * Gak, He sent me a duplicate str
-					 * seq number
-					 */
-					/*
-					 * foo bar, I guess I will just free
-					 * this new guy, should we abort
-					 * too? FIX ME MAYBE? Or it COULD be
-					 * that the SSN's have wrapped.
-					 * Maybe I should compare to TSN
-					 * somehow... sigh for now just blow
-					 * away the chunk!
-					 */
-
-					if (control->data)
-						sctp_m_freem(control->data);
-					control->data = NULL;
-					asoc->size_on_all_streams -= control->length;
-					sctp_ucount_decr(asoc->cnt_on_all_streams);
-					if (control->whoFrom) {
-						sctp_free_remote_addr(control->whoFrom);
-						control->whoFrom = NULL;
-					}
-					sctp_free_a_readq(stcb, control);
-					return;
-				} else {
-					if (TAILQ_NEXT(at, next) == NULL) {
-						/*
-						 * We are at the end, insert
-						 * it after this one
-						 */
-						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
-							sctp_log_strm_del(control, at,
-									  SCTP_STR_LOG_FROM_INSERT_TL);
-						}
-						TAILQ_INSERT_AFTER(&strm->inqueue,
-						    at, control, next);
-						break;
-					}
-				}
-			}
+		if (sctp_place_control_in_stream(strm, asoc, control)) {
+			snprintf(msg, sizeof(msg),
+				 "Queue to str MID: %u duplicate",
+				 control->mid);
+			sctp_clean_up_control(stcb, control);
+			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
+			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+			*abort_flag = 1;
 		}
 	}
 }
 
-/*
- * Returns two things: You get the total size of the deliverable parts of the
- * first fragmented message on the reassembly queue. And you get a 1 back if
- * all of the message is ready or a 0 back if the message is still incomplete
- */
-static int
-sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
+
+static void
+sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
+{
+	struct mbuf *m, *prev = NULL;
+	struct sctp_tcb *stcb;
+
+	stcb = control->stcb;
+	control->held_length = 0;
+	control->length = 0;
+	m = control->data;
+	while (m) {
+		if (SCTP_BUF_LEN(m) == 0) {
+			/* Skip mbufs with NO length */
+			if (prev == NULL) {
+				/* First one */
+				control->data = sctp_m_free(m);
+				m = control->data;
+			} else {
+				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
+				m = SCTP_BUF_NEXT(prev);
+			}
+			if (m == NULL) {
+				control->tail_mbuf = prev;
+			}
+			continue;
+		}
+		prev = m;
+		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
+		if (control->on_read_q) {
+			/*
+			 * On read queue so we must increment the
+			 * SB stuff, we assume caller has done any locks of SB.
+			 */
+			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
+		}
+		m = SCTP_BUF_NEXT(m);
+	}
+	if (prev) {
+		control->tail_mbuf = prev;
+	}
+}
+
+static void
+sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
+{
+	struct mbuf *prev=NULL;
+	struct sctp_tcb *stcb;
+
+	stcb = control->stcb;
+	if (stcb == NULL) {
+#ifdef INVARIANTS
+		panic("Control broken");
+#else
+		return;
+#endif
+	}
+	if (control->tail_mbuf == NULL) {
+		/* TSNH */
+		control->data = m;
+		sctp_setup_tail_pointer(control);
+		return;
+	}
+	control->tail_mbuf->m_next = m;
+	while (m) {
+		if (SCTP_BUF_LEN(m) == 0) {
+			/* Skip mbufs with NO length */
+			if (prev == NULL) {
+				/* First one */
+				control->tail_mbuf->m_next = sctp_m_free(m);
+				m = control->tail_mbuf->m_next;
+			} else {
+				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
+				m = SCTP_BUF_NEXT(prev);
+			}
+			if (m == NULL) {
+				control->tail_mbuf = prev;
+			}
+			continue;
+		}
+		prev = m;
+		if (control->on_read_q) {
+			/*
+			 * On read queue so we must increment the
+			 * SB stuff, we assume caller has done any locks of SB.
+			 */
+			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
+		}
+		*added += SCTP_BUF_LEN(m);
+		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
+		m = SCTP_BUF_NEXT(m);
+	}
+	if (prev) {
+		control->tail_mbuf = prev;
+	}
+}
+
+static void 
+sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
+{
+	memset(nc, 0, sizeof(struct sctp_queued_to_read));
+	nc->sinfo_stream = control->sinfo_stream;
+	nc->mid = control->mid;
+	TAILQ_INIT(&nc->reasm);
+	nc->top_fsn = control->top_fsn;
+	nc->mid = control->mid;
+	nc->sinfo_flags = control->sinfo_flags;
+	nc->sinfo_ppid = control->sinfo_ppid;
+	nc->sinfo_context = control->sinfo_context;
+	nc->fsn_included = 0xffffffff;
+	nc->sinfo_tsn = control->sinfo_tsn;
+	nc->sinfo_cumtsn = control->sinfo_cumtsn;
+	nc->sinfo_assoc_id = control->sinfo_assoc_id;
+	nc->whoFrom = control->whoFrom;
+	atomic_add_int(&nc->whoFrom->ref_count, 1);
+	nc->stcb = control->stcb;
+	nc->port_from = control->port_from;
+}
+
+static void 
+sctp_reset_a_control(struct sctp_queued_to_read *control,
+                     struct sctp_inpcb *inp, uint32_t tsn)
 {
-	struct sctp_tmit_chunk *chk;
-	uint32_t tsn;
-
-	*t_size = 0;
-	chk = TAILQ_FIRST(&asoc->reasmqueue);
-	if (chk == NULL) {
-		/* nothing on the queue */
+	control->fsn_included = tsn;
+	if (control->on_read_q) {
+		/* 
+		 * We have to purge it from there,
+		 * hopefully this will work :-)
+		 */
+		TAILQ_REMOVE(&inp->read_queue, control, next);
+		control->on_read_q = 0;
+	}
+}
+
+static int
+sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
+                               struct sctp_association *asoc,
+                               struct sctp_stream_in *strm,
+                               struct sctp_queued_to_read *control,
+                               uint32_t pd_point,
+                               int inp_read_lock_held)
+{
+	/* Special handling for the old un-ordered data chunk.
+	 * All the chunks/TSN's go to mid 0. So
+	 * we have to do the old style watching to see
+	 * if we have it all. If you return one, no other
+	 * control entries on the un-ordered queue will
+	 * be looked at. In theory there should be no others
+	 * entries in reality, unless the guy is sending both
+	 * unordered NDATA and unordered DATA...
+	 */
+	struct sctp_tmit_chunk *chk, *lchk, *tchk;
+	uint32_t fsn;
+	struct sctp_queued_to_read *nc;
+	int cnt_added;
+
+	if (control->first_frag_seen == 0) {
+		/* Nothing we can do, we have not seen the first piece yet */
+		return (1);
+	}
+	/* Collapse any we can */
+	cnt_added = 0;
+restart:
+	fsn = control->fsn_included + 1;
+	/* Now what can we add? */
+	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
+		if (chk->rec.data.fsn == fsn) {
+			/* Ok lets add it */
+			sctp_alloc_a_readq(stcb, nc);
+			if (nc == NULL) {
+				break;
+			}
+			memset(nc, 0, sizeof(struct sctp_queued_to_read));
+			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
+			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
+			fsn++;
+			cnt_added++;
+			chk = NULL;
+			if (control->end_added) {
+				/* We are done */
+				if (!TAILQ_EMPTY(&control->reasm)) {
+					/* 
+					 * Ok we have to move anything left on
+					 * the control queue to a new control.
+					 */
+					sctp_build_readq_entry_from_ctl(nc, control);
+					tchk = TAILQ_FIRST(&control->reasm);
+					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
+						if (asoc->size_on_reasm_queue >= tchk->send_size) {
+							asoc->size_on_reasm_queue -= tchk->send_size;
+						} else {
+#ifdef INVARIANTS
+						panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
+#else
+						asoc->size_on_reasm_queue = 0;
+#endif
+						}
+						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+						nc->first_frag_seen = 1;
+						nc->fsn_included = tchk->rec.data.fsn;
+						nc->data = tchk->data;
+						nc->sinfo_ppid = tchk->rec.data.ppid;
+						nc->sinfo_tsn = tchk->rec.data.tsn;
+						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
+						tchk->data = NULL;
+						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
+						sctp_setup_tail_pointer(nc);
+						tchk = TAILQ_FIRST(&control->reasm);
+					}
+					/* Spin the rest onto the queue */
+					while (tchk) {
+						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
+						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
+						tchk = TAILQ_FIRST(&control->reasm);
+					}
+					/* Now lets add it to the queue after removing control */
+					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
+					nc->on_strm_q = SCTP_ON_UNORDERED;
+					if (control->on_strm_q) {
+						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+						control->on_strm_q = 0;
+					}
+				}
+				if (control->pdapi_started) {
+					strm->pd_api_started = 0;
+					control->pdapi_started = 0;
+				}
+				if (control->on_strm_q) {
+					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+					control->on_strm_q = 0;
+					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+				}
+				if (control->on_read_q == 0) {
+					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+							  &stcb->sctp_socket->so_rcv, control->end_added,
+							  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+#if defined(__Userspace__)
+				} else {
+					sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
+#endif
+				}
+				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
+					/* Switch to the new guy and continue */
+					control = nc;
+					goto restart;
+				} else {
+					if (nc->on_strm_q == 0) {
+						sctp_free_a_readq(stcb, nc);
+					}
+				}
+				return (1);
+			} else {
+				sctp_free_a_readq(stcb, nc);
+			}
+		} else {
+			/* Can't add more */
+			break;
+		}
+	}
+	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
+		strm->pd_api_started = 1;
+		control->pdapi_started = 1;
+		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+		                  &stcb->sctp_socket->so_rcv, control->end_added,
+		                  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
 		return (0);
-	}
-	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
-		/* Not a first on the queue */
-		return (0);
-	}
-	tsn = chk->rec.data.TSN_seq;
-	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
-		if (tsn != chk->rec.data.TSN_seq) {
-			return (0);
-		}
-		*t_size += chk->send_size;
-		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
-			return (1);
-		}
-		tsn++;
-	}
-	return (0);
+	} else {
+		return (1);
+	}
 }
 
 static void
-sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
+sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
+                               struct sctp_association *asoc,
+                               struct sctp_queued_to_read *control,
+                               struct sctp_tmit_chunk *chk,
+                               int *abort_flag)
 {
-	struct sctp_tmit_chunk *chk;
-	uint16_t nxt_todel;
-	uint32_t tsize, pd_point;
-
- doit_again:
-	chk = TAILQ_FIRST(&asoc->reasmqueue);
-	if (chk == NULL) {
-		/* Huh? */
-		asoc->size_on_reasm_queue = 0;
-		asoc->cnt_on_reasm_queue = 0;
+	struct sctp_tmit_chunk *at;
+	int inserted;
+	/*
+	 * Here we need to place the chunk into the control structure
+	 * sorted in the correct order.
+	 */
+	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+		/* Its the very first one. */
+		SCTPDBG(SCTP_DEBUG_XXX,
+			"chunk is a first fsn: %u becomes fsn_included\n",
+			chk->rec.data.fsn);
+		if (control->first_frag_seen) {
+			/*
+			 * In old un-ordered we can reassembly on
+			 * one control multiple messages. As long
+			 * as the next FIRST is greater then the old
+			 * first (TSN i.e. FSN wise)
+			 */
+			struct mbuf *tdata;
+			uint32_t tmp;
+
+			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
+				/* Easy way the start of a new guy beyond the lowest */
+				goto place_chunk;
+			}
+			if ((chk->rec.data.fsn == control->fsn_included) ||
+			    (control->pdapi_started)) {
+				/* 
+				 * Ok this should not happen, if it does
+				 * we started the pd-api on the higher TSN (since
+				 * the equals part is a TSN failure it must be that).
+				 *
+				 * We are completly hosed in that case since I have
+				 * no way to recover. This really will only happen
+				 * if we can get more TSN's higher before the pd-api-point.
+				 */
+				sctp_abort_in_reasm(stcb, control, chk,
+						    abort_flag,
+						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
+
+				return;
+			}
+			/*
+			 * Ok we have two firsts and the one we just got
+			 * is smaller than the one we previously placed.. yuck!
+			 * We must swap them out.
+			 */
+			/* swap the mbufs */
+			tdata = control->data;
+			control->data = chk->data;
+			chk->data = tdata;
+			/* Save the lengths */
+			chk->send_size = control->length;
+			/* Recompute length of control and tail pointer */
+			sctp_setup_tail_pointer(control);
+			/* Fix the FSN included */
+			tmp = control->fsn_included;
+			control->fsn_included = chk->rec.data.fsn;
+			chk->rec.data.fsn = tmp;
+			/* Fix the TSN included */
+			tmp = control->sinfo_tsn;
+			control->sinfo_tsn = chk->rec.data.tsn;
+			chk->rec.data.tsn = tmp;
+			/* Fix the PPID included */
+			tmp = control->sinfo_ppid;
+			control->sinfo_ppid = chk->rec.data.ppid;
+			chk->rec.data.ppid = tmp;
+			/* Fix tail pointer */
+			goto place_chunk;
+		}
+		control->first_frag_seen = 1;
+		control->fsn_included = chk->rec.data.fsn;
+		control->top_fsn = chk->rec.data.fsn;
+		control->sinfo_tsn = chk->rec.data.tsn;
+		control->sinfo_ppid = chk->rec.data.ppid;
+		control->data = chk->data;
+		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
+		chk->data = NULL;
+		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+		sctp_setup_tail_pointer(control);
 		return;
 	}
-	if (asoc->fragmented_delivery_inprogress == 0) {
-		nxt_todel =
-		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
-		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
-		    (nxt_todel == chk->rec.data.stream_seq ||
-		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
+place_chunk:
+	inserted = 0;
+	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
+		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
 			/*
-			 * Yep the first one is here and its ok to deliver
-			 * but should we?
+			 * This one in queue is bigger than the new one, insert
+			 * the new one before at.
+			 */
+			asoc->size_on_reasm_queue += chk->send_size;
+			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+			inserted = 1;
+			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
+			break;
+		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
+			/* 
+			 * They sent a duplicate fsn number. This
+			 * really should not happen since the FSN is
+			 * a TSN and it should have been dropped earlier.
 			 */
-			if (stcb->sctp_socket) {
-				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
-				               stcb->sctp_ep->partial_delivery_point);
-			} else {
-				pd_point = stcb->sctp_ep->partial_delivery_point;
+			sctp_abort_in_reasm(stcb, control, chk,
+			                    abort_flag,
+			                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
+			return;
+		}
+
+	}
+	if (inserted == 0) {
+		/* Its at the end */
+		asoc->size_on_reasm_queue += chk->send_size;
+		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+		control->top_fsn = chk->rec.data.fsn;
+		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
+	}
+}
+
+static int
+sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                         struct sctp_stream_in *strm, int inp_read_lock_held)
+{
+	/*
+	 * Given a stream, strm, see if any of
+	 * the SSN's on it that are fragmented
+	 * are ready to deliver. If so go ahead
+	 * and place them on the read queue. In
+	 * so placing if we have hit the end, then
+	 * we need to remove them from the stream's queue.
+	 */
+	struct sctp_queued_to_read *control, *nctl = NULL;
+	uint32_t next_to_del;
+	uint32_t pd_point;
+	int ret = 0;
+
+	if (stcb->sctp_socket) {
+		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
+			       stcb->sctp_ep->partial_delivery_point);
+	} else {
+		pd_point = stcb->sctp_ep->partial_delivery_point;
+	}
+	control = TAILQ_FIRST(&strm->uno_inqueue);
+
+	if ((control != NULL) &&
+	    (asoc->idata_supported == 0)) {
+		/* Special handling needed for "old" data format */
+		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
+			goto done_un;
+		}
+	}
+	if (strm->pd_api_started) {
+		/* Can't add more */
+		return (0);
+	}
+	while (control) {
+		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
+			control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
+		nctl = TAILQ_NEXT(control, next_instrm);
+		if (control->end_added) {
+			/* We just put the last bit on */
+			if (control->on_strm_q) {
+#ifdef INVARIANTS
+				if (control->on_strm_q != SCTP_ON_UNORDERED ) {
+					panic("Huh control: %p on_q: %d -- not unordered?",
+					      control, control->on_strm_q);
+				}
+#endif
+				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+				control->on_strm_q = 0;
+			}
+			if (control->on_read_q == 0) {
+				sctp_add_to_readq(stcb->sctp_ep, stcb,
+						  control,
+						  &stcb->sctp_socket->so_rcv, control->end_added,
+						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+			}
+		} else {
+			/* Can we do a PD-API for this un-ordered guy? */
+			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
+				strm->pd_api_started = 1;
+				control->pdapi_started = 1;
+				sctp_add_to_readq(stcb->sctp_ep, stcb,
+						  control,
+						  &stcb->sctp_socket->so_rcv, control->end_added,
+						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+
+				break;
 			}
-			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
-				/*
-				 * Yes, we setup to start reception, by
-				 * backing down the TSN just in case we
-				 * can't deliver. If we
-				 */
-				asoc->fragmented_delivery_inprogress = 1;
-				asoc->tsn_last_delivered =
-				    chk->rec.data.TSN_seq - 1;
-				asoc->str_of_pdapi =
-				    chk->rec.data.stream_number;
-				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
-				asoc->pdapi_ppid = chk->rec.data.payloadtype;
-				asoc->fragment_flags = chk->rec.data.rcv_flags;
-				sctp_service_reassembly(stcb, asoc);
+		}
+		control = nctl;
+	}
+done_un:
+	control = TAILQ_FIRST(&strm->inqueue);
+	if (strm->pd_api_started) {
+		/* Can't add more */
+		return (0);
+	}
+	if (control == NULL) {
+		return (ret);
+	}
+	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
+		/* Ok the guy at the top was being partially delivered
+		 * completed, so we remove it. Note
+		 * the pd_api flag was taken off when the
+		 * chunk was merged on in sctp_queue_data_for_reasm below.
+		 */
+		nctl = TAILQ_NEXT(control, next_instrm);
+		SCTPDBG(SCTP_DEBUG_XXX,
+			"Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
+			control, control->end_added, control->mid,
+			control->top_fsn, control->fsn_included,
+			strm->last_mid_delivered);
+		if (control->end_added) {
+			if (control->on_strm_q) {
+#ifdef INVARIANTS
+				if (control->on_strm_q != SCTP_ON_ORDERED ) {
+					panic("Huh control: %p on_q: %d -- not ordered?",
+					      control, control->on_strm_q);
+				}
+#endif
+				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+				if (asoc->size_on_all_streams >= control->length) {
+					asoc->size_on_all_streams -= control->length;
+				} else {
+#ifdef INVARIANTS
+					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+					asoc->size_on_all_streams = 0;
+#endif
+				}
+				sctp_ucount_decr(asoc->cnt_on_all_streams);
+				control->on_strm_q = 0;
+			}
+			if (strm->pd_api_started && control->pdapi_started) {
+				control->pdapi_started = 0;
+				strm->pd_api_started = 0;
+			}
+			if (control->on_read_q == 0) {
+				sctp_add_to_readq(stcb->sctp_ep, stcb,
+						  control,
+						  &stcb->sctp_socket->so_rcv, control->end_added,
+						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+			}
+			control = nctl;
+		}
+	}
+	if (strm->pd_api_started) {
+		/* Can't add more must have gotten an un-ordered above being partially delivered. */
+		return (0);
+	}
+deliver_more:
+	next_to_del = strm->last_mid_delivered + 1;
+	if (control) {
+		SCTPDBG(SCTP_DEBUG_XXX,
+			"Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
+			control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
+			next_to_del);
+		nctl = TAILQ_NEXT(control, next_instrm);
+		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
+		    (control->first_frag_seen)) {
+			int done;
+
+			/* Ok we can deliver it onto the stream. */
+			if (control->end_added) {
+				/* We are done with it afterwards */
+				if (control->on_strm_q) {
+#ifdef INVARIANTS
+					if (control->on_strm_q != SCTP_ON_ORDERED ) {
+						panic("Huh control: %p on_q: %d -- not ordered?",
+						      control, control->on_strm_q);
+					}
+#endif
+					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+					if (asoc->size_on_all_streams >= control->length) {
+						asoc->size_on_all_streams -= control->length;
+					} else {
+#ifdef INVARIANTS
+						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+						asoc->size_on_all_streams = 0;
+#endif
+					}
+					sctp_ucount_decr(asoc->cnt_on_all_streams);
+					control->on_strm_q = 0;
+				}
+				ret++;
 			}
-		}
-	} else {
-		/* Service re-assembly will deliver stream data queued
-		 * at the end of fragmented delivery.. but it wont know
-		 * to go back and call itself again... we do that here
-		 * with the got doit_again
+			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+				/* A singleton now slipping through - mark it non-revokable too */
+				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+			} else if (control->end_added == 0) {
+				/* Check if we can defer adding until its all there */
+				if ((control->length < pd_point) || (strm->pd_api_started)) {
+					/* Don't need it or cannot add more (one being delivered that way) */
+					goto out;
+				}
+			}
+			done = (control->end_added) && (control->last_frag_seen);
+			if (control->on_read_q == 0) {
+				if (!done) {
+					if (asoc->size_on_all_streams >= control->length) {
+						asoc->size_on_all_streams -= control->length;
+					} else {
+#ifdef INVARIANTS
+						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
+#else
+						asoc->size_on_all_streams = 0;
+#endif
+					}
+					strm->pd_api_started = 1;
+					control->pdapi_started = 1;
+				}
+				sctp_add_to_readq(stcb->sctp_ep, stcb,
+						  control,
+						  &stcb->sctp_socket->so_rcv, control->end_added,
+						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+			}
+			strm->last_mid_delivered = next_to_del;
+			if (done) {
+				control = nctl;
+				goto deliver_more;
+			}
+		}
+	}
+out:
+	return (ret);
+}
+
+
+uint32_t
+sctp_add_chk_to_control(struct sctp_queued_to_read *control,
+			struct sctp_stream_in *strm,
+			struct sctp_tcb *stcb, struct sctp_association *asoc,
+			struct sctp_tmit_chunk *chk, int hold_rlock)
+{
+	/*
+	 * Given a control and a chunk, merge the
+	 * data from the chk onto the control and free
+	 * up the chunk resources.
+	 */
+	uint32_t added=0;
+	int i_locked = 0;
+
+	if (control->on_read_q && (hold_rlock == 0)) {
+		/*
+		 * Its being pd-api'd so we must
+		 * do some locks.
 		 */
-		sctp_service_reassembly(stcb, asoc);
-		if (asoc->fragmented_delivery_inprogress == 0) {
-			/* finished our Fragmented delivery, could be
-			 * more waiting?
-			 */
-			goto doit_again;
-		}
-	}
+		SCTP_INP_READ_LOCK(stcb->sctp_ep);
+		i_locked = 1;
+	}
+	if (control->data == NULL) {
+		control->data = chk->data;
+		sctp_setup_tail_pointer(control);
+	} else {
+		sctp_add_to_tail_pointer(control, chk->data, &added);
+	}
+	control->fsn_included = chk->rec.data.fsn;
+	asoc->size_on_reasm_queue -= chk->send_size;
+	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
+	chk->data = NULL;
+	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+		control->first_frag_seen = 1;
+		control->sinfo_tsn = chk->rec.data.tsn;
+		control->sinfo_ppid = chk->rec.data.ppid;
+	}
+	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+		/* Its complete */
+		if ((control->on_strm_q) && (control->on_read_q)) {
+			if (control->pdapi_started) {
+				control->pdapi_started = 0;
+				strm->pd_api_started = 0;
+			}
+			if (control->on_strm_q == SCTP_ON_UNORDERED) {
+				/* Unordered */
+				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+				control->on_strm_q = 0;
+			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
+				/* Ordered */
+				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+				/*
+				 * Don't need to decrement size_on_all_streams,
+				 * since control is on the read queue.
+				 */
+				sctp_ucount_decr(asoc->cnt_on_all_streams);
+				control->on_strm_q = 0;
+#ifdef INVARIANTS
+			} else if (control->on_strm_q) {
+				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
+				      control->on_strm_q);
+#endif
+			}
+		}
+		control->end_added = 1;
+		control->last_frag_seen = 1;
+	}
+	if (i_locked) {
+		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+	}
+	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+	return (added);
 }
 
 /*
  * Dump onto the re-assembly queue, in its proper place. After dumping on the
  * queue, see if anthing can be delivered. If so pull it off (or as much as
  * we can. If we run out of space then we must dump what we can and set the
  * appropriate flag to say we queued what we could.
  */
 static void
 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
-    struct sctp_tmit_chunk *chk, int *abort_flag)
+			  struct sctp_queued_to_read *control,
+			  struct sctp_tmit_chunk *chk,
+			  int created_control,
+			  int *abort_flag, uint32_t tsn)
 {
-	struct mbuf *op_err;
-	char msg[SCTP_DIAG_INFO_LEN];
-	uint32_t cum_ackp1, prev_tsn, post_tsn;
-	struct sctp_tmit_chunk *at, *prev, *next;
-
-	prev = next = NULL;
-	cum_ackp1 = asoc->tsn_last_delivered + 1;
-	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
-		/* This is the first one on the queue */
-		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
-		/*
-		 * we do not check for delivery of anything when only one
-		 * fragment is here
-		 */
-		asoc->size_on_reasm_queue = chk->send_size;
-		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
-		if (chk->rec.data.TSN_seq == cum_ackp1) {
-			if (asoc->fragmented_delivery_inprogress == 0 &&
-			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
-			    SCTP_DATA_FIRST_FRAG) {
-				/*
-				 * An empty queue, no delivery inprogress,
-				 * we hit the next one and it does NOT have
-				 * a FIRST fragment mark.
-				 */
-				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
-				snprintf(msg, sizeof(msg),
-				         "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-				         chk->rec.data.TSN_seq,
-				         chk->rec.data.stream_number,
-				         chk->rec.data.stream_seq);
-				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_2;
-				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-				*abort_flag = 1;
-			} else if (asoc->fragmented_delivery_inprogress &&
-			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
-				/*
-				 * We are doing a partial delivery and the
-				 * NEXT chunk MUST be either the LAST or
-				 * MIDDLE fragment NOT a FIRST
-				 */
-				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
-				snprintf(msg, sizeof(msg),
-				         "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-				         chk->rec.data.TSN_seq,
-				         chk->rec.data.stream_number,
-				         chk->rec.data.stream_seq);
-				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_3;
-				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-				*abort_flag = 1;
-			} else if (asoc->fragmented_delivery_inprogress) {
-				/*
-				 * Here we are ok with a MIDDLE or LAST
-				 * piece
-				 */
-				if (chk->rec.data.stream_number !=
-				    asoc->str_of_pdapi) {
-					/* Got to be the right STR No */
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
-						chk->rec.data.stream_number,
-						asoc->str_of_pdapi);
-					snprintf(msg, sizeof(msg),
-					         "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-					         asoc->str_of_pdapi,
-					         chk->rec.data.TSN_seq,
-					         chk->rec.data.stream_number,
-					         chk->rec.data.stream_seq);
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_4;
-					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-					*abort_flag = 1;
-				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
-					    SCTP_DATA_UNORDERED &&
-					    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
-					/* Got to be the right STR Seq */
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
-						chk->rec.data.stream_seq,
-						asoc->ssn_of_pdapi);
-					snprintf(msg, sizeof(msg),
-					         "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-					         asoc->ssn_of_pdapi,
-					         chk->rec.data.TSN_seq,
-					         chk->rec.data.stream_number,
-					         chk->rec.data.stream_seq);
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_5;
-					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-					*abort_flag = 1;
-				}
-			}
-		}
-		return;
-	}
-	/* Find its place */
-	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
-		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
-			/*
-			 * one in queue is bigger than the new one, insert
-			 * before this one
+	uint32_t next_fsn;
+	struct sctp_tmit_chunk *at, *nat;
+	struct sctp_stream_in *strm;
+	int do_wakeup, unordered;
+	uint32_t lenadded;
+
+	strm = &asoc->strmin[control->sinfo_stream];
+	/*
+	 * For old un-ordered data chunks.
+	 */
+	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
+		unordered = 1;
+	} else {
+		unordered = 0;
+	}
+	/* Must be added to the stream-in queue */
+	if (created_control) {
+		if (unordered == 0) {
+			sctp_ucount_incr(asoc->cnt_on_all_streams);
+		}
+		if (sctp_place_control_in_stream(strm, asoc, control)) {
+			/* Duplicate SSN? */
+			sctp_abort_in_reasm(stcb, control, chk,
+					    abort_flag,
+					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
+			sctp_clean_up_control(stcb, control);
+			return;
+		}
+		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
+			/* Ok we created this control and now
+			 * lets validate that its legal i.e. there
+			 * is a B bit set, if not and we have
+			 * up to the cum-ack then its invalid.
 			 */
-			/* A check */
-			asoc->size_on_reasm_queue += chk->send_size;
-			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
-			next = at;
-			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
-			break;
-		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
-			/* Gak, He sent me a duplicate str seq number */
-			/*
-			 * foo bar, I guess I will just free this new guy,
-			 * should we abort too? FIX ME MAYBE? Or it COULD be
-			 * that the SSN's have wrapped. Maybe I should
-			 * compare to TSN somehow... sigh for now just blow
-			 * away the chunk!
-			 */
-			if (chk->data) {
-				sctp_m_freem(chk->data);
-				chk->data = NULL;
+			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
+				sctp_abort_in_reasm(stcb, control, chk,
+				                    abort_flag,
+				                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
+				return;
 			}
-			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+		}
+	}
+	if ((asoc->idata_supported == 0) && (unordered == 1)) {
+		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
+		return;
+	}
+	/*
+	 * Ok we must queue the chunk into the reasembly portion:
+	 *  o if its the first it goes to the control mbuf.
+	 *  o if its not first but the next in sequence it goes to the control,
+	 *    and each succeeding one in order also goes.
+	 *  o if its not in order we place it on the list in its place.
+	 */
+	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+		/* Its the very first one. */
+		SCTPDBG(SCTP_DEBUG_XXX,
+			"chunk is a first fsn: %u becomes fsn_included\n",
+			chk->rec.data.fsn);
+		if (control->first_frag_seen) {
+			/*
+			 * Error on senders part, they either
+			 * sent us two data chunks with FIRST,
+			 * or they sent two un-ordered chunks that
+			 * were fragmented at the same time in the same stream.
+			 */
+			sctp_abort_in_reasm(stcb, control, chk,
+			                    abort_flag,
+			                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
 			return;
-		} else {
-			prev = at;
-			if (TAILQ_NEXT(at, sctp_next) == NULL) {
-				/*
-				 * We are at the end, insert it after this
-				 * one
-				 */
-				/* check it first */
-				asoc->size_on_reasm_queue += chk->send_size;
-				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
-				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
-				break;
+		}
+		control->first_frag_seen = 1;
+		control->sinfo_ppid = chk->rec.data.ppid;
+		control->sinfo_tsn = chk->rec.data.tsn;
+		control->fsn_included = chk->rec.data.fsn;
+		control->data = chk->data;
+		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
+		chk->data = NULL;
+		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+		sctp_setup_tail_pointer(control);
+		asoc->size_on_all_streams += control->length;
+	} else {
+		/* Place the chunk in our list */
+		int inserted=0;
+		if (control->last_frag_seen == 0) {
+			/* Still willing to raise highest FSN seen */
+			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
+				SCTPDBG(SCTP_DEBUG_XXX,
+					"We have a new top_fsn: %u\n",
+					chk->rec.data.fsn);
+				control->top_fsn = chk->rec.data.fsn;
 			}
-		}
-	}
-	/* Now the audits */
-	if (prev) {
-		prev_tsn = chk->rec.data.TSN_seq - 1;
-		if (prev_tsn == prev->rec.data.TSN_seq) {
-			/*
-			 * Ok the one I am dropping onto the end is the
-			 * NEXT. A bit of valdiation here.
-			 */
-			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
-			    SCTP_DATA_FIRST_FRAG ||
-			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
-			    SCTP_DATA_MIDDLE_FRAG) {
-				/*
-				 * Insert chk MUST be a MIDDLE or LAST
-				 * fragment
+			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+				SCTPDBG(SCTP_DEBUG_XXX,
+					"The last fsn is now in place fsn: %u\n",
+					chk->rec.data.fsn);
+				control->last_frag_seen = 1;
+			}
+			if (asoc->idata_supported || control->first_frag_seen) {
+				/* 
+				 * For IDATA we always check since we know that
+				 * the first fragment is 0. For old DATA we have
+				 * to receive the first before we know the first FSN
+				 * (which is the TSN).
 				 */
-				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
-				    SCTP_DATA_FIRST_FRAG) {
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
-					snprintf(msg, sizeof(msg),
-					         "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-					         chk->rec.data.TSN_seq,
-					         chk->rec.data.stream_number,
-					         chk->rec.data.stream_seq);
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_6;
-					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-					*abort_flag = 1;
-					return;
-				}
-				if (chk->rec.data.stream_number !=
-				    prev->rec.data.stream_number) {
-					/*
-					 * Huh, need the correct STR here,
-					 * they must be the same.
-					 */
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
-					        chk->rec.data.stream_number,
-					        prev->rec.data.stream_number);
-					snprintf(msg, sizeof(msg),
-					         "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-					         prev->rec.data.stream_number,
-					         chk->rec.data.TSN_seq,
-					         chk->rec.data.stream_number,
-					         chk->rec.data.stream_seq);
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_7;
-					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-					*abort_flag = 1;
-					return;
-				}
-				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
-				    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
-					/*
-					 * Huh, need the same ordering here,
-					 * they must be the same.
-					 */
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
-					snprintf(msg, sizeof(msg),
-					         "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
-					         (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
-					         chk->rec.data.TSN_seq,
-					         (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_7;
-					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-					*abort_flag = 1;
-					return;
-				}
-				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
-				    chk->rec.data.stream_seq !=
-				    prev->rec.data.stream_seq) {
-					/*
-					 * Huh, need the correct STR here,
-					 * they must be the same.
-					 */
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
-						chk->rec.data.stream_seq,
-						prev->rec.data.stream_seq);
-					snprintf(msg, sizeof(msg),
-					         "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-					         prev->rec.data.stream_seq,
-					         chk->rec.data.TSN_seq,
-					         chk->rec.data.stream_number,
-					         chk->rec.data.stream_seq);
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_8;
-					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-					*abort_flag = 1;
-					return;
-				}
-			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
-			    SCTP_DATA_LAST_FRAG) {
-				/* Insert chk MUST be a FIRST */
-				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
-				    SCTP_DATA_FIRST_FRAG) {
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
-					snprintf(msg, sizeof(msg),
-					         "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-					         chk->rec.data.TSN_seq,
-					         chk->rec.data.stream_number,
-					         chk->rec.data.stream_seq);
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_9;
-					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-					*abort_flag = 1;
+				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
+					/* We have already delivered up to this so its a dup */
+					sctp_abort_in_reasm(stcb, control, chk,
+							    abort_flag,
+							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
 					return;
 				}
 			}
-		}
-	}
-	if (next) {
-		post_tsn = chk->rec.data.TSN_seq + 1;
-		if (post_tsn == next->rec.data.TSN_seq) {
-			/*
-			 * Ok the one I am inserting ahead of is my NEXT
-			 * one. A bit of valdiation here.
-			 */
-			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
-				/* Insert chk MUST be a last fragment */
-				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
-				    != SCTP_DATA_LAST_FRAG) {
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
-					snprintf(msg, sizeof(msg),
-					         "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-					         chk->rec.data.TSN_seq,
-					         chk->rec.data.stream_number,
-					         chk->rec.data.stream_seq);
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_10;
-					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-					*abort_flag = 1;
-					return;
-				}
-			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
-				    SCTP_DATA_MIDDLE_FRAG ||
-				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
-			    SCTP_DATA_LAST_FRAG) {
-				/*
-				 * Insert chk CAN be MIDDLE or FIRST NOT
-				 * LAST
+		} else {
+			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+				/* Second last? huh? */
+				SCTPDBG(SCTP_DEBUG_XXX,
+					"Duplicate last fsn: %u (top: %u) -- abort\n",
+					chk->rec.data.fsn, control->top_fsn);
+				sctp_abort_in_reasm(stcb, control,
+						    chk, abort_flag,
+						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
+				return;
+			}
+			if (asoc->idata_supported || control->first_frag_seen) {
+				/* 
+				 * For IDATA we always check since we know that
+				 * the first fragment is 0. For old DATA we have
+				 * to receive the first before we know the first FSN
+				 * (which is the TSN).
 				 */
-				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
-				    SCTP_DATA_LAST_FRAG) {
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
-					snprintf(msg, sizeof(msg),
-					         "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-					         chk->rec.data.TSN_seq,
-					         chk->rec.data.stream_number,
-					         chk->rec.data.stream_seq);
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_11;
-					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-					*abort_flag = 1;
-					return;
-				}
-				if (chk->rec.data.stream_number !=
-				    next->rec.data.stream_number) {
-					/*
-					 * Huh, need the correct STR here,
-					 * they must be the same.
-					 */
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
-						chk->rec.data.stream_number,
-						next->rec.data.stream_number);
-					snprintf(msg, sizeof(msg),
-					         "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-					         next->rec.data.stream_number,
-					         chk->rec.data.TSN_seq,
-					         chk->rec.data.stream_number,
-					         chk->rec.data.stream_seq);
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_12;
-					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-					*abort_flag = 1;
-					return;
-				}
-				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
-				    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
-					/*
-					 * Huh, need the same ordering here,
-					 * they must be the same.
-					 */
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
-					snprintf(msg, sizeof(msg),
-					         "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
-					         (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
-					         chk->rec.data.TSN_seq,
-					         (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_12;
-					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-					*abort_flag = 1;
-					return;
-				}
-				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
-				    chk->rec.data.stream_seq !=
-				    next->rec.data.stream_seq) {
-					/*
-					 * Huh, need the correct STR here,
-					 * they must be the same.
-					 */
-					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
-						chk->rec.data.stream_seq,
-						next->rec.data.stream_seq);
-					snprintf(msg, sizeof(msg),
-					         "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-					         next->rec.data.stream_seq,
-					         chk->rec.data.TSN_seq,
-					         chk->rec.data.stream_number,
-					         chk->rec.data.stream_seq);
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_13;
-					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-					*abort_flag = 1;
+
+				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
+					/* We have already delivered up to this so its a dup */
+					SCTPDBG(SCTP_DEBUG_XXX,
+						"New fsn: %u is already seen in included_fsn: %u -- abort\n",
+						chk->rec.data.fsn, control->fsn_included);
+					sctp_abort_in_reasm(stcb, control, chk,
+							    abort_flag,
+							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
 					return;
 				}
 			}
-		}
-	}
-	/* Do we need to do some delivery? check */
-	sctp_deliver_reasm_check(stcb, asoc);
-}
-
-/*
- * This is an unfortunate routine. It checks to make sure a evil guy is not
- * stuffing us full of bad packet fragments. A broken peer could also do this
- * but this is doubtful. It is to bad I must worry about evil crackers sigh
- * :< more cycles.
- */
-static int
-sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
-    uint32_t TSN_seq)
-{
-	struct sctp_tmit_chunk *at;
-	uint32_t tsn_est;
-
-	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
-		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
-			/* is it one bigger? */
-			tsn_est = at->rec.data.TSN_seq + 1;
-			if (tsn_est == TSN_seq) {
-				/* yep. It better be a last then */
-				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
-				    SCTP_DATA_LAST_FRAG) {
-					/*
-					 * Ok this guy belongs next to a guy
-					 * that is NOT last, it should be a
-					 * middle/last, not a complete
-					 * chunk.
-					 */
-					return (1);
+			/* validate not beyond top FSN if we have seen last one */
+			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
+				SCTPDBG(SCTP_DEBUG_XXX,
+					"New fsn: %u is beyond or at top_fsn: %u -- abort\n",
+					chk->rec.data.fsn,
+					control->top_fsn);
+				sctp_abort_in_reasm(stcb, control, chk,
+						    abort_flag,
+						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
+				return;
+			}
+		}
+		/*
+		 * If we reach here, we need to place the
+		 * new chunk in the reassembly for this 
+		 * control.
+		 */
+		SCTPDBG(SCTP_DEBUG_XXX,
+			"chunk is a not first fsn: %u needs to be inserted\n",
+			chk->rec.data.fsn);
+		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
+			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
+				/*
+				 * This one in queue is bigger than the new one, insert
+				 * the new one before at.
+				 */
+				SCTPDBG(SCTP_DEBUG_XXX,
+					"Insert it before fsn: %u\n",
+					at->rec.data.fsn);
+				asoc->size_on_reasm_queue += chk->send_size;
+				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
+				inserted = 1;
+				break;
+			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
+				/* Gak, He sent me a duplicate str seq number */
+				/*
+				 * foo bar, I guess I will just free this new guy,
+				 * should we abort too? FIX ME MAYBE? Or it COULD be
+				 * that the SSN's have wrapped. Maybe I should
+				 * compare to TSN somehow... sigh for now just blow
+				 * away the chunk!
+				 */
+				SCTPDBG(SCTP_DEBUG_XXX,
+					"Duplicate to fsn: %u -- abort\n",
+					at->rec.data.fsn);
+				sctp_abort_in_reasm(stcb, control,
+						    chk, abort_flag,
+						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
+				return;
+			}
+		}
+		if (inserted == 0) {
+			/* Goes on the end */
+			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
+				chk->rec.data.fsn);
+			asoc->size_on_reasm_queue += chk->send_size;
+			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
+		}
+	}
+	/*
+	 * Ok lets see if we can suck any up into the control
+	 * structure that are in seq if it makes sense.
+	 */
+	do_wakeup = 0;
+	/*
+	 * If the first fragment has not been
+	 * seen there is no sense in looking.
+	 */
+	if (control->first_frag_seen) {
+		next_fsn = control->fsn_included + 1;
+		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
+			if (at->rec.data.fsn == next_fsn) {
+				/* We can add this one now to the control */
+				SCTPDBG(SCTP_DEBUG_XXX,
+					"Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
+					control, at,
+					at->rec.data.fsn,
+					next_fsn, control->fsn_included);
+				TAILQ_REMOVE(&control->reasm, at, sctp_next);
+				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
+				if (control->on_read_q) {
+					do_wakeup = 1;
 				} else {
 					/*
-					 * This guy is ok since its a LAST
-					 * and the new chunk is a fully
-					 * self- contained one.
+					 * We only add to the size-on-all-streams
+					 * if its not on the read q. The read q
+					 * flag will cause a sballoc so its accounted
+					 * for there.
 					 */
-					return (0);
+					asoc->size_on_all_streams += lenadded;
+				}
+				next_fsn++;
+				if (control->end_added && control->pdapi_started) {
+					if (strm->pd_api_started) {
+						strm->pd_api_started = 0;
+						control->pdapi_started = 0;
+					}
+					if (control->on_read_q == 0) {
+						sctp_add_to_readq(stcb->sctp_ep, stcb,
+								  control,
+								  &stcb->sctp_socket->so_rcv, control->end_added,
+								  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+					}
+					break;
+				}
+			} else {
+				break;
+			}
+		}
+	}
+	if (do_wakeup) {
+#if defined(__Userspace__)
+		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
+#endif
+		/* Need to wakeup the reader */
+		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+	}
+}
+
+static struct sctp_queued_to_read *
+sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
+{
+	struct sctp_queued_to_read *control;
+
+	if (ordered) {
+		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
+			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
+				break;
+			}
+		}
+	} else {
+		if (idata_supported) {
+			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
+				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
+					break;
 				}
 			}
-		} else if (TSN_seq == at->rec.data.TSN_seq) {
-			/* Software error since I have a dup? */
-			return (1);
 		} else {
-			/*
-			 * Ok, 'at' is larger than new chunk but does it
-			 * need to be right before it.
-			 */
-			tsn_est = TSN_seq + 1;
-			if (tsn_est == at->rec.data.TSN_seq) {
-				/* Yep, It better be a first */
-				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
-				    SCTP_DATA_FIRST_FRAG) {
-					return (1);
-				} else {
-					return (0);
-				}
-			}
-		}
-	}
-	return (0);
+			control = TAILQ_FIRST(&strm->uno_inqueue);
+		}
+	}
+	return (control);
 }
 
 static int
 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
-    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
-    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
-    int *break_flag, int last_chunk)
+			  struct mbuf **m, int offset,  int chk_length,
+			  struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
+			  int *break_flag, int last_chunk, uint8_t chk_type)
 {
 	/* Process a data chunk */
 	/* struct sctp_tmit_chunk *chk; */
 	struct sctp_tmit_chunk *chk;
-	uint32_t tsn, gap;
+	uint32_t tsn, fsn, gap, mid;
 	struct mbuf *dmbuf;
 	int the_len;
 	int need_reasm_check = 0;
-	uint16_t strmno, strmseq;
+	uint16_t sid;
 	struct mbuf *op_err;
 	char msg[SCTP_DIAG_INFO_LEN];
-	struct sctp_queued_to_read *control;
-	int ordered;
-	uint32_t protocol_id;
-	uint8_t chunk_flags;
+	struct sctp_queued_to_read *control, *ncontrol;
+	uint32_t ppid;
+	uint8_t chk_flags;
 	struct sctp_stream_reset_list *liste;
-
-	chk = NULL;
-	tsn = ntohl(ch->dp.tsn);
-	chunk_flags = ch->ch.chunk_flags;
-	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
+	int ordered;
+	size_t clen;
+	int created_control = 0;
+
+	if (chk_type == SCTP_IDATA) {
+		struct sctp_idata_chunk *chunk, chunk_buf;
+
+		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
+		                                                 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
+		chk_flags = chunk->ch.chunk_flags;
+		clen = sizeof(struct sctp_idata_chunk);
+		tsn = ntohl(chunk->dp.tsn);
+		sid = ntohs(chunk->dp.sid);
+		mid = ntohl(chunk->dp.mid);
+		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
+			fsn = 0;
+			ppid = chunk->dp.ppid_fsn.ppid;
+		} else {
+			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
+			ppid = 0xffffffff; /* Use as an invalid value. */
+		}
+	} else {
+		struct sctp_data_chunk *chunk, chunk_buf;
+
+		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
+		                                                sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
+		chk_flags = chunk->ch.chunk_flags;
+		clen = sizeof(struct sctp_data_chunk);
+		tsn = ntohl(chunk->dp.tsn);
+		sid = ntohs(chunk->dp.sid);
+		mid = (uint32_t)(ntohs(chunk->dp.ssn));
+		fsn = tsn;
+		ppid = chunk->dp.ppid;
+	}
+	if ((size_t)chk_length == clen) {
+		/*
+		 * Need to send an abort since we had a
+		 * empty data chunk.
+		 */
+		op_err = sctp_generate_no_user_data_cause(tsn);
+		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
+		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+		*abort_flag = 1;
+		return (0);
+	}
+	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
 		asoc->send_sack = 1;
 	}
-	protocol_id = ch->dp.protocol_id;
-	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
+	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
 	}
 	if (stcb == NULL) {
 		return (0);
 	}
-	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
+	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
 		/* It is a duplicate */
 		SCTP_STAT_INCR(sctps_recvdupdata);
 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
 			/* Record a dup for the next outbound sack */
 			asoc->dup_tsns[asoc->numduptsns] = tsn;
 			asoc->numduptsns++;
 		}
@@ -1365,16 +1787,119 @@ sctp_process_a_data_chunk(struct sctp_tc
 		return (0);
 	}
 	/*
 	 * Now before going further we see if there is room. If NOT then we
 	 * MAY let one through only IF this TSN is the one we are waiting
 	 * for on a partial delivery API.
 	 */
 
+	/* Is the stream valid? */
+	if (sid >= asoc->streamincnt) {
+		struct sctp_error_invalid_stream *cause;
+
+		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
+		                               0, M_NOWAIT, 1, MT_DATA);
+		if (op_err != NULL) {
+			/* add some space up front so prepend will work well */
+			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+			cause = mtod(op_err, struct sctp_error_invalid_stream *);
+			/*
+			 * Error causes are just param's and this one has
+			 * two back to back phdr, one with the error type
+			 * and size, the other with the streamid and a rsvd
+			 */
+			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
+			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
+			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
+			cause->stream_id = htons(sid);
+			cause->reserved = htons(0);
+			sctp_queue_op_err(stcb, op_err);
+		}
+		SCTP_STAT_INCR(sctps_badsid);
+		SCTP_TCB_LOCK_ASSERT(stcb);
+		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
+			asoc->highest_tsn_inside_nr_map = tsn;
+		}
+		if (tsn == (asoc->cumulative_tsn + 1)) {
+			/* Update cum-ack */
+			asoc->cumulative_tsn = tsn;
+		}
+		return (0);
+	}
+	/*
+	 * If its a fragmented message, lets see if we can
+	 * find the control on the reassembly queues.
+	 */
+	if ((chk_type == SCTP_IDATA) &&
+	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
+	    (fsn == 0)) {
+		/* 
+		 *  The first *must* be fsn 0, and other 
+		 *  (middle/end) pieces can *not* be fsn 0.
+		 * XXX: This can happen in case of a wrap around.
+		 *      Ignore is for now.
+		 */
+		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
+		         mid, chk_flags);
+		goto err_out;
+	}
+	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
+	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
+		chk_flags, control);
+	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
+		/* See if we can find the re-assembly entity */
+		if (control != NULL) {
+			/* We found something, does it belong? */
+			if (ordered && (mid != control->mid)) {
+				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
+			err_out:
+				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
+				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+				*abort_flag = 1;
+				return (0);
+			}
+			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
+				/* We can't have a switched order with an unordered chunk */
+				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
+					 tsn);
+				goto err_out;
+			}
+			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
+				/* We can't have a switched unordered with a ordered chunk */
+				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
+					 tsn);
+				goto err_out;
+			}
+		}
+	} else {
+		/* Its a complete segment. Lets validate we
+		 * don't have a re-assembly going on with
+		 * the same Stream/Seq (for ordered) or in
+		 * the same Stream for unordered.
+		 */
+		if (control != NULL) {
+			if (ordered || asoc->idata_supported) {
+				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
+					chk_flags, mid);
+				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
+				goto err_out;
+			} else {
+				if ((tsn == control->fsn_included + 1) &&
+				    (control->end_added == 0)) {
+					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
+					goto err_out;
+				} else {
+					control = NULL;
+				}
+			}
+		}
+	}
 	/* now do the tests */
 	if (((asoc->cnt_on_all_streams +
 	      asoc->cnt_on_reasm_queue +
 	      asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
 	    (((int)asoc->my_rwnd) <= 0)) {
 		/*
 		 * When we have NO room in the rwnd we check to make sure
 		 * the reader is doing its job...
@@ -1397,132 +1922,121 @@ sctp_process_a_data_chunk(struct sctp_tc
 			}
 #endif
 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
 			SCTP_SOCKET_UNLOCK(so, 1);
 #endif
 		}
 		/* now is it in the mapping array of what we have accepted? */
-		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
-		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
-			/* Nope not in the valid range dump it */
-			sctp_set_rwnd(stcb, asoc);
-			if ((asoc->cnt_on_all_streams +
-			     asoc->cnt_on_reasm_queue +
-			     asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
-				SCTP_STAT_INCR(sctps_datadropchklmt);
-			} else {
-				SCTP_STAT_INCR(sctps_datadroprwnd);
+		if (chk_type == SCTP_DATA) {
+			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
+			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
+				/* Nope not in the valid range dump it */
+			dump_packet:
+				sctp_set_rwnd(stcb, asoc);
+				if ((asoc->cnt_on_all_streams +
+				     asoc->cnt_on_reasm_queue +
+				     asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
+					SCTP_STAT_INCR(sctps_datadropchklmt);
+				} else {
+					SCTP_STAT_INCR(sctps_datadroprwnd);
+				}
+				*break_flag = 1;
+				return (0);
 			}
-			*break_flag = 1;
-			return (0);
-		}
-	}
-	strmno = ntohs(ch->dp.stream_id);
-	if (strmno >= asoc->streamincnt) {
-		struct sctp_paramhdr *phdr;
-		struct mbuf *mb;
-
-		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
-					   0, M_NOWAIT, 1, MT_DATA);
-		if (mb != NULL) {
-			/* add some space up front so prepend will work well */
-			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
-			phdr = mtod(mb, struct sctp_paramhdr *);
-			/*
-			 * Error causes are just param's and this one has
-			 * two back to back phdr, one with the error type
-			 * and size, the other with the streamid and a rsvd
-			 */
-			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
-			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
-			phdr->param_length =
-			    htons(sizeof(struct sctp_paramhdr) * 2);
-			phdr++;
-			/* We insert the stream in the type field */
-			phdr->param_type = ch->dp.stream_id;
-			/* And set the length to 0 for the rsvd field */
-			phdr->param_length = 0;
-			sctp_queue_op_err(stcb, mb);
-		}
-		SCTP_STAT_INCR(sctps_badsid);
-		SCTP_TCB_LOCK_ASSERT(stcb);
-		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
-		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
-			asoc->highest_tsn_inside_nr_map = tsn;
-		}
-		if (tsn == (asoc->cumulative_tsn + 1)) {
-			/* Update cum-ack */
-			asoc->cumulative_tsn = tsn;
-		}
-		return (0);
-	}
-	/*
-	 * Before we continue lets validate that we are not being fooled by
-	 * an evil attacker. We can only have 4k chunks based on our TSN
-	 * spread allowed by the mapping array 512 * 8 bits, so there is no
-	 * way our stream sequence numbers could have wrapped. We of course
-	 * only validate the FIRST fragment so the bit must be set.
-	 */
-	strmseq = ntohs(ch->dp.stream_sequence);
+		} else {
+			if (control == NULL) {
+				goto dump_packet;
+			}
+			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
+				goto dump_packet;
+			}
+		}
+	}
 #ifdef SCTP_ASOCLOG_OF_TSNS
 	SCTP_TCB_LOCK_ASSERT(stcb);
 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
 		asoc->tsn_in_at = 0;
 		asoc->tsn_in_wrapped = 1;
 	}
 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
-	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
-	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
+	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
+	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
 	asoc->tsn_in_at++;
 #endif
-	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
+	/*
+	 * Before we continue lets validate that we are not being fooled by
+	 * an evil attacker. We can only have Nk chunks based on our TSN
+	 * spread allowed by the mapping array N * 8 bits, so there is no
+	 * way our stream sequence numbers could have wrapped. We of course
+	 * only validate the FIRST fragment so the bit must be set.
+	 */
+	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
-	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
-	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
+	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
+	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
 		/* The incoming sseq is behind where we last delivered? */
-		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
-			strmseq, asoc->strmin[strmno].last_sequence_delivered);
-
-		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-		         asoc->strmin[strmno].last_sequence_delivered,
-		         tsn, strmno, strmseq);
+		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
+			mid, asoc->strmin[sid].last_mid_delivered);
+
+		if (asoc->idata_supported) {
+			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
+			         asoc->strmin[sid].last_mid_delivered,
+			         tsn,
+			         sid,
+			         mid);
+		} else {
+			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
+			         (uint16_t)asoc->strmin[sid].last_mid_delivered,
+			         tsn,
+			         sid,
+			         (uint16_t)mid);
+		}
 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_14;
+		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
 		*abort_flag = 1;
 		return (0);
 	}
-	/************************************
-	 * From here down we may find ch-> invalid
-	 * so its a good idea NOT to use it.
-	 *************************************/
-
-	the_len = (chk_length - sizeof(struct sctp_data_chunk));
+	if (chk_type == SCTP_IDATA) {
+		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
+	} else {
+		the_len = (chk_length - sizeof(struct sctp_data_chunk));
+	}
 	if (last_chunk == 0) {
-		dmbuf = SCTP_M_COPYM(*m,
-				     (offset + sizeof(struct sctp_data_chunk)),
-				     the_len, M_NOWAIT);
+		if (chk_type == SCTP_IDATA) {
+			dmbuf = SCTP_M_COPYM(*m,
+					     (offset + sizeof(struct sctp_idata_chunk)),
+					     the_len, M_NOWAIT);
+		} else {
+			dmbuf = SCTP_M_COPYM(*m,
+					     (offset + sizeof(struct sctp_data_chunk)),
+					     the_len, M_NOWAIT);
+		}
 #ifdef SCTP_MBUF_LOGGING
 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
 		}
 #endif
 	} else {
 		/* We can steal the last chunk */
 		int l_len;
 		dmbuf = *m;
 		/* lop off the top part */
-		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
+		if (chk_type == SCTP_IDATA) {
+			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
+		} else {
+			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
+		}
 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
 			l_len = SCTP_BUF_LEN(dmbuf);
 		} else {
 			/* need to count up the size hopefully
 			 * does not hit this to often :-0
 			 */
 			struct mbuf *lat;
 
@@ -1535,418 +2049,293 @@ sctp_process_a_data_chunk(struct sctp_tc
 			/* Trim the end round bytes off  too */
 			m_adj(dmbuf, -(l_len - the_len));
 		}
 	}
 	if (dmbuf == NULL) {
 		SCTP_STAT_INCR(sctps_nomem);
 		return (0);
 	}
-	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
-	    asoc->fragmented_delivery_inprogress == 0 &&
+	/*
+	 * Now no matter what, we need a control, get one
+	 * if we don't have one (we may have gotten it
+	 * above when we found the message was fragmented).
+	 */
+	if (control == NULL) {
+		sctp_alloc_a_readq(stcb, control);
+		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
+					   ppid,
+					   sid,
+					   chk_flags,
+					   NULL, fsn, mid);
+		if (control == NULL) {
+			SCTP_STAT_INCR(sctps_nomem);
+			return (0);
+		}
+		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+			struct mbuf *mm;
+
+			control->data = dmbuf;
+			for (mm = control->data; mm; mm = mm->m_next) {
+				control->length += SCTP_BUF_LEN(mm);
+			}
+			control->tail_mbuf = NULL;
+			control->end_added = 1;
+			control->last_frag_seen = 1;
+			control->first_frag_seen = 1;
+			control->fsn_included = fsn;
+			control->top_fsn = fsn;
+		}
+		created_control = 1;
+	}
+	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
+		chk_flags, ordered, mid, control);
+	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
 	    TAILQ_EMPTY(&asoc->resetHead) &&
 	    ((ordered == 0) ||
-	    ((uint16_t)(asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
-	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
+	     (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
+	      TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
 		/* Candidate for express delivery */
 		/*
 		 * Its not fragmented, No PD-API is up, Nothing in the
 		 * delivery queue, Its un-ordered OR ordered and the next to
 		 * deliver AND nothing else is stuck on the stream queue,
 		 * And there is room for it in the socket buffer. Lets just
 		 * stuff it up the buffer....
 		 */
-
-		/* It would be nice to avoid this copy if we could :< */
-		sctp_alloc_a_readq(stcb, control);
-		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
-					   protocol_id,
-					   strmno, strmseq,
-					   chunk_flags,
-					   dmbuf);
-		if (control == NULL) {
-			goto failed_express_del;
-		}
 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
 			asoc->highest_tsn_inside_nr_map = tsn;
 		}
+		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
+			control, mid);
+
 		sctp_add_to_readq(stcb->sctp_ep, stcb,
 		                  control, &stcb->sctp_socket->so_rcv,
 		                  1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
 
-		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
+		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
 			/* for ordered, bump what we delivered */
-			asoc->strmin[strmno].last_sequence_delivered++;
+			asoc->strmin[sid].last_mid_delivered++;
 		}
 		SCTP_STAT_INCR(sctps_recvexpress);
 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
-			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
+			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
 					      SCTP_STR_LOG_FROM_EXPRS_DEL);
 		}
 		control = NULL;
-
 		goto finish_express_del;
 	}
-failed_express_del:
-	/* If we reach here this is a new chunk */
-	chk = NULL;
-	control = NULL;
-	/* Express for fragmented delivery? */
-	if ((asoc->fragmented_delivery_inprogress) &&
-	    (stcb->asoc.control_pdapi) &&
-	    (asoc->str_of_pdapi == strmno) &&
-	    (asoc->ssn_of_pdapi == strmseq)
-		) {
-		control = stcb->asoc.control_pdapi;
-		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
-			/* Can't be another first? */
-			goto failed_pdapi_express_del;
-		}
-		if (tsn == (control->sinfo_tsn + 1)) {
-			/* Yep, we can add it on */
-			int end = 0;
-
-			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
-				end = 1;
-			}
-			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
-			                         tsn,
-			                         &stcb->sctp_socket->so_rcv)) {
-				SCTP_PRINTF("Append fails end:%d\n", end);
-				goto failed_pdapi_express_del;
-			}
-
-			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
-			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
-				asoc->highest_tsn_inside_nr_map = tsn;
-			}
-			SCTP_STAT_INCR(sctps_recvexpressm);
-			asoc->tsn_last_delivered = tsn;
-			asoc->fragment_flags = chunk_flags;
-			asoc->tsn_of_pdapi_last_delivered = tsn;
-			asoc->last_flags_delivered = chunk_flags;
-			asoc->last_strm_seq_delivered = strmseq;
-			asoc->last_strm_no_delivered = strmno;
-			if (end) {
-				/* clean up the flags and such */
-				asoc->fragmented_delivery_inprogress = 0;
-				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
-					asoc->strmin[strmno].last_sequence_delivered++;
-				}
-				stcb->asoc.control_pdapi = NULL;
-				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
-					/* There could be another message ready */
-					need_reasm_check = 1;
-				}
-			}
-			control = NULL;
-			goto finish_express_del;
-		}
-	}
- failed_pdapi_express_del:
-	control = NULL;
-	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
-		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
-		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
-			asoc->highest_tsn_inside_nr_map = tsn;
-		}
-	} else {
-		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
-		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
-			asoc->highest_tsn_inside_map = tsn;
-		}
-	}
-	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
+
+	/* Now will we need a chunk too? */
+	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
 		sctp_alloc_a_chunk(stcb, chk);
 		if (chk == NULL) {
 			/* No memory so we drop the chunk */
 			SCTP_STAT_INCR(sctps_nomem);
 			if (last_chunk == 0) {
 				/* we copied it, free the copy */
 				sctp_m_freem(dmbuf);
 			}
 			return (0);
 		}
-		chk->rec.data.TSN_seq = tsn;
+		chk->rec.data.tsn = tsn;
 		chk->no_fr_allowed = 0;
-		chk->rec.data.stream_seq = strmseq;
-		chk->rec.data.stream_number = strmno;
-		chk->rec.data.payloadtype = protocol_id;
+		chk->rec.data.fsn = fsn;
+		chk->rec.data.mid = mid;
+		chk->rec.data.sid = sid;
+		chk->rec.data.ppid = ppid;
 		chk->rec.data.context = stcb->asoc.context;
 		chk->rec.data.doing_fast_retransmit = 0;
-		chk->rec.data.rcv_flags = chunk_flags;
+		chk->rec.data.rcv_flags = chk_flags;
 		chk->asoc = asoc;
 		chk->send_size = the_len;
 		chk->whoTo = net;
+		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
+			chk,
+			control, mid);
 		atomic_add_int(&net->ref_count, 1);
 		chk->data = dmbuf;
+	}
+	/* Set the appropriate TSN mark */
+	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
+			asoc->highest_tsn_inside_nr_map = tsn;
+		}
 	} else {
-		sctp_alloc_a_readq(stcb, control);
-		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
-		    protocol_id,
-		    strmno, strmseq,
-		    chunk_flags,
-		    dmbuf);
-		if (control == NULL) {
-			/* No memory so we drop the chunk */
-			SCTP_STAT_INCR(sctps_nomem);
-			if (last_chunk == 0) {
-				/* we copied it, free the copy */
-				sctp_m_freem(dmbuf);
-			}
-			return (0);
-		}
-		control->length = the_len;
-	}
-
-	/* Mark it as received */
-	/* Now queue it where it belongs */
-	if (control != NULL) {
-		/* First a sanity check */
-		if (asoc->fragmented_delivery_inprogress) {
+		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
+		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
+			asoc->highest_tsn_inside_map = tsn;
+		}
+	}
+	/* Now is it complete (i.e. not fragmented)? */
+	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+		/*
+		 * Special check for when streams are resetting. We
+		 * could be more smart about this and check the
+		 * actual stream to see if it is not being reset..
+		 * that way we would not create a HOLB when amongst
+		 * streams being reset and those not being reset.
+		 *
+		 */
+		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
+		    SCTP_TSN_GT(tsn, liste->tsn)) {
 			/*
-			 * Ok, we have a fragmented delivery in progress if
-			 * this chunk is next to deliver OR belongs in our
-			 * view to the reassembly, the peer is evil or
-			 * broken.
+			 * yep its past where we need to reset... go
+			 * ahead and queue it.
 			 */
-			uint32_t estimate_tsn;
-
-			estimate_tsn = asoc->tsn_last_delivered + 1;
-			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
-			    (estimate_tsn == control->sinfo_tsn)) {
-				/* Evil/Broke peer */
-				sctp_m_freem(control->data);
-				control->data = NULL;
-				if (control->whoFrom) {
-					sctp_free_remote_addr(control->whoFrom);
-					control->whoFrom = NULL;
+			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
+				/* first one on */
+				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
+			} else {
+				struct sctp_queued_to_read *lcontrol, *nlcontrol;
+				unsigned char inserted = 0;
+				TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
+					if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
+
+						continue;
+					} else {
+						/* found it */
+						TAILQ_INSERT_BEFORE(lcontrol, control, next);
+						inserted = 1;
+						break;
+					}
 				}
-				sctp_free_a_readq(stcb, control);
-				snprintf(msg, sizeof(msg), "Reas. queue emtpy, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-				         tsn, strmno, strmseq);
-				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_15;
-				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-				*abort_flag = 1;
+				if (inserted == 0) {
+					/*
+					 * Not inserted before any existing
+					 * entry, so it must be put at
+					 * the end of the queue.
+					 */
+					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
+				}
+			}
+			goto finish_express_del;
+		}
+		if (chk_flags & SCTP_DATA_UNORDERED) {
+			/* queue directly into socket buffer */
+			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
+				control, mid);
+			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+			sctp_add_to_readq(stcb->sctp_ep, stcb,
+			                  control,
+			                  &stcb->sctp_socket->so_rcv, 1,
+			                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+
+		} else {
+			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
+				mid);
+			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
+			if (*abort_flag) {
 				if (last_chunk) {
 					*m = NULL;
 				}
 				return (0);
-			} else {
-				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
-					sctp_m_freem(control->data);
-					control->data = NULL;
-					if (control->whoFrom) {
-						sctp_free_remote_addr(control->whoFrom);
-						control->whoFrom = NULL;
-					}
-					sctp_free_a_readq(stcb, control);
-					snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-					         tsn, strmno, strmseq);
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_16;
-					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-					*abort_flag = 1;
-					if (last_chunk) {
-						*m = NULL;
-					}
-					return (0);
-				}
 			}
-		} else {
-			/* No PDAPI running */
-			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
-				/*
-				 * Reassembly queue is NOT empty validate
-				 * that this tsn does not need to be in
-				 * reasembly queue. If it does then our peer
-				 * is broken or evil.
-				 */
-				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
-					sctp_m_freem(control->data);
-					control->data = NULL;
-					if (control->whoFrom) {
-						sctp_free_remote_addr(control->whoFrom);
-						control->whoFrom = NULL;
-					}
-					sctp_free_a_readq(stcb, control);
-					snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
-					         tsn, strmno, strmseq);
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_17;
-					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
-					*abort_flag = 1;
-					if (last_chunk) {
-						*m = NULL;
-					}
-					return (0);
-				}
-			}
-		}
-		/* ok, if we reach here we have passed the sanity checks */
-		if (chunk_flags & SCTP_DATA_UNORDERED) {
-			/* queue directly into socket buffer */
-			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
-			sctp_add_to_readq(stcb->sctp_ep, stcb,
-			                  control,
-			                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
-		} else {
-			/*
-			 * Special check for when streams are resetting. We
-			 * could be more smart about this and check the
-			 * actual stream to see if it is not being reset..
-			 * that way we would not create a HOLB when amongst
-			 * streams being reset and those not being reset.
-			 *
-			 * We take complete messages that have a stream reset
-			 * intervening (aka the TSN is after where our
-			 * cum-ack needs to be) off and put them on a
-			 * pending_reply_queue. The reassembly ones we do
-			 * not have to worry about since they are all sorted
-			 * and proceessed by TSN order. It is only the
-			 * singletons I must worry about.
-			 */
-			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
-			    SCTP_TSN_GT(tsn, liste->tsn)) {
-				/*
-				 * yep its past where we need to reset... go
-				 * ahead and queue it.
-				 */
-				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
-					/* first one on */
-					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
-				} else {
-					struct sctp_queued_to_read *ctlOn, *nctlOn;
-					unsigned char inserted = 0;
-
-					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
-						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
-							continue;
-						} else {
-							/* found it */
-							TAILQ_INSERT_BEFORE(ctlOn, control, next);
-							inserted = 1;
-							break;
-						}
-					}
-					if (inserted == 0) {
-						/*
-						 * must be put at end, use
-						 * prevP (all setup from
-						 * loop) to setup nextP.
-						 */
-						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
-					}
-				}
-			} else {
-				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
-				if (*abort_flag) {
-					if (last_chunk) {
-						*m = NULL;
-					}
-					return (0);
-				}
-			}
-		}
-	} else {
-		/* Into the re-assembly queue */
-		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
-		if (*abort_flag) {
-			/*
-			 * the assoc is now gone and chk was put onto the
-			 * reasm queue, which has all been freed.
-			 */
-			if (last_chunk) {
-				*m = NULL;
-			}
-			return (0);
-		}
+		}
+		goto finish_express_del;
+	}
+	/* If we reach here its a reassembly */
+	need_reasm_check = 1;
+	SCTPDBG(SCTP_DEBUG_XXX,
+		"Queue data to stream for reasm control: %p MID: %u\n",
+		control, mid);
+	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
+	if (*abort_flag) {
+		/*
+		 * the assoc is now gone and chk was put onto the
+		 * reasm queue, which has all been freed.
+		 */
+		if (last_chunk) {
+			*m = NULL;
+		}
+		return (0);
 	}
 finish_express_del:
+	/* Here we tidy up things */
 	if (tsn == (asoc->cumulative_tsn + 1)) {
 		/* Update cum-ack */
 		asoc->cumulative_tsn = tsn;
 	}
 	if (last_chunk) {
 		*m = NULL;
 	}
 	if (ordered) {
 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
 	} else {
 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
 	}
 	SCTP_STAT_INCR(sctps_recvdata);
 	/* Set it present please */
 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
-		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
+		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
 	}
 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
 			     asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
 	}
+	if (need_reasm_check) {
+		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
+		need_reasm_check = 0;
+	}
 	/* check the special flag for stream resets */
 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
 		/*
 		 * we have finished working through the backlogged TSN's now
 		 * time to reset streams. 1: call reset function. 2: free
 		 * pending_reply space 3: distribute any chunks in
 		 * pending_reply_queue.
 		 */
-		struct sctp_queued_to_read *ctl, *nctl;
-
 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
+		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
 		SCTP_FREE(liste, SCTP_M_STRESET);
 		/*sa_ignore FREED_MEMORY*/
 		liste = TAILQ_FIRST(&asoc->resetHead);
 		if (TAILQ_EMPTY(&asoc->resetHead)) {
 			/* All can be removed */
-			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
-				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
-				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
+			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
+				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
+				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
 				if (*abort_flag) {
 					return (0);
 				}
+				if (need_reasm_check) {
+					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
+					need_reasm_check = 0;
+				}
 			}
 		} else {
-			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
-				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
+			TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
+				if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
 					break;
 				}
 				/*
-				 * if ctl->sinfo_tsn is <= liste->tsn we can
+				 * if control->sinfo_tsn is <= liste->tsn we can
 				 * process it which is the NOT of
-				 * ctl->sinfo_tsn > liste->tsn
+				 * control->sinfo_tsn > liste->tsn
 				 */
-				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
-				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
+				TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
+				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
 				if (*abort_flag) {
 					return (0);
 				}
+				if (need_reasm_check) {
+					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
+					need_reasm_check = 0;
+				}
 			}
 		}
-		/*
-		 * Now service re-assembly to pick up anything that has been
-		 * held on reassembly queue?
-		 */
-		sctp_deliver_reasm_check(stcb, asoc);
-		need_reasm_check = 0;
-	}
-
-	if (need_reasm_check) {
-		/* Another one waits ? */
-		sctp_deliver_reasm_check(stcb, asoc);
 	}
 	return (1);
 }
 
-int8_t sctp_map_lookup_tab[256] = {
+static const int8_t sctp_map_lookup_tab[256] = {
   0, 1, 0, 2, 0, 1, 0, 3,
   0, 1, 0, 2, 0, 1, 0, 4,
   0, 1, 0, 2, 0, 1, 0, 3,
   0, 1, 0, 2, 0, 1, 0, 5,
   0, 1, 0, 2, 0, 1, 0, 3,
   0, 1, 0, 2, 0, 1, 0, 4,
   0, 1, 0, 2, 0, 1, 0, 3,
   0, 1, 0, 2, 0, 1, 0, 6,
@@ -1980,17 +2369,17 @@ int8_t sctp_map_lookup_tab[256] = {
 void
 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
 {
 	/*
 	 * Now we also need to check the mapping array in a couple of ways.
 	 * 1) Did we move the cum-ack point?
 	 *
 	 * When you first glance at this you might think
-	 * that all entries that make up the postion
+	 * that all entries that make up the position
 	 * of the cum-ack would be in the nr-mapping array
 	 * only.. i.e. things up to the cum-ack are always
 	 * deliverable. Thats true with one exception, when
 	 * its a fragmented message we may not deliver the data
 	 * until some threshold (or all of it) is in place. So
 	 * we must OR the nr_mapping_array and mapping_array to
 	 * get a true picture of the cum-ack.
 	 */
@@ -2078,26 +2467,26 @@ sctp_slide_mapping_arrays(struct sctp_tc
 		 */
 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
 		slide_end = (lgap >> 3);
 		if (slide_end < slide_from) {
 			sctp_print_mapping_array(asoc);
 #ifdef INVARIANTS
 			panic("impossible slide");
 #else
-			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
+			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
 			            lgap, slide_end, slide_from, at);
 			return;
 #endif
 		}
 		if (slide_end > asoc->mapping_array_size) {
 #ifdef INVARIANTS
 			panic("would overrun buffer");
 #else
-			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
+			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
 			            asoc->mapping_array_size, slide_end);
 			slide_end = asoc->mapping_array_size;
 #endif
 		}
 		distance = (slide_end - slide_from) + 1;
 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
 			sctp_log_map(old_base, old_cumack, old_highest,
 				     SCTP_MAP_PREPARE_SLIDE);
@@ -2146,47 +2535,49 @@ sctp_slide_mapping_arrays(struct sctp_tc
 	}
 }
 
 void
 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
 {
 	struct sctp_association *asoc;
 	uint32_t highest_tsn;
-
+	int is_a_gap;
+
+	sctp_slide_mapping_arrays(stcb);
 	asoc = &stcb->asoc;
 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
 		highest_tsn = asoc->highest_tsn_inside_nr_map;
 	} else {
 		highest_tsn = asoc->highest_tsn_inside_map;
 	}
+	/* Is there a gap now? */
+	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
 
 	/*
 	 * Now we need to see if we need to queue a sack or just start the
 	 * timer (if allowed).
 	 */
 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
 		/*
 		 * Ok special case, in SHUTDOWN-SENT case. here we
 		 * maker sure SACK timer is off and instead send a
 		 * SHUTDOWN and a SACK
 		 */
 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
-			                stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA+SCTP_LOC_18);
+			                stcb->sctp_ep, stcb, NULL,
+			                SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
 		}
 		sctp_send_shutdown(stcb,
-				   ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
-		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
+		                   ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
+		if (is_a_gap) {
+			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
+		}
 	} else {
-		int is_a_gap;
-
-		/* is there a gap now ? */
-		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
-
 		/*
 		 * CMT DAC algorithm: increase number of packets
 		 * received since last ack
 		 */
 		stcb->asoc.cmt_dac_pkts_rcvd++;
 
 		if ((stcb->asoc.send_sack == 1) ||      /* We need to send a SACK */
 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
@@ -2232,95 +2623,30 @@ sctp_sack_check(struct sctp_tcb *stcb, i
 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
 				                 stcb->sctp_ep, stcb, NULL);
 			}
 		}
 	}
 }
 
-void
-sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
-{
-	struct sctp_tmit_chunk *chk;
-	uint32_t tsize, pd_point;
-	uint16_t nxt_todel;
-
-	if (asoc->fragmented_delivery_inprogress) {
-		sctp_service_reassembly(stcb, asoc);
-	}
-	/* Can we proceed further, i.e. the PD-API is complete */
-	if (asoc->fragmented_delivery_inprogress) {
-		/* no */
-		return;
-	}
-	/*
-	 * Now is there some other chunk I can deliver from the reassembly
-	 * queue.
-	 */
- doit_again:
-	chk = TAILQ_FIRST(&asoc->reasmqueue);
-	if (chk == NULL) {
-		asoc->size_on_reasm_queue = 0;
-		asoc->cnt_on_reasm_queue = 0;
-		return;
-	}
-	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
-	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
-	    ((nxt_todel == chk->rec.data.stream_seq) ||
-	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
-		/*
-		 * Yep the first one is here. We setup to start reception,
-		 * by backing down the TSN just in case we can't deliver.
-		 */
-
-		/*
-		 * Before we start though either all of the message should
-		 * be here or the socket buffer max or nothing on the
-		 * delivery queue and something can be delivered.
-		 */
-		if (stcb->sctp_socket) {
-			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
-				       stcb->sctp_ep->partial_delivery_point);
-		} else {
-			pd_point = stcb->sctp_ep->partial_delivery_point;
-		}
-		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
-			asoc->fragmented_delivery_inprogress = 1;
-			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
-			asoc->str_of_pdapi = chk->rec.data.stream_number;
-			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
-			asoc->pdapi_ppid = chk->rec.data.payloadtype;
-			asoc->fragment_flags = chk->rec.data.rcv_flags;
-			sctp_service_reassembly(stcb, asoc);
-			if (asoc->fragmented_delivery_inprogress == 0) {
-				goto doit_again;
-			}
-		}
-	}
-}
-
 int
 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
-                  struct sockaddr *src, struct sockaddr *dst,
-                  struct sctphdr *sh, struct sctp_inpcb *inp,
-                  struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t *high_tsn,
-#if defined(__FreeBSD__)
-                  uint8_t mflowtype, uint32_t mflowid,
-#endif
-		  uint32_t vrf_id, uint16_t port)
+                  struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+                  struct sctp_nets *net, uint32_t *high_tsn)
 {
-	struct sctp_data_chunk *ch, chunk_buf;
+	struct sctp_chunkhdr *ch, chunk_buf;
 	struct sctp_association *asoc;
 	int num_chunks = 0;	/* number of control chunks processed */
 	int stop_proc = 0;
-	int chk_length, break_flag, last_chunk;
+	int break_flag, last_chunk;
 	int abort_flag = 0, was_a_gap;
 	struct mbuf *m;
 	uint32_t highest_tsn;
+	uint16_t chk_length;
 
 	/* set the rwnd */
 	sctp_set_rwnd(stcb, &stcb->asoc);
 
 	m = *mm;
 	SCTP_TCB_LOCK_ASSERT(stcb);
 	asoc = &stcb->asoc;
 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
@@ -2353,109 +2679,119 @@ sctp_process_data(struct mbuf **mm, int 
 			caddr_t *from, *to;
 			/* get the pointers and copy */
 			to = mtod(m, caddr_t *);
 			from = mtod((*mm), caddr_t *);
 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
 			/* copy the length and free up the old */
 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
 			sctp_m_freem(*mm);
-			/* sucess, back copy */
+			/* success, back copy */
 			*mm = m;
 		} else {
 			/* We are in trouble in the mbuf world .. yikes */
 			m = *mm;
 		}
 	}
 #endif
 	/* get pointer to the first chunk header */
-	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
-						     sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
+	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+	                                           sizeof(struct sctp_chunkhdr),
+	                                           (uint8_t *)&chunk_buf);
 	if (ch == NULL) {
 		return (1);
 	}
 	/*
 	 * process all DATA chunks...
 	 */
 	*high_tsn = asoc->cumulative_tsn;
 	break_flag = 0;
 	asoc->data_pkts_seen++;
 	while (stop_proc == 0) {
 		/* validate chunk length */
-		chk_length = ntohs(ch->ch.chunk_length);
+		chk_length = ntohs(ch->chunk_length);
 		if (length - *offset < chk_length) {
 			/* all done, mutulated chunk */
 			stop_proc = 1;
 			continue;
 		}
-		if (ch->ch.chunk_type == SCTP_DATA) {
-			if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
+		if ((asoc->idata_supported == 1) &&
+		    (ch->chunk_type == SCTP_DATA)) {
+			struct mbuf *op_err;
+			char msg[SCTP_DIAG_INFO_LEN];
+
+			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
+			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
+			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+			return (2);
+		}
+		if ((asoc->idata_supported == 0) &&
+		    (ch->chunk_type == SCTP_IDATA)) {
+			struct mbuf *op_err;
+			char msg[SCTP_DIAG_INFO_LEN];
+
+			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
+			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
+			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+			return (2);
+		}
+		if ((ch->chunk_type == SCTP_DATA) ||
+		    (ch->chunk_type == SCTP_IDATA)) {
+			uint16_t clen;
+
+			if (ch->chunk_type == SCTP_DATA) {
+				clen = sizeof(struct sctp_data_chunk);
+			} else {
+				clen = sizeof(struct sctp_idata_chunk);
+			}
+			if (chk_length < clen) {
 				/*
 				 * Need to send an abort since we had a
 				 * invalid data chunk.
 				 */
 				struct mbuf *op_err;
 				char msg[SCTP_DIAG_INFO_LEN];
 
-				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
+				snprintf(msg, sizeof(msg), "%s chunk of length %u",
+				         ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
 				         chk_length);
 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
-				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_19;
-				sctp_abort_association(inp, stcb, m, iphlen,
-				                       src, dst, sh, op_err,
-#if defined(__FreeBSD__)
-				                       mflowtype, mflowid,
-#endif
-				                       vrf_id, port);
-				return (2);
-			}
-			if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
-				/*
-				 * Need to send an abort since we had an
-				 * empty data chunk.
-				 */
-				struct mbuf *op_err;
-
-				op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
-				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_19;
-				sctp_abort_association(inp, stcb, m, iphlen,
-				                       src, dst, sh, op_err,
-#if defined(__FreeBSD__)
-				                       mflowtype, mflowid,
-#endif
-				                       vrf_id, port);
+				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
+				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
 				return (2);
 			}
 #ifdef SCTP_AUDITING_ENABLED
 			sctp_audit_log(0xB1, 0);
 #endif
 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
 				last_chunk = 1;
 			} else {
 				last_chunk = 0;
 			}
-			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
+			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, 
 						      chk_length, net, high_tsn, &abort_flag, &break_flag,
-						      last_chunk)) {
+						      last_chunk, ch->chunk_type)) {
 				num_chunks++;
 			}
 			if (abort_flag)
 				return (2);
 
 			if (break_flag) {
 				/*
 				 * Set because of out of rwnd space and no
 				 * drop rep space left.
 				 */
 				stop_proc = 1;
 				continue;
 			}
 		} else {
 			/* not a data chunk in the data region */
-			switch (ch->ch.chunk_type) {
+			switch (ch->chunk_type) {
 			case SCTP_INITIATION:
 			case SCTP_INITIATION_ACK:
 			case SCTP_SELECTIVE_ACK:
 			case SCTP_NR_SELECTIVE_ACK:
 			case SCTP_HEARTBEAT_REQUEST:
 			case SCTP_HEARTBEAT_ACK:
 			case SCTP_ABORT_ASSOCIATION:
 			case SCTP_SHUTDOWN:
@@ -2467,91 +2803,92 @@ sctp_process_data(struct mbuf **mm, int 
 			case SCTP_ECN_CWR:
 			case SCTP_SHUTDOWN_COMPLETE:
 			case SCTP_AUTHENTICATION:
 			case SCTP_ASCONF_ACK:
 			case SCTP_PACKET_DROPPED:
 			case SCTP_STREAM_RESET:
 			case SCTP_FORWARD_CUM_TSN:
 			case SCTP_ASCONF:
+			{
 				/*
 				 * Now, what do we do with KNOWN chunks that
 				 * are NOT in the right place?
 				 *
 				 * For now, I do nothing but ignore them. We
 				 * may later want to add sysctl stuff to
 				 * switch out and do either an ABORT() or
 				 * possibly process them.
 				 */
-				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
+				struct mbuf *op_err;
+				char msg[SCTP_DIAG_INFO_LEN];
+
+				snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
+				         ch->chunk_type);
+				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+				return (2);
+			}
+			default:
+				/*
+				 * Unknown chunk type: use bit rules after
+				 * checking length
+				 */
+				if (chk_length < sizeof(struct sctp_chunkhdr)) {
+					/*
+					 * Need to send an abort since we had a
+					 * invalid chunk.
+					 */
 					struct mbuf *op_err;
-
-					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
-					sctp_abort_association(inp, stcb,
-					                       m, iphlen,
-					                       src, dst,
-					                       sh, op_err,
-#if defined(__FreeBSD__)
-					                       mflowtype, mflowid,
-#endif
-					                       vrf_id, port);
+					char msg[SCTP_DIAG_INFO_LEN];
+
+					snprintf(msg, sizeof(msg), "Chunk of length %u",
+						 chk_length);
+					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
+					sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
 					return (2);
 				}
-				break;
-			default:
-				/* unknown chunk type, use bit rules */
-				if (ch->ch.chunk_type & 0x40) {
+				if (ch->chunk_type & 0x40) {
 					/* Add a error report to the queue */
-					struct mbuf *merr;
-					struct sctp_paramhdr *phd;
-
-					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
-					if (merr) {
-						phd = mtod(merr, struct sctp_paramhdr *);
-						/*
-						 * We cheat and use param
-						 * type since we did not
-						 * bother to define a error
-						 * cause struct. They are
-						 * the same basic format
-						 * with different names.
-						 */
-						phd->param_type =
-							htons(SCTP_CAUSE_UNRECOG_CHUNK);
-						phd->param_length =
-							htons(chk_length + sizeof(*phd));
-						SCTP_BUF_LEN(merr) = sizeof(*phd);
-						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
-						if (SCTP_BUF_NEXT(merr)) {
-							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL) == NULL) {
-								sctp_m_freem(merr);
-							} else {
-								sctp_queue_op_err(stcb, merr);
-							}
+					struct mbuf *op_err;
+					struct sctp_gen_error_cause *cause;
+
+					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
+					                               0, M_NOWAIT, 1, MT_DATA);
+					if (op_err != NULL) {
+						cause  = mtod(op_err, struct sctp_gen_error_cause *);
+						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
+						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
+						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
+						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
+						if (SCTP_BUF_NEXT(op_err) != NULL) {
+							sctp_queue_op_err(stcb, op_err);
 						} else {
-							sctp_m_freem(merr);
+							sctp_m_freem(op_err);
 						}
 					}
 				}
-				if ((ch->ch.chunk_type & 0x80) == 0) {
+				if ((ch->chunk_type & 0x80) == 0) {
 					/* discard the rest of this packet */
 					stop_proc = 1;
 				}	/* else skip this bad chunk and
 					 * continue... */
 				break;
 			}	/* switch of chunk type */
 		}
 		*offset += SCTP_SIZE32(chk_length);
 		if ((*offset >= length) || stop_proc) {
 			/* no more data left in the mbuf chain */
 			stop_proc = 1;
 			continue;
 		}
-		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
-							     sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
+		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+		                                           sizeof(struct sctp_chunkhdr),
+		                                           (uint8_t *)&chunk_buf);
 		if (ch == NULL) {
 			*offset = length;
 			stop_proc = 1;
 			continue;
 		}
 	}
 	if (break_flag) {
 		/*
@@ -2571,19 +2908,16 @@ sctp_process_data(struct mbuf **mm, int 
 				       0,
 				       SCTP_FROM_SCTP_INDATA,
 				       __LINE__);
 		}
 		stcb->asoc.overall_error_count = 0;
 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
 	}
 	/* now service all of the reassm queue if needed */
-	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
-		sctp_service_queues(stcb, asoc);
-
 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
 		/* Assure that we ack right away */
 		stcb->asoc.send_sack = 1;
 	}
 	/* Start a sack timer or QUEUE a SACK for sending */
 	sctp_sack_check(stcb, was_a_gap);
 	return (0);
 }
@@ -2617,110 +2951,110 @@ sctp_process_segment_range(struct sctp_t
 			 * next expected pseudo-cumack, or
 			 * rtx_pseudo_cumack, if required. Separate
 			 * cumack trackers for first transmissions,
 			 * and retransmissions.
 			 */
 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
 			    (tp1->snd_count == 1)) {
-				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
+				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
 				tp1->whoTo->find_pseudo_cumack = 0;
 			}
 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
 			    (tp1->snd_count > 1)) {
-				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
+				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
 			}
-			if (tp1->rec.data.TSN_seq == theTSN) {
+			if (tp1->rec.data.tsn == theTSN) {
 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
 					/*-
 					 * must be held until
 					 * cum-ack passes
 					 */
 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
 						/*-
 						 * If it is less than RESEND, it is
 						 * now no-longer in flight.
 						 * Higher values may already be set
 						 * via previous Gap Ack Blocks...
 						 * i.e. ACKED or RESEND.
 						 */
-						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
+						if (SCTP_TSN_GT(tp1->rec.data.tsn,
 						                *biggest_newly_acked_tsn)) {
-							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
+							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
 						}
 						/*-
 						 * CMT: SFR algo (and HTNA) - set
 						 * saw_newack to 1 for dest being
 						 * newly acked. update
 						 * this_sack_highest_newack if
 						 * appropriate.
 						 */
 						if (tp1->rec.data.chunk_was_revoked == 0)
 							tp1->whoTo->saw_newack = 1;
 
-						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
+						if (SCTP_TSN_GT(tp1->rec.data.tsn,
 						                tp1->whoTo->this_sack_highest_newack)) {
 							tp1->whoTo->this_sack_highest_newack =
-								tp1->rec.data.TSN_seq;
+								tp1->rec.data.tsn;
 						}
 						/*-
 						 * CMT DAC algo: also update
 						 * this_sack_lowest_newack
 						 */
 						if (*this_sack_lowest_newack == 0) {
 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
 								sctp_log_sack(*this_sack_lowest_newack,
 									      last_tsn,
-									      tp1->rec.data.TSN_seq,
+									      tp1->rec.data.tsn,
 									      0,
 									      0,
 									      SCTP_LOG_TSN_ACKED);
 							}
-							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
+							*this_sack_lowest_newack = tp1->rec.data.tsn;
 						}
 						/*-
 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
 						 * Separate pseudo_cumack trackers for first transmissions and
 						 * retransmissions.
 						 */
-						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
+						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
 							if (tp1->rec.data.chunk_was_revoked == 0) {
 								tp1->whoTo->new_pseudo_cumack = 1;
 							}
 							tp1->whoTo->find_pseudo_cumack = 1;
 						}
 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
-							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
+							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
 						}
-						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
+						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
 							if (tp1->rec.data.chunk_was_revoked == 0) {
 								tp1->whoTo->new_pseudo_cumack = 1;
 							}
 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
 						}
 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
 							sctp_log_sack(*biggest_newly_acked_tsn,
 								      last_tsn,
-								      tp1->rec.data.TSN_seq,
+								      tp1->rec.data.tsn,
 								      frag_strt,
 								      frag_end,
 								      SCTP_LOG_TSN_ACKED);
 						}
 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
 								       tp1->whoTo->flight_size,
 								       tp1->book_size,
-								       (uintptr_t)tp1->whoTo,
-								       tp1->rec.data.TSN_seq);
+								       (uint32_t)(uintptr_t)tp1->whoTo,
+								       tp1->rec.data.tsn);
 						}
 						sctp_flight_size_decrease(tp1);
 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
 														     tp1);
 						}
 						sctp_total_flight_decrease(stcb, tp1);
 
@@ -2736,33 +3070,32 @@ sctp_process_segment_range(struct sctp_t
 							 */
 							if (tp1->do_rtt) {
 								if (*rto_ok) {
 									tp1->whoTo->RTO =
 										sctp_calculate_rto(stcb,
 												   &stcb->asoc,
 												   tp1->whoTo,
 												   &tp1->sent_rcv_time,
-												   sctp_align_safe_nocopy,
 												   SCTP_RTT_FROM_DATA);
 									*rto_ok = 0;
 								}
 								if (tp1->whoTo->rto_needed == 0) {
 									tp1->whoTo->rto_needed = 1;
 								}
 								tp1->do_rtt = 0;
 							}
 						}
 
 					}
 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
-						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
+						if (SCTP_TSN_GT(tp1->rec.data.tsn,
 						                stcb->asoc.this_sack_highest_gap)) {
 							stcb->asoc.this_sack_highest_gap =
-								tp1->rec.data.TSN_seq;
+								tp1->rec.data.tsn;
 						}
 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
 #ifdef SCTP_AUDITING_ENABLED
 							sctp_audit_log(0xB2,
 								       (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
 #endif
 						}
@@ -2778,36 +3111,41 @@ sctp_process_segment_range(struct sctp_t
 					if (tp1->rec.data.chunk_was_revoked) {
 						/* deflate the cwnd */
 						tp1->whoTo->cwnd -= tp1->book_size;
 						tp1->rec.data.chunk_was_revoked = 0;
 					}
 					/* NR Sack code here */
 					if (nr_sacking &&
 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
-						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
-							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
+						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
+							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
 #ifdef INVARIANTS
 						} else {
-							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
+							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
 #endif
 						}
+						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
+						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
+						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
+							stcb->asoc.trigger_reset = 1;
+						}
 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
 						if (tp1->data) {
 							/* sa_ignore NO_NULL_CHK */
 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
 							sctp_m_freem(tp1->data);
 							tp1->data = NULL;
 						}
 						wake_him++;
 					}
 				}
 				break;
-			}	/* if (tp1->TSN_seq == theTSN) */
-			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
+			}	/* if (tp1->tsn == theTSN) */
+			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
 				break;
 			}
 			tp1 = TAILQ_NEXT(tp1, sctp_next);
 			if ((tp1 == NULL) && (circled == 0)) {
 				circled++;
 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
 			}
 		}	/* end while (tp1) */
@@ -2889,50 +3227,50 @@ sctp_handle_segments(struct mbuf *m, int
 static void
 sctp_check_for_revoked(struct sctp_tcb *stcb,
 		       struct sctp_association *asoc, uint32_t cumack,
 		       uint32_t biggest_tsn_acked)
 {
 	struct sctp_tmit_chunk *tp1;
 
 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
-		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
+		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
 			/*
 			 * ok this guy is either ACK or MARKED. If it is
 			 * ACKED it has been previously acked but not this
 			 * time i.e. revoked.  If it is MARKED it was ACK'ed
 			 * again.
 			 */
-			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
+			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
 				break;
 			}
 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
 				/* it has been revoked */
 				tp1->sent = SCTP_DATAGRAM_SENT;
 				tp1->rec.data.chunk_was_revoked = 1;
 				/* We must add this stuff back in to
 				 * assure timers and such get started.
 				 */
 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
 						       tp1->whoTo->flight_size,
 						       tp1->book_size,
-						       (uintptr_t)tp1->whoTo,
-						       tp1->rec.data.TSN_seq);
+						       (uint32_t)(uintptr_t)tp1->whoTo,
+						       tp1->rec.data.tsn);
 				}
 				sctp_flight_size_increase(tp1);
 				sctp_total_flight_increase(stcb, tp1);
 				/* We inflate the cwnd to compensate for our
 				 * artificial inflation of the flight_size.
 				 */
 				tp1->whoTo->cwnd += tp1->book_size;
 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
 					sctp_log_sack(asoc->last_acked_seq,
 						      cumack,
-						      tp1->rec.data.TSN_seq,
+						      tp1->rec.data.tsn,
 						      0,
 						      0,
 						      SCTP_LOG_TSN_REVOKED);
 				}
 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
 				/* it has been re-acked in this SACK */
 				tp1->sent = SCTP_DATAGRAM_ACKED;
 			}
@@ -2958,17 +3296,17 @@ sctp_strike_gap_ack_chunks(struct sctp_t
 	/*
 	 * select the sending_seq, this is either the next thing ready to be
 	 * sent but not transmitted, OR, the next seq we assign.
 	 */
 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
 	if (tp1 == NULL) {
 		sending_seq = asoc->sending_seq;
 	} else {
-		sending_seq = tp1->rec.data.TSN_seq;
+		sending_seq = tp1->rec.data.tsn;
 	}
 
 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
 	if ((asoc->sctp_cmt_on_off > 0) &&
 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
 			if (net->saw_newack)
 				num_dests_sacked++;
@@ -2981,21 +3319,21 @@ sctp_strike_gap_ack_chunks(struct sctp_t
 		strike_flag = 0;
 		if (tp1->no_fr_allowed) {
 			/* this one had a timeout or something */
 			continue;
 		}
 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
 				sctp_log_fr(biggest_tsn_newly_acked,
-					    tp1->rec.data.TSN_seq,
+					    tp1->rec.data.tsn,
 					    tp1->sent,
 					    SCTP_FR_LOG_CHECK_STRIKE);
 		}
-		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
+		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
 			/* done */
 			break;
 		}
 		if (stcb->asoc.prsctp_supported) {
 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
 				/* Is it expired? */
 #ifndef __FreeBSD__
@@ -3008,17 +3346,17 @@ sctp_strike_gap_ack_chunks(struct sctp_t
 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
 										 SCTP_SO_NOT_LOCKED);
 					}
 					continue;
 				}
 			}
 
 		}
-		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
+		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
 			/* we are beyond the tsn in the sack  */
 			break;
 		}
 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
 			/* either a RESEND, ACKED, or MARKED */
 			/* skip */
 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
 				/* Continue strikin FWD-TSN chunks */
@@ -3032,17 +3370,17 @@ sctp_strike_gap_ack_chunks(struct sctp_t
 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
 			/*
 			 * No new acks were receieved for data sent to this
 			 * dest. Therefore, according to the SFR algo for
 			 * CMT, no data sent to this dest can be marked for
 			 * FR using this SACK.
 			 */
 			continue;
-		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
+		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
 		                                     tp1->whoTo->this_sack_highest_newack)) {
 			/*
 			 * CMT: New acks were receieved for data sent to
 			 * this dest. But no new acks were seen for data
 			 * sent after tp1. Therefore, according to the SFR
 			 * algo for CMT, tp1 cannot be marked for FR using
 			 * this SACK. This step covers part of the DAC algo
 			 * and the HTNA algo as well.
@@ -3061,17 +3399,17 @@ sctp_strike_gap_ack_chunks(struct sctp_t
 		 */
 		if (accum_moved && asoc->fast_retran_loss_recovery) {
 			/*
 			 * Strike the TSN if in fast-recovery and cum-ack
 			 * moved.
 			 */
 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
 				sctp_log_fr(biggest_tsn_newly_acked,
-					    tp1->rec.data.TSN_seq,
+					    tp1->rec.data.tsn,
 					    tp1->sent,
 					    SCTP_FR_LOG_STRIKE_CHUNK);
 			}
 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
 				tp1->sent++;
 			}
 			if ((asoc->sctp_cmt_on_off > 0) &&
 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
@@ -3082,20 +3420,20 @@ sctp_strike_gap_ack_chunks(struct sctp_t
 				 * cumack earlier. If not already to be
 				 * rtx'd, If not a mixed sack and if tp1 is
 				 * not between two sacked TSNs, then mark by
 				 * one more.
 				 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
 				 * two packets have been received after this missing TSN.
 				 */
 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
-				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
+				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
 						sctp_log_fr(16 + num_dests_sacked,
-							    tp1->rec.data.TSN_seq,
+							    tp1->rec.data.tsn,
 							    tp1->sent,
 							    SCTP_FR_LOG_STRIKE_CHUNK);
 					}
 					tp1->sent++;
 				}
 			}
 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
 		           (asoc->sctp_cmt_on_off == 0)) {
@@ -3123,17 +3461,17 @@ sctp_strike_gap_ack_chunks(struct sctp_t
 				                tp1->rec.data.fast_retran_tsn)) {
 					/*
 					 * Strike the TSN, since this ack is
 					 * beyond where things were when we
 					 * did a FR.
 					 */
 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
 						sctp_log_fr(biggest_tsn_newly_acked,
-							    tp1->rec.data.TSN_seq,
+							    tp1->rec.data.tsn,
 							    tp1->sent,
 							    SCTP_FR_LOG_STRIKE_CHUNK);
 					}
 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
 						tp1->sent++;
 					}
 					strike_flag = 1;
 					if ((asoc->sctp_cmt_on_off > 0) &&
@@ -3151,47 +3489,47 @@ sctp_strike_gap_ack_chunks(struct sctp_t
 						 * sacked TSNs, then mark by
 						 * one more.
 						 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
 						 * two packets have been received after this missing TSN.
 						 */
 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
 						    (num_dests_sacked == 1) &&
 						    SCTP_TSN_GT(this_sack_lowest_newack,
-						                tp1->rec.data.TSN_seq)) {
+						                tp1->rec.data.tsn)) {
 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
 								sctp_log_fr(32 + num_dests_sacked,
-									    tp1->rec.data.TSN_seq,
+									    tp1->rec.data.tsn,
 									    tp1->sent,
 									    SCTP_FR_LOG_STRIKE_CHUNK);
 							}
 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
 								tp1->sent++;
 							}
 						}
 					}
 				}
 			}
 			/*
 			 * JRI: TODO: remove code for HTNA algo. CMT's
 			 * SFR algo covers HTNA.
 			 */
-		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
+		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
 		                       biggest_tsn_newly_acked)) {
 			/*
 			 * We don't strike these: This is the  HTNA
 			 * algorithm i.e. we don't strike If our TSN is
 			 * larger than the Highest TSN Newly Acked.
 			 */
 			;
 		} else {
 			/* Strike the TSN */
 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
 				sctp_log_fr(biggest_tsn_newly_acked,
-					    tp1->rec.data.TSN_seq,
+					    tp1->rec.data.tsn,
 					    tp1->sent,
 					    SCTP_FR_LOG_STRIKE_CHUNK);
 			}
 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
 				tp1->sent++;
 			}
 			if ((asoc->sctp_cmt_on_off > 0) &&
 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
@@ -3202,37 +3540,37 @@ sctp_strike_gap_ack_chunks(struct sctp_t
 				 * cumack earlier. If not already to be
 				 * rtx'd, If not a mixed sack and if tp1 is
 				 * not between two sacked TSNs, then mark by
 				 * one more.
 				 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
 				 * two packets have been received after this missing TSN.
 				 */
 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
-				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
+				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
 						sctp_log_fr(48 + num_dests_sacked,
-							    tp1->rec.data.TSN_seq,
+							    tp1->rec.data.tsn,
 							    tp1->sent,
 							    SCTP_FR_LOG_STRIKE_CHUNK);
 					}
 					tp1-&