Bug 1204043 - remove mServeMultipleEventsPerPollIter pref. r=mcmanus
author Dragana Damjanovic <dd.mozilla@gmail.com>
Tue, 15 Sep 2015 05:10:00 +0200
changeset 295205 689f55b551e6a6473a64e4cc806b7e57c85e0854
parent 295204 2c316ea16e94fe84eeb6d48f1c21e41383e02284
child 295206 05202b078d10b15e05b6f1c7151446ac0ca10b02
push id 5245
push user raliiev@mozilla.com
push date Thu, 29 Oct 2015 11:30:51 +0000
treeherder mozilla-beta@dac831dc1bd0
reviewers mcmanus
bugs 1204043
milestone 43.0a1
modules/libpref/init/all.js
netwerk/base/nsSocketTransportService2.cpp
netwerk/base/nsSocketTransportService2.h
--- a/modules/libpref/init/all.js
+++ b/modules/libpref/init/all.js
@@ -1452,20 +1452,16 @@ pref("network.http.packaged-apps-develop
 
 // default values for FTP
 // in a DSCP environment this should be 40 (0x28, or AF11), per RFC-4594,
 // Section 4.8 "High-Throughput Data Service Class", and 80 (0x50, or AF22)
 // per Section 4.7 "Low-Latency Data Service Class".
 pref("network.ftp.data.qos", 0);
 pref("network.ftp.control.qos", 0);
 
-// If this pref is false only one xpcom event will be served per poll
-// iteration. This is the original behavior.
-// If it is true multiple events will be served.
-pref("network.sts.serve_multiple_events_per_poll_iteration", true);
 // The max time to spend on xpcom events between two polls in ms.
 pref("network.sts.max_time_for_events_between_two_polls", 100);
 // </http>
 
 // 2147483647 == PR_INT32_MAX == ~2 GB
 pref("network.websocket.max-message-size", 2147483647);
 
 // Should we automatically follow http 3xx redirects during handshake
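The surviving pref caps how long the socket thread may spend serving xpcom events before it must return to poll(). A minimal sketch of such a time-budgeted drain loop, with hypothetical names (the real loop lives in nsSocketTransportService::Run() below):

    #include <chrono>
    #include <functional>

    // Serve queued events until none remain or the time budget is spent.
    // budgetMs plays the role of
    // network.sts.max_time_for_events_between_two_polls (default 100 ms).
    void DrainEventsBetweenPolls(const std::function<bool()>& serveOneEvent,
                                 int budgetMs)
    {
        using Clock = std::chrono::steady_clock;
        const Clock::time_point start = Clock::now();
        bool pending = true;
        while (pending &&
               std::chrono::duration_cast<std::chrono::milliseconds>(
                   Clock::now() - start).count() < budgetMs) {
            pending = serveOneEvent();  // true if more events remain queued
        }
        // Control then returns to poll(), so socket I/O is not starved for
        // much longer than budgetMs.
    }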
--- a/netwerk/base/nsSocketTransportService2.cpp
+++ b/netwerk/base/nsSocketTransportService2.cpp
@@ -39,17 +39,16 @@ PRThread                 *gSocketThread 
 #define SEND_BUFFER_PREF "network.tcp.sendbuffer"
 #define KEEPALIVE_ENABLED_PREF "network.tcp.keepalive.enabled"
 #define KEEPALIVE_IDLE_TIME_PREF "network.tcp.keepalive.idle_time"
 #define KEEPALIVE_RETRY_INTERVAL_PREF "network.tcp.keepalive.retry_interval"
 #define KEEPALIVE_PROBE_COUNT_PREF "network.tcp.keepalive.probe_count"
 #define SOCKET_LIMIT_TARGET 550U
 #define SOCKET_LIMIT_MIN     50U
 #define BLIP_INTERVAL_PREF "network.activity.blipIntervalMilliseconds"
-#define SERVE_MULTIPLE_EVENTS_PREF "network.sts.serve_multiple_events_per_poll_iteration"
 #define MAX_TIME_BETWEEN_TWO_POLLS "network.sts.max_time_for_events_between_two_polls"
 #define TELEMETRY_PREF "toolkit.telemetry.enabled"
 
 uint32_t nsSocketTransportService::gMaxCount;
 PRCallOnceType nsSocketTransportService::gMaxCountInitOnce;
 
 class DebugMutexAutoLock
 {
@@ -101,17 +100,16 @@ nsSocketTransportService::nsSocketTransp
     , mIdleCount(0)
     , mSentBytesCount(0)
     , mReceivedBytesCount(0)
     , mSendBufferSize(0)
     , mKeepaliveIdleTimeS(600)
     , mKeepaliveRetryIntervalS(1)
     , mKeepaliveProbeCount(kDefaultTCPKeepCount)
     , mKeepaliveEnabledPref(false)
-    , mServeMultipleEventsPerPollIter(true)
     , mServingPendingQueue(false)
     , mMaxTimePerPollIter(100)
     , mTelemetryEnabledPref(false)
     , mProbedMaxCount(false)
 {
     gSocketTransportLog = PR_NewLogModule("nsSocketTransport");
     gUDPSocketLog = PR_NewLogModule("UDPSocket");
 
@@ -538,17 +536,16 @@ nsSocketTransportService::Init()
 
     nsCOMPtr<nsIPrefBranch> tmpPrefService = do_GetService(NS_PREFSERVICE_CONTRACTID);
     if (tmpPrefService) {
         tmpPrefService->AddObserver(SEND_BUFFER_PREF, this, false);
         tmpPrefService->AddObserver(KEEPALIVE_ENABLED_PREF, this, false);
         tmpPrefService->AddObserver(KEEPALIVE_IDLE_TIME_PREF, this, false);
         tmpPrefService->AddObserver(KEEPALIVE_RETRY_INTERVAL_PREF, this, false);
         tmpPrefService->AddObserver(KEEPALIVE_PROBE_COUNT_PREF, this, false);
-        tmpPrefService->AddObserver(SERVE_MULTIPLE_EVENTS_PREF, this, false);
         tmpPrefService->AddObserver(MAX_TIME_BETWEEN_TWO_POLLS, this, false);
         tmpPrefService->AddObserver(TELEMETRY_PREF, this, false);
     }
     UpdatePrefs();
 
     nsCOMPtr<nsIObserverService> obsSvc = services::GetObserverService();
     if (obsSvc) {
         obsSvc->AddObserver(this, "profile-initial-state", false);
@@ -880,67 +877,61 @@ nsSocketTransportService::Run()
             }
 
             // If nothing was pending before the poll, it might be now
             if (!pendingEvents) {
                 thread->HasPendingEvents(&pendingEvents);
             }
 
             if (pendingEvents) {
-                if (mServeMultipleEventsPerPollIter) {
-                    if (!mServingPendingQueue) {
-                        nsresult rv = Dispatch(NS_NewRunnableMethod(this,
-                            &nsSocketTransportService::MarkTheLastElementOfPendingQueue),
-                            nsIEventTarget::DISPATCH_NORMAL);
-                        if (NS_FAILED(rv)) {
-                            NS_WARNING("Could not dispatch a new event on the "
-                                       "socket thread.");
-                        } else {
-                            mServingPendingQueue = true;
-                        }
-
-                        if (mTelemetryEnabledPref) {
-                            startOfIteration = startOfNextIteration;
-                            // Everything that comes after this point will
-                            // be served in the next iteration. If no even
-                            // arrives, startOfNextIteration will be reset at the
-                            // beginning of each for-loop.
-                            startOfNextIteration = TimeStamp::NowLoRes();
-                        }
+                if (!mServingPendingQueue) {
+                    nsresult rv = Dispatch(NS_NewRunnableMethod(this,
+                        &nsSocketTransportService::MarkTheLastElementOfPendingQueue),
+                        nsIEventTarget::DISPATCH_NORMAL);
+                    if (NS_FAILED(rv)) {
+                        NS_WARNING("Could not dispatch a new event on the "
+                                   "socket thread.");
+                    } else {
+                        mServingPendingQueue = true;
                     }
-                    TimeStamp eventQueueStart = TimeStamp::NowLoRes();
-                    do {
-                        NS_ProcessNextEvent(thread);
-                        numberOfPendingEvents++;
-                        pendingEvents = false;
-                        thread->HasPendingEvents(&pendingEvents);
-                    } while (pendingEvents && mServingPendingQueue &&
-                             ((TimeStamp::NowLoRes() -
-                               eventQueueStart).ToMilliseconds() <
-                              mMaxTimePerPollIter));
 
-                    if (mTelemetryEnabledPref && !mServingPendingQueue &&
-                        !startOfIteration.IsNull()) {
-                        Telemetry::AccumulateTimeDelta(
-                            Telemetry::STS_POLL_AND_EVENTS_CYCLE,
-                            startOfIteration + pollDuration,
-                            TimeStamp::NowLoRes());
-
-                        Telemetry::Accumulate(
-                            Telemetry::STS_NUMBER_OF_PENDING_EVENTS,
-                            numberOfPendingEvents);
-
-                        numberOfPendingEventsLastCycle += numberOfPendingEvents;
-                        numberOfPendingEvents = 0;
-                        pollDuration = 0;
+                    if (mTelemetryEnabledPref) {
+                        startOfIteration = startOfNextIteration;
+                        // Everything that comes after this point will
+                        // be served in the next iteration. If no event
+                        // arrives, startOfNextIteration will be reset at the
+                        // beginning of each for-loop.
+                        startOfNextIteration = TimeStamp::NowLoRes();
                     }
-                } else {
+                }
+                TimeStamp eventQueueStart = TimeStamp::NowLoRes();
+                do {
                     NS_ProcessNextEvent(thread);
+                    numberOfPendingEvents++;
                     pendingEvents = false;
                     thread->HasPendingEvents(&pendingEvents);
+                } while (pendingEvents && mServingPendingQueue &&
+                         ((TimeStamp::NowLoRes() -
+                           eventQueueStart).ToMilliseconds() <
+                          mMaxTimePerPollIter));
+
+                if (mTelemetryEnabledPref && !mServingPendingQueue &&
+                    !startOfIteration.IsNull()) {
+                    Telemetry::AccumulateTimeDelta(
+                        Telemetry::STS_POLL_AND_EVENTS_CYCLE,
+                        startOfIteration + pollDuration,
+                        TimeStamp::NowLoRes());
+
+                    Telemetry::Accumulate(
+                        Telemetry::STS_NUMBER_OF_PENDING_EVENTS,
+                        numberOfPendingEvents);
+
+                    numberOfPendingEventsLastCycle += numberOfPendingEvents;
+                    numberOfPendingEvents = 0;
+                    pollDuration = 0;
                 }
             }
         } while (pendingEvents);
 
         bool goingOffline = false;
         // now that our event queue is empty, check to see if we should exit
         {
             DebugMutexAutoLock lock(mLock);
@@ -1201,23 +1192,16 @@ nsSocketTransportService::UpdatePrefs()
         bool keepaliveEnabled = false;
         rv = tmpPrefService->GetBoolPref(KEEPALIVE_ENABLED_PREF,
                                          &keepaliveEnabled);
         if (NS_SUCCEEDED(rv) && keepaliveEnabled != mKeepaliveEnabledPref) {
             mKeepaliveEnabledPref = keepaliveEnabled;
             OnKeepaliveEnabledPrefChange();
         }
 
-        bool serveMultiplePref = false;
-        rv = tmpPrefService->GetBoolPref(SERVE_MULTIPLE_EVENTS_PREF,
-                                         &serveMultiplePref);
-        if (NS_SUCCEEDED(rv)) {
-            mServeMultipleEventsPerPollIter = serveMultiplePref;
-        }
-
         int32_t maxTimePref;
         rv = tmpPrefService->GetIntPref(MAX_TIME_BETWEEN_TWO_POLLS,
                                         &maxTimePref);
         if (NS_SUCCEEDED(rv) && maxTimePref >= 0) {
             mMaxTimePerPollIter = maxTimePref;
         }
 
         bool telemetryPref = false;
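The retained Run() logic depends on MarkTheLastElementOfPendingQueue: before draining, the service dispatches a runnable to its own queue, and when that runnable executes it clears mServingPendingQueue, so the drain stops once everything queued before the drain began has been served. A rough standalone sketch of that snapshot trick, using a plain deque as a stand-in for the thread's event queue (the real code dispatches through nsIEventTarget):

    #include <atomic>
    #include <deque>
    #include <functional>

    static std::deque<std::function<void()>> sQueue;  // stand-in event queue
    static std::atomic<bool> sServingPendingQueue{false};

    void BeginServingSnapshot()
    {
        // Enqueue a sentinel: anything behind it arrived after this point.
        sQueue.push_back([] { sServingPendingQueue = false; });
        sServingPendingQueue = true;
    }

    void DrainSnapshot()
    {
        while (!sQueue.empty() && sServingPendingQueue) {
            std::function<void()> event = std::move(sQueue.front());
            sQueue.pop_front();
            event();  // running the sentinel flips sServingPendingQueue off
        }
        // Events queued after BeginServingSnapshot() wait for the next poll
        // iteration, so event serving cannot monopolize the socket thread.
    }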
--- a/netwerk/base/nsSocketTransportService2.h
+++ b/netwerk/base/nsSocketTransportService2.h
@@ -231,17 +231,16 @@ private:
     int32_t     mKeepaliveIdleTimeS;
     // Number of seconds between retries should keepalive pings fail.
     int32_t     mKeepaliveRetryIntervalS;
     // Number of keepalive probes to send.
     int32_t     mKeepaliveProbeCount;
     // True if TCP keepalive is enabled globally.
     bool        mKeepaliveEnabledPref;
 
-    bool                   mServeMultipleEventsPerPollIter;
     mozilla::Atomic<bool>  mServingPendingQueue;
     int32_t                mMaxTimePerPollIter;
     mozilla::Atomic<bool, mozilla::Relaxed>  mTelemetryEnabledPref;
 
     void OnKeepaliveEnabledPrefChange();
     void NotifyKeepaliveEnabledPrefChange(SocketContext *sock);
 
     // Socket thread only for dynamically adjusting max socket size
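The header keeps two atomic members with different orderings. As a rough std::atomic analogue (mozilla::Atomic defaults to sequentially consistent ordering, and mozilla::Relaxed corresponds to std::memory_order_relaxed):

    #include <atomic>

    std::atomic<bool> servingPendingQueue{false};   // like mozilla::Atomic<bool>
    std::atomic<bool> telemetryEnabledPref{false};  // like mozilla::Atomic<bool, Relaxed>

    void Example()
    {
        // Relaxed: only atomicity is guaranteed; enough for an independent flag.
        telemetryEnabledPref.store(true, std::memory_order_relaxed);
        bool telemetry = telemetryEnabledPref.load(std::memory_order_relaxed);

        // Default (sequentially consistent): also orders surrounding accesses,
        // which matters when the flag coordinates work between threads.
        servingPendingQueue.store(true);
        bool serving = servingPendingQueue.load();
        (void)telemetry;
        (void)serving;
    }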