author | Carsten "Tomcat" Book <cbook@mozilla.com> |
date | Wed, 14 Sep 2016 12:07:02 +0200 |
changeset 313876 | 501e27643a529ce2844924cd47f01ef3150fa0ba |
parent 313875 | de96dcebba86cf25c6184fc6a0815a225ae737e9 (current diff) |
parent 313814 | e5cc560f4a47bdd0f30556356bd34f9f31915f5a (diff) |
child 313877 | b9c4a0402a0a90ff4ef6223fcd2fa92422f8bf44 |
child 313894 | da51e6314ee51300286875170b26224c39ba2ff1 |
child 313935 | 39ebab543e1f86e6c8d80cbcf5da7dbdeda901c0 |
push id | 32264 |
push user | cbook@mozilla.com |
push date | Wed, 14 Sep 2016 10:18:20 +0000 |
treeherder | autoland@b9c4a0402a0a |
reviewers | merge |
milestone | 51.0a1 |
first release with | nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64 |
last release without | nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64 |
--- a/CLOBBER
+++ b/CLOBBER
@@ -17,9 +17,9 @@
 #
 # Modifying this file will now automatically clobber the buildbot machines \o/
 #
 # Are you updating CLOBBER because you think it's needed for your WebIDL
 # changes to stick? As of bug 928195, this shouldn't be necessary! Please
 # don't change CLOBBER for WebIDL changes any more.
-Bug 1288460 requires another clobber due to bug 1298779.
+Bug 1302429 to fix also bustage.
--- a/browser/app/profile/firefox.js +++ b/browser/app/profile/firefox.js @@ -1502,8 +1502,19 @@ pref("extensions.pocket.enabled", true); pref("signon.schemeUpgrades", true); // Enable the "Simplify Page" feature in Print Preview pref("print.use_simplify_page", true); // Space separated list of URLS that are allowed to send objects (instead of // only strings) through webchannels. This list is duplicated in mobile/android/app/mobile.js pref("webchannel.allowObject.urlWhitelist", "https://accounts.firefox.com https://content.cdn.mozilla.net https://input.mozilla.org https://support.mozilla.org https://install.mozilla.org"); + +// Whether or not the browser should scan for unsubmitted +// crash reports, and then show a notification for submitting +// those reports. +#ifdef RELEASE_BUILD +pref("browser.crashReports.unsubmittedCheck.enabled", false); +#else +pref("browser.crashReports.unsubmittedCheck.enabled", true); +#endif + +pref("browser.crashReports.unsubmittedCheck.autoSubmit", false); \ No newline at end of file
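The two preferences added above gate the new unsubmitted-crash-report check: the scan is enabled everywhere except release builds, and auto-submission is off by default. A minimal sketch of how privileged chrome code reads them — the constant names here are illustrative only; the Services.prefs calls are the same ones the handler added later in this changeset uses:

    // Illustrative sketch; constant names are not from the patch.
    Components.utils.import("resource://gre/modules/Services.jsm");

    const CHECK_PREF = "browser.crashReports.unsubmittedCheck.enabled";
    const AUTOSUBMIT_PREF = "browser.crashReports.unsubmittedCheck.autoSubmit";

    // getBoolPref throws if no value exists, so these reads rely on the
    // defaults registered in firefox.js above.
    let shouldCheck = Services.prefs.getBoolPref(CHECK_PREF);
    let autoSubmit = Services.prefs.getBoolPref(AUTOSUBMIT_PREF);

    if (shouldCheck) {
      // Scan for pending reports; submit silently when autoSubmit is true,
      // otherwise prompt via a notification bar.
    }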
--- a/browser/base/content/browser.css +++ b/browser/base/content/browser.css @@ -772,23 +772,16 @@ html|*#fullscreen-exit-button { #addon-progress-notification { -moz-binding: url("chrome://browser/content/urlbarBindings.xml#addon-progress-notification"); } #click-to-play-plugins-notification { -moz-binding: url("chrome://browser/content/urlbarBindings.xml#click-to-play-plugins-notification"); } -#login-fill-notification { - -moz-binding: url("chrome://browser/content/urlbarBindings.xml#login-fill-notification"); -} - -.login-fill-item { - -moz-binding: url("chrome://passwordmgr/content/login.xml#login"); -} .plugin-popupnotification-centeritem { -moz-binding: url("chrome://browser/content/urlbarBindings.xml#plugin-popupnotification-center-item"); } browser[tabmodalPromptShowing] { -moz-user-focus: none !important; } @@ -1180,20 +1173,16 @@ toolbarpaletteitem[place="palette"][hidd } /* Combined context-menu items */ #context-navigation > .menuitem-iconic > .menu-iconic-text, #context-navigation > .menuitem-iconic > .menu-accel-container { display: none; } -#login-fill-doorhanger:not([inDetailView]) > #login-fill-clickcapturer { - pointer-events: none; -} - .popup-notification-invalid-input { box-shadow: 0 0 1.5px 1px red; } .popup-notification-invalid-input[focused] { box-shadow: 0 0 2px 2px rgba(255,0,0,0.4); }
--- a/browser/base/content/browser.xul +++ b/browser/base/content/browser.xul @@ -713,18 +713,16 @@ <image id="default-notification-icon" class="notification-anchor-icon" role="button" tooltiptext="&urlbar.defaultNotificationAnchor.tooltip;"/> <image id="geo-notification-icon" class="notification-anchor-icon geo-icon" role="button" tooltiptext="&urlbar.geolocationNotificationAnchor.tooltip;"/> <image id="addons-notification-icon" class="notification-anchor-icon install-icon" role="button" tooltiptext="&urlbar.addonsNotificationAnchor.tooltip;"/> <image id="indexedDB-notification-icon" class="notification-anchor-icon indexedDB-icon" role="button" tooltiptext="&urlbar.indexedDBNotificationAnchor.tooltip;"/> - <image id="login-fill-notification-icon" class="notification-anchor-icon login-icon" role="button" - tooltiptext="&urlbar.loginFillNotificationAnchor.tooltip;"/> <image id="password-notification-icon" class="notification-anchor-icon login-icon" role="button" tooltiptext="&urlbar.passwordNotificationAnchor.tooltip;"/> <image id="plugins-notification-icon" class="notification-anchor-icon plugin-icon" role="button" tooltiptext="&urlbar.pluginsNotificationAnchor.tooltip;"/> <image id="web-notifications-notification-icon" class="notification-anchor-icon desktop-notification-icon" role="button" tooltiptext="&urlbar.webNotificationAnchor.tooltip;"/> <image id="webRTC-shareDevices-notification-icon" class="notification-anchor-icon camera-icon" role="button" tooltiptext="&urlbar.webRTCShareDevicesNotificationAnchor.tooltip;"/>
--- a/browser/base/content/popup-notifications.inc +++ b/browser/base/content/popup-notifications.inc @@ -47,32 +47,16 @@ <popupnotification id="password-notification" hidden="true"> <popupnotificationcontent orient="vertical"> <textbox id="password-notification-username"/> <textbox id="password-notification-password" type="password" show-content=""/> <checkbox id="password-notification-visibilityToggle" hidden="true"/> </popupnotificationcontent> </popupnotification> - <stack id="login-fill-doorhanger" hidden="true"> - <vbox id="login-fill-mainview"> - <description id="login-fill-testing" - value="Thanks for testing the login fill doorhanger!"/> - <textbox id="login-fill-filter"/> - <richlistbox id="login-fill-list"/> - </vbox> - <vbox id="login-fill-clickcapturer"/> - <vbox id="login-fill-details"> - <textbox id="login-fill-username" readonly="true"/> - <textbox id="login-fill-password" type="password" disabled="true"/> - <hbox> - <button id="login-fill-use" label="Use in form"/> - </hbox> - </vbox> - </stack> <popupnotification id="addon-progress-notification" hidden="true"> <popupnotificationcontent orient="vertical"> <progressmeter id="addon-progress-notification-progressmeter"/> <label id="addon-progress-notification-progresstext" crop="end"/> </popupnotificationcontent> <button id="addon-progress-cancel" oncommand="this.parentNode.cancel();"/>
--- a/browser/base/content/urlbarBindings.xml +++ b/browser/base/content/urlbarBindings.xml @@ -2473,26 +2473,16 @@ file, You can obtain one at http://mozil </implementation> <handlers> <!-- The _accept method checks for .defaultPrevented so that if focus is in a button, enter activates the button and not this default action --> <handler event="keypress" keycode="VK_RETURN" group="system" action="this._accept(event);"/> </handlers> </binding> - <!-- This is the XBL notification definition for the login fill doorhanger, - which is empty because the actual panel is not implemented inside an XBL - binding, but made of elements added to the notification panel. This - allows accessing the full structure while the panel is hidden. --> - <binding id="login-fill-notification" extends="chrome://global/content/bindings/notification.xml#popup-notification"> - <content> - <children/> - </content> - </binding> - <binding id="splitmenu"> <content> <xul:hbox anonid="menuitem" flex="1" class="splitmenu-menuitem" xbl:inherits="iconic,label,disabled,onclick=oncommand,_moz-menuactive=active"/> <xul:menu anonid="menu" class="splitmenu-menu" xbl:inherits="disabled,_moz-menuactive=active" oncommand="event.stopPropagation();">
--- a/browser/components/nsBrowserGlue.js +++ b/browser/components/nsBrowserGlue.js @@ -66,16 +66,18 @@ XPCOMUtils.defineLazyServiceGetter(this, ["WebChannel", "resource://gre/modules/WebChannel.jsm"], ["WindowsRegistry", "resource://gre/modules/WindowsRegistry.jsm"], ["webrtcUI", "resource:///modules/webrtcUI.jsm"], ].forEach(([name, resource]) => XPCOMUtils.defineLazyModuleGetter(this, name, resource)); if (AppConstants.MOZ_CRASHREPORTER) { XPCOMUtils.defineLazyModuleGetter(this, "PluginCrashReporter", "resource:///modules/ContentCrashHandlers.jsm"); + XPCOMUtils.defineLazyModuleGetter(this, "UnsubmittedCrashHandler", + "resource:///modules/ContentCrashHandlers.jsm"); XPCOMUtils.defineLazyModuleGetter(this, "CrashSubmit", "resource://gre/modules/CrashSubmit.jsm"); } XPCOMUtils.defineLazyGetter(this, "gBrandBundle", function() { return Services.strings.createBundle('chrome://branding/locale/brand.properties'); }); @@ -709,16 +711,17 @@ BrowserGlue.prototype = { iconURL: "resource:///chrome/browser/content/browser/defaultthemes/devedition.icon.png", author: vendorShortName, }); } TabCrashHandler.init(); if (AppConstants.MOZ_CRASHREPORTER) { PluginCrashReporter.init(); + UnsubmittedCrashHandler.init(); } Services.obs.notifyObservers(null, "browser-ui-startup-complete", ""); }, _checkForOldBuildUpdates: function () { // check for update if our build is old if (AppConstants.MOZ_UPDATER && @@ -739,74 +742,16 @@ BrowserGlue.prototype = { let acceptableAge = Services.prefs.getIntPref("app.update.checkInstallTime.days") * millisecondsIn24Hours; if (buildDate + acceptableAge < today) { Cc["@mozilla.org/updates/update-service;1"].getService(Ci.nsIApplicationUpdateService).checkForBackgroundUpdates(); } } }, - checkForPendingCrashReports: function() { - // We don't process crash reports older than 28 days, so don't bother submitting them - const PENDING_CRASH_REPORT_DAYS = 28; - if (AppConstants.MOZ_CRASHREPORTER) { - let dateLimit = new Date(); - dateLimit.setDate(dateLimit.getDate() - PENDING_CRASH_REPORT_DAYS); - CrashSubmit.pendingIDsAsync(dateLimit).then( - function onSuccess(ids) { - let count = ids.length; - if (count) { - let win = RecentWindow.getMostRecentBrowserWindow(); - if (!win) { - return; - } - let nb = win.document.getElementById("global-notificationbox"); - let notification = nb.getNotificationWithValue("pending-crash-reports"); - if (notification) { - return; - } - let buttons = [ - { - label: win.gNavigatorBundle.getString("pendingCrashReports.submitAll"), - callback: function() { - ids.forEach(function(id) { - CrashSubmit.submit(id, {extraExtraKeyVals: {"SubmittedFromInfobar": true}}); - }); - } - }, - { - label: win.gNavigatorBundle.getString("pendingCrashReports.ignoreAll"), - callback: function() { - ids.forEach(function(id) { - CrashSubmit.ignore(id); - }); - } - }, - { - label: win.gNavigatorBundle.getString("pendingCrashReports.viewAll"), - callback: function() { - win.openUILinkIn("about:crashes", "tab"); - return true; - } - } - ]; - nb.appendNotification(PluralForm.get(count, - win.gNavigatorBundle.getString("pendingCrashReports.label")).replace("#1", count), - "pending-crash-reports", - "chrome://browser/skin/tab-crashed.svg", - nb.PRIORITY_INFO_HIGH, buttons); - } - }, - function onError(err) { - Cu.reportError(err); - } - ); - } - }, - _onSafeModeRestart: function BG_onSafeModeRestart() { // prompt the user to confirm let strings = gBrowserBundle; let promptTitle = strings.GetStringFromName("safeModeRestartPromptTitle"); let promptMessage = 
strings.GetStringFromName("safeModeRestartPromptMessage"); let restartText = strings.GetStringFromName("safeModeRestartButton"); let buttonFlags = (Services.prompt.BUTTON_POS_0 * Services.prompt.BUTTON_TITLE_IS_STRING) + @@ -1065,20 +1010,16 @@ BrowserGlue.prototype = { if (removalSuccessful && uninstalledValue == "True") { this._resetProfileNotification("uninstall"); } } } this._checkForOldBuildUpdates(); - if (!AppConstants.RELEASE_BUILD) { - this.checkForPendingCrashReports(); - } - CaptivePortalWatcher.init(); AutoCompletePopup.init(); this._firstWindowTelemetry(aWindow); this._firstWindowLoaded(); },
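With the old checkForPendingCrashReports() implementation removed from nsBrowserGlue.js, the glue code now just lazily imports the new handler and starts it once the browser UI is up. A minimal sketch of that wiring, assuming the XPCOMUtils and AppConstants imports that nsBrowserGlue.js already has:

    const { utils: Cu } = Components;
    Cu.import("resource://gre/modules/XPCOMUtils.jsm");
    Cu.import("resource://gre/modules/AppConstants.jsm");

    // The module is only loaded the first time the getter is touched,
    // keeping it off the startup path for non-crash-reporter builds.
    XPCOMUtils.defineLazyModuleGetter(this, "UnsubmittedCrashHandler",
      "resource:///modules/ContentCrashHandlers.jsm");

    // Later, during UI startup (next to TabCrashHandler.init()):
    if (AppConstants.MOZ_CRASHREPORTER) {
      UnsubmittedCrashHandler.init();
    }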
--- a/browser/components/preferences/in-content/advanced.js +++ b/browser/components/preferences/in-content/advanced.js @@ -55,20 +55,17 @@ var gAdvancedPane = { setEventListener("layers.acceleration.disabled", "change", gAdvancedPane.updateHardwareAcceleration); setEventListener("advancedPrefs", "select", gAdvancedPane.tabSelectionChanged); if (AppConstants.MOZ_TELEMETRY_REPORTING) { setEventListener("submitHealthReportBox", "command", gAdvancedPane.updateSubmitHealthReport); } - if (AppConstants.MOZ_CRASHREPORTER) { - setEventListener("submitCrashesBox", "command", - gAdvancedPane.updateSubmitCrashes); - } + setEventListener("connectionSettings", "command", gAdvancedPane.showConnections); setEventListener("clearCacheButton", "command", gAdvancedPane.clearCache); setEventListener("clearOfflineAppCacheButton", "command", gAdvancedPane.clearOfflineAppCache); setEventListener("offlineNotifyExceptions", "command", gAdvancedPane.showOfflineExceptions); @@ -238,38 +235,16 @@ var gAdvancedPane = { /** * */ initSubmitCrashes: function () { this._setupLearnMoreLink("toolkit.crashreporter.infoURL", "crashReporterLearnMore"); - - var checkbox = document.getElementById("submitCrashesBox"); - try { - var cr = Components.classes["@mozilla.org/toolkit/crash-reporter;1"]. - getService(Components.interfaces.nsICrashReporter); - checkbox.checked = cr.submitReports; - } catch (e) { - checkbox.style.display = "none"; - } - }, - - /** - * - */ - updateSubmitCrashes: function () - { - var checkbox = document.getElementById("submitCrashesBox"); - try { - var cr = Components.classes["@mozilla.org/toolkit/crash-reporter;1"]. - getService(Components.interfaces.nsICrashReporter); - cr.submitReports = checkbox.checked; - } catch (e) { } }, /** * The preference/checkbox is configured in XUL. * * In all cases, set up the Learn More link sanely. */ initTelemetry: function ()
--- a/browser/components/preferences/in-content/advanced.xul +++ b/browser/components/preferences/in-content/advanced.xul @@ -49,16 +49,23 @@ type="int"/> #ifdef MOZ_TELEMETRY_REPORTING <preference id="toolkit.telemetry.enabled" name="toolkit.telemetry.enabled" type="bool"/> #endif + <!-- Data Choices tab --> +#ifdef MOZ_CRASHREPORTER + <preference id="browser.crashReports.unsubmittedCheck.autoSubmit" + name="browser.crashReports.unsubmittedCheck.autoSubmit" + type="bool"/> +#endif + <!-- Network tab --> <preference id="browser.cache.disk.capacity" name="browser.cache.disk.capacity" type="int"/> <preference id="browser.offline-apps.notify" name="browser.offline-apps.notify" type="bool"/> @@ -224,21 +231,23 @@ </groupbox> </hbox> </vbox> </groupbox> #endif #ifdef MOZ_CRASHREPORTER <groupbox> <caption> - <checkbox id="submitCrashesBox" label="&enableCrashReporter.label;" - accesskey="&enableCrashReporter.accesskey;"/> + <checkbox id="automaticallySubmitCrashesBox" + preference="browser.crashReports.unsubmittedCheck.autoSubmit" + label="&alwaysSubmitCrashReports.label;" + accesskey="&alwaysSubmitCrashReports.accesskey;"/> </caption> <hbox class="indent"> - <label flex="1">&crashReporterDesc.label;</label> + <label flex="1">&crashReporterDesc2.label;</label> <spacer flex="10"/> <label id="crashReporterLearnMore" class="text-link">&crashReporterLearnMore.label;</label> </hbox> </groupbox> #endif </tabpanel> #endif
--- a/browser/config/tooltool-manifests/win32/releng.manifest
+++ b/browser/config/tooltool-manifests/win32/releng.manifest
@@ -24,16 +24,16 @@
 {
 "size": 167175,
 "digest": "0b71a936edf5bd70cf274aaa5d7abc8f77fe8e7b5593a208f805cc9436fac646b9c4f0b43c2b10de63ff3da671497d35536077ecbc72dba7f8159a38b580f831",
 "algorithm": "sha512",
 "filename": "sccache.tar.bz2",
 "unpack": true
 },
 {
-"version": "Visual Studio 2015 Update 2 / SDK 10.0.10586.0/212",
-"size": 332442800,
-"digest": "995394a4a515c7cb0f8595f26f5395361a638870dd0bbfcc22193fe1d98a0c47126057d5999cc494f3f3eac5cb49160e79757c468f83ee5797298e286ef6252c",
+"version": "Visual Studio 2015 Update 3 14.0.25425.01 / SDK 10.0.14393.0",
+"size": 326656969,
+"digest": "babc414ffc0457d27f5a1ed24a8e4873afbe2f1c1a4075469a27c005e1babc3b2a788f643f825efedff95b79686664c67ec4340ed535487168a3482e68559bc7",
 "algorithm": "sha512",
-"filename": "vs2015u2.zip",
+"filename": "vs2015u3.zip",
 "unpack": true
 }
 ]
--- a/browser/config/tooltool-manifests/win64/releng.manifest
+++ b/browser/config/tooltool-manifests/win64/releng.manifest
@@ -25,16 +25,16 @@
 {
 "size": 167175,
 "digest": "0b71a936edf5bd70cf274aaa5d7abc8f77fe8e7b5593a208f805cc9436fac646b9c4f0b43c2b10de63ff3da671497d35536077ecbc72dba7f8159a38b580f831",
 "algorithm": "sha512",
 "filename": "sccache.tar.bz2",
 "unpack": true
 },
 {
-"version": "Visual Studio 2015 Update 2 / SDK 10.0.10586.0/212",
-"size": 332442800,
-"digest": "995394a4a515c7cb0f8595f26f5395361a638870dd0bbfcc22193fe1d98a0c47126057d5999cc494f3f3eac5cb49160e79757c468f83ee5797298e286ef6252c",
+"version": "Visual Studio 2015 Update 3 14.0.25425.01 / SDK 10.0.14393.0",
+"size": 326656969,
+"digest": "babc414ffc0457d27f5a1ed24a8e4873afbe2f1c1a4075469a27c005e1babc3b2a788f643f825efedff95b79686664c67ec4340ed535487168a3482e68559bc7",
 "algorithm": "sha512",
-"filename": "vs2015u2.zip",
+"filename": "vs2015u3.zip",
 "unpack": true
 }
 ]
--- a/browser/locales/en-US/chrome/browser/browser.dtd +++ b/browser/locales/en-US/chrome/browser/browser.dtd @@ -206,17 +206,16 @@ These should match what Safari and other <!ENTITY printButton.tooltip "Print this page"> <!ENTITY urlbar.viewSiteInfo.label "View site information"> <!ENTITY urlbar.defaultNotificationAnchor.tooltip "Open message panel"> <!ENTITY urlbar.geolocationNotificationAnchor.tooltip "Open location request panel"> <!ENTITY urlbar.addonsNotificationAnchor.tooltip "Open add-on installation message panel"> <!ENTITY urlbar.indexedDBNotificationAnchor.tooltip "Open offline storage message panel"> -<!ENTITY urlbar.loginFillNotificationAnchor.tooltip "Manage your login information"> <!ENTITY urlbar.passwordNotificationAnchor.tooltip "Open save password message panel"> <!ENTITY urlbar.pluginsNotificationAnchor.tooltip "Manage plug-in use"> <!ENTITY urlbar.webNotificationAnchor.tooltip "Change whether you can receive notifications from the site"> <!ENTITY urlbar.webRTCShareDevicesNotificationAnchor.tooltip "Manage sharing your camera and/or microphone with the site"> <!ENTITY urlbar.webRTCShareMicrophoneNotificationAnchor.tooltip "Manage sharing your microphone with the site"> <!ENTITY urlbar.webRTCShareScreenNotificationAnchor.tooltip "Manage sharing your windows or screen with the site">
--- a/browser/locales/en-US/chrome/browser/browser.properties +++ b/browser/locales/en-US/chrome/browser/browser.properties @@ -725,20 +725,20 @@ certErrorDetailsCertChain.label = Certif # LOCALIZATION NOTE (tabgroups.migration.anonGroup): # %S is the group number/ID tabgroups.migration.anonGroup = Group %S tabgroups.migration.tabGroupBookmarkFolderName = Bookmarked Tab Groups # LOCALIZATION NOTE (pendingCrashReports.label): Semi-colon list of plural forms # See: http://developer.mozilla.org/en/docs/Localization_and_Plurals # #1 is the number of pending crash reports -pendingCrashReports.label = You have an unsubmitted crash report;You have #1 unsubmitted crash reports +pendingCrashReports2.label = You have an unsent crash report;You have #1 unsent crash reports pendingCrashReports.viewAll = View -pendingCrashReports.submitAll = Submit -pendingCrashReports.ignoreAll = Ignore +pendingCrashReports.send = Send +pendingCrashReports.alwaysSend = Always Send decoder.noCodecs.button = Learn how decoder.noCodecs.accesskey = L decoder.noCodecs.message = To play video, you may need to install Microsoft’s Media Feature Pack. decoder.noCodecsVista.message = To play video, you may need to install Microsoft’s Platform Update Supplement for Windows Vista. decoder.noCodecsXP.message = To play video, you may need to enable Adobe’s Primetime Content Decryption Module. decoder.noCodecsLinux.message = To play video, you may need to install the required video codecs. decoder.noHWAcceleration.message = To improve video quality, you may need to install Microsoft’s Media Feature Pack.
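The renamed pendingCrashReports2.label string keeps the semi-colon plural-forms format and is expanded at runtime with PluralForm and a #1 placeholder. A small sketch of that expansion — the bundle URL and method calls mirror the ContentCrashHandlers.jsm code later in this changeset; the count value is just an example:

    Components.utils.import("resource://gre/modules/Services.jsm");
    Components.utils.import("resource://gre/modules/PluralForm.jsm");

    let bundle = Services.strings.createBundle(
      "chrome://browser/locale/browser.properties");
    // "You have an unsent crash report;You have #1 unsent crash reports"
    let template = bundle.GetStringFromName("pendingCrashReports2.label");

    let count = 3;  // example value
    // Picks the right plural form, then substitutes the count:
    // -> "You have 3 unsent crash reports"
    let message = PluralForm.get(count, template).replace("#1", count);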
--- a/browser/locales/en-US/chrome/browser/preferences/advanced.dtd +++ b/browser/locales/en-US/chrome/browser/preferences/advanced.dtd @@ -35,20 +35,20 @@ <!ENTITY enableHealthReport.accesskey "R"> <!ENTITY healthReportLearnMore.label "Learn More"> <!ENTITY telemetryDesc.label "Shares performance, usage, hardware and customization data about your browser with &vendorShortName; to help us make &brandShortName; better"> <!ENTITY enableTelemetryData.label "Share additional data (i.e., Telemetry)"> <!ENTITY enableTelemetryData.accesskey "T"> <!ENTITY telemetryLearnMore.label "Learn More"> -<!ENTITY crashReporterDesc.label "&brandShortName; submits crash reports to help &vendorShortName; make your browser more stable and secure"> -<!ENTITY enableCrashReporter.label "Enable Crash Reporter"> -<!ENTITY enableCrashReporter.accesskey "C"> -<!ENTITY crashReporterLearnMore.label "Learn More"> +<!ENTITY crashReporterDesc2.label "Crash reports help &vendorShortName; fix problems and make your browser more stable and secure"> +<!ENTITY alwaysSubmitCrashReports.label "Allow &brandShortName; to send backlogged crash reports on your behalf"> +<!ENTITY alwaysSubmitCrashReports.accesskey "c"> +<!ENTITY crashReporterLearnMore.label "Learn More"> <!ENTITY networkTab.label "Network"> <!ENTITY connection.label "Connection"> <!ENTITY connectionDesc.label "Configure how &brandShortName; connects to the Internet"> <!ENTITY connectionSettings.label "Settings…"> <!ENTITY connectionSettings.accesskey "e">
--- a/browser/modules/ContentCrashHandlers.jsm +++ b/browser/modules/ContentCrashHandlers.jsm @@ -3,29 +3,46 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ "use strict"; var Cc = Components.classes; var Ci = Components.interfaces; var Cu = Components.utils; -this.EXPORTED_SYMBOLS = [ "TabCrashHandler", "PluginCrashReporter" ]; +this.EXPORTED_SYMBOLS = [ "TabCrashHandler", + "PluginCrashReporter", + "UnsubmittedCrashHandler" ]; Cu.import("resource://gre/modules/XPCOMUtils.jsm"); Cu.import("resource://gre/modules/Services.jsm"); XPCOMUtils.defineLazyModuleGetter(this, "CrashSubmit", "resource://gre/modules/CrashSubmit.jsm"); XPCOMUtils.defineLazyModuleGetter(this, "AppConstants", "resource://gre/modules/AppConstants.jsm"); XPCOMUtils.defineLazyModuleGetter(this, "RemotePages", "resource://gre/modules/RemotePageManager.jsm"); XPCOMUtils.defineLazyModuleGetter(this, "SessionStore", "resource:///modules/sessionstore/SessionStore.jsm"); +XPCOMUtils.defineLazyModuleGetter(this, "Task", + "resource://gre/modules/Task.jsm"); +XPCOMUtils.defineLazyModuleGetter(this, "RecentWindow", + "resource:///modules/RecentWindow.jsm"); +XPCOMUtils.defineLazyModuleGetter(this, "PluralForm", + "resource://gre/modules/PluralForm.jsm"); + +XPCOMUtils.defineLazyGetter(this, "gNavigatorBundle", function() { + const url = "chrome://browser/locale/browser.properties"; + return Services.strings.createBundle(url); +}); + +// We don't process crash reports older than 28 days, so don't bother +// submitting them +const PENDING_CRASH_REPORT_DAYS = 28; this.TabCrashHandler = { _crashedTabCount: 0, get prefs() { delete this.prefs; return this.prefs = Services.prefs.getBranch("browser.tabs.crashReporting."); }, @@ -314,16 +331,214 @@ this.TabCrashHandler = { if (!this.childMap) { return null; } return this.childMap.get(this.browserMap.get(browser.permanentKey)); }, } +/** + * This component is responsible for scanning the pending + * crash report directory for reports, and (if enabled), to + * prompt the user to submit those reports. It might also + * submit those reports automatically without prompting if + * the user has opted in. + */ +this.UnsubmittedCrashHandler = { + init() { + if (this.initialized) { + return; + } + + this.initialized = true; + + let pref = "browser.crashReports.unsubmittedCheck.enabled"; + let shouldCheck = Services.prefs.getBoolPref(pref); + + if (shouldCheck) { + Services.obs.addObserver(this, "browser-delayed-startup-finished", + false); + } + }, + + observe(subject, topic, data) { + if (topic != "browser-delayed-startup-finished") { + return; + } + + Services.obs.removeObserver(this, topic); + this.checkForUnsubmittedCrashReports(); + }, + + /** + * Scans the profile directory for unsubmitted crash reports + * within the past PENDING_CRASH_REPORT_DAYS days. If it + * finds any, it will, if necessary, attempt to open a notification + * bar to prompt the user to submit them. + * + * @returns Promise + * Resolves after it tries to append a notification on + * the most recent browser window. If a notification + * cannot be shown, will resolve anyways. 
+ */ + checkForUnsubmittedCrashReports: Task.async(function*() { + let dateLimit = new Date(); + dateLimit.setDate(dateLimit.getDate() - PENDING_CRASH_REPORT_DAYS); + + let reportIDs = []; + try { + reportIDs = yield CrashSubmit.pendingIDsAsync(dateLimit); + } catch (e) { + Cu.reportError(e); + return; + } + + if (reportIDs.length) { + if (CrashNotificationBar.autoSubmit) { + CrashNotificationBar.submitReports(reportIDs); + } else { + this.showPendingSubmissionsNotification(reportIDs); + } + } + }), + + /** + * Given an array of unsubmitted crash report IDs, try to open + * up a notification asking the user to submit them. + * + * @param reportIDs (Array<string>) + * The Array of report IDs to offer the user to send. + */ + showPendingSubmissionsNotification(reportIDs) { + let count = reportIDs.length; + if (!count) { + return; + } + + let messageTemplate = + gNavigatorBundle.GetStringFromName("pendingCrashReports2.label"); + + let message = PluralForm.get(count, messageTemplate).replace("#1", count); + + CrashNotificationBar.show({ + notificationID: "pending-crash-reports", + message, + reportIDs, + }); + }, +}; + +this.CrashNotificationBar = { + /** + * Attempts to show a notification bar to the user in the most + * recent browser window asking them to submit some crash report + * IDs. If a notification cannot be shown (for example, there + * is no browser window), this method exits silently. + * + * The notification will allow the user to submit their crash + * reports. If the user dismissed the notification, the crash + * reports will be marked to be ignored (though they can + * still be manually submitted via about:crashes). + * + * @param JS Object + * An Object with the following properties: + * + * notificationID (string) + * The ID for the notification to be opened. + * + * message (string) + * The message to be displayed in the notification. + * + * reportIDs (Array<string>) + * The array of report IDs to offer to the user. + */ + show({ notificationID, message, reportIDs }) { + let chromeWin = RecentWindow.getMostRecentBrowserWindow(); + if (!chromeWin) { + // Can't show a notification in this case. We'll hopefully + // get another opportunity to have the user submit their + // crash reports later. + return; + } + + let nb = chromeWin.document.getElementById("global-notificationbox"); + let notification = nb.getNotificationWithValue(notificationID); + if (notification) { + return; + } + + let buttons = [{ + label: gNavigatorBundle.GetStringFromName("pendingCrashReports.send"), + callback: () => { + this.submitReports(reportIDs); + }, + }, + { + label: gNavigatorBundle.GetStringFromName("pendingCrashReports.alwaysSend"), + callback: () => { + this.autoSubmit = true; + this.submitReports(reportIDs); + }, + }, + { + label: gNavigatorBundle.GetStringFromName("pendingCrashReports.viewAll"), + callback: function() { + chromeWin.openUILinkIn("about:crashes", "tab"); + return true; + }, + }]; + + let eventCallback = (eventType) => { + if (eventType == "dismissed") { + // The user intentionally dismissed the notification, + // which we interpret as meaning that they don't care + // to submit the reports. We'll ignore these particular + // reports going forward. 
+ reportIDs.forEach(function(reportID) { + CrashSubmit.ignore(reportID); + }); + } + }; + + nb.appendNotification(message, notificationID, + "chrome://browser/skin/tab-crashed.svg", + nb.PRIORITY_INFO_HIGH, buttons, + eventCallback); + }, + + get autoSubmit() { + return Services.prefs + .getBoolPref("browser.crashReports.unsubmittedCheck.autoSubmit"); + }, + + set autoSubmit(val) { + Services.prefs.setBoolPref("browser.crashReports.unsubmittedCheck.autoSubmit", + val); + }, + + /** + * Attempt to submit reports to the crash report server. Each + * report will have the "SubmittedFromInfobar" extra key set + * to true. + * + * @param reportIDs (Array<string>) + * The array of reportIDs to submit. + */ + submitReports(reportIDs) { + for (let reportID of reportIDs) { + CrashSubmit.submit(reportID, { + extraExtraKeyVals: { + "SubmittedFromInfobar": true, + }, + }); + } + }, +}; + this.PluginCrashReporter = { /** * Makes the PluginCrashReporter ready to hear about and * submit crash reports. */ init() { if (this.initialized) { return;
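The new UnsubmittedCrashHandler boils down to a scan-then-notify flow. A condensed sketch of that flow with the error handling trimmed; every name used here comes from the module above, so this is a restatement for readability rather than standalone code:

    // Condensed restatement of the flow added above (not standalone code).
    let checkForUnsubmittedCrashReports = Task.async(function*() {
      let dateLimit = new Date();
      dateLimit.setDate(dateLimit.getDate() - PENDING_CRASH_REPORT_DAYS);

      // IDs of crash reports still sitting in the pending directory.
      let reportIDs = yield CrashSubmit.pendingIDsAsync(dateLimit);
      if (!reportIDs.length) {
        return;
      }

      if (CrashNotificationBar.autoSubmit) {
        // "Always Send" was chosen previously: submit without prompting.
        CrashNotificationBar.submitReports(reportIDs);
      } else {
        // Otherwise show the global notification bar offering Send,
        // Always Send and View; dismissing it marks the reports ignored.
        UnsubmittedCrashHandler.showPendingSubmissionsNotification(reportIDs);
      }
    });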
--- a/browser/themes/linux/browser.css +++ b/browser/themes/linux/browser.css @@ -1740,17 +1740,16 @@ toolbarbutton.chevron > .toolbarbutton-i %include ../../../devtools/client/themes/responsivedesign.inc.css %include ../../../devtools/client/themes/commandline.inc.css %include ../shared/plugin-doorhanger.inc.css notification.pluginVulnerable > .notification-inner > .messageCloseButton:not(:hover) { background-image: -moz-image-rect(url("chrome://global/skin/icons/close.svg"), 0, 80, 16, 64); } -%include ../shared/login-doorhanger.inc.css %include downloads/indicator.css .gcli-panel { padding: 0; } .gclitoolbar-input-node > .textbox-input-box > html|*.textbox-input::-moz-selection {
--- a/browser/themes/osx/browser.css +++ b/browser/themes/osx/browser.css @@ -3150,17 +3150,16 @@ menulist.translate-infobar-element > .me margin-left: 1em; } %include ../shared/fullscreen/warning.inc.css %include ../shared/ctrlTab.inc.css %include ../../../devtools/client/themes/responsivedesign.inc.css %include ../../../devtools/client/themes/commandline.inc.css %include ../shared/plugin-doorhanger.inc.css -%include ../shared/login-doorhanger.inc.css %include downloads/indicator.css /* On mac, the popup notification contents are indented by default and so the default closebutton margins from notification.css require adjustment */ .click-to-play-plugins-notification-description-box > .popup-notification-closebutton { margin-inline-end: -6px;
deleted file mode 100644 --- a/browser/themes/shared/login-doorhanger.inc.css +++ /dev/null @@ -1,79 +0,0 @@ -#notification-popup[popupid="login-fill"] > .panel-arrowcontainer > .panel-arrowcontent { - /* Since we display a sliding subview that extends to the border, we cannot - * keep the default padding of arrow panels. We use the same padding in the - * individual content views instead. Since we removed the padding, we also - * have to ensure the contents are clipped to the border box. */ - padding: 0; - overflow: hidden; -} - -#login-fill-mainview, -#login-fill-details { - padding: var(--panel-arrowcontent-padding); -} - -#login-fill-doorhanger[inDetailView] > #login-fill-mainview { - transform: translateX(-14px); -} - -#login-fill-mainview, -#login-fill-details { - transition: transform 150ms; -} - -#login-fill-doorhanger:not([inDetailView]) > #login-fill-details { - transform: translateX(105%); -} - -#login-fill-doorhanger:not([inDetailView]) > #login-fill-details:-moz-locale-dir(rtl) { - transform: translateX(-105%); -} - -#login-fill-doorhanger[inDetailView] > #login-fill-clickcapturer { - background-color: hsla(210,4%,10%,.1); -} - -#login-fill-testing { - color: #b33; - font-weight: bold; -} - -#login-fill-list { - border: 1px solid black; - max-height: 20em; -} - -.login-fill-item[disabled] { - color: #888; - background-color: #fff; -} - -.login-fill-item[disabled][selected] { - background-color: #eef; -} - -.login-hostname { - margin: 4px; - font-weight: bold; -} - -.login-fill-item.different-hostname > .login-hostname { - color: #888; - font-style: italic; -} - -.login-username { - margin: 4px; - color: #888; -} - -#login-fill-details { - padding: 4px; - background: var(--panel-arrowcontent-background); - color: var(--panel-arrowcontent-color); - background-clip: padding-box; - border-left: 1px solid hsla(210,4%,10%,.3); - box-shadow: 0 3px 5px hsla(210,4%,10%,.1), - 0 0 7px hsla(210,4%,10%,.1); - margin-inline-start: 38px; -}
--- a/browser/themes/shared/notification-icons.inc.css +++ b/browser/themes/shared/notification-icons.inc.css @@ -124,21 +124,16 @@ .login-icon { list-style-image: url(chrome://browser/skin/notification-icons.svg#login); } .popup-notification-icon[popupid="password"] { list-style-image: url(chrome://browser/skin/notification-icons.svg#login-detailed); } -#login-fill-notification-icon { - /* Temporary solution until the capture and fill doorhangers are unified. */ - transform: scaleX(-1); -} - .camera-icon, .popup-notification-icon[popupid="webRTC-shareDevices"] { list-style-image: url(chrome://browser/skin/notification-icons.svg#camera); } .camera-icon.blocked-permission-icon { list-style-image: url(chrome://browser/skin/notification-icons.svg#camera-blocked); }
--- a/browser/themes/windows/browser.css +++ b/browser/themes/windows/browser.css @@ -2429,17 +2429,16 @@ notification.pluginVulnerable > .notific } @media (min-resolution: 1.1dppx) { notification.pluginVulnerable > .notification-inner > .messageCloseButton { list-style-image: url("chrome://global/skin/icons/close-inverted@2x.png"); } } -%include ../shared/login-doorhanger.inc.css %include downloads/indicator.css /* Error counter */ #developer-toolbar-toolbox-button[error-count]:before { color: #FDF3DE; min-width: 16px;
--- a/build/docs/toolchains.rst
+++ b/build/docs/toolchains.rst
@@ -46,17 +46,17 @@ 2. Select ``Programming Languages`` -> `
 3. Under ``Windows and Web Development`` uncheck everything except
    ``Universal Windows App Development Tools`` and the items under it
    (should be ``Tools (1.3.1)...`` and the ``Windows 10 SDK``).
 
 Once Visual Studio 2015 Community has been installed, from a checkout of
 mozilla-central, run something like the following to produce a ZIP archive::
 
-   $ ./mach python build/windows_toolchain.py create-zip vs2015u2
+   $ ./mach python build/windows_toolchain.py create-zip vs2015u3
 
 The produced archive will be the argument to ``create-zip`` + ``.zip``.
 
 Firefox for Android with Gradle
 ===============================
 
 To build Firefox for Android with Gradle in automation, archives
 containing both the Gradle executable and a Maven repository
--- a/build/moz.configure/toolchain.configure
+++ b/build/moz.configure/toolchain.configure
@@ -269,19 +269,21 @@ def get_compiler_info(compiler, language
     ''')
 
     result = try_preprocess(compiler, language, check)
 
     if not result:
         raise FatalCheckError(
             'Unknown compiler or compiler not supported.')
 
+    # Metadata emitted by preprocessors such as GCC with LANG=ja_JP.utf-8 may
+    # have non-ASCII characters. Treat the output as bytearray.
     data = {}
     for line in result.splitlines():
-        if line.startswith('%'):
+        if line.startswith(b'%'):
             k, _, v = line.partition(' ')
             k = k.lstrip('%')
             data[k] = v.replace(' ', '').lstrip('"').rstrip('"')
             log.debug('%s = %s', k, data[k])
 
     try:
         type = CompilerType(data['COMPILER'])
     except:
--- a/build/win32/mozconfig.vs2015-win64 +++ b/build/win32/mozconfig.vs2015-win64 @@ -1,24 +1,24 @@ if [ -z "${VSPATH}" ]; then TOOLTOOL_DIR=${TOOLTOOL_DIR:-$topsrcdir} - VSPATH="$(cd ${TOOLTOOL_DIR} && pwd)/vs2015u2" + VSPATH="$(cd ${TOOLTOOL_DIR} && pwd)/vs2015u3" fi VSWINPATH="$(cd ${VSPATH} && pwd -W)" export WINDOWSSDKDIR="${VSWINPATH}/SDK" export WIN32_REDIST_DIR="${VSPATH}/VC/redist/x86/Microsoft.VC140.CRT" export WIN_UCRT_REDIST_DIR="${VSPATH}/SDK/Redist/ucrt/DLLs/x86" export PATH="${VSPATH}/VC/bin/amd64_x86:${VSPATH}/VC/bin/amd64:${VSPATH}/VC/bin:${VSPATH}/SDK/bin/x86:${VSPATH}/SDK/bin/x64:${VSPATH}/DIA SDK/bin:${PATH}" export PATH="${VSPATH}/VC/redist/x86/Microsoft.VC140.CRT:${VSPATH}/VC/redist/x64/Microsoft.VC140.CRT:${VSPATH}/SDK/Redist/ucrt/DLLs/x86:${VSPATH}/SDK/Redist/ucrt/DLLs/x64:${PATH}" -export INCLUDE="${VSPATH}/VC/include:${VSPATH}/VC/atlmfc/include:${VSPATH}/SDK/Include/10.0.10586.0/ucrt:${VSPATH}/SDK/Include/10.0.10586.0/shared:${VSPATH}/SDK/Include/10.0.10586.0/um:${VSPATH}/SDK/Include/10.0.10586.0/winrt:${VSPATH}/DIA SDK/include" -export LIB="${VSPATH}/VC/lib:${VSPATH}/VC/atlmfc/lib:${VSPATH}/SDK/lib/10.0.10586.0/ucrt/x86:${VSPATH}/SDK/lib/10.0.10586.0/um/x86:${VSPATH}/DIA SDK/lib" +export INCLUDE="${VSPATH}/VC/include:${VSPATH}/VC/atlmfc/include:${VSPATH}/SDK/Include/10.0.14393.0/ucrt:${VSPATH}/SDK/Include/10.0.14393.0/shared:${VSPATH}/SDK/Include/10.0.14393.0/um:${VSPATH}/SDK/Include/10.0.14393.0/winrt:${VSPATH}/DIA SDK/include" +export LIB="${VSPATH}/VC/lib:${VSPATH}/VC/atlmfc/lib:${VSPATH}/SDK/lib/10.0.14393.0/ucrt/x86:${VSPATH}/SDK/lib/10.0.14393.0/um/x86:${VSPATH}/DIA SDK/lib" . $topsrcdir/build/mozconfig.vs-common mk_export_correct_style WINDOWSSDKDIR mk_export_correct_style INCLUDE mk_export_correct_style LIB mk_export_correct_style PATH mk_export_correct_style WIN32_REDIST_DIR
--- a/build/win64/mozconfig.vs2015 +++ b/build/win64/mozconfig.vs2015 @@ -1,23 +1,23 @@ if [ -z "${VSPATH}" ]; then TOOLTOOL_DIR=${TOOLTOOL_DIR:-$topsrcdir} - VSPATH="$(cd ${TOOLTOOL_DIR} && pwd)/vs2015u2" + VSPATH="$(cd ${TOOLTOOL_DIR} && pwd)/vs2015u3" fi VSWINPATH="$(cd ${VSPATH} && pwd -W)" export WINDOWSSDKDIR="${VSWINPATH}/SDK" export WIN32_REDIST_DIR=${VSPATH}/VC/redist/x64/Microsoft.VC140.CRT export WIN_UCRT_REDIST_DIR="${VSPATH}/SDK/Redist/ucrt/DLLs/x64" export PATH="${VSPATH}/VC/bin/amd64:${VSPATH}/VC/bin:${VSPATH}/SDK/bin/x64:${VSPATH}/VC/redist/x64/Microsoft.VC140.CRT:${VSPATH}/SDK/Redist/ucrt/DLLs/x64:${VSPATH}/DIA SDK/bin/amd64:${PATH}" -export INCLUDE="${VSPATH}/VC/include:${VSPATH}/VC/atlmfc/include:${VSPATH}/SDK/Include/10.0.10586.0/ucrt:${VSPATH}/SDK/Include/10.0.10586.0/shared:${VSPATH}/SDK/Include/10.0.10586.0/um:${VSPATH}/SDK/Include/10.0.10586.0/winrt:${VSPATH}/DIA SDK/include" -export LIB="${VSPATH}/VC/lib/amd64:${VSPATH}/VC/atlmfc/lib/amd64:${VSPATH}/SDK/lib/10.0.10586.0/ucrt/x64:${VSPATH}/SDK/lib/10.0.10586.0/um/x64:${VSPATH}/DIA SDK/lib/amd64" +export INCLUDE="${VSPATH}/VC/include:${VSPATH}/VC/atlmfc/include:${VSPATH}/SDK/Include/10.0.14393.0/ucrt:${VSPATH}/SDK/Include/10.0.14393.0/shared:${VSPATH}/SDK/Include/10.0.14393.0/um:${VSPATH}/SDK/Include/10.0.14393.0/winrt:${VSPATH}/DIA SDK/include" +export LIB="${VSPATH}/VC/lib/amd64:${VSPATH}/VC/atlmfc/lib/amd64:${VSPATH}/SDK/lib/10.0.14393.0/ucrt/x64:${VSPATH}/SDK/lib/10.0.14393.0/um/x64:${VSPATH}/DIA SDK/lib/amd64" . $topsrcdir/build/mozconfig.vs-common mk_export_correct_style WINDOWSSDKDIR mk_export_correct_style INCLUDE mk_export_correct_style LIB mk_export_correct_style PATH mk_export_correct_style WIN32_REDIST_DIR
--- a/build/windows_toolchain.py
+++ b/build/windows_toolchain.py
@@ -79,17 +79,17 @@ VS_PATTERNS = [
     {
         'pattern': 'VC/redist/x64/Microsoft.VC140.CRT/**',
     },
     {
         'pattern': 'VC/redist/x86/Microsoft.VC140.CRT/**',
     },
 ]
 
-SDK_RELEASE = '10.0.10586.0'
+SDK_RELEASE = '10.0.14393.0'
 
 # Files from the Windows 10 SDK to install.
 SDK_PATTERNS = [
     {
         'pattern': 'bin/x64/**',
     },
     {
         'pattern': 'Include/%s/**' % SDK_RELEASE,
--- a/config/msvc-stl-wrapper.template.h
+++ b/config/msvc-stl-wrapper.template.h
@@ -54,17 +54,21 @@
 // C4275: When _HAS_EXCEPTIONS is set to 0, system STL header
 //        will generate the warning which we can't modify.
 // C4530: We know that code won't be able to catch exceptions,
 //        but that's OK because we're not throwing them.
 #pragma warning( push )
 #pragma warning( disable : 4275 4530 )
 
+#ifdef __clang__
+#include_next <${HEADER}>
+#else
 #include <${HEADER_PATH}>
+#endif
 
 #pragma warning( pop )
 
 #ifdef MOZ_INCLUDE_MOZALLOC_H_FROM_${HEADER}
 // See if we're in code that can use mozalloc.  NB: this duplicates
 // code in nscore.h because nscore.h pulls in prtypes.h, and chromium
 // can't build with that being included before base/basictypes.h.
 #  if !defined(XPCOM_GLUE) && !defined(NS_NO_XPCOM) && !defined(MOZ_NO_MOZALLOC)
--- a/devtools/client/aboutdebugging/test/head.js +++ b/devtools/client/aboutdebugging/test/head.js @@ -77,21 +77,21 @@ function addTab(url, win, backgroundTab targetWindow.focus(); let tab = targetBrowser.addTab(url); if (!backgroundTab) { targetBrowser.selectedTab = tab; } let linkedBrowser = tab.linkedBrowser; - linkedBrowser.addEventListener("load", function onLoad() { - linkedBrowser.removeEventListener("load", onLoad, true); - info("Tab added and finished loading: " + url); - done(tab); - }, true); + BrowserTestUtils.browserLoaded(linkedBrowser) + .then(function () { + info("Tab added and finished loading: " + url); + done(tab); + }); }); } function removeTab(tab, win) { info("Removing tab."); return new Promise(done => { let targetWindow = win || window;
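This and the following devtools test changes all apply the same migration: the hand-rolled capturing "load" listener (with its matching removeEventListener) is replaced by BrowserTestUtils.browserLoaded(), which returns a promise. A minimal sketch of the helper pattern the head.js files converge on, assuming a browser-chrome mochitest scope where gBrowser, info and BrowserTestUtils are available:

    // Promise-based addTab helper: resolves with the tab once the linked
    // browser has finished loading the URL.
    function addTab(url) {
      let tab = gBrowser.selectedTab = gBrowser.addTab(url);
      return BrowserTestUtils.browserLoaded(tab.linkedBrowser).then(() => {
        info("Tab added and finished loading: " + url);
        return tab;
      });
    }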
--- a/devtools/client/canvasdebugger/test/head.js +++ b/devtools/client/canvasdebugger/test/head.js @@ -76,21 +76,21 @@ function addTab(aUrl, aWindow) { let deferred = promise.defer(); let targetWindow = aWindow || window; let targetBrowser = targetWindow.gBrowser; targetWindow.focus(); let tab = targetBrowser.selectedTab = targetBrowser.addTab(aUrl); let linkedBrowser = tab.linkedBrowser; - linkedBrowser.addEventListener("load", function onLoad() { - linkedBrowser.removeEventListener("load", onLoad, true); - info("Tab added and finished loading: " + aUrl); - deferred.resolve(tab); - }, true); + BrowserTestUtils.browserLoaded(linkedBrowser) + .then(function () { + info("Tab added and finished loading: " + aUrl); + deferred.resolve(tab); + }); return deferred.promise; } function removeTab(aTab, aWindow) { info("Removing tab."); let deferred = promise.defer();
--- a/devtools/client/debugger/test/mochitest/head.js +++ b/devtools/client/debugger/test/mochitest/head.js @@ -86,21 +86,21 @@ this.addTab = function addTab(aUrl, aWin targetWindow.focus(); let tab = targetBrowser.selectedTab = targetBrowser.addTab(aUrl); let linkedBrowser = tab.linkedBrowser; info("Loading frame script with url " + FRAME_SCRIPT_URL + "."); linkedBrowser.messageManager.loadFrameScript(FRAME_SCRIPT_URL, false); - linkedBrowser.addEventListener("load", function onLoad() { - linkedBrowser.removeEventListener("load", onLoad, true); - info("Tab added and finished loading: " + aUrl); - deferred.resolve(tab); - }, true); + BrowserTestUtils.browserLoaded(linkedBrowser) + .then(function () { + info("Tab added and finished loading: " + aUrl); + deferred.resolve(tab); + }); return deferred.promise; }; this.removeTab = function removeTab(aTab, aWindow) { info("Removing tab."); let deferred = promise.defer();
--- a/devtools/client/framework/test/browser_keybindings_01.js +++ b/devtools/client/framework/test/browser_keybindings_01.js @@ -1,36 +1,32 @@ /* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set ft=javascript ts=2 et sw=2 tw=80: */ /* Any copyright is dedicated to the Public Domain. * http://creativecommons.org/publicdomain/zero/1.0/ */ // Tests that the keybindings for opening and closing the inspector work as expected // Can probably make this a shared test that tests all of the tools global keybindings - +const TEST_URL = "data:text/html,<html><head><title>Test for the " + + "highlighter keybindings</title></head><body>" + + "<h1>Keybindings!</h1></body></html>" function test() { waitForExplicitFinish(); let doc; let node; let inspector; let keysetMap = { }; - gBrowser.selectedTab = gBrowser.addTab(); - gBrowser.selectedBrowser.addEventListener("load", function onload() { - gBrowser.selectedBrowser.removeEventListener("load", onload, true); + addTab(TEST_URL).then(function () { doc = content.document; node = doc.querySelector("h1"); waitForFocus(setupKeyBindingsTest); - }, true); - - content.location = "data:text/html,<html><head><title>Test for the " + - "highlighter keybindings</title></head><body>" + - "<h1>Keybindings!</h1></body></html>"; + }); function buildDevtoolsKeysetMap(keyset) { [].forEach.call(keyset.querySelectorAll("key"), function (key) { if (!key.getAttribute("key")) { return; }
--- a/devtools/client/framework/test/browser_toolbox_custom_host.js +++ b/devtools/client/framework/test/browser_toolbox_custom_host.js @@ -1,35 +1,32 @@ /* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set ft=javascript ts=2 et sw=2 tw=80: */ /* Any copyright is dedicated to the Public Domain. * http://creativecommons.org/publicdomain/zero/1.0/ */ +const TEST_URL = "data:text/html,test custom host"; + function test() { let {Toolbox} = require("devtools/client/framework/toolbox"); - let toolbox, iframe, target, tab; - - gBrowser.selectedTab = gBrowser.addTab(); - target = TargetFactory.forTab(gBrowser.selectedTab); + let toolbox, iframe, target; window.addEventListener("message", onMessage); iframe = document.createElement("iframe"); document.documentElement.appendChild(iframe); - gBrowser.selectedBrowser.addEventListener("load", function onLoad(evt) { - gBrowser.selectedBrowser.removeEventListener(evt.type, onLoad, true); + addTab(TEST_URL).then(function (tab) { + target = TargetFactory.forTab(tab); let options = {customIframe: iframe}; gDevTools.showToolbox(target, null, Toolbox.HostType.CUSTOM, options) .then(testCustomHost, console.error) .then(null, console.error); - }, true); - - content.location = "data:text/html,test custom host"; + }); function onMessage(event) { info("onMessage: " + event.data); let json = JSON.parse(event.data); if (json.name == "toolbox-close") { ok("Got the `toolbox-close` message"); window.removeEventListener("message", onMessage); cleanup(); @@ -45,13 +42,13 @@ function test() { function cleanup() { iframe.remove(); // Even if we received "toolbox-close", the toolbox may still be destroying // toolbox.destroy() returns a singleton promise that ensures // everything is cleaned up before proceeding. toolbox.destroy().then(() => { - toolbox = iframe = target = tab = null; + toolbox = iframe = target = null; finish(); }); } }
--- a/devtools/client/framework/test/browser_toolbox_dynamic_registration.js +++ b/devtools/client/framework/test/browser_toolbox_dynamic_registration.js @@ -1,26 +1,23 @@ /* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set ft=javascript ts=2 et sw=2 tw=80: */ /* Any copyright is dedicated to the Public Domain. * http://creativecommons.org/publicdomain/zero/1.0/ */ +const TEST_URL = "data:text/html,test for dynamically registering and unregistering tools"; + var toolbox; function test() { - gBrowser.selectedTab = gBrowser.addTab(); - let target = TargetFactory.forTab(gBrowser.selectedTab); - - gBrowser.selectedBrowser.addEventListener("load", function onLoad(evt) { - gBrowser.selectedBrowser.removeEventListener(evt.type, onLoad, true); + addTab(TEST_URL).then(tab => { + let target = TargetFactory.forTab(tab); gDevTools.showToolbox(target).then(testRegister); - }, true); - - content.location = "data:text/html,test for dynamically registering and unregistering tools"; + }); } function testRegister(aToolbox) { toolbox = aToolbox; gDevTools.once("tool-registered", toolRegistered); gDevTools.registerTool({
--- a/devtools/client/framework/test/browser_toolbox_options_disable_buttons.js +++ b/devtools/client/framework/test/browser_toolbox_options_disable_buttons.js @@ -1,33 +1,29 @@ /* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set ft=javascript ts=2 et sw=2 tw=80: */ /* Any copyright is dedicated to the Public Domain. * http://creativecommons.org/publicdomain/zero/1.0/ */ /* import-globals-from shared-head.js */ "use strict"; +const TEST_URL = "data:text/html;charset=utf8,test for dynamically " + + "registering and unregistering tools"; var doc = null, toolbox = null, panelWin = null, modifiedPrefs = []; function test() { - gBrowser.selectedTab = gBrowser.addTab(); - let target = TargetFactory.forTab(gBrowser.selectedTab); - - gBrowser.selectedBrowser.addEventListener("load", function onLoad(evt) { - gBrowser.selectedBrowser.removeEventListener(evt.type, onLoad, true); + addTab(TEST_URL).then(tab => { + let target = TargetFactory.forTab(tab); gDevTools.showToolbox(target) .then(testSelectTool) .then(testToggleToolboxButtons) .then(testPrefsAreRespectedWhenReopeningToolbox) .then(cleanup, errorHandler); - }, true); - - content.location = "data:text/html;charset=utf8,test for dynamically " + - "registering and unregistering tools"; + }); } function testPrefsAreRespectedWhenReopeningToolbox() { let deferred = defer(); let target = TargetFactory.forTab(gBrowser.selectedTab); info("Closing toolbox to test after reopening"); gDevTools.closeToolbox(target).then(() => {
--- a/devtools/client/framework/test/browser_toolbox_options_disable_js.js +++ b/devtools/client/framework/test/browser_toolbox_options_disable_js.js @@ -3,25 +3,20 @@ /* Any copyright is dedicated to the Public Domain. * http://creativecommons.org/publicdomain/zero/1.0/ */ // Tests that disabling JavaScript for a tab works as it should. const TEST_URI = URL_ROOT + "browser_toolbox_options_disable_js.html"; function test() { - gBrowser.selectedTab = gBrowser.addTab(); - let target = TargetFactory.forTab(gBrowser.selectedTab); - - gBrowser.selectedBrowser.addEventListener("load", function onLoad(evt) { - gBrowser.selectedBrowser.removeEventListener(evt.type, onLoad, true); + addTab(TEST_URI).then(tab => { + let target = TargetFactory.forTab(tab); gDevTools.showToolbox(target).then(testSelectTool); - }, true); - - BrowserTestUtils.loadURI(gBrowser.selectedBrowser, TEST_URI); + }); } function testSelectTool(toolbox) { toolbox.once("options-selected", () => testToggleJS(toolbox)); toolbox.selectTool("options"); } let testToggleJS = Task.async(function* (toolbox) {
--- a/devtools/client/framework/test/browser_toolbox_options_enable_serviceworkers_testing.js +++ b/devtools/client/framework/test/browser_toolbox_options_enable_serviceworkers_testing.js @@ -26,29 +26,25 @@ function test() { SpecialPowers.pushPrefEnv({"set": [ ["dom.serviceWorkers.exemptFromPerDomainMax", true], ["dom.serviceWorkers.enabled", true], ["dom.serviceWorkers.testing.enabled", false] ]}, init); } function init() { - let tab = gBrowser.selectedTab = gBrowser.addTab(); - let target = TargetFactory.forTab(gBrowser.selectedTab); - let linkedBrowser = tab.linkedBrowser; - - linkedBrowser.messageManager.loadFrameScript(COMMON_FRAME_SCRIPT_URL, false); - linkedBrowser.messageManager.loadFrameScript(FRAME_SCRIPT_URL, false); + addTab(TEST_URI).then(tab => { + let target = TargetFactory.forTab(tab); + let linkedBrowser = tab.linkedBrowser; - gBrowser.selectedBrowser.addEventListener("load", function onLoad(evt) { - gBrowser.selectedBrowser.removeEventListener(evt.type, onLoad, true); + linkedBrowser.messageManager.loadFrameScript(COMMON_FRAME_SCRIPT_URL, false); + linkedBrowser.messageManager.loadFrameScript(FRAME_SCRIPT_URL, false); + gDevTools.showToolbox(target).then(testSelectTool); - }, true); - - content.location = TEST_URI; + }); } function testSelectTool(aToolbox) { toolbox = aToolbox; toolbox.once("options-selected", start); toolbox.selectTool("options"); }
--- a/devtools/client/framework/test/browser_toolbox_raise.js +++ b/devtools/client/framework/test/browser_toolbox_raise.js @@ -1,30 +1,27 @@ /* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set ft=javascript ts=2 et sw=2 tw=80: */ /* Any copyright is dedicated to the Public Domain. * http://creativecommons.org/publicdomain/zero/1.0/ */ +const TEST_URL = "data:text/html,test for opening toolbox in different hosts"; + var {Toolbox} = require("devtools/client/framework/toolbox"); -var toolbox, target, tab1, tab2; +var toolbox, tab1, tab2; function test() { - gBrowser.selectedTab = tab1 = gBrowser.addTab(); - tab2 = gBrowser.addTab(); - target = TargetFactory.forTab(gBrowser.selectedTab); - - gBrowser.selectedBrowser.addEventListener("load", function onLoad(evt) { - gBrowser.selectedBrowser.removeEventListener(evt.type, onLoad, true); + addTab(TEST_URL).then(tab => { + tab2 = gBrowser.addTab(); + let target = TargetFactory.forTab(tab); gDevTools.showToolbox(target) .then(testBottomHost, console.error) .then(null, console.error); - }, true); - - content.location = "data:text/html,test for opening toolbox in different hosts"; + }); } function testBottomHost(aToolbox) { toolbox = aToolbox; // switch to another tab and test toolbox.raise() gBrowser.selectedTab = tab2; executeSoon(function () { @@ -68,14 +65,14 @@ function onFocus() { // Now raise toolbox. toolbox.raise(); } function cleanup() { Services.prefs.setCharPref("devtools.toolbox.host", Toolbox.HostType.BOTTOM); toolbox.destroy().then(function () { - toolbox = target = null; + toolbox = null; gBrowser.removeCurrentTab(); gBrowser.removeCurrentTab(); finish(); }); }
--- a/devtools/client/framework/test/browser_toolbox_ready.js +++ b/devtools/client/framework/test/browser_toolbox_ready.js @@ -1,27 +1,21 @@ /* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* vim: set ft=javascript ts=2 et sw=2 tw=80: */ /* Any copyright is dedicated to the Public Domain. * http://creativecommons.org/publicdomain/zero/1.0/ */ -function test() { - gBrowser.selectedTab = gBrowser.addTab(); - let target = TargetFactory.forTab(gBrowser.selectedTab); +const TEST_URL = "data:text/html,test for toolbox being ready"; - const onLoad = Task.async(function* (evt) { - gBrowser.selectedBrowser.removeEventListener("load", onLoad); - - const toolbox = yield gDevTools.showToolbox(target, "webconsole"); - ok(toolbox.isReady, "toolbox isReady is set"); - ok(toolbox.threadClient, "toolbox has a thread client"); +add_task(function* () { + let tab = yield addTab(TEST_URL); + let target = TargetFactory.forTab(tab); - const toolbox2 = yield gDevTools.showToolbox(toolbox.target, toolbox.toolId); - is(toolbox2, toolbox, "same toolbox"); + const toolbox = yield gDevTools.showToolbox(target, "webconsole"); + ok(toolbox.isReady, "toolbox isReady is set"); + ok(toolbox.threadClient, "toolbox has a thread client"); - yield toolbox.destroy(); - gBrowser.removeCurrentTab(); - finish(); - }); + const toolbox2 = yield gDevTools.showToolbox(toolbox.target, toolbox.toolId); + is(toolbox2, toolbox, "same toolbox"); - gBrowser.selectedBrowser.addEventListener("load", onLoad, true); - content.location = "data:text/html,test for toolbox being ready"; -} + yield toolbox.destroy(); + gBrowser.removeCurrentTab(); +});
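browser_toolbox_ready.js also switches from a callback-style test() to the add_task idiom, so the steps read linearly and there is no waitForExplicitFinish()/finish() pairing to get wrong. A sketch of how such a task reads; the URL and assertions here are placeholders rather than the test's full contents:

    add_task(function* () {
      // addTab is the promise-based helper from shared-head.js.
      let tab = yield addTab("data:text/html,ready");
      let target = TargetFactory.forTab(tab);

      let toolbox = yield gDevTools.showToolbox(target, "webconsole");
      ok(toolbox.isReady, "toolbox reports ready");

      yield toolbox.destroy();
      gBrowser.removeCurrentTab();
    });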
--- a/devtools/client/framework/test/browser_two_tabs.js +++ b/devtools/client/framework/test/browser_two_tabs.js @@ -25,27 +25,24 @@ function test() { DebuggerServer.addBrowserActors(); } openTabs(); } function openTabs() { // Open two tabs, select the second - gTab1 = gBrowser.addTab(TAB_URL_1); - gTab1.linkedBrowser.addEventListener("load", function onLoad1(evt) { - gTab1.linkedBrowser.removeEventListener("load", onLoad1); - - gTab2 = gBrowser.selectedTab = gBrowser.addTab(TAB_URL_2); - gTab2.linkedBrowser.addEventListener("load", function onLoad2(evt) { - gTab2.linkedBrowser.removeEventListener("load", onLoad2); + addTab(TAB_URL_1).then(tab1 => { + gTab1 = tab1; + addTab(TAB_URL_2).then(tab2 => { + gTab2 = tab2; connect(); - }, true); - }, true); + }); + }); } function connect() { // Connect to debugger server to fetch the two tab actors gClient = new DebuggerClient(DebuggerServer.connectPipe()); gClient.connect() .then(() => gClient.listTabs()) .then(response => {
--- a/devtools/client/framework/test/helper_disable_cache.js +++ b/devtools/client/framework/test/helper_disable_cache.js @@ -86,22 +86,20 @@ function* setDisableCacheCheckboxChecked yield waitForTick(); } } function reloadTab(tabX) { let def = defer(); let browser = gBrowser.selectedBrowser; - // once() doesn't work here so we use a standard handler instead. - browser.addEventListener("load", function onLoad() { - browser.removeEventListener("load", onLoad, true); + BrowserTestUtils.browserLoaded(browser).then(function () { info("Reloaded tab " + tabX.title); def.resolve(); - }, true); + }); info("Reloading tab " + tabX.title); let mm = getFrameScript(); mm.sendAsyncMessage("devtools:test:reload"); return def.promise; }
--- a/devtools/client/framework/test/shared-head.js +++ b/devtools/client/framework/test/shared-head.js @@ -108,17 +108,17 @@ registerCleanupFunction(function* cleanu * Add a new test tab in the browser and load the given url. * @param {String} url The url to be loaded in the new tab * @return a promise that resolves to the tab object when the url is loaded */ var addTab = Task.async(function* (url) { info("Adding a new tab with URL: " + url); let tab = gBrowser.selectedTab = gBrowser.addTab(url); - yield once(gBrowser.selectedBrowser, "load", true); + yield BrowserTestUtils.browserLoaded(gBrowser.selectedBrowser); info("Tab added and finished loading"); return tab; }); /** * Remove the given tab. @@ -137,17 +137,17 @@ var removeTab = Task.async(function* (ta /** * Refresh the given tab. * @param {Object} tab The tab to be refreshed. * @return Promise<undefined> resolved when the tab is successfully refreshed. */ var refreshTab = Task.async(function*(tab) { info("Refreshing tab."); - const finished = once(gBrowser.selectedBrowser, "load", true); + const finished = BrowserTestUtils.browserLoaded(gBrowser.selectedBrowser); gBrowser.reloadTab(gBrowser.selectedTab); yield finished; info("Tab finished refreshing."); }); /** * Simulate a key event from a <key> element. * @param {DOMNode} key @@ -286,19 +286,17 @@ function waitForTick() { * This shouldn't be used in the tests, but is useful when writing new tests or * debugging existing tests in order to introduce delays in the test steps * * @param {Number} ms * The time to wait * @return A promise that resolves when the time is passed */ function wait(ms) { - let def = defer(); - content.setTimeout(def.resolve, ms); - return def.promise; + return new promise(resolve => setTimeout(resolve, ms)); } /** * Open the toolbox in a given tab. * @param {XULNode} tab The tab the toolbox should be opened in. * @param {String} toolId Optional. The ID of the tool to be selected. * @param {String} hostType Optional. The type of toolbox host to be used. * @return {Promise} Resolves with the toolbox, when it has been opened.
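The rewritten wait() helper in shared-head.js no longer reaches into the content global at all; it simply wraps setTimeout in a promise. The file uses the devtools promise module, but a standard Promise behaves the same way, as in this sketch:

    // Timeout helper: resolve after ms milliseconds without touching the
    // content global.
    function wait(ms) {
      return new Promise(resolve => setTimeout(resolve, ms));
    }

    // Usage inside a Task-based test:  yield wait(500);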
--- a/devtools/client/inspector/computed/test/browser_computed_search-filter_context-menu.js +++ b/devtools/client/inspector/computed/test/browser_computed_search-filter_context-menu.js @@ -47,17 +47,17 @@ add_task(function* () { yield onContextMenuHidden; info("Copy text in search field using the context menu"); searchField.value = TEST_INPUT; searchField.select(); EventUtils.synthesizeMouse(searchField, 2, 2, {type: "contextmenu", button: 2}, win); yield onContextMenuPopup; - yield waitForClipboard(() => cmdCopy.click(), TEST_INPUT); + yield waitForClipboardPromise(() => cmdCopy.click(), TEST_INPUT); searchContextMenu.hidePopup(); yield onContextMenuHidden; info("Reopen context menu and check command properties"); EventUtils.synthesizeMouse(searchField, 2, 2, {type: "contextmenu", button: 2}, win); yield onContextMenuPopup;
--- a/devtools/client/inspector/computed/test/browser_computed_select-and-copy-styles.js +++ b/devtools/client/inspector/computed/test/browser_computed_select-and-copy-styles.js @@ -57,17 +57,17 @@ function* checkCopySelection(view) { info("Checking that cssHtmlTree.siBoundCopy() returns the correct " + "clipboard value"); let expectedPattern = "font-family: helvetica,sans-serif;[\\r\\n]+" + "font-size: 16px;[\\r\\n]+" + "font-variant-caps: small-caps;[\\r\\n]*"; try { - yield waitForClipboard(() => fireCopyEvent(props[0]), + yield waitForClipboardPromise(() => fireCopyEvent(props[0]), () => checkClipboardData(expectedPattern)); } catch (e) { failedClipboard(expectedPattern); } } function* checkSelectAll(view) { info("Testing select-all copy"); @@ -79,17 +79,17 @@ function* checkSelectAll(view) { "clipboard value"); view._contextmenu._onSelectAll(); let expectedPattern = "color: rgb\\(255, 255, 0\\);[\\r\\n]+" + "font-family: helvetica,sans-serif;[\\r\\n]+" + "font-size: 16px;[\\r\\n]+" + "font-variant-caps: small-caps;[\\r\\n]*"; try { - yield waitForClipboard(() => fireCopyEvent(prop), + yield waitForClipboardPromise(() => fireCopyEvent(prop), () => checkClipboardData(expectedPattern)); } catch (e) { failedClipboard(expectedPattern); } } function checkClipboardData(expectedPattern) { let actual = SpecialPowers.getClipboardData("text/unicode");
--- a/devtools/client/inspector/markup/test/browser_markup_links_05.js +++ b/devtools/client/inspector/markup/test/browser_markup_links_05.js @@ -20,17 +20,17 @@ add_task(function* () { openContextMenuAndGetAllItems(inspector, { target: editor.attrElements.get("poster").querySelector(".link"), }); info("Follow the link and wait for the new tab to open"); let onTabOpened = once(gBrowser.tabContainer, "TabOpen"); inspector.onFollowLink(); let {target: tab} = yield onTabOpened; - yield waitForTabLoad(tab); + yield BrowserTestUtils.browserLoaded(tab.linkedBrowser); ok(true, "A new tab opened"); is(tab.linkedBrowser.currentURI.spec, URL_ROOT + "doc_markup_tooltip.png", "The URL for the new tab is correct"); gBrowser.removeTab(tab); info("Select a node with a IDREF attribute"); yield selectNode("label", inspector); @@ -62,21 +62,8 @@ add_task(function* () { let onFailed = inspector.once("idref-attribute-link-failed"); inspector.onFollowLink(); yield onFailed; ok(true, "The node selection failed"); is(inspector.selection.nodeFront.tagName.toLowerCase(), "output", "The <output> node is still selected"); }); - -function waitForTabLoad(tab) { - let def = defer(); - tab.addEventListener("load", function onLoad(e) { - // Skip load event for about:blank - if (tab.linkedBrowser.currentURI.spec === "about:blank") { - return; - } - tab.removeEventListener("load", onLoad); - def.resolve(); - }); - return def.promise; -}
--- a/devtools/client/inspector/markup/test/browser_markup_links_07.js +++ b/devtools/client/inspector/markup/test/browser_markup_links_07.js @@ -53,29 +53,16 @@ add_task(function* () { info("Try to follow link with middle-click, check no new node selected"); yield followLinkNoNewNode(linkEl, false, inspector); info("Try to follow link with meta/ctrl-click, check no new node selected"); yield followLinkNoNewNode(linkEl, true, inspector); }); -function waitForTabLoad(tab) { - let def = defer(); - tab.addEventListener("load", function onLoad() { - // Skip load event for about:blank - if (tab.linkedBrowser.currentURI.spec === "about:blank") { - return; - } - tab.removeEventListener("load", onLoad); - def.resolve(); - }); - return def.promise; -} - function performMouseDown(linkEl, metactrl) { let evt = linkEl.ownerDocument.createEvent("MouseEvents"); let button = -1; if (metactrl) { info("Performing Meta/Ctrl+Left Click"); button = 0; @@ -90,17 +77,17 @@ function performMouseDown(linkEl, metact linkEl.dispatchEvent(evt); } function* followLinkWaitForTab(linkEl, isMetaClick, expectedTabURI) { let onTabOpened = once(gBrowser.tabContainer, "TabOpen"); performMouseDown(linkEl, isMetaClick); let {target} = yield onTabOpened; - yield waitForTabLoad(target); + yield BrowserTestUtils.browserLoaded(target.linkedBrowser); ok(true, "A new tab opened"); is(target.linkedBrowser.currentURI.spec, expectedTabURI, "The URL for the new tab is correct"); gBrowser.removeTab(target); } function* followLinkWaitForNewNode(linkEl, isMetaClick, inspector) { let onSelection = inspector.selection.once("new-node-front");
--- a/devtools/client/inspector/markup/test/head.js +++ b/devtools/client/inspector/markup/test/head.js @@ -267,28 +267,16 @@ function searchUsingSelectorSearch(selec info("Entering \"" + selector + "\" into the selector-search input field"); let field = getSelectorSearchBox(inspector); field.focus(); field.value = selector; EventUtils.sendKey("return", inspector.panelWin); } /** - * This shouldn't be used in the tests, but is useful when writing new tests or - * debugging existing tests in order to introduce delays in the test steps - * @param {Number} ms The time to wait - * @return A promise that resolves when the time is passed - */ -function wait(ms) { - let def = defer(); - setTimeout(def.resolve, ms); - return def.promise; -} - -/** * Check to see if the inspector menu items for editing are disabled. * Things like Edit As HTML, Delete Node, etc. * @param {NodeFront} nodeFront * @param {InspectorPanel} inspector * @param {Boolean} assert Should this function run assertions inline. * @return A promise that resolves with a boolean indicating whether * the menu items are disabled once the menu has been checked. */
--- a/devtools/client/inspector/rules/test/browser_rules_copy_styles.js +++ b/devtools/client/inspector/rules/test/browser_rules_copy_styles.js @@ -262,17 +262,17 @@ function* checkCopyStyle(view, node, men visible.copySelector); is(menuitemCopyRule.visible, visible.copyRule, "Copy Rule visible attribute is as expected: " + visible.copyRule); try { - yield waitForClipboard(() => menuItem.click(), + yield waitForClipboardPromise(() => menuItem.click(), () => checkClipboardData(expectedPattern)); } catch (e) { failedClipboard(expectedPattern); } } function* disableProperty(view, index) { let ruleEditor = getRuleViewRuleEditor(view, 1);
--- a/devtools/client/inspector/rules/test/browser_rules_search-filter_context-menu.js +++ b/devtools/client/inspector/rules/test/browser_rules_search-filter_context-menu.js @@ -46,17 +46,17 @@ add_task(function* () { yield onContextMenuHidden; info("Copy text in search field using the context menu"); searchField.value = TEST_INPUT; searchField.select(); EventUtils.synthesizeMouse(searchField, 2, 2, {type: "contextmenu", button: 2}, win); yield onContextMenuPopup; - yield waitForClipboard(() => cmdCopy.click(), TEST_INPUT); + yield waitForClipboardPromise(() => cmdCopy.click(), TEST_INPUT); searchContextMenu.hidePopup(); yield onContextMenuHidden; info("Reopen context menu and check command properties"); EventUtils.synthesizeMouse(searchField, 2, 2, {type: "contextmenu", button: 2}, win); yield onContextMenuPopup;
--- a/devtools/client/inspector/rules/test/browser_rules_select-and-copy-styles.js +++ b/devtools/client/inspector/rules/test/browser_rules_select-and-copy-styles.js @@ -70,17 +70,17 @@ function* checkCopySelection(view) { let allMenuItems = openStyleContextMenuAndGetAllItems(view, prop); let menuitemCopy = allMenuItems.find(item => item.label === STYLE_INSPECTOR_L10N.getStr("styleinspector.contextmenu.copy")); ok(menuitemCopy.visible, "Copy menu item is displayed as expected"); try { - yield waitForClipboard(() => menuitemCopy.click(), + yield waitForClipboardPromise(() => menuitemCopy.click(), () => checkClipboardData(expectedPattern)); } catch (e) { failedClipboard(expectedPattern); } } function* checkSelectAll(view) { info("Testing select-all copy"); @@ -104,17 +104,17 @@ function* checkSelectAll(view) { let allMenuItems = openStyleContextMenuAndGetAllItems(view, prop); let menuitemCopy = allMenuItems.find(item => item.label === STYLE_INSPECTOR_L10N.getStr("styleinspector.contextmenu.copy")); ok(menuitemCopy.visible, "Copy menu item is displayed as expected"); try { - yield waitForClipboard(() => menuitemCopy.click(), + yield waitForClipboardPromise(() => menuitemCopy.click(), () => checkClipboardData(expectedPattern)); } catch (e) { failedClipboard(expectedPattern); } } function* checkCopyEditorValue(view) { info("Testing CSS property editor value copy"); @@ -132,17 +132,17 @@ function* checkCopyEditorValue(view) { let allMenuItems = openStyleContextMenuAndGetAllItems(view, editor.input); let menuitemCopy = allMenuItems.find(item => item.label === STYLE_INSPECTOR_L10N.getStr("styleinspector.contextmenu.copy")); ok(menuitemCopy.visible, "Copy menu item is displayed as expected"); try { - yield waitForClipboard(() => menuitemCopy.click(), + yield waitForClipboardPromise(() => menuitemCopy.click(), () => checkClipboardData(expectedPattern)); } catch (e) { failedClipboard(expectedPattern); } } function checkClipboardData(expectedPattern) { let actual = SpecialPowers.getClipboardData("text/unicode");
--- a/devtools/client/inspector/shared/test/browser_styleinspector_context-menu-copy-color_02.js +++ b/devtools/client/inspector/shared/test/browser_styleinspector_context-menu-copy-color_02.js @@ -34,17 +34,17 @@ function* testCopyToClipboard(inspector, .querySelector(".ruleview-colorswatch"); let allMenuItems = openStyleContextMenuAndGetAllItems(view, element); let menuitemCopyColor = allMenuItems.find(item => item.label === STYLE_INSPECTOR_L10N.getStr("styleinspector.contextmenu.copyColor")); ok(menuitemCopyColor.visible, "Copy color is visible"); - yield waitForClipboard(() => menuitemCopyColor.click(), + yield waitForClipboardPromise(() => menuitemCopyColor.click(), "#123ABC"); EventUtils.synthesizeKey("VK_ESCAPE", { }); } function* testManualEdit(inspector, view) { info("Testing manually edited colors"); yield selectNode("div", inspector);
--- a/devtools/client/inspector/shared/test/browser_styleinspector_context-menu-copy-urls.js +++ b/devtools/client/inspector/shared/test/browser_styleinspector_context-menu-copy-urls.js @@ -82,22 +82,22 @@ function* testCopyUrlToClipboard({view, info("Context menu is displayed"); ok(menuitemCopyUrl.visible, "\"Copy URL\" menu entry is displayed"); ok(menuitemCopyImageDataUrl.visible, "\"Copy Image Data-URL\" menu entry is displayed"); if (type == "data-uri") { info("Click Copy Data URI and wait for clipboard"); - yield waitForClipboard(() => { + yield waitForClipboardPromise(() => { return menuitemCopyImageDataUrl.click(); }, expected); } else { info("Click Copy URL and wait for clipboard"); - yield waitForClipboard(() => { + yield waitForClipboardPromise(() => { return menuitemCopyUrl.click(); }, expected); } info("Hide context menu"); } function getBackgroundImageProperty(view, selector) {
--- a/devtools/client/inspector/test/browser_inspector_highlighter-eyedropper-clipboard.js +++ b/devtools/client/inspector/test/browser_inspector_highlighter-eyedropper-clipboard.js @@ -18,17 +18,17 @@ add_task(function* () { waitForElementAttributeSet, waitForElementAttributeRemoved} = helper; info("Show the eyedropper with the copyOnSelect option"); yield show("html", {copyOnSelect: true}); info("Make sure to wait until the eyedropper is done taking a screenshot of the page"); yield waitForElementAttributeSet("root", "drawn", helper); - yield waitForClipboard(() => { + yield waitForClipboardPromise(() => { info("Activate the eyedropper so the background color is copied"); EventUtils.synthesizeKey("VK_RETURN", {}); }, "#FF0000"); ok(true, "The clipboard contains the right value"); yield waitForElementAttributeRemoved("root", "drawn", helper); yield waitForElementAttributeSet("root", "hidden", helper);
--- a/devtools/client/inspector/test/browser_inspector_keyboard-shortcuts-copy-outerhtml.js +++ b/devtools/client/inspector/test/browser_inspector_keyboard-shortcuts-copy-outerhtml.js @@ -29,17 +29,17 @@ add_task(function* () { function* setSelectionNodeFront(node, inspector) { let updated = inspector.once("inspector-updated"); inspector.selection.setNodeFront(node); yield updated; } function* checkClipboard(expectedText, node) { try { - yield waitForClipboard(() => fireCopyEvent(node), expectedText); + yield waitForClipboardPromise(() => fireCopyEvent(node), expectedText); ok(true, "Clipboard successfully filled with : " + expectedText); } catch (e) { ok(false, "Clipboard could not be filled with the expected text : " + expectedText); } } function getElementByType(inspector, type) {
--- a/devtools/client/inspector/test/browser_inspector_menu-02-copy-items.js +++ b/devtools/client/inspector/test/browser_inspector_menu-02-copy-items.js @@ -39,11 +39,11 @@ add_task(function* () { for (let {desc, id, selector, text} of COPY_ITEMS_TEST_DATA) { info("Testing " + desc); yield selectNode(selector, inspector); let allMenuItems = openContextMenuAndGetAllItems(inspector); let item = allMenuItems.find(i => i.id === id); ok(item, "The popup has a " + desc + " menu item."); - yield waitForClipboard(() => item.click(), text); + yield waitForClipboardPromise(() => item.click(), text); } });
--- a/devtools/client/inspector/test/browser_inspector_search-filter_context-menu.js +++ b/devtools/client/inspector/test/browser_inspector_search-filter_context-menu.js @@ -45,17 +45,17 @@ add_task(function* () { yield onContextMenuHidden; info("Copy text in search field using the context menu"); searchBox.value = TEST_INPUT; searchBox.select(); EventUtils.synthesizeMouse(searchBox, 2, 2, {type: "contextmenu", button: 2}, win); yield onContextMenuPopup; - yield waitForClipboard(() => cmdCopy.click(), TEST_INPUT); + yield waitForClipboardPromise(() => cmdCopy.click(), TEST_INPUT); searchContextMenu.hidePopup(); yield onContextMenuHidden; info("Reopen context menu and check command properties"); EventUtils.synthesizeMouse(searchBox, 2, 2, {type: "contextmenu", button: 2}, win); yield onContextMenuPopup;
--- a/devtools/client/inspector/test/head.js +++ b/devtools/client/inspector/test/head.js @@ -614,33 +614,16 @@ function waitForStyleEditor(toolbox, hre panel.UI.on("editor-selected", gotEditor); } }); return def.promise; } /** - * @see SimpleTest.waitForClipboard - * - * @param {Function} setup - * Function to execute before checking for the - * clipboard content - * @param {String|Function} expected - * An expected string or validator function - * @return a promise that resolves when the expected string has been found or - * the validator function has returned true, rejects otherwise. - */ -function waitForClipboard(setup, expected) { - let def = defer(); - SimpleTest.waitForClipboard(expected, setup, def.resolve, def.reject); - return def.promise; -} - -/** * Checks if document's active element is within the given element. * @param {HTMLDocument} doc document with active element in question * @param {DOMNode} container element tested on focus containment * @return {Boolean} */ function containsFocus(doc, container) { let elm = doc.activeElement; while (elm) { @@ -657,18 +640,17 @@ function containsFocus(doc, container) { * does and completes the load event. * * @return a promise that resolves to the tab object */ var waitForTab = Task.async(function* () { info("Waiting for a tab to open"); yield once(gBrowser.tabContainer, "TabOpen"); let tab = gBrowser.selectedTab; - let browser = tab.linkedBrowser; - yield once(browser, "load", true); + yield BrowserTestUtils.browserLoaded(tab.linkedBrowser); info("The tab load completed"); return tab; }); /** * Simulate the key input for the given input in the window. * * @param {String} input
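The local waitForClipboard() wrapper above is removed in favour of the shared waitForClipboardPromise() helper used throughout the tests in this changeset; like the removed wrapper, it is assumed to wrap SimpleTest.waitForClipboard() in a promise that resolves on a match and rejects otherwise. A hedged sketch of the calling pattern (menuitemCopy and the expected string are placeholders):

add_task(function* () {
  try {
    // Resolves when the clipboard matches the expected string (or a validator
    // function returns true); rejects if the clipboard never matches.
    yield waitForClipboardPromise(() => menuitemCopy.click(), "#FF0000");
    ok(true, "Clipboard contained the expected value");
  } catch (e) {
    ok(false, "Clipboard was never filled with the expected value");
  }
});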
--- a/devtools/client/projecteditor/test/head.js +++ b/devtools/client/projecteditor/test/head.js @@ -52,25 +52,23 @@ registerCleanupFunction(() => { * Add a new test tab in the browser and load the given url. * @param {String} url The url to be loaded in the new tab * @return a promise that resolves to the tab object when the url is loaded */ function addTab(url) { info("Adding a new tab with URL: '" + url + "'"); let def = promise.defer(); - let tab = gBrowser.selectedTab = gBrowser.addTab(); - gBrowser.selectedBrowser.addEventListener("load", function onload() { - gBrowser.selectedBrowser.removeEventListener("load", onload, true); + let tab = gBrowser.selectedTab = gBrowser.addTab(url); + BrowserTestUtils.browserLoaded(tab.linkedBrowser).then(function () { info("URL '" + url + "' loading complete"); waitForFocus(() => { def.resolve(tab); }, content); - }, true); - content.location = url; + }); return def.promise; } /** * Some tests may need to import one or more of the test helper scripts. * A test helper script is simply a js file that contains common test code that * is either not common-enough to be in head.js, or that is located in a separate
--- a/devtools/client/responsivedesign/test/head.js +++ b/devtools/client/responsivedesign/test/head.js @@ -190,28 +190,22 @@ function openRuleView() { var addTab = Task.async(function* (url) { info("Adding a new tab with URL: '" + url + "'"); window.focus(); let tab = gBrowser.selectedTab = gBrowser.addTab(url); let browser = tab.linkedBrowser; - yield once(browser, "load", true); + yield BrowserTestUtils.browserLoaded(browser); info("URL '" + url + "' loading complete"); return tab; }); -function wait(ms) { - let def = promise.defer(); - setTimeout(def.resolve, ms); - return def.promise; -} - /** * Waits for the next load to complete in the current browser. * * @return promise */ function waitForDocLoadComplete(aBrowser = gBrowser) { let deferred = promise.defer(); let progressListener = {
--- a/devtools/client/shadereditor/test/head.js +++ b/devtools/client/shadereditor/test/head.js @@ -71,21 +71,20 @@ function addTab(aUrl, aWindow) { let deferred = promise.defer(); let targetWindow = aWindow || window; let targetBrowser = targetWindow.gBrowser; targetWindow.focus(); let tab = targetBrowser.selectedTab = targetBrowser.addTab(aUrl); let linkedBrowser = tab.linkedBrowser; - linkedBrowser.addEventListener("load", function onLoad() { - linkedBrowser.removeEventListener("load", onLoad, true); + BrowserTestUtils.browserLoaded(linkedBrowser).then(function () { info("Tab added and finished loading: " + aUrl); deferred.resolve(tab); - }, true); + }); return deferred.promise; } function removeTab(aTab, aWindow) { info("Removing tab."); let deferred = promise.defer();
--- a/devtools/client/sourceeditor/test/browser_codemirror.js +++ b/devtools/client/sourceeditor/test/browser_codemirror.js @@ -7,19 +7,12 @@ const URI = "chrome://mochitests/content/browser/devtools/client/" + "sourceeditor/test/codemirror/codemirror.html"; loadHelperScript("helper_codemirror_runner.js"); function test() { requestLongerTimeout(3); waitForExplicitFinish(); - let tab = gBrowser.addTab(); - gBrowser.selectedTab = tab; - - let browser = gBrowser.getBrowserForTab(tab); - browser.addEventListener("load", function onLoad() { - browser.removeEventListener("load", onLoad, true); - runCodeMirrorTest(browser); - }, true); - - browser.loadURI(URI); + addTab(URI).then(function (tab) { + runCodeMirrorTest(tab.linkedBrowser); + }); }
--- a/devtools/client/sourceeditor/test/browser_css_autocompletion.js +++ b/devtools/client/sourceeditor/test/browser_css_autocompletion.js @@ -74,23 +74,20 @@ let doc = null; let index = 0; let completer = null; let progress; let progressDiv; let inspector; function test() { waitForExplicitFinish(); - gBrowser.selectedTab = gBrowser.addTab(); - gBrowser.selectedBrowser.addEventListener("load", function onload() { - gBrowser.selectedBrowser.removeEventListener("load", onload, true); + addTab(TEST_URI).then(function () { doc = content.document; runTests(); - }, true); - content.location = TEST_URI; + }); } function runTests() { progress = doc.getElementById("progress"); progressDiv = doc.querySelector("#progress > div"); let target = TargetFactory.forTab(gBrowser.selectedTab); target.makeRemote().then(() => { inspector = InspectorFront(target.client, target.form);
--- a/devtools/client/sourceeditor/test/browser_vimemacs.js +++ b/devtools/client/sourceeditor/test/browser_vimemacs.js @@ -6,19 +6,12 @@ const URI = "chrome://mochitests/content/browser/devtools/client" + "/sourceeditor/test/codemirror/vimemacs.html"; loadHelperScript("helper_codemirror_runner.js"); function test() { requestLongerTimeout(4); waitForExplicitFinish(); - let tab = gBrowser.addTab(); - gBrowser.selectedTab = tab; - - let browser = gBrowser.getBrowserForTab(tab); - browser.addEventListener("load", function onLoad() { - browser.removeEventListener("load", onLoad, true); - runCodeMirrorTest(browser); - }, true); - - browser.loadURI(URI); + addTab(URI).then(function (tab) { + runCodeMirrorTest(tab.linkedBrowser); + }); }
--- a/devtools/client/sourceeditor/test/head.js +++ b/devtools/client/sourceeditor/test/head.js @@ -17,28 +17,26 @@ SimpleTest.registerCleanupFunction(() => }); /** * Open a new tab at a URL and call a callback on load */ function addTab(url, callback) { waitForExplicitFinish(); - gBrowser.selectedTab = gBrowser.addTab(); - content.location = url; - + gBrowser.selectedTab = gBrowser.addTab(url); let tab = gBrowser.selectedTab; let browser = gBrowser.getBrowserForTab(tab); - function onTabLoad() { - browser.removeEventListener("load", onTabLoad, true); - callback(browser, tab, browser.contentDocument); - } - - browser.addEventListener("load", onTabLoad, true); + return BrowserTestUtils.browserLoaded(browser).then(function () { + if (typeof(callback) == "function") { + callback(browser, tab, browser.contentDocument); + } + return tab; + }); } function promiseTab(url) { return new Promise(resolve => addTab(url, resolve)); } function promiseWaitForFocus() {
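The rewritten addTab() above keeps the legacy callback for existing callers while also returning the promise from BrowserTestUtils.browserLoaded(), so both styles coexist. A brief sketch of the two call shapes (the data: URLs are placeholders):

// Promise style, via the promiseTab() wrapper already defined in this head file.
promiseTab("data:text/html,<p>editor</p>").then(browser => {
  info("Tab loaded, editor tests can start");
});

// Legacy callback style still works: addTab() invokes the callback once the
// load promise resolves, passing the browser, tab and content document.
addTab("data:text/html,<p>editor</p>", function (browser, tab, doc) {
  info("Loaded " + doc.location.href);
});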
--- a/devtools/client/styleeditor/test/head.js +++ b/devtools/client/styleeditor/test/head.js @@ -24,21 +24,21 @@ const TEST_HOST = "mochi.test:8888"; var addTab = function (url, win) { info("Adding a new tab with URL: '" + url + "'"); let def = defer(); let targetWindow = win || window; let targetBrowser = targetWindow.gBrowser; let tab = targetBrowser.selectedTab = targetBrowser.addTab(url); - targetBrowser.selectedBrowser.addEventListener("load", function onload() { - targetBrowser.selectedBrowser.removeEventListener("load", onload, true); - info("URL '" + url + "' loading complete"); - def.resolve(tab); - }, true); + BrowserTestUtils.browserLoaded(targetBrowser.selectedBrowser) + .then(function () { + info("URL '" + url + "' loading complete"); + def.resolve(tab); + }); return def.promise; }; /** * Navigate the currently selected tab to a new URL and wait for it to load. * @param {String} url The url to be loaded in the current tab. * @return a promise that resolves when the page has fully loaded.
--- a/devtools/client/webaudioeditor/test/head.js +++ b/devtools/client/webaudioeditor/test/head.js @@ -68,21 +68,20 @@ function addTab(aUrl, aWindow) { let deferred = Promise.defer(); let targetWindow = aWindow || window; let targetBrowser = targetWindow.gBrowser; targetWindow.focus(); let tab = targetBrowser.selectedTab = targetBrowser.addTab(aUrl); let linkedBrowser = tab.linkedBrowser; - linkedBrowser.addEventListener("load", function onLoad() { - linkedBrowser.removeEventListener("load", onLoad, true); + BrowserTestUtils.browserLoaded(linkedBrowser).then(function () { info("Tab added and finished loading: " + aUrl); deferred.resolve(tab); - }, true); + }); return deferred.promise; } function removeTab(aTab, aWindow) { info("Removing tab."); let deferred = Promise.defer();
--- a/devtools/client/webide/test/head.js +++ b/devtools/client/webide/test/head.js @@ -162,21 +162,20 @@ function addTab(aUrl, aWindow) { let deferred = promise.defer(); let targetWindow = aWindow || window; let targetBrowser = targetWindow.gBrowser; targetWindow.focus(); let tab = targetBrowser.selectedTab = targetBrowser.addTab(aUrl); let linkedBrowser = tab.linkedBrowser; - linkedBrowser.addEventListener("load", function onLoad() { - linkedBrowser.removeEventListener("load", onLoad, true); + BrowserTestUtils.browserLoaded(linkedBrowser).then(function () { info("Tab added and finished loading: " + aUrl); deferred.resolve(tab); - }, true); + }); return deferred.promise; } function removeTab(aTab, aWindow) { info("Removing tab."); let deferred = promise.defer();
--- a/devtools/server/tests/browser/browser_canvasframe_helper_04.js +++ b/devtools/server/tests/browser/browser_canvasframe_helper_04.js @@ -60,17 +60,17 @@ add_task(function* () { info("Synthesizing an event on the element"); let onDocMouseDown = once(doc, "mousedown"); synthesizeMouseDown(100, 100, doc.defaultView); yield onDocMouseDown; is(mouseDownHandled, 1, "The mousedown event was handled once before navigation"); info("Navigating to a new page"); - let loaded = once(gBrowser.selectedBrowser, "load", true); + let loaded = BrowserTestUtils.browserLoaded(gBrowser.selectedBrowser); content.location = TEST_URL_2; yield loaded; doc = gBrowser.selectedBrowser.contentWindow.document; info("Try to access the element again"); is(el.getAttribute("class"), "child-element", "The attribute is correct after navigation"); is(el.getTextContent(), "test content",
--- a/devtools/server/tests/browser/head.js +++ b/devtools/server/tests/browser/head.js @@ -28,17 +28,17 @@ waitForExplicitFinish(); * @return a promise that resolves to the new browser that the document * is loaded in. Note that we cannot return the document * directly, since this would be a CPOW in the e10s case, * and Promises cannot be resolved with CPOWs (see bug 1233497). */ var addTab = Task.async(function* (url) { info(`Adding a new tab with URL: ${url}`); let tab = gBrowser.selectedTab = gBrowser.addTab(url); - yield once(gBrowser.selectedBrowser, "load", true); + yield BrowserTestUtils.browserLoaded(tab.linkedBrowser); info(`Tab added and URL ${url} loaded`); return tab.linkedBrowser; }); function* initAnimationsFrontForUrl(url) { const {AnimationsFront} = require("devtools/shared/fronts/animation");
--- a/dom/animation/test/css-animations/file_animation-id.html +++ b/dom/animation/test/css-animations/file_animation-id.html @@ -13,20 +13,12 @@ test(function(t) { div.style.animation = 'abc 100s'; var animation = div.getAnimations()[0]; assert_equals(animation.id, '', 'id for CSS Animation is initially empty'); animation.id = 'anim' assert_equals(animation.id, 'anim', 'animation.id reflects the value set'); }, 'Animation.id for CSS Animations'); -test(function(t) { - var div = addDiv(t); - var animation = div.animate({}, 100 * MS_PER_SEC); - assert_equals(animation.id, '', 'id for CSS Animation is initially empty'); - animation.id = 'anim' - - assert_equals(animation.id, 'anim', 'animation.id reflects the value set'); -}, 'Animation.id for CSS Animations'); done(); </script> </body> </html>
--- a/dom/animation/test/css-transitions/file_animation-cancel.html +++ b/dom/animation/test/css-transitions/file_animation-cancel.html @@ -1,106 +1,101 @@ <!doctype html> <meta charset=utf-8> <script src="../testcommon.js"></script> <body> <script> 'use strict'; -async_test(function(t) { +promise_test(function(t) { var div = addDiv(t, { style: 'margin-left: 0px' }); flushComputedStyle(div); div.style.transition = 'margin-left 100s'; div.style.marginLeft = '1000px'; flushComputedStyle(div); var animation = div.getAnimations()[0]; - animation.ready.then(waitForFrame).then(t.step_func(function() { + return animation.ready.then(waitForFrame).then(function() { assert_not_equals(getComputedStyle(div).marginLeft, '1000px', 'transform style is animated before cancelling'); animation.cancel(); assert_equals(getComputedStyle(div).marginLeft, div.style.marginLeft, 'transform style is no longer animated after cancelling'); - t.done(); - })); + }); }, 'Animated style is cleared after cancelling a running CSS transition'); -async_test(function(t) { +promise_test(function(t) { var div = addDiv(t, { style: 'margin-left: 0px' }); flushComputedStyle(div); div.style.transition = 'margin-left 100s'; div.style.marginLeft = '1000px'; flushComputedStyle(div); - div.addEventListener('transitionend', t.step_func(function() { + div.addEventListener('transitionend', function() { assert_unreached('Got unexpected end event on cancelled transition'); - })); + }); var animation = div.getAnimations()[0]; - animation.ready.then(t.step_func(function() { + return animation.ready.then(function() { // Seek to just before the end then cancel animation.currentTime = 99.9 * 1000; animation.cancel(); // Then wait a couple of frames and check that no event was dispatched return waitForAnimationFrames(2); - })).then(t.step_func(function() { - t.done(); - })); + }); }, 'Cancelled CSS transitions do not dispatch events'); -async_test(function(t) { +promise_test(function(t) { var div = addDiv(t, { style: 'margin-left: 0px' }); flushComputedStyle(div); div.style.transition = 'margin-left 100s'; div.style.marginLeft = '1000px'; flushComputedStyle(div); var animation = div.getAnimations()[0]; - animation.ready.then(t.step_func(function() { + return animation.ready.then(function() { animation.cancel(); assert_equals(getComputedStyle(div).marginLeft, '1000px', 'margin-left style is not animated after cancelling'); animation.play(); assert_equals(getComputedStyle(div).marginLeft, '0px', 'margin-left style is animated after re-starting transition'); return animation.ready; - })).then(t.step_func(function() { + }).then(function() { assert_equals(animation.playState, 'running', 'Transition succeeds in running after being re-started'); - t.done(); - })); + }); }, 'After cancelling a transition, it can still be re-used'); -async_test(function(t) { +promise_test(function(t) { var div = addDiv(t, { style: 'margin-left: 0px' }); flushComputedStyle(div); div.style.transition = 'margin-left 100s'; div.style.marginLeft = '1000px'; flushComputedStyle(div); var animation = div.getAnimations()[0]; - animation.ready.then(t.step_func(function() { + return animation.ready.then(function() { animation.finish(); animation.cancel(); assert_equals(getComputedStyle(div).marginLeft, '1000px', 'margin-left style is not animated after cancelling'); animation.play(); assert_equals(getComputedStyle(div).marginLeft, '0px', 'margin-left style is animated after re-starting transition'); return animation.ready; - })).then(t.step_func(function() { + }).then(function() { 
assert_equals(animation.playState, 'running', 'Transition succeeds in running after being re-started'); - t.done(); - })); + }); }, 'After cancelling a finished transition, it can still be re-used'); test(function(t) { var div = addDiv(t, { style: 'margin-left: 0px' }); flushComputedStyle(div); div.style.transition = 'margin-left 100s'; div.style.marginLeft = '1000px'; @@ -118,11 +113,25 @@ test(function(t) { assert_equals(getComputedStyle(div).marginLeft, '1000px', 'margin-left style is still not animated after updating' + ' transition-duration'); assert_equals(animation.playState, 'idle', 'Transition is still idle after updating transition-duration'); }, 'After cancelling a transition, updating transition properties doesn\'t make' + ' it live again'); +test(function(t) { + var div = addDiv(t, { style: 'margin-left: 0px' }); + flushComputedStyle(div); + + div.style.transition = 'margin-left 100s'; + div.style.marginLeft = '1000px'; + flushComputedStyle(div); + + var animation = div.getAnimations()[0]; + div.style.display = 'none'; + assert_equals(animation.playState, 'idle'); + assert_equals(getComputedStyle(div).marginLeft, '1000px'); +}, 'Setting display:none on an element cancels its transitions'); + done(); </script> </body>
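The async_test to promise_test conversions above follow the standard testharness.js pattern: instead of wrapping callbacks in t.step_func() and calling t.done(), the test returns its promise chain and the harness treats resolution as success and rejection (including a failed assertion) as failure. A generic before/after sketch, assuming an animation set up as in the tests above:

// Before: async_test with manual bookkeeping.
async_test(function(t) {
  animation.ready.then(t.step_func(function() {
    assert_equals(animation.playState, 'running');
    t.done();
  }));
}, 'async_test style');

// After: promise_test simply returns the chain.
promise_test(function(t) {
  return animation.ready.then(function() {
    assert_equals(animation.playState, 'running');
  });
}, 'promise_test style');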
--- a/dom/canvas/test/webgl-mochitest/mochitest.ini +++ b/dom/canvas/test/webgl-mochitest/mochitest.ini @@ -79,17 +79,17 @@ skip-if = android_version == '18' #Andro [test_uninit_data.html] [test_webgl_available.html] #[test_webgl_color_buffer_float.html] # We haven't cleaned up the Try results yet, but let's get this on the books first. [test_webgl_conformance.html] skip-if = toolkit == 'android' #bug 865443- separate suite - the non_conf* tests pass except for one on armv6 tests [test_webgl_compressed_texture_es3.html] [test_webgl_disjoint_timer_query.html] -fail-if = (os == 'win' && (os_version == '6.1' || os_version == '6.2')) +fail-if = (os == 'win' && (os_version == '6.1' || os_version == '6.2' || os_version == '10.0')) [test_webgl_force_enable.html] [test_webgl_request_context.html] skip-if = toolkit == 'android' #bug 865443- separate suite - the non_conf* tests pass except for one on armv6 tests [test_webgl_request_mismatch.html] skip-if = toolkit == 'android' #bug 865443- separate suite - the non_conf* tests pass except for one on armv6 tests [test_webgl2_not_exposed.html] skip-if = toolkit == 'android' #bug 865443- separate suite - the non_conf* tests pass except for one on armv6 tests [test_webgl2_invalidate_framebuffer.html]
--- a/dom/html/HTMLMediaElement.cpp +++ b/dom/html/HTMLMediaElement.cpp @@ -4336,17 +4336,17 @@ void HTMLMediaElement::MetadataLoaded(co "Video resolution must be known on 'loadedmetadata'"); DispatchAsyncEvent(NS_LITERAL_STRING("loadedmetadata")); if (mDecoder && mDecoder->IsTransportSeekable() && mDecoder->IsMediaSeekable()) { ProcessMediaFragmentURI(); mDecoder->SetFragmentEndTime(mFragmentEnd); } if (mIsEncrypted) { if (!mMediaSource && Preferences::GetBool("media.eme.mse-only", true)) { - DecodeError(); + DecodeError(NS_ERROR_DOM_MEDIA_FATAL_ERR); return; } #ifdef MOZ_EME // Dispatch a distinct 'encrypted' event for each initData we have. for (const auto& initData : mPendingEncryptedInitData.mInitDatas) { DispatchEncrypted(initData.mInitData, initData.mType); } @@ -4411,17 +4411,17 @@ void HTMLMediaElement::FirstFrameLoaded( void HTMLMediaElement::NetworkError() { if (mDecoder) { ShutdownDecoder(); } Error(nsIDOMMediaError::MEDIA_ERR_NETWORK); } -void HTMLMediaElement::DecodeError() +void HTMLMediaElement::DecodeError(const MediaResult& aError) { nsAutoString src; GetCurrentSrc(src); const char16_t* params[] = { src.get() }; ReportLoadError("MediaLoadDecodeError", params, ArrayLength(params)); if (mDecoder) { ShutdownDecoder(); @@ -4435,45 +4435,50 @@ void HTMLMediaElement::DecodeError() mError = nullptr; if (mSourceLoadCandidate) { DispatchAsyncSourceError(mSourceLoadCandidate); QueueLoadFromSourceTask(); } else { NS_WARNING("Should know the source we were loading from!"); } } else { - Error(nsIDOMMediaError::MEDIA_ERR_DECODE); + Error(nsIDOMMediaError::MEDIA_ERR_DECODE, aError); } } bool HTMLMediaElement::HasError() const { return GetError(); } void HTMLMediaElement::LoadAborted() { Error(nsIDOMMediaError::MEDIA_ERR_ABORTED); } -void HTMLMediaElement::Error(uint16_t aErrorCode) +void HTMLMediaElement::Error(uint16_t aErrorCode, + const MediaResult& aErrorDetails) { NS_ASSERTION(aErrorCode == nsIDOMMediaError::MEDIA_ERR_DECODE || aErrorCode == nsIDOMMediaError::MEDIA_ERR_NETWORK || aErrorCode == nsIDOMMediaError::MEDIA_ERR_ABORTED, "Only use nsIDOMMediaError codes!"); // Since we have multiple paths calling into DecodeError, e.g. // MediaKeys::Terminated and EMEH264Decoder::Error. We should take the 1st // one only in order not to fire multiple 'error' events. if (mError) { return; } - - mError = new MediaError(this, aErrorCode); + nsCString message; + if (NS_FAILED(aErrorDetails)) { + message = aErrorDetails.Description(); + } + mError = new MediaError(this, aErrorCode, message); + DispatchAsyncEvent(NS_LITERAL_STRING("error")); if (mReadyState == nsIDOMHTMLMediaElement::HAVE_NOTHING) { ChangeNetworkState(nsIDOMHTMLMediaElement::NETWORK_EMPTY); DispatchAsyncEvent(NS_LITERAL_STRING("emptied")); } else { ChangeNetworkState(nsIDOMHTMLMediaElement::NETWORK_IDLE); } ChangeDelayLoadStatus(false);
--- a/dom/html/HTMLMediaElement.h +++ b/dom/html/HTMLMediaElement.h @@ -174,17 +174,17 @@ public: virtual void FirstFrameLoaded() final override; // Called by the video decoder object, on the main thread, // when the resource has a network error during loading. virtual void NetworkError() final override; // Called by the video decoder object, on the main thread, when the // resource has a decode error during metadata loading or decoding. - virtual void DecodeError() final override; + virtual void DecodeError(const MediaResult& aError) final override; // Return true if error attribute is not null. virtual bool HasError() const final override; // Called by the video decoder object, on the main thread, when the // resource load has been cancelled. virtual void LoadAborted() final override; @@ -1111,17 +1111,17 @@ protected: * Dispatches an error event to a child source element. */ void DispatchAsyncSourceError(nsIContent* aSourceElement); /** * Resets the media element for an error condition as per aErrorCode. * aErrorCode must be one of nsIDOMHTMLMediaError codes. */ - void Error(uint16_t aErrorCode); + void Error(uint16_t aErrorCode, const MediaResult& aErrorDetails = NS_OK); /** * Returns the URL spec of the currentSrc. **/ void GetCurrentSpec(nsCString& aString); /** * Process any media fragment entries in the URI
--- a/dom/html/MediaError.cpp +++ b/dom/html/MediaError.cpp @@ -16,30 +16,38 @@ NS_IMPL_CYCLE_COLLECTING_ADDREF(MediaErr NS_IMPL_CYCLE_COLLECTING_RELEASE(MediaError) NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(MediaError) NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY NS_INTERFACE_MAP_ENTRY(nsIDOMMediaError) NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsIDOMMediaError) NS_INTERFACE_MAP_END -MediaError::MediaError(HTMLMediaElement* aParent, uint16_t aCode) +MediaError::MediaError(HTMLMediaElement* aParent, uint16_t aCode, + const nsACString& aMessage) : mParent(aParent) , mCode(aCode) + , mMessage(aMessage) { } NS_IMETHODIMP MediaError::GetCode(uint16_t* aCode) { if (aCode) *aCode = Code(); return NS_OK; } +NS_IMETHODIMP MediaError::GetMessage(nsAString& aResult) +{ + CopyUTF8toUTF16(mMessage, aResult); + return NS_OK; +} + JSObject* MediaError::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) { return MediaErrorBinding::Wrap(aCx, this, aGivenProto); } } // namespace dom } // namespace mozilla
--- a/dom/html/MediaError.h +++ b/dom/html/MediaError.h @@ -17,17 +17,18 @@ namespace mozilla { namespace dom { class MediaError final : public nsIDOMMediaError, public nsWrapperCache { ~MediaError() {} public: - MediaError(HTMLMediaElement* aParent, uint16_t aCode); + MediaError(HTMLMediaElement* aParent, uint16_t aCode, + const nsACString& aMessage = nsCString()); // nsISupports NS_DECL_CYCLE_COLLECTING_ISUPPORTS NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_CLASS(MediaError) // nsIDOMMediaError NS_DECL_NSIDOMMEDIAERROR @@ -43,14 +44,16 @@ public: return mCode; } private: RefPtr<HTMLMediaElement> mParent; // Error code const uint16_t mCode; + // Error details; + const nsCString mMessage; }; } // namespace dom } // namespace mozilla #endif // mozilla_dom_MediaError_h
--- a/dom/interfaces/html/nsIDOMMediaError.idl +++ b/dom/interfaces/html/nsIDOMMediaError.idl @@ -7,21 +7,23 @@ [uuid(7bd8c29f-8a76-453f-9373-79f820f2dc01)] interface nsIDOMMediaError : nsISupports { /* The download of the media resource was aborted by the user agent at the user's request */ const unsigned short MEDIA_ERR_ABORTED = 1; - /* A network error of some description caused the + /* A network error of some description caused the user agent to stop downloading the media resource */ const unsigned short MEDIA_ERR_NETWORK = 2; - /* An error of some description occurred while decoding + /* An error of some description occurred while decoding the media resource */ const unsigned short MEDIA_ERR_DECODE = 3; /* No suitable media resource could be found */ const unsigned short MEDIA_ERR_SRC_NOT_SUPPORTED = 4; readonly attribute unsigned short code; + + readonly attribute DOMString message; };
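The new readonly message attribute exposes the description carried by the MediaResult that HTMLMediaElement::Error() now receives. A hedged sketch of how page script could observe it (the element id is made up for illustration):

// Assumes a <video id="v" src="..."> element in the document.
var video = document.getElementById("v");
video.addEventListener("error", function () {
  // video.error is a MediaError: code is one of the MEDIA_ERR_* constants,
  // message carries the (possibly empty) decoder-provided description.
  console.log("media error", video.error.code, video.error.message);
});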
--- a/dom/ipc/ContentChild.cpp +++ b/dom/ipc/ContentChild.cpp @@ -1307,24 +1307,41 @@ StartMacOSContentSandbox() } nsAutoCString tempDirPath; rv = tempDir->GetNativePath(tempDirPath); if (NS_FAILED(rv)) { MOZ_CRASH("Failed to get NS_OS_TEMP_DIR path"); } + nsCOMPtr<nsIFile> profileDir; + ContentChild::GetSingleton()->GetProfileDir(getter_AddRefs(profileDir)); + nsCString profileDirPath; + if (profileDir) { + rv = profileDir->GetNativePath(profileDirPath); + if (NS_FAILED(rv) || profileDirPath.IsEmpty()) { + MOZ_CRASH("Failed to get profile path"); + } + } + MacSandboxInfo info; info.type = MacSandboxType_Content; info.level = info.level = sandboxLevel; info.appPath.assign(appPath.get()); info.appBinaryPath.assign(appBinaryPath.get()); info.appDir.assign(appDir.get()); info.appTempDir.assign(tempDirPath.get()); + if (profileDir) { + info.hasSandboxedProfile = true; + info.profileDir.assign(profileDirPath.get()); + } else { + info.hasSandboxedProfile = false; + } + std::string err; if (!mozilla::StartMacSandbox(info, err)) { NS_WARNING(err.c_str()); MOZ_CRASH("sandbox_init() failed"); } return true; }
--- a/dom/ipc/ContentChild.h +++ b/dom/ipc/ContentChild.h @@ -16,16 +16,19 @@ #include "nsHashKeys.h" #include "nsIObserver.h" #include "nsTHashtable.h" #include "nsRefPtrHashtable.h" #include "nsWeakPtr.h" #include "nsIWindowProvider.h" +#if defined(XP_MACOSX) && defined(MOZ_CONTENT_SANDBOX) +#include "nsIFile.h" +#endif struct ChromePackage; class nsIObserver; struct SubstitutionMapping; struct OverrideMapping; class nsIDomainPolicy; namespace mozilla { @@ -109,16 +112,29 @@ public: } void SetProcessName(const nsAString& aName, bool aDontOverride = false); void GetProcessName(nsAString& aName) const; void GetProcessName(nsACString& aName) const; +#if defined(XP_MACOSX) && defined(MOZ_CONTENT_SANDBOX) + void GetProfileDir(nsIFile** aProfileDir) const + { + *aProfileDir = mProfileDir; + NS_IF_ADDREF(*aProfileDir); + } + + void SetProfileDir(nsIFile* aProfileDir) + { + mProfileDir = aProfileDir; + } +#endif + bool IsAlive() const; bool IsShuttingDown() const; static void AppendProcessId(nsACString& aName); ContentBridgeParent* GetLastBridge() { @@ -676,16 +692,20 @@ private: bool mIsAlive; nsString mProcessName; static ContentChild* sSingleton; nsCOMPtr<nsIDomainPolicy> mPolicy; nsCOMPtr<nsITimer> mForceKillTimer; +#if defined(XP_MACOSX) && defined(MOZ_CONTENT_SANDBOX) + nsCOMPtr<nsIFile> mProfileDir; +#endif + // Hashtable to keep track of the pending GetFilesHelper objects. // This GetFilesHelperChild objects are removed when RecvGetFilesResponse is // received. nsRefPtrHashtable<nsIDHashKey, GetFilesHelperChild> mGetFilesPendingRequests; bool mShuttingDown; DISALLOW_EVIL_CONSTRUCTORS(ContentChild);
--- a/dom/ipc/ContentProcess.cpp +++ b/dom/ipc/ContentProcess.cpp @@ -109,26 +109,45 @@ SetUpSandboxEnvironment() #endif void ContentProcess::SetAppDir(const nsACString& aPath) { mXREEmbed.SetAppDir(aPath); } +#if defined(XP_MACOSX) && defined(MOZ_CONTENT_SANDBOX) +void +ContentProcess::SetProfile(const nsACString& aProfile) +{ + bool flag; + nsresult rv = + XRE_GetFileFromPath(aProfile.BeginReading(), getter_AddRefs(mProfileDir)); + if (NS_FAILED(rv) || + NS_FAILED(mProfileDir->Exists(&flag)) || !flag) { + NS_WARNING("Invalid profile directory passed to content process."); + mProfileDir = nullptr; + } +} +#endif + bool ContentProcess::Init() { mContent.Init(IOThreadChild::message_loop(), ParentPid(), IOThreadChild::channel()); mXREEmbed.Start(); mContent.InitXPCOM(); mContent.InitGraphicsDeviceData(); +#if (defined(XP_MACOSX)) && defined(MOZ_CONTENT_SANDBOX) + mContent.SetProfileDir(mProfileDir); +#endif + #if (defined(XP_WIN) || defined(XP_MACOSX)) && defined(MOZ_CONTENT_SANDBOX) SetUpSandboxEnvironment(); #endif return true; } // Note: CleanUp() never gets called in non-debug builds because we exit early
--- a/dom/ipc/ContentProcess.h +++ b/dom/ipc/ContentProcess.h @@ -34,19 +34,28 @@ public: ~ContentProcess() { } virtual bool Init() override; virtual void CleanUp() override; void SetAppDir(const nsACString& aPath); +#if defined(XP_MACOSX) && defined(MOZ_CONTENT_SANDBOX) + void SetProfile(const nsACString& aProfile); +#endif + private: ContentChild mContent; mozilla::ipc::ScopedXREEmbed mXREEmbed; + +#if defined(XP_MACOSX) && defined(MOZ_CONTENT_SANDBOX) + nsCOMPtr<nsIFile> mProfileDir; +#endif + #if defined(XP_WIN) // This object initializes and configures COM. mozilla::mscom::MainThreadRuntime mCOMRuntime; #endif DISALLOW_EVIL_CONSTRUCTORS(ContentProcess); };
--- a/dom/media/ADTSDemuxer.cpp +++ b/dom/media/ADTSDemuxer.cpp @@ -313,17 +313,17 @@ ADTSDemuxer::InitInternal() RefPtr<ADTSDemuxer::InitPromise> ADTSDemuxer::Init() { if (!InitInternal()) { ADTSLOG("Init() failure: waiting for data"); return InitPromise::CreateAndReject( - DemuxerFailureReason::DEMUXER_ERROR, __func__); + NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); } ADTSLOG("Init() successful"); return InitPromise::CreateAndResolve(NS_OK, __func__); } bool ADTSDemuxer::HasTrackType(TrackInfo::TrackType aType) const @@ -510,20 +510,17 @@ RefPtr<ADTSTrackDemuxer::SamplesPromise> ADTSTrackDemuxer::GetSamples(int32_t aNumSamples) { ADTSLOGV("GetSamples(%d) Begin mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64 " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64 " mSamplesPerFrame=%d " "mSamplesPerSecond=%d mChannels=%d", aNumSamples, mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen, mSamplesPerFrame, mSamplesPerSecond, mChannels); - if (!aNumSamples) { - return SamplesPromise::CreateAndReject( - DemuxerFailureReason::DEMUXER_ERROR, __func__); - } + MOZ_ASSERT(aNumSamples); RefPtr<SamplesHolder> frames = new SamplesHolder(); while (aNumSamples--) { RefPtr<MediaRawData> frame(GetNextFrame(FindNextFrame())); if (!frame) break; @@ -535,17 +532,17 @@ ADTSTrackDemuxer::GetSamples(int32_t aNu " mTotalFrameLen=%" PRIu64 " mSamplesPerFrame=%d mSamplesPerSecond=%d " "mChannels=%d", frames->mSamples.Length(), aNumSamples, mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen, mSamplesPerFrame, mSamplesPerSecond, mChannels); if (frames->mSamples.IsEmpty()) { return SamplesPromise::CreateAndReject( - DemuxerFailureReason::END_OF_STREAM, __func__); + NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); } return SamplesPromise::CreateAndResolve(frames, __func__); } void ADTSTrackDemuxer::Reset() { @@ -557,17 +554,17 @@ ADTSTrackDemuxer::Reset() FastSeek(media::TimeUnit()); } RefPtr<ADTSTrackDemuxer::SkipAccessPointPromise> ADTSTrackDemuxer::SkipToNextRandomAccessPoint(media::TimeUnit aTimeThreshold) { // Will not be called for audio-only resources. return SkipAccessPointPromise::CreateAndReject( - SkipFailureHolder(DemuxerFailureReason::DEMUXER_ERROR, 0), __func__); + SkipFailureHolder(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, 0), __func__); } int64_t ADTSTrackDemuxer::GetResourceOffset() const { return mOffset; }
--- a/dom/media/AccurateSeekTask.cpp +++ b/dom/media/AccurateSeekTask.cpp @@ -56,17 +56,17 @@ AccurateSeekTask::~AccurateSeekTask() } void AccurateSeekTask::Discard() { AssertOwnerThread(); // Disconnect MDSM. - RejectIfExist(__func__); + RejectIfExist(NS_ERROR_DOM_MEDIA_CANCELED, __func__); // Disconnect MediaDecoderReaderWrapper. mSeekRequest.DisconnectIfExists(); CancelCallbacks(); mIsDiscarded = true; } @@ -115,17 +115,17 @@ AccurateSeekTask::DropAudioUpToSeekTarge { AssertOwnerThread(); RefPtr<AudioData> audio(aSample->As<AudioData>()); MOZ_ASSERT(audio && mTarget.IsAccurate()); CheckedInt64 sampleDuration = FramesToUsecs(audio->mFrames, mAudioRate); if (!sampleDuration.isValid()) { - return NS_ERROR_FAILURE; + return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; } if (audio->mTime + sampleDuration.value() <= mTarget.GetTime().ToMicroseconds()) { // Our seek target lies after the frames in this AudioData. Don't // push it onto the audio queue, and keep decoding forwards. return NS_OK; } @@ -149,17 +149,17 @@ AccurateSeekTask::DropAudioUpToSeekTarge NS_ASSERTION(mTarget.GetTime().ToMicroseconds() >= audio->mTime, "Target must at or be after data start."); NS_ASSERTION(mTarget.GetTime().ToMicroseconds() < audio->mTime + sampleDuration.value(), "Data must end after target."); CheckedInt64 framesToPrune = UsecsToFrames(mTarget.GetTime().ToMicroseconds() - audio->mTime, mAudioRate); if (!framesToPrune.isValid()) { - return NS_ERROR_FAILURE; + return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; } if (framesToPrune.value() > audio->mFrames) { // We've messed up somehow. Don't try to trim frames, the |frames| // variable below will overflow. DECODER_WARN("Can't prune more frames that we have!"); return NS_ERROR_FAILURE; } uint32_t frames = audio->mFrames - static_cast<uint32_t>(framesToPrune.value()); @@ -169,17 +169,17 @@ AccurateSeekTask::DropAudioUpToSeekTarge return NS_ERROR_OUT_OF_MEMORY; } memcpy(audioData.get(), audio->mAudioData.get() + (framesToPrune.value() * channels), frames * channels * sizeof(AudioDataValue)); CheckedInt64 duration = FramesToUsecs(frames, mAudioRate); if (!duration.isValid()) { - return NS_ERROR_FAILURE; + return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; } RefPtr<AudioData> data(new AudioData(audio->mOffset, mTarget.GetTime().ToMicroseconds(), duration.value(), frames, Move(audioData), channels, audio->mRate)); @@ -255,17 +255,17 @@ AccurateSeekTask::OnSeekResolved(media:: void AccurateSeekTask::OnSeekRejected(nsresult aResult) { AssertOwnerThread(); mSeekRequest.Complete(); MOZ_ASSERT(NS_FAILED(aResult), "Cancels should also disconnect mSeekRequest"); - RejectIfExist(__func__); + RejectIfExist(aResult, __func__); } void AccurateSeekTask::AdjustFastSeekIfNeeded(MediaData* aSample) { AssertOwnerThread(); if (mTarget.IsFast() && mTarget.GetTime() > mCurrentTimeBeforeSeek && @@ -303,81 +303,82 @@ AccurateSeekTask::OnAudioDecoded(MediaDa } AdjustFastSeekIfNeeded(audio); if (mTarget.IsFast()) { // Non-precise seek; we can stop the seek at the first sample. 
mSeekedAudioData = audio; mDoneAudioSeeking = true; - } else if (NS_FAILED(DropAudioUpToSeekTarget(audio))) { - CancelCallbacks(); - RejectIfExist(__func__); - return; + } else { + nsresult rv = DropAudioUpToSeekTarget(audio); + if (NS_FAILED(rv)) { + CancelCallbacks(); + RejectIfExist(rv, __func__); + return; + } } if (!mDoneAudioSeeking) { RequestAudioData(); return; } MaybeFinishSeek(); } void AccurateSeekTask::OnNotDecoded(MediaData::Type aType, - MediaDecoderReader::NotDecodedReason aReason) + const MediaResult& aError) { AssertOwnerThread(); MOZ_ASSERT(!mSeekTaskPromise.IsEmpty(), "Seek shouldn't be finished"); - SAMPLE_LOG("OnNotDecoded type=%d reason=%u", aType, aReason); + SAMPLE_LOG("OnNotDecoded type=%d reason=%u", aType, aError.Code()); // Ignore pending requests from video-only seek. if (aType == MediaData::AUDIO_DATA && mTarget.IsVideoOnly()) { return; } - if (aReason == MediaDecoderReader::DECODE_ERROR) { - // If this is a decode error, delegate to the generic error path. - CancelCallbacks(); - RejectIfExist(__func__); - return; - } - // If the decoder is waiting for data, we tell it to call us back when the // data arrives. - if (aReason == MediaDecoderReader::WAITING_FOR_DATA) { + if (aError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) { mReader->WaitForData(aType); return; } - if (aReason == MediaDecoderReader::CANCELED) { + if (aError == NS_ERROR_DOM_MEDIA_CANCELED) { if (aType == MediaData::AUDIO_DATA) { RequestAudioData(); } else { RequestVideoData(); } return; } - if (aReason == MediaDecoderReader::END_OF_STREAM) { + if (aError == NS_ERROR_DOM_MEDIA_END_OF_STREAM) { if (aType == MediaData::AUDIO_DATA) { mIsAudioQueueFinished = true; mDoneAudioSeeking = true; } else { mIsVideoQueueFinished = true; mDoneVideoSeeking = true; if (mFirstVideoFrameAfterSeek) { // Hit the end of stream. Move mFirstVideoFrameAfterSeek into // mSeekedVideoData so we have something to display after seeking. mSeekedVideoData = mFirstVideoFrameAfterSeek.forget(); } } MaybeFinishSeek(); + return; } + + // This is a decode error, delegate to the generic error path. + CancelCallbacks(); + RejectIfExist(aError, __func__); } void AccurateSeekTask::OnVideoDecoded(MediaData* aVideoSample) { AssertOwnerThread(); MOZ_ASSERT(!mSeekTaskPromise.IsEmpty(), "Seek shouldn't be finished"); @@ -390,20 +391,23 @@ AccurateSeekTask::OnVideoDecoded(MediaDa SAMPLE_LOG("OnVideoDecoded [%lld,%lld]", video->mTime, video->GetEndTime()); AdjustFastSeekIfNeeded(video); if (mTarget.IsFast()) { // Non-precise seek. We can stop the seek at the first sample. 
mSeekedVideoData = video; mDoneVideoSeeking = true; - } else if (NS_FAILED(DropVideoUpToSeekTarget(video.get()))) { - CancelCallbacks(); - RejectIfExist(__func__); - return; + } else { + nsresult rv = DropVideoUpToSeekTarget(video.get()); + if (NS_FAILED(rv)) { + CancelCallbacks(); + RejectIfExist(rv, __func__); + return; + } } if (!mDoneVideoSeeking) { RequestVideoData(); return; } MaybeFinishSeek(); } @@ -414,28 +418,28 @@ AccurateSeekTask::SetCallbacks() AssertOwnerThread(); mAudioCallback = mReader->AudioCallback().Connect( OwnerThread(), [this] (AudioCallbackData aData) { if (aData.is<MediaData*>()) { OnAudioDecoded(aData.as<MediaData*>()); } else { OnNotDecoded(MediaData::AUDIO_DATA, - aData.as<MediaDecoderReader::NotDecodedReason>()); + aData.as<MediaResult>()); } }); mVideoCallback = mReader->VideoCallback().Connect( OwnerThread(), [this] (VideoCallbackData aData) { typedef Tuple<MediaData*, TimeStamp> Type; if (aData.is<Type>()) { OnVideoDecoded(Get<0>(aData.as<Type>())); } else { OnNotDecoded(MediaData::VIDEO_DATA, - aData.as<MediaDecoderReader::NotDecodedReason>()); + aData.as<MediaResult>()); } }); mAudioWaitCallback = mReader->AudioWaitCallback().Connect( OwnerThread(), [this] (WaitCallbackData aData) { // Ignore pending requests from video-only seek. if (mTarget.IsVideoOnly()) { return;
--- a/dom/media/AccurateSeekTask.h +++ b/dom/media/AccurateSeekTask.h @@ -45,17 +45,17 @@ private: void OnSeekResolved(media::TimeUnit); void OnSeekRejected(nsresult aResult); void OnAudioDecoded(MediaData* aAudioSample); void OnVideoDecoded(MediaData* aVideoSample); - void OnNotDecoded(MediaData::Type, MediaDecoderReader::NotDecodedReason); + void OnNotDecoded(MediaData::Type, const MediaResult&); void SetCallbacks(); void CancelCallbacks(); void AdjustFastSeekIfNeeded(MediaData* aSample); /*
--- a/dom/media/Benchmark.cpp +++ b/dom/media/Benchmark.cpp @@ -164,17 +164,17 @@ BenchmarkPlayback::DemuxSamples() mTrackDemuxer = mDemuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0); if (!mTrackDemuxer) { MainThreadShutdown(); return; } DemuxNextSample(); }, - [this, ref](DemuxerFailureReason aReason) { MainThreadShutdown(); }); + [this, ref](const MediaResult& aError) { MainThreadShutdown(); }); } void BenchmarkPlayback::DemuxNextSample() { MOZ_ASSERT(OnThread()); RefPtr<Benchmark> ref(mMainThreadState); @@ -185,19 +185,19 @@ BenchmarkPlayback::DemuxNextSample() mSamples.AppendElements(Move(aHolder->mSamples)); if (ref->mParameters.mStopAtFrame && mSamples.Length() == (size_t)ref->mParameters.mStopAtFrame.ref()) { InitDecoder(Move(*mTrackDemuxer->GetInfo())); } else { Dispatch(NS_NewRunnableFunction([this, ref]() { DemuxNextSample(); })); } }, - [this, ref](DemuxerFailureReason aReason) { - switch (aReason) { - case DemuxerFailureReason::END_OF_STREAM: + [this, ref](const MediaResult& aError) { + switch (aError.Code()) { + case NS_ERROR_DOM_MEDIA_END_OF_STREAM: InitDecoder(Move(*mTrackDemuxer->GetInfo())); break; default: MainThreadShutdown(); } }); } @@ -213,17 +213,17 @@ BenchmarkPlayback::InitDecoder(TrackInfo return; } RefPtr<Benchmark> ref(mMainThreadState); mDecoder->Init()->Then( Thread(), __func__, [this, ref](TrackInfo::TrackType aTrackType) { InputExhausted(); }, - [this, ref](MediaDataDecoder::DecoderFailureReason aReason) { + [this, ref](MediaResult aError) { MainThreadShutdown(); }); } void BenchmarkPlayback::MainThreadShutdown() { MOZ_ASSERT(OnThread()); @@ -276,17 +276,17 @@ BenchmarkPlayback::Output(MediaData* aDa ref->Dispatch(NS_NewRunnableFunction([ref, decodeFps]() { ref->ReturnResult(decodeFps); })); } })); } void -BenchmarkPlayback::Error(MediaDataDecoderError aError) +BenchmarkPlayback::Error(const MediaResult& aError) { RefPtr<Benchmark> ref(mMainThreadState); Dispatch(NS_NewRunnableFunction([this, ref]() { MainThreadShutdown(); })); } void BenchmarkPlayback::InputExhausted() {
--- a/dom/media/Benchmark.h +++ b/dom/media/Benchmark.h @@ -27,17 +27,17 @@ class BenchmarkPlayback : public QueueOb void DemuxSamples(); void DemuxNextSample(); void MainThreadShutdown(); void InitDecoder(TrackInfo&& aInfo); // MediaDataDecoderCallback // Those methods are called on the MediaDataDecoder's task queue. void Output(MediaData* aData) override; - void Error(MediaDataDecoderError aError) override; + void Error(const MediaResult& aError) override; void InputExhausted() override; void DrainComplete() override; bool OnReaderTaskQueue() override; Atomic<Benchmark*> mMainThreadState; RefPtr<TaskQueue> mDecoderTaskQueue; RefPtr<MediaDataDecoder> mDecoder;
--- a/dom/media/MP3Demuxer.cpp +++ b/dom/media/MP3Demuxer.cpp @@ -50,17 +50,17 @@ MP3Demuxer::InitInternal() { } RefPtr<MP3Demuxer::InitPromise> MP3Demuxer::Init() { if (!InitInternal()) { MP3LOG("MP3Demuxer::Init() failure: waiting for data"); return InitPromise::CreateAndReject( - DemuxerFailureReason::DEMUXER_ERROR, __func__); + NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); } MP3LOG("MP3Demuxer::Init() successful"); return InitPromise::CreateAndResolve(NS_OK, __func__); } bool MP3Demuxer::HasTrackType(TrackInfo::TrackType aType) const { @@ -271,17 +271,17 @@ MP3TrackDemuxer::GetSamples(int32_t aNum MP3LOGV("GetSamples(%d) Begin mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64 " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64 " mSamplesPerFrame=%d " "mSamplesPerSecond=%d mChannels=%d", aNumSamples, mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen, mSamplesPerFrame, mSamplesPerSecond, mChannels); if (!aNumSamples) { return SamplesPromise::CreateAndReject( - DemuxerFailureReason::DEMUXER_ERROR, __func__); + NS_ERROR_DOM_MEDIA_DEMUXER_ERR, __func__); } RefPtr<SamplesHolder> frames = new SamplesHolder(); while (aNumSamples--) { RefPtr<MediaRawData> frame(GetNextFrame(FindNextFrame())); if (!frame) { break; @@ -295,34 +295,34 @@ MP3TrackDemuxer::GetSamples(int32_t aNum " mTotalFrameLen=%" PRIu64 " mSamplesPerFrame=%d mSamplesPerSecond=%d " "mChannels=%d", frames->mSamples.Length(), aNumSamples, mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen, mSamplesPerFrame, mSamplesPerSecond, mChannels); if (frames->mSamples.IsEmpty()) { return SamplesPromise::CreateAndReject( - DemuxerFailureReason::END_OF_STREAM, __func__); + NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); } return SamplesPromise::CreateAndResolve(frames, __func__); } void MP3TrackDemuxer::Reset() { MP3LOG("Reset()"); FastSeek(TimeUnit()); mParser.Reset(); } RefPtr<MP3TrackDemuxer::SkipAccessPointPromise> MP3TrackDemuxer::SkipToNextRandomAccessPoint(TimeUnit aTimeThreshold) { // Will not be called for audio-only resources. return SkipAccessPointPromise::CreateAndReject( - SkipFailureHolder(DemuxerFailureReason::DEMUXER_ERROR, 0), __func__); + SkipFailureHolder(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, 0), __func__); } int64_t MP3TrackDemuxer::GetResourceOffset() const { return mOffset; } TimeIntervals
--- a/dom/media/MediaDataDemuxer.h +++ b/dom/media/MediaDataDemuxer.h @@ -7,46 +7,37 @@ #if !defined(MediaDataDemuxer_h) #define MediaDataDemuxer_h #include "mozilla/MozPromise.h" #include "mozilla/UniquePtr.h" #include "MediaData.h" #include "MediaInfo.h" +#include "MediaResult.h" #include "TimeUnits.h" #include "nsISupportsImpl.h" #include "mozilla/RefPtr.h" #include "nsTArray.h" namespace mozilla { class MediaTrackDemuxer; class TrackMetadataHolder; -enum class DemuxerFailureReason : int8_t -{ - WAITING_FOR_DATA, - END_OF_STREAM, - DEMUXER_ERROR, - CANCELED, - SHUTDOWN, -}; - - // Allows reading the media data: to retrieve the metadata and demux samples. // MediaDataDemuxer isn't designed to be thread safe. // When used by the MediaFormatDecoder, care is taken to ensure that the demuxer // will never be called from more than one thread at once. class MediaDataDemuxer { public: NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDataDemuxer) - typedef MozPromise<nsresult, DemuxerFailureReason, /* IsExclusive = */ true> InitPromise; + typedef MozPromise<nsresult, MediaResult, /* IsExclusive = */ true> InitPromise; // Initializes the demuxer. Other methods cannot be called unless // initialization has completed and succeeded. // Typically a demuxer will wait to parse the metadata before resolving the // promise. The promise must not be resolved until sufficient data is // supplied. For example, an incomplete metadata would cause the promise to be // rejected should no more data be coming, while the demuxer would wait // otherwise. @@ -115,26 +106,26 @@ public: NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SamplesHolder) nsTArray<RefPtr<MediaRawData>> mSamples; private: ~SamplesHolder() {} }; class SkipFailureHolder { public: - SkipFailureHolder(DemuxerFailureReason aFailure, uint32_t aSkipped) + SkipFailureHolder(const MediaResult& aFailure, uint32_t aSkipped) : mFailure(aFailure) , mSkipped(aSkipped) {} - DemuxerFailureReason mFailure; + MediaResult mFailure; uint32_t mSkipped; }; - typedef MozPromise<media::TimeUnit, DemuxerFailureReason, /* IsExclusive = */ true> SeekPromise; - typedef MozPromise<RefPtr<SamplesHolder>, DemuxerFailureReason, /* IsExclusive = */ true> SamplesPromise; + typedef MozPromise<media::TimeUnit, MediaResult, /* IsExclusive = */ true> SeekPromise; + typedef MozPromise<RefPtr<SamplesHolder>, MediaResult, /* IsExclusive = */ true> SamplesPromise; typedef MozPromise<uint32_t, SkipFailureHolder, /* IsExclusive = */ true> SkipAccessPointPromise; // Returns the TrackInfo (a.k.a Track Description) for this track. // The TrackInfo returned will be: // TrackInfo::kVideoTrack -> VideoInfo. // TrackInfo::kAudioTrack -> AudioInfo. // respectively. virtual UniquePtr<TrackInfo> GetInfo() const = 0;
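The hunk above removes the DemuxerFailureReason enum and makes every demuxer promise reject with a MediaResult, so a failure now carries a concrete NS_ERROR_DOM_MEDIA_* code (and, optionally, a message) instead of one of five fixed reasons. The following sketch shows what a track demuxer's GetSamples() rejection path looks like under the new typedefs; ExampleTrackDemuxer and its demux loop are hypothetical, only the SamplesPromise typedef, MediaResult, and the error codes come from this changeset.

// Sketch only: a MediaTrackDemuxer subclass rejecting its SamplesPromise
// with MediaResult codes after this change.
RefPtr<MediaTrackDemuxer::SamplesPromise>
ExampleTrackDemuxer::GetSamples(int32_t aNumSamples)
{
  if (!aNumSamples) {
    // API misuse is reported as a demuxer error, with a diagnostic message.
    return SamplesPromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, "zero samples requested"),
      __func__);
  }

  RefPtr<SamplesHolder> samples = new SamplesHolder();
  // ... demux up to aNumSamples frames into samples->mSamples ...

  if (samples->mSamples.IsEmpty()) {
    // Running out of frames is just another MediaResult code now.
    return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM,
                                           __func__);
  }
  return SamplesPromise::CreateAndResolve(samples, __func__);
}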
--- a/dom/media/MediaDecoder.cpp +++ b/dom/media/MediaDecoder.cpp @@ -193,17 +193,17 @@ MediaDecoder::ResourceCallback::NotifyNe } void MediaDecoder::ResourceCallback::NotifyDecodeError() { RefPtr<ResourceCallback> self = this; nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([=] () { if (self->mDecoder) { - self->mDecoder->DecodeError(); + self->mDecoder->DecodeError(NS_ERROR_DOM_MEDIA_FATAL_ERR); } }); AbstractThread::MainThread()->Dispatch(r.forget()); } /* static */ void MediaDecoder::ResourceCallback::TimerCallback(nsITimer* aTimer, void* aClosure) { @@ -602,16 +602,17 @@ MediaDecoder::Shutdown() // This changes the decoder state to SHUTDOWN and does other things // necessary to unblock the state machine thread if it's blocked, so // the asynchronous shutdown in nsDestroyStateMachine won't deadlock. if (mDecoderStateMachine) { mTimedMetadataListener.Disconnect(); mMetadataLoadedListener.Disconnect(); mFirstFrameLoadedListener.Disconnect(); mOnPlaybackEvent.Disconnect(); + mOnPlaybackErrorEvent.Disconnect(); mOnMediaNotSeekable.Disconnect(); mDecoderStateMachine->BeginShutdown() ->Then(AbstractThread::MainThread(), __func__, this, &MediaDecoder::FinishShutdown, &MediaDecoder::FinishShutdown); } else { // Ensure we always unregister asynchronously in order not to disrupt @@ -656,32 +657,35 @@ MediaDecoder::OnPlaybackEvent(MediaEvent ComputePlaybackRate(); break; case MediaEventType::PlaybackEnded: PlaybackEnded(); break; case MediaEventType::SeekStarted: SeekingStarted(); break; - case MediaEventType::DecodeError: - DecodeError(); - break; case MediaEventType::Invalidate: Invalidate(); break; case MediaEventType::EnterVideoSuspend: mOwner->DispatchAsyncEvent(NS_LITERAL_STRING("mozentervideosuspend")); break; case MediaEventType::ExitVideoSuspend: mOwner->DispatchAsyncEvent(NS_LITERAL_STRING("mozexitvideosuspend")); break; } } void +MediaDecoder::OnPlaybackErrorEvent(const MediaResult& aError) +{ + DecodeError(aError); +} + +void MediaDecoder::FinishShutdown() { MOZ_ASSERT(NS_IsMainThread()); mDecoderStateMachine->BreakCycles(); SetStateMachine(nullptr); mVideoFrameContainer = nullptr; MediaShutdownManager::Instance().Unregister(this); } @@ -744,16 +748,18 @@ MediaDecoder::SetStateMachineParameters( AbstractThread::MainThread(), this, &MediaDecoder::OnMetadataUpdate); mMetadataLoadedListener = mDecoderStateMachine->MetadataLoadedEvent().Connect( AbstractThread::MainThread(), this, &MediaDecoder::MetadataLoaded); mFirstFrameLoadedListener = mDecoderStateMachine->FirstFrameLoadedEvent().Connect( AbstractThread::MainThread(), this, &MediaDecoder::FirstFrameLoaded); mOnPlaybackEvent = mDecoderStateMachine->OnPlaybackEvent().Connect( AbstractThread::MainThread(), this, &MediaDecoder::OnPlaybackEvent); + mOnPlaybackErrorEvent = mDecoderStateMachine->OnPlaybackErrorEvent().Connect( + AbstractThread::MainThread(), this, &MediaDecoder::OnPlaybackErrorEvent); mOnMediaNotSeekable = mDecoderStateMachine->OnMediaNotSeekable().Connect( AbstractThread::MainThread(), this, &MediaDecoder::OnMediaNotSeekable); } void MediaDecoder::SetMinimizePrerollUntilPlaybackStarts() { MOZ_ASSERT(NS_IsMainThread()); @@ -1001,21 +1007,21 @@ MediaDecoder::NetworkError() { MOZ_ASSERT(NS_IsMainThread()); MOZ_ASSERT(!IsShutdown()); mOwner->NetworkError(); MOZ_ASSERT(IsShutdown()); } void -MediaDecoder::DecodeError() +MediaDecoder::DecodeError(const MediaResult& aError) { MOZ_ASSERT(NS_IsMainThread()); MOZ_ASSERT(!IsShutdown()); - mOwner->DecodeError(); + mOwner->DecodeError(aError); MOZ_ASSERT(IsShutdown()); } void 
MediaDecoder::UpdateSameOriginStatus(bool aSameOrigin) { MOZ_ASSERT(NS_IsMainThread()); mSameOriginMedia = aSameOrigin;
--- a/dom/media/MediaDecoder.h +++ b/dom/media/MediaDecoder.h @@ -431,17 +431,17 @@ private: UpdateLogicalPositionInternal(MediaDecoderEventVisibility::Observable); } // Find the end of the cached data starting at the current decoder // position. int64_t GetDownloadPosition(); // Notifies the element that decoding has failed. - void DecodeError(); + void DecodeError(const MediaResult& aError); // Indicate whether the media is same-origin with the element. void UpdateSameOriginStatus(bool aSameOrigin); MediaDecoderOwner* GetOwner() const override; #ifdef MOZ_EME typedef MozPromise<RefPtr<CDMProxy>, bool /* aIgnored */, /* IsExclusive = */ true> CDMProxyPromise; @@ -587,16 +587,17 @@ private: void MetadataLoaded(nsAutoPtr<MediaInfo> aInfo, nsAutoPtr<MetadataTags> aTags, MediaDecoderEventVisibility aEventVisibility); MediaEventSource<void>* DataArrivedEvent() override { return &mDataArrivedEvent; } void OnPlaybackEvent(MediaEventType aEvent); + void OnPlaybackErrorEvent(const MediaResult& aError); void OnMediaNotSeekable() { SetMediaSeekable(false); } void FinishShutdown(); @@ -726,16 +727,17 @@ protected: // A listener to receive metadata updates from MDSM. MediaEventListener mTimedMetadataListener; MediaEventListener mMetadataLoadedListener; MediaEventListener mFirstFrameLoadedListener; MediaEventListener mOnPlaybackEvent; + MediaEventListener mOnPlaybackErrorEvent; MediaEventListener mOnMediaNotSeekable; protected: // Whether the state machine is shut down. Mirror<bool> mStateMachineIsShutdown; // Buffered range, mirrored from the reader. Mirror<media::TimeIntervals> mBuffered;
--- a/dom/media/MediaDecoderOwner.h +++ b/dom/media/MediaDecoderOwner.h @@ -6,16 +6,17 @@ #ifndef MediaDecoderOwner_h_ #define MediaDecoderOwner_h_ #include "AbstractMediaDecoder.h" #include "nsAutoPtr.h" namespace mozilla { class VideoFrameContainer; +class MediaResult; namespace dom { class HTMLMediaElement; } // namespace dom class MediaDecoderOwner { public: @@ -62,17 +63,17 @@ public: // The decoder owner should call Shutdown() on the decoder and drop the // reference to the decoder to prevent further calls into the decoder. virtual void NetworkError() = 0; // Called by the decoder object, on the main thread, when the // resource has a decode error during metadata loading or decoding. // The decoder owner should call Shutdown() on the decoder and drop the // reference to the decoder to prevent further calls into the decoder. - virtual void DecodeError() = 0; + virtual void DecodeError(const MediaResult& aError) = 0; // Return true if media element error attribute is not null. virtual bool HasError() const = 0; // Called by the video decoder object, on the main thread, when the // resource load has been cancelled. virtual void LoadAborted() = 0;
--- a/dom/media/MediaDecoderReader.cpp +++ b/dom/media/MediaDecoderReader.cpp @@ -284,22 +284,22 @@ size_t MediaDecoderReader::SizeOfAudioQu { return mAudioQueue.GetSize(); } nsresult MediaDecoderReader::ResetDecode(TrackSet aTracks) { if (aTracks.contains(TrackInfo::kVideoTrack)) { VideoQueue().Reset(); - mBaseVideoPromise.RejectIfExists(CANCELED, __func__); + mBaseVideoPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } if (aTracks.contains(TrackInfo::kAudioTrack)) { AudioQueue().Reset(); - mBaseAudioPromise.RejectIfExists(CANCELED, __func__); + mBaseAudioPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } return NS_OK; } RefPtr<MediaDecoderReader::MediaDataPromise> MediaDecoderReader::DecodeToFirstVideoData() { @@ -318,17 +318,17 @@ MediaDecoderReader::DecodeToFirstVideoDa return true; }, [self] () -> bool { MOZ_ASSERT(self->OnTaskQueue()); return self->VideoQueue().GetSize(); })->Then(OwnerThread(), __func__, [self, p] () { p->Resolve(self->VideoQueue().PeekFront(), __func__); }, [p] () { // We don't have a way to differentiate EOS, error, and shutdown here. :-( - p->Reject(END_OF_STREAM, __func__); + p->Reject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); }); return p.forget(); } void MediaDecoderReader::UpdateBuffered() { @@ -355,31 +355,29 @@ MediaDecoderReader::GetBuffered() } return GetEstimatedBufferedTimeRanges(stream, mDuration.Ref().ref().ToMicroseconds()); } RefPtr<MediaDecoderReader::MetadataPromise> MediaDecoderReader::AsyncReadMetadata() { - typedef ReadMetadataFailureReason Reason; - MOZ_ASSERT(OnTaskQueue()); DECODER_LOG("MediaDecoderReader::AsyncReadMetadata"); // Attempt to read the metadata. RefPtr<MetadataHolder> metadata = new MetadataHolder(); nsresult rv = ReadMetadata(&metadata->mInfo, getter_Transfers(metadata->mTags)); metadata->mInfo.AssertValid(); // We're not waiting for anything. If we didn't get the metadata, that's an // error. if (NS_FAILED(rv) || !metadata->mInfo.HasValidMedia()) { DECODER_WARN("ReadMetadata failed, rv=%x HasValidMedia=%d", rv, metadata->mInfo.HasValidMedia()); - return MetadataPromise::CreateAndReject(Reason::METADATA_ERROR, __func__); + return MetadataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); } // Success! return MetadataPromise::CreateAndResolve(metadata, __func__); } class ReRequestVideoWithSkipTask : public Runnable { @@ -451,17 +449,17 @@ MediaDecoderReader::RequestVideoData(boo mTaskQueue->Dispatch(task.forget()); return p; } } if (VideoQueue().GetSize() > 0) { RefPtr<VideoData> v = VideoQueue().PopFront(); mBaseVideoPromise.Resolve(v, __func__); } else if (VideoQueue().IsFinished()) { - mBaseVideoPromise.Reject(END_OF_STREAM, __func__); + mBaseVideoPromise.Reject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); } else { MOZ_ASSERT(false, "Dropping this promise on the floor"); } return p; } RefPtr<MediaDecoderReader::MediaDataPromise> @@ -483,33 +481,35 @@ MediaDecoderReader::RequestAudioData() mTaskQueue->Dispatch(task.forget()); return p; } } if (AudioQueue().GetSize() > 0) { RefPtr<AudioData> a = AudioQueue().PopFront(); mBaseAudioPromise.Resolve(a, __func__); } else if (AudioQueue().IsFinished()) { - mBaseAudioPromise.Reject(mHitAudioDecodeError ? DECODE_ERROR : END_OF_STREAM, __func__); + mBaseAudioPromise.Reject(mHitAudioDecodeError + ? 
NS_ERROR_DOM_MEDIA_FATAL_ERR + : NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); mHitAudioDecodeError = false; } else { MOZ_ASSERT(false, "Dropping this promise on the floor"); } return p; } RefPtr<ShutdownPromise> MediaDecoderReader::Shutdown() { MOZ_ASSERT(OnTaskQueue()); mShutdown = true; - mBaseAudioPromise.RejectIfExists(END_OF_STREAM, __func__); - mBaseVideoPromise.RejectIfExists(END_OF_STREAM, __func__); + mBaseAudioPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); + mBaseVideoPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); mDataArrivedListener.DisconnectIfExists(); ReleaseResources(); mDuration.DisconnectIfConnected(); mBuffered.DisconnectAll(); mIsSuspended.DisconnectAll();
--- a/dom/media/MediaDecoderReader.h +++ b/dom/media/MediaDecoderReader.h @@ -8,16 +8,17 @@ #include "mozilla/EnumSet.h" #include "mozilla/MozPromise.h" #include "nsAutoPtr.h" #include "AbstractMediaDecoder.h" #include "MediaInfo.h" #include "MediaData.h" +#include "MediaResult.h" #include "MediaMetadataManager.h" #include "MediaQueue.h" #include "MediaTimer.h" #include "AudioCompactor.h" #include "Intervals.h" #include "TimeUnits.h" #include "SeekTarget.h" @@ -45,47 +46,35 @@ public: NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MetadataHolder) MediaInfo mInfo; nsAutoPtr<MetadataTags> mTags; private: virtual ~MetadataHolder() {} }; -enum class ReadMetadataFailureReason : int8_t -{ - METADATA_ERROR -}; - // Encapsulates the decoding and reading of media data. Reading can either // synchronous and done on the calling "decode" thread, or asynchronous and // performed on a background thread, with the result being returned by // callback. Never hold the decoder monitor when calling into this class. // Unless otherwise specified, methods and fields of this class can only // be accessed on the decode task queue. class MediaDecoderReader { friend class ReRequestVideoWithSkipTask; friend class ReRequestAudioTask; static const bool IsExclusive = true; public: - enum NotDecodedReason { - END_OF_STREAM, - DECODE_ERROR, - WAITING_FOR_DATA, - CANCELED - }; - using TrackSet = EnumSet<TrackInfo::TrackType>; using MetadataPromise = - MozPromise<RefPtr<MetadataHolder>, ReadMetadataFailureReason, IsExclusive>; + MozPromise<RefPtr<MetadataHolder>, MediaResult, IsExclusive>; using MediaDataPromise = - MozPromise<RefPtr<MediaData>, NotDecodedReason, IsExclusive>; + MozPromise<RefPtr<MediaData>, MediaResult, IsExclusive>; using SeekPromise = MozPromise<media::TimeUnit, nsresult, IsExclusive>; // Note that, conceptually, WaitForData makes sense in a non-exclusive sense. // But in the current architecture it's only ever used exclusively (by MDSM), // so we mark it that way to verify our assumptions. If you have a use-case // for multiple WaitForData consumers, feel free to flip the exclusivity here. using WaitForDataPromise = MozPromise<MediaData::Type, WaitForDataRejectValue, IsExclusive>;
--- a/dom/media/MediaDecoderReaderWrapper.cpp +++ b/dom/media/MediaDecoderReaderWrapper.cpp @@ -71,32 +71,32 @@ public: RefPtr<StartTimeRendezvous> self = this; AwaitStartTime()->Then( mOwnerThread, __func__, [p, data, self] () { MOZ_ASSERT(self->mOwnerThread->IsCurrentThreadIn()); p->Resolve(data, __func__); }, [p] () { - p->Reject(MediaDecoderReader::CANCELED, __func__); + p->Reject(NS_ERROR_DOM_MEDIA_CANCELED, __func__); }); return p.forget(); } template<MediaData::Type SampleType> - void FirstSampleRejected(MediaDecoderReader::NotDecodedReason aReason) + void FirstSampleRejected(const MediaResult& aError) { MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn()); - if (aReason == MediaDecoderReader::DECODE_ERROR) { - mHaveStartTimePromise.RejectIfExists(false, __func__); - } else if (aReason == MediaDecoderReader::END_OF_STREAM) { + if (aError == NS_ERROR_DOM_MEDIA_END_OF_STREAM) { LOG("StartTimeRendezvous=%p SampleType(%d) Has no samples.", this, SampleType); MaybeSetChannelStartTime<SampleType>(INT64_MAX); + } else if (aError != NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) { + mHaveStartTimePromise.RejectIfExists(false, __func__); } } bool HaveStartTime() const { return mAudioStartTime.isSome() && mVideoStartTime.isSome(); } @@ -195,19 +195,19 @@ MediaDecoderReaderWrapper::RequestAudioD RefPtr<MediaDecoderReaderWrapper> self = this; mAudioDataRequest.Begin(p->Then(mOwnerThread, __func__, [self] (MediaData* aAudioSample) { self->mAudioDataRequest.Complete(); aAudioSample->AdjustForStartTime(self->StartTime().ToMicroseconds()); self->mAudioCallback.Notify(AsVariant(aAudioSample)); }, - [self] (MediaDecoderReader::NotDecodedReason aReason) { + [self] (const MediaResult& aError) { self->mAudioDataRequest.Complete(); - self->mAudioCallback.Notify(AsVariant(aReason)); + self->mAudioCallback.Notify(AsVariant(aError)); })); } void MediaDecoderReaderWrapper::RequestVideoData(bool aSkipToNextKeyframe, media::TimeUnit aTimeThreshold) { MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn()); @@ -235,19 +235,19 @@ MediaDecoderReaderWrapper::RequestVideoD RefPtr<MediaDecoderReaderWrapper> self = this; mVideoDataRequest.Begin(p->Then(mOwnerThread, __func__, [self, videoDecodeStartTime] (MediaData* aVideoSample) { self->mVideoDataRequest.Complete(); aVideoSample->AdjustForStartTime(self->StartTime().ToMicroseconds()); self->mVideoCallback.Notify(AsVariant(MakeTuple(aVideoSample, videoDecodeStartTime))); }, - [self] (MediaDecoderReader::NotDecodedReason aReason) { + [self] (const MediaResult& aError) { self->mVideoDataRequest.Complete(); - self->mVideoCallback.Notify(AsVariant(aReason)); + self->mVideoCallback.Notify(AsVariant(aError)); })); } bool MediaDecoderReaderWrapper::IsRequestingAudioData() const { MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn()); return mAudioDataRequest.Exists();
--- a/dom/media/MediaDecoderReaderWrapper.h +++ b/dom/media/MediaDecoderReaderWrapper.h @@ -16,18 +16,18 @@ #include "MediaEventSource.h" namespace mozilla { class StartTimeRendezvous; typedef MozPromise<bool, bool, /* isExclusive = */ false> HaveStartTimePromise; -typedef Variant<MediaData*, MediaDecoderReader::NotDecodedReason> AudioCallbackData; -typedef Variant<Tuple<MediaData*, TimeStamp>, MediaDecoderReader::NotDecodedReason> VideoCallbackData; +typedef Variant<MediaData*, MediaResult> AudioCallbackData; +typedef Variant<Tuple<MediaData*, TimeStamp>, MediaResult> VideoCallbackData; typedef Variant<MediaData::Type, WaitForDataRejectValue> WaitCallbackData; /** * A wrapper around MediaDecoderReader to offset the timestamps of Audio/Video * samples by the start time to ensure MDSM can always assume zero start time. * It also adjusts the seek target passed to Seek() to ensure correct seek time * is passed to the underlying reader. */
--- a/dom/media/MediaDecoderStateMachine.cpp +++ b/dom/media/MediaDecoderStateMachine.cpp @@ -258,18 +258,18 @@ public: // We disconnect mMetadataRequest in Exit() so it is fine to capture // a raw pointer here. mMetadataRequest.Begin(Reader()->ReadMetadata() ->Then(OwnerThread(), __func__, [this] (MetadataHolder* aMetadata) { OnMetadataRead(aMetadata); }, - [this] (ReadMetadataFailureReason aReason) { - OnMetadataNotRead(aReason); + [this] (const MediaResult& aError) { + OnMetadataNotRead(aError); })); } void Exit() override { mMetadataRequest.DisconnectIfExists(); } @@ -350,21 +350,21 @@ private: // to become available so that we can build the correct decryptor/decoder. SetState(DECODER_STATE_WAIT_FOR_CDM); return; } SetState(DECODER_STATE_DECODING_FIRSTFRAME); } - void OnMetadataNotRead(ReadMetadataFailureReason aReason) + void OnMetadataNotRead(const MediaResult& aError) { mMetadataRequest.Complete(); SWARN("Decode metadata failed, shutting down decoder"); - mMaster->DecodeError(); + mMaster->DecodeError(aError); } MozPromiseRequestHolder<MediaDecoderReader::MetadataPromise> mMetadataRequest; // True if we need to enter dormant state after reading metadata. Note that // we can't enter dormant state until reading metadata is done for some // limitations of the reader. bool mPendingDormant = false; @@ -974,66 +974,65 @@ MediaDecoderStateMachine::OnVideoPopped( MOZ_ASSERT(OnTaskQueue()); mPlaybackOffset = std::max(mPlaybackOffset.Ref(), aSample->mOffset); UpdateNextFrameStatus(); DispatchVideoDecodeTaskIfNeeded(); } void MediaDecoderStateMachine::OnNotDecoded(MediaData::Type aType, - MediaDecoderReader::NotDecodedReason aReason) + const MediaResult& aError) { MOZ_ASSERT(OnTaskQueue()); MOZ_ASSERT(mState != DECODER_STATE_SEEKING); - SAMPLE_LOG("OnNotDecoded (aType=%u, aReason=%u)", aType, aReason); + SAMPLE_LOG("OnNotDecoded (aType=%u, aError=%u)", aType, aError.Code()); bool isAudio = aType == MediaData::AUDIO_DATA; MOZ_ASSERT_IF(!isAudio, aType == MediaData::VIDEO_DATA); if (IsShutdown()) { // Already shutdown; return; } - // If this is a decode error, delegate to the generic error path. - if (aReason == MediaDecoderReader::DECODE_ERROR) { - DecodeError(); - return; - } - // If the decoder is waiting for data, we tell it to call us back when the // data arrives. - if (aReason == MediaDecoderReader::WAITING_FOR_DATA) { + if (aError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) { MOZ_ASSERT(mReader->IsWaitForDataSupported(), "Readers that send WAITING_FOR_DATA need to implement WaitForData"); mReader->WaitForData(aType); // We are out of data to decode and will enter buffering mode soon. // We want to play the frames we have already decoded, so we stop pre-rolling // and ensure that loadeddata is fired as required. if (isAudio) { StopPrerollingAudio(); } else { StopPrerollingVideo(); } return; } - if (aReason == MediaDecoderReader::CANCELED) { + if (aError == NS_ERROR_DOM_MEDIA_CANCELED) { if (isAudio) { EnsureAudioDecodeTaskQueued(); } else { EnsureVideoDecodeTaskQueued(); } return; } + // If this is a decode error, delegate to the generic error path. + if (aError != NS_ERROR_DOM_MEDIA_END_OF_STREAM) { + DecodeError(aError); + return; + } + // This is an EOS. Finish off the queue, and then handle things based on our // state. 
- MOZ_ASSERT(aReason == MediaDecoderReader::END_OF_STREAM); if (isAudio) { AudioQueue().Finish(); StopPrerollingAudio(); } else { VideoQueue().Finish(); StopPrerollingVideo(); } switch (mState) { @@ -1214,28 +1213,28 @@ MediaDecoderStateMachine::SetMediaDecode { MOZ_ASSERT(OnTaskQueue()); mAudioCallback = mReader->AudioCallback().Connect( mTaskQueue, [this] (AudioCallbackData aData) { if (aData.is<MediaData*>()) { OnAudioDecoded(aData.as<MediaData*>()); } else { - OnNotDecoded(MediaData::AUDIO_DATA, aData.as<MediaDecoderReader::NotDecodedReason>()); + OnNotDecoded(MediaData::AUDIO_DATA, aData.as<MediaResult>()); } }); mVideoCallback = mReader->VideoCallback().Connect( mTaskQueue, [this] (VideoCallbackData aData) { typedef Tuple<MediaData*, TimeStamp> Type; if (aData.is<Type>()) { auto&& v = aData.as<Type>(); OnVideoDecoded(Get<0>(v), Get<1>(v)); } else { - OnNotDecoded(MediaData::VIDEO_DATA, aData.as<MediaDecoderReader::NotDecodedReason>()); + OnNotDecoded(MediaData::VIDEO_DATA, aData.as<MediaResult>()); } }); mAudioWaitCallback = mReader->AudioWaitCallback().Connect( mTaskQueue, [this] (WaitCallbackData aData) { if (aData.is<MediaData::Type>()) { EnsureAudioDecodeTaskQueued(); } @@ -2078,17 +2077,17 @@ MediaDecoderStateMachine::OnSeekTaskReje StopPrerollingAudio(); } if (aValue.mIsVideoQueueFinished) { VideoQueue().Finish(); StopPrerollingVideo(); } - DecodeError(); + DecodeError(aValue.mError); DiscardSeekTaskIfExist(); } void MediaDecoderStateMachine::DiscardSeekTaskIfExist() { if (mSeekTask) { @@ -2287,23 +2286,23 @@ bool MediaDecoderStateMachine::HasLowUnd return false; } media::TimeInterval interval(media::TimeUnit::FromMicroseconds(endOfDecodedData), media::TimeUnit::FromMicroseconds(std::min(endOfDecodedData + aUsecs, Duration().ToMicroseconds()))); return endOfDecodedData != INT64_MAX && !mBuffered.Ref().Contains(interval); } void -MediaDecoderStateMachine::DecodeError() +MediaDecoderStateMachine::DecodeError(const MediaResult& aError) { MOZ_ASSERT(OnTaskQueue()); MOZ_ASSERT(!IsShutdown()); DECODER_WARN("Decode error"); // Notify the decode error and MediaDecoder will shut down MDSM. - mOnPlaybackEvent.Notify(MediaEventType::DecodeError); + mOnPlaybackErrorEvent.Notify(aError); } void MediaDecoderStateMachine::EnqueueLoadedMetadataEvent() { MOZ_ASSERT(OnTaskQueue()); MediaDecoderEventVisibility visibility = mSentLoadedMetadataEvent ? MediaDecoderEventVisibility::Suppressed @@ -2914,17 +2913,17 @@ MediaDecoderStateMachine::OnMediaSinkVid MOZ_ASSERT(mInfo.HasVideo()); VERBOSE_LOG("[%s]", __func__); mMediaSinkVideoPromise.Complete(); mVideoCompleted = true; if (HasAudio()) { return; } - DecodeError(); + DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__)); } void MediaDecoderStateMachine::OnMediaSinkAudioComplete() { MOZ_ASSERT(OnTaskQueue()); MOZ_ASSERT(mInfo.HasAudio()); VERBOSE_LOG("[%s]", __func__); @@ -2945,17 +2944,17 @@ void MediaDecoderStateMachine::OnMediaSi // Make the best effort to continue playback when there is video. if (HasVideo()) { return; } // Otherwise notify media decoder/element about this error for it makes // no sense to play an audio-only file without sound output. - DecodeError(); + DecodeError(MediaResult(NS_ERROR_DOM_MEDIA_MEDIASINK_ERR, __func__)); } #ifdef MOZ_EME void MediaDecoderStateMachine::OnCDMProxyReady(RefPtr<CDMProxy> aProxy) { MOZ_ASSERT(OnTaskQueue()); mCDMProxyPromise.Complete();
--- a/dom/media/MediaDecoderStateMachine.h +++ b/dom/media/MediaDecoderStateMachine.h @@ -114,17 +114,16 @@ class TaskQueue; extern LazyLogModule gMediaDecoderLog; extern LazyLogModule gMediaSampleLog; enum class MediaEventType : int8_t { PlaybackStarted, PlaybackStopped, PlaybackEnded, SeekStarted, - DecodeError, Invalidate, EnterVideoSuspend, ExitVideoSuspend }; /* The state machine class. This manages the decoding and seeking in the MediaDecoderReader on the decode task queue, and A/V sync on the shared @@ -240,16 +239,18 @@ public: MetadataLoadedEvent() { return mMetadataLoadedEvent; } MediaEventSourceExc<nsAutoPtr<MediaInfo>, MediaDecoderEventVisibility>& FirstFrameLoadedEvent() { return mFirstFrameLoadedEvent; } MediaEventSource<MediaEventType>& OnPlaybackEvent() { return mOnPlaybackEvent; } + MediaEventSource<MediaResult>& + OnPlaybackErrorEvent() { return mOnPlaybackErrorEvent; } size_t SizeOfVideoQueue() const; size_t SizeOfAudioQueue() const; private: class StateObject; class DecodeMetadataState; @@ -339,17 +340,17 @@ private: // Returns true if we're currently playing. The decoder monitor must // be held. bool IsPlaying() const; // TODO: Those callback function may receive demuxed-only data. // Need to figure out a suitable API name for this case. void OnAudioDecoded(MediaData* aAudioSample); void OnVideoDecoded(MediaData* aVideoSample, TimeStamp aDecodeStartTime); - void OnNotDecoded(MediaData::Type aType, MediaDecoderReader::NotDecodedReason aReason); + void OnNotDecoded(MediaData::Type aType, const MediaResult& aError); // Resets all state related to decoding and playback, emptying all buffers // and aborting all pending operations on the decode task queue. void Reset(TrackSet aTracks = TrackSet(TrackInfo::kAudioTrack, TrackInfo::kVideoTrack)); protected: virtual ~MediaDecoderStateMachine(); @@ -476,17 +477,17 @@ protected: // The entry action of DECODER_STATE_DECODING. void StartDecoding(); // Moves the decoder into the shutdown state, and dispatches an error // event to the media element. This begins shutting down the decoder. // The decoder monitor must be held. This is only called on the // decode thread. - void DecodeError(); + void DecodeError(const MediaResult& aError); // Dispatches a LoadedMetadataEvent. // This is threadsafe and can be called on any thread. // The decoder monitor must be held. void EnqueueLoadedMetadataEvent(); void EnqueueFirstFrameLoadedEvent(); @@ -887,16 +888,17 @@ private: MediaEventProducerExc<nsAutoPtr<MediaInfo>, nsAutoPtr<MetadataTags>, MediaDecoderEventVisibility> mMetadataLoadedEvent; MediaEventProducerExc<nsAutoPtr<MediaInfo>, MediaDecoderEventVisibility> mFirstFrameLoadedEvent; MediaEventProducer<MediaEventType> mOnPlaybackEvent; + MediaEventProducer<MediaResult> mOnPlaybackErrorEvent; // True if audio is offloading. // Playback will not start when audio is offloading. bool mAudioOffloading; #ifdef MOZ_EME void OnCDMProxyReady(RefPtr<CDMProxy> aProxy); void OnCDMProxyNotReady();
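With the DecodeError value dropped from MediaEventType, the state machine now publishes decode failures on the dedicated MediaEventSource&lt;MediaResult&gt; declared above. A minimal sketch of a main-thread consumer follows, assuming a hypothetical ExampleListener; the real consumer is MediaDecoder, which connects in SetStateMachineParameters() as shown in the MediaDecoder.cpp hunk earlier.

// Hypothetical listener mirroring the MediaDecoder wiring in this patch.
class ExampleListener
{
public:
  void Connect(MediaDecoderStateMachine* aStateMachine)
  {
    // Each decode error (nsresult code plus optional message) is delivered
    // to the main thread as a MediaResult.
    mOnPlaybackErrorEvent = aStateMachine->OnPlaybackErrorEvent().Connect(
      AbstractThread::MainThread(), this, &ExampleListener::OnPlaybackError);
  }

  void Disconnect() { mOnPlaybackErrorEvent.Disconnect(); }

private:
  void OnPlaybackError(const MediaResult& aError)
  {
    // Description() renders the code together with the attached message.
    NS_WARNING(aError.Description().get());
  }

  MediaEventListener mOnPlaybackErrorEvent;
};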
--- a/dom/media/MediaFormatReader.cpp +++ b/dom/media/MediaFormatReader.cpp @@ -88,24 +88,24 @@ MediaFormatReader::~MediaFormatReader() } RefPtr<ShutdownPromise> MediaFormatReader::Shutdown() { MOZ_ASSERT(OnTaskQueue()); mDemuxerInitRequest.DisconnectIfExists(); - mMetadataPromise.RejectIfExists(ReadMetadataFailureReason::METADATA_ERROR, __func__); - mSeekPromise.RejectIfExists(NS_ERROR_FAILURE, __func__); + mMetadataPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__); + mSeekPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__); mSkipRequest.DisconnectIfExists(); if (mAudio.mDecoder) { Reset(TrackInfo::kAudioTrack); if (mAudio.HasPromise()) { - mAudio.RejectPromise(CANCELED, __func__); + mAudio.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } mAudio.ShutdownDecoder(); } if (mAudio.mTrackDemuxer) { mAudio.ResetDemuxer(); mAudio.mTrackDemuxer->BreakCycles(); mAudio.mTrackDemuxer = nullptr; } @@ -114,17 +114,17 @@ MediaFormatReader::Shutdown() mAudio.mTaskQueue->AwaitShutdownAndIdle(); mAudio.mTaskQueue = nullptr; } MOZ_ASSERT(!mAudio.HasPromise()); if (mVideo.mDecoder) { Reset(TrackInfo::kVideoTrack); if (mVideo.HasPromise()) { - mVideo.RejectPromise(CANCELED, __func__); + mVideo.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } mVideo.ShutdownDecoder(); } if (mVideo.mTrackDemuxer) { mVideo.ResetDemuxer(); mVideo.mTrackDemuxer->BreakCycles(); mVideo.mTrackDemuxer = nullptr; } @@ -278,26 +278,26 @@ MediaFormatReader::OnDemuxerInitDone(nsr // To decode, we need valid video and a place to put it. bool videoActive = !!mDemuxer->GetNumberTracks(TrackInfo::kVideoTrack) && GetImageContainer(); if (videoActive) { // We currently only handle the first video track. mVideo.mTrackDemuxer = mDemuxer->GetTrackDemuxer(TrackInfo::kVideoTrack, 0); if (!mVideo.mTrackDemuxer) { - mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__); + mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); return; } UniquePtr<TrackInfo> videoInfo = mVideo.mTrackDemuxer->GetInfo(); videoActive = videoInfo && videoInfo->IsValid(); if (videoActive) { if (platform && !platform->SupportsMimeType(videoInfo->mMimeType, nullptr)) { // We have no decoder for this track. Error. - mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__); + mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); return; } mInfo.mVideo = *videoInfo->GetAsVideoInfo(); for (const MetadataTag& tag : videoInfo->mTags) { tags->Put(tag.mKey, tag.mValue); } mVideo.mCallback = new DecoderCallback(this, TrackInfo::kVideoTrack); mVideo.mTimeRanges = mVideo.mTrackDemuxer->GetBuffered(); @@ -307,17 +307,17 @@ MediaFormatReader::OnDemuxerInitDone(nsr mVideo.mTrackDemuxer = nullptr; } } bool audioActive = !!mDemuxer->GetNumberTracks(TrackInfo::kAudioTrack); if (audioActive) { mAudio.mTrackDemuxer = mDemuxer->GetTrackDemuxer(TrackInfo::kAudioTrack, 0); if (!mAudio.mTrackDemuxer) { - mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__); + mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); return; } UniquePtr<TrackInfo> audioInfo = mAudio.mTrackDemuxer->GetInfo(); // We actively ignore audio tracks that we know we can't play. 
audioActive = audioInfo && audioInfo->IsValid() && (!platform || platform->SupportsMimeType(audioInfo->mMimeType, nullptr)); @@ -359,56 +359,54 @@ MediaFormatReader::OnDemuxerInitDone(nsr mInfo.mMetadataDuration = Some(TimeUnit::FromMicroseconds(duration)); } mInfo.mMediaSeekable = mDemuxer->IsSeekable(); mInfo.mMediaSeekableOnlyInBufferedRanges = mDemuxer->IsSeekableOnlyInBufferedRanges(); if (!videoActive && !audioActive) { - mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__); + mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); return; } mInitDone = true; RefPtr<MetadataHolder> metadata = new MetadataHolder(); metadata->mInfo = mInfo; metadata->mTags = tags->Count() ? tags.release() : nullptr; mMetadataPromise.Resolve(metadata, __func__); } void -MediaFormatReader::OnDemuxerInitFailed(DemuxerFailureReason aFailure) +MediaFormatReader::OnDemuxerInitFailed(const MediaResult& aError) { mDemuxerInitRequest.Complete(); - mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__); + mMetadataPromise.Reject(aError, __func__); } -bool +MediaResult MediaFormatReader::EnsureDecoderCreated(TrackType aTrack) { MOZ_ASSERT(OnTaskQueue()); MOZ_DIAGNOSTIC_ASSERT(!IsSuspended()); auto& decoder = GetDecoderData(aTrack); if (decoder.mDecoder) { - return true; + return NS_OK; } if (!mPlatform) { mPlatform = new PDMFactory(); - NS_ENSURE_TRUE(mPlatform, false); if (IsEncrypted()) { #ifdef MOZ_EME MOZ_ASSERT(mCDMProxy); mPlatform->SetCDMProxy(mCDMProxy); #else - // EME not supported. - return false; + return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, "EME not supported"); #endif } } decoder.mDecoderInitialized = false; MonitorAutoLock mon(decoder.mMonitor); @@ -438,20 +436,20 @@ MediaFormatReader::EnsureDecoderCreated( }); break; } default: break; } if (decoder.mDecoder ) { decoder.mDescription = decoder.mDecoder->GetDescriptionName(); - } else { - decoder.mDescription = "error creating decoder"; + return NS_OK; } - return decoder.mDecoder != nullptr; + decoder.mDescription = "error creating decoder"; + return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, "error creating decoder"); } bool MediaFormatReader::EnsureDecoderInitialized(TrackType aTrack) { MOZ_ASSERT(OnTaskQueue()); MOZ_DIAGNOSTIC_ASSERT(!IsSuspended()); @@ -474,21 +472,21 @@ MediaFormatReader::EnsureDecoderInitiali MOZ_DIAGNOSTIC_ASSERT(decoder.mDecoder); decoder.mInitPromise.Complete(); decoder.mDecoderInitialized = true; MonitorAutoLock mon(decoder.mMonitor); decoder.mDescription = decoder.mDecoder->GetDescriptionName(); self->SetVideoDecodeThreshold(); self->ScheduleUpdate(aTrack); }, - [self, aTrack] (MediaDataDecoder::DecoderFailureReason aResult) { + [self, aTrack] (MediaResult aError) { auto& decoder = self->GetDecoderData(aTrack); decoder.mInitPromise.Complete(); decoder.ShutdownDecoder(); - self->NotifyError(aTrack); + self->NotifyError(aTrack, aError); })); return false; } void MediaFormatReader::ReadUpdatedMetadata(MediaInfo* aInfo) { *aInfo = mInfo; @@ -529,31 +527,31 @@ MediaFormatReader::RequestVideoData(bool MOZ_DIAGNOSTIC_ASSERT(!mVideo.HasPromise(), "No duplicate sample requests"); MOZ_DIAGNOSTIC_ASSERT(!mVideo.mSeekRequest.Exists() || mVideo.mTimeThreshold.isSome()); MOZ_DIAGNOSTIC_ASSERT(!IsSeeking(), "called mid-seek"); LOGV("RequestVideoData(%d, %lld)", aSkipToNextKeyframe, aTimeThreshold); if (!HasVideo()) { LOG("called with no video track"); - return MediaDataPromise::CreateAndReject(DECODE_ERROR, __func__); + return 
MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } if (IsSeeking()) { LOG("called mid-seek. Rejecting."); - return MediaDataPromise::CreateAndReject(CANCELED, __func__); + return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } if (mShutdown) { NS_WARNING("RequestVideoData on shutdown MediaFormatReader!"); - return MediaDataPromise::CreateAndReject(CANCELED, __func__); + return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } if (IsSuspended()) { - return MediaDataPromise::CreateAndReject(CANCELED, __func__); + return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } media::TimeUnit timeThreshold{media::TimeUnit::FromMicroseconds(aTimeThreshold)}; // Ensure we have no pending seek going as ShouldSkip could return out of date // information. if (!mVideo.HasInternalSeekPending() && ShouldSkip(aSkipToNextKeyframe, timeThreshold)) { RefPtr<MediaDataPromise> p = mVideo.EnsurePromise(__func__); @@ -563,47 +561,43 @@ MediaFormatReader::RequestVideoData(bool RefPtr<MediaDataPromise> p = mVideo.EnsurePromise(__func__); ScheduleUpdate(TrackInfo::kVideoTrack); return p; } void -MediaFormatReader::OnDemuxFailed(TrackType aTrack, DemuxerFailureReason aFailure) +MediaFormatReader::OnDemuxFailed(TrackType aTrack, const MediaResult& aError) { MOZ_ASSERT(OnTaskQueue()); - LOG("Failed to demux %s, failure:%d", - aTrack == TrackType::kVideoTrack ? "video" : "audio", aFailure); + LOG("Failed to demux %s, failure:%u", + aTrack == TrackType::kVideoTrack ? "video" : "audio", aError.Code()); auto& decoder = GetDecoderData(aTrack); decoder.mDemuxRequest.Complete(); - switch (aFailure) { - case DemuxerFailureReason::END_OF_STREAM: + switch (aError.Code()) { + case NS_ERROR_DOM_MEDIA_END_OF_STREAM: if (!decoder.mWaitingForData) { decoder.mNeedDraining = true; } NotifyEndOfStream(aTrack); break; - case DemuxerFailureReason::DEMUXER_ERROR: - NotifyError(aTrack); - break; - case DemuxerFailureReason::WAITING_FOR_DATA: + case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA: if (!decoder.mWaitingForData) { decoder.mNeedDraining = true; } NotifyWaitingForData(aTrack); break; - case DemuxerFailureReason::CANCELED: MOZ_FALLTHROUGH; - case DemuxerFailureReason::SHUTDOWN: + case NS_ERROR_DOM_MEDIA_CANCELED: if (decoder.HasPromise()) { - decoder.RejectPromise(CANCELED, __func__); + decoder.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } break; default: - MOZ_ASSERT(false); + NotifyError(aTrack, aError); break; } } void MediaFormatReader::DoDemuxVideo() { mVideo.mDemuxRequest.Begin(mVideo.mTrackDemuxer->GetSamples(1) @@ -633,31 +627,31 @@ MediaFormatReader::RequestAudioData() MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking() || !mAudio.mSeekRequest.Exists() || mAudio.mTimeThreshold.isSome()); MOZ_DIAGNOSTIC_ASSERT(IsVideoSeeking() || !IsSeeking(), "called mid-seek"); LOGV(""); if (!HasAudio()) { LOG("called with no audio track"); - return MediaDataPromise::CreateAndReject(DECODE_ERROR, __func__); + return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } if (IsSuspended()) { - return MediaDataPromise::CreateAndReject(CANCELED, __func__); + return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } if (IsSeeking()) { LOG("called mid-seek. 
Rejecting."); - return MediaDataPromise::CreateAndReject(CANCELED, __func__); + return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } if (mShutdown) { NS_WARNING("RequestAudioData on shutdown MediaFormatReader!"); - return MediaDataPromise::CreateAndReject(CANCELED, __func__); + return MediaDataPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } RefPtr<MediaDataPromise> p = mAudio.EnsurePromise(__func__); ScheduleUpdate(TrackInfo::kAudioTrack); return p; } @@ -718,17 +712,17 @@ MediaFormatReader::NotifyDrainComplete(T LOG("MediaFormatReader called DrainComplete() before flushing, ignoring."); return; } decoder.mDrainComplete = true; ScheduleUpdate(aTrack); } void -MediaFormatReader::NotifyError(TrackType aTrack, MediaDataDecoderError aError) +MediaFormatReader::NotifyError(TrackType aTrack, const MediaResult& aError) { MOZ_ASSERT(OnTaskQueue()); LOGV("%s Decoding error", TrackTypeToStr(aTrack)); auto& decoder = GetDecoderData(aTrack); decoder.mError = decoder.HasFatalError() ? decoder.mError : Some(aError); ScheduleUpdate(aTrack); } @@ -933,19 +927,20 @@ MediaFormatReader::HandleDemuxedSamples( } auto& decoder = GetDecoderData(aTrack); if (decoder.mQueuedSamples.IsEmpty()) { return; } - if (!EnsureDecoderCreated(aTrack)) { + MediaResult rv = EnsureDecoderCreated(aTrack); + if (NS_FAILED(rv)) { NS_WARNING("Error constructing decoders"); - NotifyError(aTrack); + NotifyError(aTrack, rv); return; } if (!EnsureDecoderInitialized(aTrack)) { return; } if (!ForceZeroStartTime() && decoder.mFirstDemuxedSampleTime.isNothing()) { @@ -1047,34 +1042,33 @@ MediaFormatReader::InternalSeek(TrackTyp auto& decoder = self->GetDecoderData(aTrack); decoder.mSeekRequest.Complete(); MOZ_ASSERT(decoder.mTimeThreshold, "Seek promise must be disconnected when timethreshold is reset"); decoder.mTimeThreshold.ref().mHasSeeked = true; self->SetVideoDecodeThreshold(); self->ScheduleUpdate(aTrack); }, - [self, aTrack] (DemuxerFailureReason aResult) { + [self, aTrack] (const MediaResult& aError) { auto& decoder = self->GetDecoderData(aTrack); decoder.mSeekRequest.Complete(); - switch (aResult) { - case DemuxerFailureReason::WAITING_FOR_DATA: + switch (aError.Code()) { + case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA: self->NotifyWaitingForData(aTrack); break; - case DemuxerFailureReason::END_OF_STREAM: + case NS_ERROR_DOM_MEDIA_END_OF_STREAM: decoder.mTimeThreshold.reset(); self->NotifyEndOfStream(aTrack); break; - case DemuxerFailureReason::CANCELED: MOZ_FALLTHROUGH; - case DemuxerFailureReason::SHUTDOWN: + case NS_ERROR_DOM_MEDIA_CANCELED: decoder.mTimeThreshold.reset(); break; default: decoder.mTimeThreshold.reset(); - self->NotifyError(aTrack); + self->NotifyError(aTrack, aError); break; } })); } void MediaFormatReader::DrainDecoder(TrackType aTrack) { @@ -1200,71 +1194,70 @@ MediaFormatReader::Update(TrackType aTra mPreviousDecodedKeyframeTime_us = output->mTime; } nsCString error; mVideo.mIsHardwareAccelerated = mVideo.mDecoder && mVideo.mDecoder->IsHardwareAccelerated(error); } } else if (decoder.HasFatalError()) { LOG("Rejecting %s promise: DECODE_ERROR", TrackTypeToStr(aTrack)); - decoder.RejectPromise(DECODE_ERROR, __func__); + decoder.RejectPromise(decoder.mError.ref(), __func__); return; } else if (decoder.mDrainComplete) { bool wasDraining = decoder.mDraining; decoder.mDrainComplete = false; decoder.mDraining = false; if (decoder.mDemuxEOS) { LOG("Rejecting %s promise: EOS", TrackTypeToStr(aTrack)); - decoder.RejectPromise(END_OF_STREAM, __func__); + 
decoder.RejectPromise(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); } else if (decoder.mWaitingForData) { if (wasDraining && decoder.mLastSampleTime && !decoder.mNextStreamSourceID) { // We have completed draining the decoder following WaitingForData. // Set up the internal seek machinery to be able to resume from the // last sample decoded. LOG("Seeking to last sample time: %lld", decoder.mLastSampleTime.ref().mStart.ToMicroseconds()); InternalSeek(aTrack, InternalSeekTarget(decoder.mLastSampleTime.ref(), true)); } if (!decoder.mReceivedNewData) { LOG("Rejecting %s promise: WAITING_FOR_DATA", TrackTypeToStr(aTrack)); - decoder.RejectPromise(WAITING_FOR_DATA, __func__); + decoder.RejectPromise(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA, __func__); } } // Now that draining has completed, we check if we have received // new data again as the result may now be different from the earlier // run. if (UpdateReceivedNewData(aTrack) || decoder.mSeekRequest.Exists()) { LOGV("Nothing more to do"); return; } } else if (decoder.mDemuxEOS && !decoder.mNeedDraining && !decoder.HasPendingDrain() && decoder.mQueuedSamples.IsEmpty()) { // It is possible to transition from WAITING_FOR_DATA directly to EOS // state during the internal seek; in which case no draining would occur. // There is no more samples left to be decoded and we are already in // EOS state. We can immediately reject the data promise. LOG("Rejecting %s promise: EOS", TrackTypeToStr(aTrack)); - decoder.RejectPromise(END_OF_STREAM, __func__); + decoder.RejectPromise(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); } } if (decoder.mNeedDraining) { DrainDecoder(aTrack); return; } - if (decoder.mError && - decoder.mError.ref() == MediaDataDecoderError::DECODE_ERROR) { + if (decoder.mError && !decoder.HasFatalError()) { decoder.mDecodePending = false; - decoder.mError.reset(); if (++decoder.mNumOfConsecutiveError > decoder.mMaxConsecutiveError) { - NotifyError(aTrack); + NotifyError(aTrack, decoder.mError.ref()); return; } + decoder.mError.reset(); LOG("%s decoded error count %d", TrackTypeToStr(aTrack), decoder.mNumOfConsecutiveError); media::TimeUnit nextKeyframe; if (aTrack == TrackType::kVideoTrack && !decoder.HasInternalSeekPending() && NS_SUCCEEDED(decoder.mTrackDemuxer->GetNextRandomAccessPoint(&nextKeyframe))) { SkipVideoDemuxToNextKeyFrame(decoder.mLastSampleTime.refOr(TimeInterval()).Length()); return; } @@ -1393,37 +1386,37 @@ MediaFormatReader::ResetDecode(TrackSet // Reset miscellaneous seeking state. 
mPendingSeekTime.reset(); if (HasVideo() && aTracks.contains(TrackInfo::kVideoTrack)) { mVideo.ResetDemuxer(); Reset(TrackInfo::kVideoTrack); if (mVideo.HasPromise()) { - mVideo.RejectPromise(CANCELED, __func__); + mVideo.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } } if (HasAudio() && aTracks.contains(TrackInfo::kAudioTrack)) { mAudio.ResetDemuxer(); Reset(TrackInfo::kAudioTrack); if (mAudio.HasPromise()) { - mAudio.RejectPromise(CANCELED, __func__); + mAudio.RejectPromise(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } } return MediaDecoderReader::ResetDecode(aTracks); } void MediaFormatReader::Output(TrackType aTrack, MediaData* aSample) { if (!aSample) { NS_WARNING("MediaFormatReader::Output() passed a null sample"); - Error(aTrack); + Error(aTrack, MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__)); return; } LOGV("Decoded %s sample time=%lld timecode=%lld kf=%d dur=%lld", TrackTypeToStr(aTrack), aSample->mTime, aSample->mTimecode, aSample->mKeyframe, aSample->mDuration); RefPtr<nsIRunnable> task = @@ -1446,20 +1439,20 @@ MediaFormatReader::InputExhausted(TrackT { RefPtr<nsIRunnable> task = NewRunnableMethod<TrackType>( this, &MediaFormatReader::NotifyInputExhausted, aTrack); OwnerThread()->Dispatch(task.forget()); } void -MediaFormatReader::Error(TrackType aTrack, MediaDataDecoderError aError) +MediaFormatReader::Error(TrackType aTrack, const MediaResult& aError) { RefPtr<nsIRunnable> task = - NewRunnableMethod<TrackType, MediaDataDecoderError>( + NewRunnableMethod<TrackType, MediaResult>( this, &MediaFormatReader::NotifyError, aTrack, aError); OwnerThread()->Dispatch(task.forget()); } void MediaFormatReader::Reset(TrackType aTrack) { MOZ_ASSERT(OnTaskQueue()); @@ -1562,34 +1555,33 @@ MediaFormatReader::OnVideoSkipCompleted( void MediaFormatReader::OnVideoSkipFailed(MediaTrackDemuxer::SkipFailureHolder aFailure) { MOZ_ASSERT(OnTaskQueue()); LOG("Skipping failed, skipped %u frames", aFailure.mSkipped); mSkipRequest.Complete(); - switch (aFailure.mFailure) { - case DemuxerFailureReason::END_OF_STREAM: MOZ_FALLTHROUGH; - case DemuxerFailureReason::WAITING_FOR_DATA: + switch (aFailure.mFailure.Code()) { + case NS_ERROR_DOM_MEDIA_END_OF_STREAM: + case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA: // Some frames may have been output by the decoder since we initiated the // videoskip process and we know they would be late. DropDecodedSamples(TrackInfo::kVideoTrack); // We can't complete the skip operation, will just service a video frame // normally. 
ScheduleUpdate(TrackInfo::kVideoTrack); break; - case DemuxerFailureReason::CANCELED: MOZ_FALLTHROUGH; - case DemuxerFailureReason::SHUTDOWN: + case NS_ERROR_DOM_MEDIA_CANCELED: if (mVideo.HasPromise()) { - mVideo.RejectPromise(CANCELED, __func__); + mVideo.RejectPromise(aFailure.mFailure, __func__); } break; default: - NotifyError(TrackType::kVideoTrack); + NotifyError(TrackType::kVideoTrack, aFailure.mFailure); break; } } RefPtr<MediaDecoderReader::SeekPromise> MediaFormatReader::Seek(SeekTarget aTarget, int64_t aUnused) { MOZ_ASSERT(OnTaskQueue()); @@ -1691,27 +1683,27 @@ MediaFormatReader::AttemptSeek() } else if (HasAudio()) { DoAudioSeek(); } else { MOZ_CRASH(); } } void -MediaFormatReader::OnSeekFailed(TrackType aTrack, DemuxerFailureReason aResult) +MediaFormatReader::OnSeekFailed(TrackType aTrack, const MediaResult& aError) { MOZ_ASSERT(OnTaskQueue()); - LOGV("%s failure:%d", TrackTypeToStr(aTrack), aResult); + LOGV("%s failure:%u", TrackTypeToStr(aTrack), aError.Code()); if (aTrack == TrackType::kVideoTrack) { mVideo.mSeekRequest.Complete(); } else { mAudio.mSeekRequest.Complete(); } - if (aResult == DemuxerFailureReason::WAITING_FOR_DATA) { + if (aError == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) { if (HasVideo() && aTrack == TrackType::kAudioTrack && mFallbackSeekTime.isSome() && mPendingSeekTime.ref() != mFallbackSeekTime.ref()) { // We have failed to seek audio where video seeked to earlier. // Attempt to seek instead to the closest point that we know we have in // order to limit A/V sync discrepency. // Ensure we have the most up to date buffered ranges. @@ -1735,17 +1727,17 @@ MediaFormatReader::OnSeekFailed(TrackTyp DoAudioSeek(); return; } NotifyWaitingForData(aTrack); return; } MOZ_ASSERT(!mVideo.mSeekRequest.Exists() && !mAudio.mSeekRequest.Exists()); mPendingSeekTime.reset(); - mSeekPromise.Reject(NS_ERROR_FAILURE, __func__); + mSeekPromise.Reject(aError, __func__); } void MediaFormatReader::DoVideoSeek() { MOZ_ASSERT(mPendingSeekTime.isSome()); LOGV("Seeking video to %lld", mPendingSeekTime.ref().ToMicroseconds()); media::TimeUnit seekTime = mPendingSeekTime.ref(); @@ -1776,20 +1768,20 @@ MediaFormatReader::OnVideoSeekCompleted( DoAudioSeek(); } else { mPendingSeekTime.reset(); mSeekPromise.Resolve(aTime, __func__); } } void -MediaFormatReader::OnVideoSeekFailed(DemuxerFailureReason aFailure) +MediaFormatReader::OnVideoSeekFailed(const MediaResult& aError) { mPreviousDecodedKeyframeTime_us = sNoPreviousDecodedKeyframe; - OnSeekFailed(TrackType::kVideoTrack, aFailure); + OnSeekFailed(TrackType::kVideoTrack, aError); } void MediaFormatReader::SetVideoDecodeThreshold() { MOZ_ASSERT(OnTaskQueue()); if (!HasVideo() || !mVideo.mDecoder) { @@ -1843,19 +1835,19 @@ MediaFormatReader::OnAudioSeekCompleted( MOZ_ASSERT(OnTaskQueue()); LOGV("Audio seeked to %lld", aTime.ToMicroseconds()); mAudio.mSeekRequest.Complete(); mPendingSeekTime.reset(); mSeekPromise.Resolve(aTime, __func__); } void -MediaFormatReader::OnAudioSeekFailed(DemuxerFailureReason aFailure) +MediaFormatReader::OnAudioSeekFailed(const MediaResult& aError) { - OnSeekFailed(TrackType::kAudioTrack, aFailure); + OnSeekFailed(TrackType::kAudioTrack, aError); } media::TimeIntervals MediaFormatReader::GetBuffered() { MOZ_ASSERT(OnTaskQueue()); media::TimeIntervals videoti; media::TimeIntervals audioti;
--- a/dom/media/MediaFormatReader.h +++ b/dom/media/MediaFormatReader.h @@ -112,17 +112,17 @@ private: bool InitDemuxer(); // Notify the demuxer that new data has been received. // The next queued task calling GetBuffered() is guaranteed to have up to date // buffered ranges. void NotifyDemuxer(); void ReturnOutput(MediaData* aData, TrackType aTrack); - bool EnsureDecoderCreated(TrackType aTrack); + MediaResult EnsureDecoderCreated(TrackType aTrack); bool EnsureDecoderInitialized(TrackType aTrack); // Enqueues a task to call Update(aTrack) on the decoder task queue. // Lock for corresponding track must be held. void ScheduleUpdate(TrackType aTrack); void Update(TrackType aTrack); // Handle actions should more data be received. // Returns true if no more action is required. @@ -161,30 +161,30 @@ private: // the first sample past the target will be dropped. void InternalSeek(TrackType aTrack, const InternalSeekTarget& aTarget); // Drain the current decoder. void DrainDecoder(TrackType aTrack); void NotifyNewOutput(TrackType aTrack, MediaData* aSample); void NotifyInputExhausted(TrackType aTrack); void NotifyDrainComplete(TrackType aTrack); - void NotifyError(TrackType aTrack, MediaDataDecoderError aError = MediaDataDecoderError::FATAL_ERROR); + void NotifyError(TrackType aTrack, const MediaResult& aError); void NotifyWaitingForData(TrackType aTrack); void NotifyEndOfStream(TrackType aTrack); void ExtractCryptoInitData(nsTArray<uint8_t>& aInitData); // Initializes mLayersBackendType if possible. void InitLayersBackendType(); // DecoderCallback proxies the MediaDataDecoderCallback calls to these // functions. void Output(TrackType aType, MediaData* aSample); void InputExhausted(TrackType aTrack); - void Error(TrackType aTrack, MediaDataDecoderError aError = MediaDataDecoderError::FATAL_ERROR); + void Error(TrackType aTrack, const MediaResult& aError); void Reset(TrackType aTrack); void DrainComplete(TrackType aTrack); void DropDecodedSamples(TrackType aTrack); void WaitingForKey(TrackType aTrack); bool ShouldSkip(bool aSkipToNextKeyframe, media::TimeUnit aTimeThreshold); void SetVideoDecodeThreshold(); @@ -201,17 +201,17 @@ private: { } void Output(MediaData* aSample) override { mReader->Output(mType, aSample); } void InputExhausted() override { mReader->InputExhausted(mType); } - void Error(MediaDataDecoderError aError) override { + void Error(const MediaResult& aError) override { mReader->Error(mType, aError); } void DrainComplete() override { mReader->DrainComplete(mType); } void ReleaseMediaResources() override { mReader->ReleaseResources(); } @@ -322,20 +322,20 @@ private: bool HasPendingDrain() const { return mDraining || mDrainComplete; } uint32_t mNumOfConsecutiveError; uint32_t mMaxConsecutiveError; - Maybe<MediaDataDecoderError> mError; + Maybe<MediaResult> mError; bool HasFatalError() const { - return mError.isSome() && mError.ref() == MediaDataDecoderError::FATAL_ERROR; + return mError.isSome() && mError.ref() != NS_ERROR_DOM_MEDIA_DECODE_ERR; } // If set, all decoded samples prior mTimeThreshold will be dropped. // Used for internal seeking when a change of stream is detected or when // encountering data discontinuity. Maybe<InternalSeekTarget> mTimeThreshold; // Time of last sample returned. Maybe<media::TimeInterval> mLastSampleTime; @@ -349,17 +349,17 @@ private: uint64_t mNumSamplesSkippedTotal; // These get overridden in the templated concrete class. // Indicate if we have a pending promise for decoded frame. // Rejecting the promise will stop the reader from decoding ahead. 
virtual bool HasPromise() const = 0; virtual RefPtr<MediaDataPromise> EnsurePromise(const char* aMethodName) = 0; virtual void ResolvePromise(MediaData* aData, const char* aMethodName) = 0; - virtual void RejectPromise(MediaDecoderReader::NotDecodedReason aReason, + virtual void RejectPromise(const MediaResult& aError, const char* aMethodName) = 0; // Clear track demuxer related data. void ResetDemuxer() { mDemuxRequest.DisconnectIfExists(); mSeekRequest.DisconnectIfExists(); mTrackDemuxer->Reset(); @@ -457,21 +457,21 @@ private: void ResolvePromise(MediaData* aData, const char* aMethodName) override { MOZ_ASSERT(mOwner->OnTaskQueue()); mPromise.Resolve(aData, aMethodName); mHasPromise = false; } - void RejectPromise(MediaDecoderReader::NotDecodedReason aReason, + void RejectPromise(const MediaResult& aError, const char* aMethodName) override { MOZ_ASSERT(mOwner->OnTaskQueue()); - mPromise.Reject(aReason, aMethodName); + mPromise.Reject(aError, aMethodName); mHasPromise = false; } private: MozPromiseHolder<MediaDataPromise> mPromise; Atomic<bool> mHasPromise; }; @@ -482,32 +482,32 @@ private: bool NeedInput(DecoderData& aDecoder); DecoderData& GetDecoderData(TrackType aTrack); // Demuxer objects. RefPtr<MediaDataDemuxer> mDemuxer; bool mDemuxerInitDone; void OnDemuxerInitDone(nsresult); - void OnDemuxerInitFailed(DemuxerFailureReason aFailure); + void OnDemuxerInitFailed(const MediaResult& aError); MozPromiseRequestHolder<MediaDataDemuxer::InitPromise> mDemuxerInitRequest; - void OnDemuxFailed(TrackType aTrack, DemuxerFailureReason aFailure); + void OnDemuxFailed(TrackType aTrack, const MediaResult& aError); void DoDemuxVideo(); void OnVideoDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples); - void OnVideoDemuxFailed(DemuxerFailureReason aFailure) + void OnVideoDemuxFailed(const MediaResult& aError) { - OnDemuxFailed(TrackType::kVideoTrack, aFailure); + OnDemuxFailed(TrackType::kVideoTrack, aError); } void DoDemuxAudio(); void OnAudioDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples); - void OnAudioDemuxFailed(DemuxerFailureReason aFailure) + void OnAudioDemuxFailed(const MediaResult& aError) { - OnDemuxFailed(TrackType::kAudioTrack, aFailure); + OnDemuxFailed(TrackType::kAudioTrack, aError); } void SkipVideoDemuxToNextKeyFrame(media::TimeUnit aTimeThreshold); MozPromiseRequestHolder<MediaTrackDemuxer::SkipAccessPointPromise> mSkipRequest; void VideoSkipReset(uint32_t aSkipped); void OnVideoSkipCompleted(uint32_t aSkipped); void OnVideoSkipFailed(MediaTrackDemuxer::SkipFailureHolder aFailure); @@ -545,25 +545,25 @@ private: media::TimeUnit DemuxStartTime(); bool IsSeeking() const { return mPendingSeekTime.isSome(); } bool IsVideoSeeking() const { return IsSeeking() && mOriginalSeekTarget.IsVideoOnly(); } void ScheduleSeek(); void AttemptSeek(); - void OnSeekFailed(TrackType aTrack, DemuxerFailureReason aFailure); + void OnSeekFailed(TrackType aTrack, const MediaResult& aError); void DoVideoSeek(); void OnVideoSeekCompleted(media::TimeUnit aTime); - void OnVideoSeekFailed(DemuxerFailureReason aFailure); + void OnVideoSeekFailed(const MediaResult& aError); bool mSeekScheduled; void DoAudioSeek(); void OnAudioSeekCompleted(media::TimeUnit aTime); - void OnAudioSeekFailed(DemuxerFailureReason aFailure); + void OnAudioSeekFailed(const MediaResult& aError); // The SeekTarget that was last given to Seek() SeekTarget mOriginalSeekTarget; // Temporary seek information while we wait for the data Maybe<media::TimeUnit> mFallbackSeekTime; Maybe<media::TimeUnit> 
mPendingSeekTime; MozPromiseHolder<SeekPromise> mSeekPromise; RefPtr<VideoFrameContainer> mVideoFrameContainer;
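The MediaFormatReader.h hunks above replace the old MediaDataDecoderError enum with MediaResult throughout the decoder callback path, and HasFatalError() now treats every code other than NS_ERROR_DOM_MEDIA_DECODE_ERR as fatal. A minimal sketch of that classification rule, using only the MediaResult API introduced by dom/media/MediaResult.h below; the helper name and the standalone setting are illustrative, not the reader's actual code:

#include "MediaResult.h"
#include "mozilla/Maybe.h"

using mozilla::Maybe;
using mozilla::MediaResult;
using mozilla::Some;

// Mirrors DecoderData::HasFatalError(): only NS_ERROR_DOM_MEDIA_DECODE_ERR is
// considered recoverable (the reader may retry the decode); any other code
// aborts playback.
static bool
IsFatal(const Maybe<MediaResult>& aError)
{
  return aError.isSome() && aError.ref() != NS_ERROR_DOM_MEDIA_DECODE_ERR;
}

static void
Example()
{
  Maybe<MediaResult> error =
    Some(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, "decoder reported a fatal error"));
  if (IsFatal(error)) {
    // Would be forwarded to NotifyError() and ultimately DecodeError().
  }
}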
new file mode 100644
--- /dev/null
+++ b/dom/media/MediaResult.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MediaResult_h_
+#define MediaResult_h_
+
+#include "nsError.h"
+#include "nsPrintfCString.h"
+
+// MediaResult can be used interchangeably with nsresult.
+// It allows extra information, such as where the error occurred, to be stored
+// alongside the error code. While nsresult is typically passed by value, a
+// MediaResult is larger, so passing it by const reference is recommended.
+namespace mozilla {
+
+class MediaResult
+{
+public:
+  MOZ_IMPLICIT MediaResult(nsresult aResult)
+    : mCode(aResult)
+  {
+  }
+  MediaResult(nsresult aResult, const nsACString& aMessage)
+    : mCode(aResult)
+    , mMessage(aMessage)
+  {
+  }
+  MediaResult(nsresult aResult, const char* aMessage)
+    : mCode(aResult)
+    , mMessage(aMessage)
+  {
+  }
+  MediaResult(const MediaResult& aOther) = default;
+  MediaResult(MediaResult&& aOther) = default;
+  MediaResult& operator=(const MediaResult& aOther) = default;
+  MediaResult& operator=(MediaResult&& aOther) = default;
+
+  nsresult Code() const { return mCode; }
+  const nsCString& Message() const { return mMessage; }
+
+  // Interoperation with nsresult.
+  bool operator==(nsresult aResult) const { return aResult == mCode; }
+  bool operator!=(nsresult aResult) const { return aResult != mCode; }
+  operator nsresult () const { return mCode; }
+
+  nsCString Description() const
+  {
+    return nsPrintfCString("0x%08x: %s", mCode, mMessage.get());
+  }
+
+private:
+  nsresult mCode;
+  nsCString mMessage;
+};
+
+} // namespace mozilla
+#endif // MediaResult_h_
\ No newline at end of file
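For reference while reading the rest of the changeset, a minimal usage sketch of the new class based only on the API defined above; the helper function and the diagnostic string are hypothetical, and the NS_ERROR_DOM_MEDIA_* codes are the ones used throughout this patch:

#include <cstdio>
#include "MediaResult.h"

using mozilla::MediaResult;

// Hypothetical parser step that reports failures through MediaResult.
static MediaResult
ParseHeader(bool aHaveData)
{
  if (!aHaveData) {
    // An error code plus a human-readable message describing where it occurred.
    return MediaResult(NS_ERROR_DOM_MEDIA_METADATA_ERR,
                       "Not enough data to parse the header");
  }
  // The success path relies on the implicit conversion from nsresult.
  return NS_OK;
}

static void
Caller()
{
  MediaResult rv = ParseHeader(false);
  if (NS_FAILED(rv)) { // usable anywhere an nsresult is expected
    printf("media error %s\n", rv.Description().get());
  }
}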
--- a/dom/media/NextFrameSeekTask.cpp +++ b/dom/media/NextFrameSeekTask.cpp @@ -48,17 +48,17 @@ NextFrameSeekTask::~NextFrameSeekTask() } void NextFrameSeekTask::Discard() { AssertOwnerThread(); // Disconnect MDSM. - RejectIfExist(__func__); + RejectIfExist(NS_ERROR_DOM_MEDIA_CANCELED, __func__); // Disconnect MediaDecoderReader. CancelCallbacks(); mIsDiscarded = true; } bool @@ -178,22 +178,22 @@ NextFrameSeekTask::OnAudioDecoded(MediaD // We accept any audio data here. mSeekedAudioData = aAudioSample; MaybeFinishSeek(); } void -NextFrameSeekTask::OnAudioNotDecoded(MediaDecoderReader::NotDecodedReason aReason) +NextFrameSeekTask::OnAudioNotDecoded(const MediaResult& aError) { AssertOwnerThread(); MOZ_ASSERT(!mSeekTaskPromise.IsEmpty(), "Seek shouldn't be finished"); - SAMPLE_LOG("OnAudioNotDecoded (aReason=%u)", aReason); + SAMPLE_LOG("OnAudioNotDecoded (aError=%u)", aError.Code()); // We don't really handle audio deocde error here. Let MDSM to trigger further // audio decoding tasks if it needs to play audio, and MDSM will then receive // the decoding state from MediaDecoderReader. MaybeFinishSeek(); } @@ -219,46 +219,46 @@ NextFrameSeekTask::OnVideoDecoded(MediaD RequestVideoData(); return; } MaybeFinishSeek(); } void -NextFrameSeekTask::OnVideoNotDecoded(MediaDecoderReader::NotDecodedReason aReason) +NextFrameSeekTask::OnVideoNotDecoded(const MediaResult& aError) { AssertOwnerThread(); MOZ_ASSERT(!mSeekTaskPromise.IsEmpty(), "Seek shouldn't be finished"); - SAMPLE_LOG("OnVideoNotDecoded (aReason=%u)", aReason); + SAMPLE_LOG("OnVideoNotDecoded (aError=%u)", aError.Code()); - if (aReason == MediaDecoderReader::END_OF_STREAM) { + if (aError == NS_ERROR_DOM_MEDIA_END_OF_STREAM) { mIsVideoQueueFinished = true; } // Video seek not finished. if (NeedMoreVideo()) { - switch (aReason) { - case MediaDecoderReader::DECODE_ERROR: + switch (aError.Code()) { + case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA: + mReader->WaitForData(MediaData::VIDEO_DATA); + break; + case NS_ERROR_DOM_MEDIA_CANCELED: + RequestVideoData(); + break; + case NS_ERROR_DOM_MEDIA_END_OF_STREAM: + MOZ_ASSERT(false, "Shouldn't want more data for ended video."); + break; + default: // We might lose the audio sample after canceling the callbacks. // However it doesn't really matter because MDSM is gonna shut down // when seek fails. CancelCallbacks(); // Reject the promise since we can't finish video seek anyway. - RejectIfExist(__func__); - break; - case MediaDecoderReader::WAITING_FOR_DATA: - mReader->WaitForData(MediaData::VIDEO_DATA); - break; - case MediaDecoderReader::CANCELED: - RequestVideoData(); - break; - case MediaDecoderReader::END_OF_STREAM: - MOZ_ASSERT(false, "Shouldn't want more data for ended video."); + RejectIfExist(aError, __func__); break; } return; } MaybeFinishSeek(); } @@ -269,27 +269,27 @@ NextFrameSeekTask::SetCallbacks() // Register dummy callbcak for audio decoding since we don't need to handle // the decoded audio samples. 
mAudioCallback = mReader->AudioCallback().Connect( OwnerThread(), [this] (AudioCallbackData aData) { if (aData.is<MediaData*>()) { OnAudioDecoded(aData.as<MediaData*>()); } else { - OnAudioNotDecoded(aData.as<MediaDecoderReader::NotDecodedReason>()); + OnAudioNotDecoded(aData.as<MediaResult>()); } }); mVideoCallback = mReader->VideoCallback().Connect( OwnerThread(), [this] (VideoCallbackData aData) { typedef Tuple<MediaData*, TimeStamp> Type; if (aData.is<Type>()) { OnVideoDecoded(Get<0>(aData.as<Type>())); } else { - OnVideoNotDecoded(aData.as<MediaDecoderReader::NotDecodedReason>()); + OnVideoNotDecoded(aData.as<MediaResult>()); } }); mAudioWaitCallback = mReader->AudioWaitCallback().Connect( OwnerThread(), [this] (WaitCallbackData aData) { // We don't make an audio decode request here, instead, let MDSM to // trigger further audio decode tasks if MDSM itself needs to play audio. MaybeFinishSeek(); @@ -298,17 +298,17 @@ NextFrameSeekTask::SetCallbacks() mVideoWaitCallback = mReader->VideoWaitCallback().Connect( OwnerThread(), [this] (WaitCallbackData aData) { if (NeedMoreVideo()) { if (aData.is<MediaData::Type>()) { RequestVideoData(); } else { // Reject if we can't finish video seeking. CancelCallbacks(); - RejectIfExist(__func__); + RejectIfExist(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } return; } MaybeFinishSeek(); }); } void
--- a/dom/media/NextFrameSeekTask.h +++ b/dom/media/NextFrameSeekTask.h @@ -52,21 +52,21 @@ private: bool IsAudioSeekComplete() const; bool IsVideoSeekComplete() const; void MaybeFinishSeek(); void OnAudioDecoded(MediaData* aAudioSample); - void OnAudioNotDecoded(MediaDecoderReader::NotDecodedReason aReason); + void OnAudioNotDecoded(const MediaResult& aError); void OnVideoDecoded(MediaData* aVideoSample); - void OnVideoNotDecoded(MediaDecoderReader::NotDecodedReason aReason); + void OnVideoNotDecoded(const MediaResult& aError); void SetCallbacks(); void CancelCallbacks(); // Update the seek target's time before resolving this seek task, the updated // time will be used in the MDSM::SeekCompleted() to update the MDSM's position. void UpdateSeekTargetTime();
--- a/dom/media/SeekTask.cpp +++ b/dom/media/SeekTask.cpp @@ -41,23 +41,24 @@ SeekTask::Resolve(const char* aCallSite) val.mSeekedVideoData = mSeekedVideoData; val.mIsAudioQueueFinished = mIsAudioQueueFinished; val.mIsVideoQueueFinished = mIsVideoQueueFinished; mSeekTaskPromise.Resolve(val, aCallSite); } void -SeekTask::RejectIfExist(const char* aCallSite) +SeekTask::RejectIfExist(const MediaResult& aError, const char* aCallSite) { AssertOwnerThread(); SeekTaskRejectValue val; val.mIsAudioQueueFinished = mIsAudioQueueFinished; val.mIsVideoQueueFinished = mIsVideoQueueFinished; + val.mError = aError; mSeekTaskPromise.RejectIfExists(val, aCallSite); } void SeekTask::AssertOwnerThread() const { MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
--- a/dom/media/SeekTask.h +++ b/dom/media/SeekTask.h @@ -3,16 +3,17 @@ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #ifndef SEEK_TASK_H #define SEEK_TASK_H #include "mozilla/MozPromise.h" +#include "MediaResult.h" #include "SeekTarget.h" namespace mozilla { class AbstractThread; class MediaData; class MediaDecoderReaderWrapper; @@ -25,18 +26,25 @@ struct SeekTaskResolveValue RefPtr<MediaData> mSeekedAudioData; RefPtr<MediaData> mSeekedVideoData; bool mIsAudioQueueFinished; bool mIsVideoQueueFinished; }; struct SeekTaskRejectValue { + SeekTaskRejectValue() + : mIsAudioQueueFinished(false) + , mIsVideoQueueFinished(false) + , mError(NS_ERROR_DOM_MEDIA_FATAL_ERR) + { + } bool mIsAudioQueueFinished; bool mIsVideoQueueFinished; + MediaResult mError; }; class SeekTask { NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SeekTask) public: static const bool IsExclusive = true; @@ -57,17 +65,17 @@ protected: AbstractThread* aThread, MediaDecoderReaderWrapper* aReader, const SeekTarget& aTarget); virtual ~SeekTask(); void Resolve(const char* aCallSite); - void RejectIfExist(const char* aCallSite); + void RejectIfExist(const MediaResult& aError, const char* aCallSite); void AssertOwnerThread() const; AbstractThread* OwnerThread() const; /* * Data shared with MDSM. */
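Together, the SeekTask.cpp and SeekTask.h changes mean a failed seek now reaches the state machine as a MediaResult inside SeekTaskRejectValue rather than as a bare rejection. A minimal sketch of the consuming side under those definitions; the handler below is hypothetical, while the types and the NS_ERROR_DOM_MEDIA_CANCELED / NS_ERROR_DOM_MEDIA_FATAL_ERR codes come from this patch:

#include "SeekTask.h" // in-tree header providing SeekTaskRejectValue

// Hypothetical reject handler for the seek promise.
static void
OnSeekRejected(const mozilla::SeekTaskRejectValue& aValue)
{
  // mError defaults to NS_ERROR_DOM_MEDIA_FATAL_ERR (see the constructor above)
  // and otherwise carries whatever was passed to RejectIfExist().
  if (aValue.mError == NS_ERROR_DOM_MEDIA_CANCELED) {
    return; // the seek was discarded; nothing to report
  }
  // Any other code is surfaced as a playback error.
}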
--- a/dom/media/eme/MediaKeys.cpp +++ b/dom/media/eme/MediaKeys.cpp @@ -83,17 +83,17 @@ MediaKeys::Terminated() RefPtr<MediaKeySession>& session = iter.Data(); session->OnClosed(); } keySessions.Clear(); MOZ_ASSERT(mKeySessions.Count() == 0); // Notify the element about that CDM has terminated. if (mElement) { - mElement->DecodeError(); + mElement->DecodeError(NS_ERROR_DOM_MEDIA_CDM_ERR); } Shutdown(); } void MediaKeys::Shutdown() {
--- a/dom/media/flac/FlacDemuxer.cpp +++ b/dom/media/flac/FlacDemuxer.cpp @@ -579,17 +579,17 @@ FlacDemuxer::InitInternal() RefPtr<FlacDemuxer::InitPromise> FlacDemuxer::Init() { if (!InitInternal()) { LOG("Init() failure: waiting for data"); return InitPromise::CreateAndReject( - DemuxerFailureReason::DEMUXER_ERROR, __func__); + NS_ERROR_DOM_MEDIA_DEMUXER_ERR, __func__); } LOG("Init() successful"); return InitPromise::CreateAndResolve(NS_OK, __func__); } bool FlacDemuxer::HasTrackType(TrackInfo::TrackType aType) const @@ -850,17 +850,17 @@ FlacTrackDemuxer::GetSamples(int32_t aNu { LOGV("GetSamples(%d) Begin offset=%lld mParsedFramesDuration=%f" " mTotalFrameLen=%llu", aNumSamples, GetResourceOffset(), mParsedFramesDuration.ToSeconds(), mTotalFrameLen); if (!aNumSamples) { return SamplesPromise::CreateAndReject( - DemuxerFailureReason::DEMUXER_ERROR, __func__); + NS_ERROR_DOM_MEDIA_DEMUXER_ERR, __func__); } RefPtr<SamplesHolder> frames = new SamplesHolder(); while (aNumSamples--) { RefPtr<MediaRawData> frame(GetNextFrame(FindNextFrame())); if (!frame) break; @@ -870,17 +870,17 @@ FlacTrackDemuxer::GetSamples(int32_t aNu LOGV("GetSamples() End mSamples.Length=%u aNumSamples=%d offset=%lld" " mParsedFramesDuration=%f mTotalFrameLen=%llu", frames->mSamples.Length(), aNumSamples, GetResourceOffset(), mParsedFramesDuration.ToSeconds(), mTotalFrameLen); if (frames->mSamples.IsEmpty()) { return SamplesPromise::CreateAndReject( - DemuxerFailureReason::END_OF_STREAM, __func__); + NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); } return SamplesPromise::CreateAndResolve(frames, __func__); } void FlacTrackDemuxer::Reset() { @@ -894,17 +894,17 @@ FlacTrackDemuxer::Reset() mParser->EndFrameSession(); } RefPtr<FlacTrackDemuxer::SkipAccessPointPromise> FlacTrackDemuxer::SkipToNextRandomAccessPoint(TimeUnit aTimeThreshold) { // Will not be called for audio-only resources. return SkipAccessPointPromise::CreateAndReject( - SkipFailureHolder(DemuxerFailureReason::DEMUXER_ERROR, 0), __func__); + SkipFailureHolder(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, 0), __func__); } int64_t FlacTrackDemuxer::GetResourceOffset() const { return mSource.Tell(); }
--- a/dom/media/fmp4/MP4Decoder.cpp +++ b/dom/media/fmp4/MP4Decoder.cpp @@ -275,17 +275,17 @@ MP4Decoder::IsVideoAccelerated(layers::L result.AppendLiteral("; "); AppendUTF8toUTF16(failureReason, result); } decoder->Shutdown(); taskQueue->BeginShutdown(); taskQueue->AwaitShutdownAndIdle(); promise->MaybeResolve(result); }, - [promise, decoder, taskQueue] (MediaDataDecoder::DecoderFailureReason aResult) { + [promise, decoder, taskQueue] (MediaResult aError) { decoder->Shutdown(); taskQueue->BeginShutdown(); taskQueue->AwaitShutdownAndIdle(); promise->MaybeResolve(NS_LITERAL_STRING("No; Failed to initialize H264 decoder")); }); return promise.forget(); }
--- a/dom/media/fmp4/MP4Demuxer.cpp +++ b/dom/media/fmp4/MP4Demuxer.cpp @@ -120,33 +120,33 @@ MP4Demuxer::MP4Demuxer(MediaResource* aR RefPtr<MP4Demuxer::InitPromise> MP4Demuxer::Init() { AutoPinned<mp4_demuxer::ResourceStream> stream(mStream); // Check that we have enough data to read the metadata. if (!mp4_demuxer::MP4Metadata::HasCompleteMetadata(stream)) { - return InitPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, __func__); } mInitData = mp4_demuxer::MP4Metadata::Metadata(stream); if (!mInitData) { // OOM - return InitPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, __func__); } RefPtr<mp4_demuxer::BufferStream> bufferstream = new mp4_demuxer::BufferStream(mInitData); mMetadata = MakeUnique<mp4_demuxer::MP4Metadata>(bufferstream); if (!mMetadata->GetNumberTracks(mozilla::TrackInfo::kAudioTrack) && !mMetadata->GetNumberTracks(mozilla::TrackInfo::kVideoTrack)) { - return InitPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, __func__); } return InitPromise::CreateAndResolve(NS_OK, __func__); } bool MP4Demuxer::HasTrackType(TrackInfo::TrackType aType) const { @@ -293,17 +293,17 @@ MP4TrackDemuxer::Seek(media::TimeUnit aT mIterator->Seek(seekTime); // Check what time we actually seeked to. RefPtr<MediaRawData> sample; do { sample = GetNextSample(); if (!sample) { - return SeekPromise::CreateAndReject(DemuxerFailureReason::END_OF_STREAM, __func__); + return SeekPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); } if (!sample->Size()) { // This sample can't be decoded, continue searching. continue; } if (sample->mKeyframe) { mQueuedSample = sample; seekTime = mQueuedSample->mTime; @@ -365,17 +365,17 @@ MP4TrackDemuxer::GetNextSample() } RefPtr<MP4TrackDemuxer::SamplesPromise> MP4TrackDemuxer::GetSamples(int32_t aNumSamples) { EnsureUpToDateIndex(); RefPtr<SamplesHolder> samples = new SamplesHolder; if (!aNumSamples) { - return SamplesPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__); + return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, __func__); } if (mQueuedSample) { MOZ_ASSERT(mQueuedSample->mKeyframe, "mQueuedSample must be a keyframe"); samples->mSamples.AppendElement(mQueuedSample); mQueuedSample = nullptr; aNumSamples--; @@ -385,17 +385,17 @@ MP4TrackDemuxer::GetSamples(int32_t aNum if (!sample->Size()) { continue; } samples->mSamples.AppendElement(sample); aNumSamples--; } if (samples->mSamples.IsEmpty()) { - return SamplesPromise::CreateAndReject(DemuxerFailureReason::END_OF_STREAM, __func__); + return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); } else { for (const auto& sample : samples->mSamples) { // Collect telemetry from h264 Annex B SPS. 
if (mNeedSPSForTelemetry && mp4_demuxer::AnnexB::HasSPS(sample)) { RefPtr<MediaByteBuffer> extradata = mp4_demuxer::AnnexB::ExtractExtraData(sample); mNeedSPSForTelemetry = AccumulateSPSTelemetry(extradata); } @@ -456,17 +456,17 @@ MP4TrackDemuxer::SkipToNextRandomAccessP found = true; mQueuedSample = sample; } } SetNextKeyFrameTime(); if (found) { return SkipAccessPointPromise::CreateAndResolve(parsed, __func__); } else { - SkipFailureHolder failure(DemuxerFailureReason::END_OF_STREAM, parsed); + SkipFailureHolder failure(NS_ERROR_DOM_MEDIA_END_OF_STREAM, parsed); return SkipAccessPointPromise::CreateAndReject(Move(failure), __func__); } } media::TimeIntervals MP4TrackDemuxer::GetBuffered() { EnsureUpToDateIndex();
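On the consuming side, code that used to switch on DemuxerFailureReason now branches on the nsresult carried by the MediaResult. A minimal sketch of a reject handler for a demuxer's SamplesPromise, using only codes that appear in this patch; the handler itself is hypothetical:

// Hypothetical reject handler for MediaTrackDemuxer::GetSamples().
static void
OnSamplesRejected(const mozilla::MediaResult& aError)
{
  switch (aError.Code()) {
    case NS_ERROR_DOM_MEDIA_END_OF_STREAM:
      // No more samples will ever arrive; finish the track.
      break;
    case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA:
      // More data may be appended later (MSE); retry when notified.
      break;
    default:
      // NS_ERROR_DOM_MEDIA_DEMUXER_ERR and anything else is treated as fatal.
      break;
  }
}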
--- a/dom/media/gtest/MockMediaDecoderOwner.h +++ b/dom/media/gtest/MockMediaDecoderOwner.h @@ -20,17 +20,17 @@ public: } void FireTimeUpdate(bool aPeriodic) override {} bool GetPaused() override { return false; } void MetadataLoaded(const MediaInfo* aInfo, nsAutoPtr<const MetadataTags> aTags) override { } void NetworkError() override {} - void DecodeError() override {} + void DecodeError(const MediaResult& aError) override {} bool HasError() const override { return false; } void LoadAborted() override {} void PlaybackEnded() override {} void SeekStarted() override {} void SeekCompleted() override {} void DownloadProgressed() override {} void UpdateReadyState() override {} void FirstFrameLoaded() override {}
--- a/dom/media/gtest/TestMP4Demuxer.cpp +++ b/dom/media/gtest/TestMP4Demuxer.cpp @@ -114,29 +114,29 @@ public: [track, binding] () { track->GetSamples()->Then(binding->mTaskQueue, __func__, [track, binding] (RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples) { if (aSamples->mSamples.Length()) { binding->mSamples.AppendElements(aSamples->mSamples); binding->CheckTrackSamples(track); } }, - [binding] (DemuxerFailureReason aReason) { - if (aReason == DemuxerFailureReason::DEMUXER_ERROR) { - EXPECT_TRUE(false); - binding->mCheckTrackSamples.Reject(NS_ERROR_FAILURE, __func__); - } else if (aReason == DemuxerFailureReason::END_OF_STREAM) { + [binding] (const MediaResult& aError) { + if (aError == NS_ERROR_DOM_MEDIA_END_OF_STREAM) { EXPECT_TRUE(binding->mSamples.Length() > 1); for (uint32_t i = 0; i < (binding->mSamples.Length() - 1); i++) { EXPECT_LT(binding->mSamples[i]->mTimecode, binding->mSamples[i + 1]->mTimecode); if (binding->mSamples[i]->mKeyframe) { binding->mKeyFrameTimecodes.AppendElement(binding->mSamples[i]->mTimecode); } } binding->mCheckTrackSamples.Resolve(true, __func__); + } else { + EXPECT_TRUE(false); + binding->mCheckTrackSamples.Reject(aError, __func__); } } ); } ); return p; }
--- a/dom/media/gtest/TestMediaFormatReader.cpp +++ b/dom/media/gtest/TestMediaFormatReader.cpp @@ -83,17 +83,17 @@ public: { EXPECT_TRUE(aMetadata); mReader->RequestVideoData(true, 0) ->Then(mReader->OwnerThread(), __func__, this, &MediaFormatReaderBinding::OnVideoRawDataDemuxed, &MediaFormatReaderBinding::OnNotDemuxed); } - void OnMetadataNotRead(ReadMetadataFailureReason aReason) { + void OnMetadataNotRead(const MediaResult& aError) { EXPECT_TRUE(false); ReaderShutdown(); } void OnAudioRawDataDemuxed(MediaData* aAudioSample) { EXPECT_TRUE(aAudioSample); EXPECT_EQ(MediaData::RAW_DATA, aAudioSample->mType); @@ -102,17 +102,17 @@ public: void OnVideoRawDataDemuxed(MediaData* aVideoSample) { EXPECT_TRUE(aVideoSample); EXPECT_EQ(MediaData::RAW_DATA, aVideoSample->mType); ReaderShutdown(); } - void OnNotDemuxed(MediaDecoderReader::NotDecodedReason aReason) + void OnNotDemuxed(const MediaResult& aReason) { EXPECT_TRUE(false); ReaderShutdown(); } void ReaderShutdown() { RefPtr<MediaFormatReaderBinding> self = this;
--- a/dom/media/mediasource/MediaSource.cpp +++ b/dom/media/mediasource/MediaSource.cpp @@ -5,16 +5,17 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "MediaSource.h" #include "AsyncEventRunner.h" #include "DecoderTraits.h" #include "Benchmark.h" #include "DecoderDoctorDiagnostics.h" +#include "MediaResult.h" #include "MediaSourceUtils.h" #include "SourceBuffer.h" #include "SourceBufferList.h" #include "mozilla/ErrorResult.h" #include "mozilla/FloatingPoint.h" #include "mozilla/Preferences.h" #include "mozilla/dom/BindingDeclarations.h" #include "mozilla/dom/HTMLMediaElement.h" @@ -325,23 +326,34 @@ MediaSource::EndOfStream(const Optional< mDecoder->Ended(true); return; } switch (aError.Value()) { case MediaSourceEndOfStreamError::Network: mDecoder->NetworkError(); break; case MediaSourceEndOfStreamError::Decode: - mDecoder->DecodeError(); + mDecoder->DecodeError(NS_ERROR_DOM_MEDIA_FATAL_ERR); break; default: aRv.Throw(NS_ERROR_DOM_TYPE_ERR); } } +void +MediaSource::EndOfStream(const MediaResult& aError) +{ + MOZ_ASSERT(NS_IsMainThread()); + MSE_API("EndOfStream(aError=%d)", aError.Code()); + + SetReadyState(MediaSourceReadyState::Ended); + mSourceBuffers->Ended(); + mDecoder->DecodeError(aError); +} + /* static */ bool MediaSource::IsTypeSupported(const GlobalObject& aOwner, const nsAString& aType) { MOZ_ASSERT(NS_IsMainThread()); DecoderDoctorDiagnostics diagnostics; nsresult rv = IsTypeSupported(aType, &diagnostics); nsCOMPtr<nsPIDOMWindowInner> window = do_QueryInterface(aOwner.GetAsSupports()); diagnostics.StoreFormatDiagnostics(window ? window->GetExtantDoc() : nullptr,
--- a/dom/media/mediasource/MediaSource.h +++ b/dom/media/mediasource/MediaSource.h @@ -24,16 +24,17 @@ struct JSContext; class JSObject; class nsPIDOMWindowInner; namespace mozilla { class ErrorResult; template <typename T> class AsyncEventRunner; +class MediaResult; namespace dom { class GlobalObject; class SourceBuffer; class SourceBufferList; template <typename T> class Optional; @@ -55,16 +56,17 @@ public: double Duration(); void SetDuration(double aDuration, ErrorResult& aRv); already_AddRefed<SourceBuffer> AddSourceBuffer(const nsAString& aType, ErrorResult& aRv); void RemoveSourceBuffer(SourceBuffer& aSourceBuffer, ErrorResult& aRv); void EndOfStream(const Optional<MediaSourceEndOfStreamError>& aError, ErrorResult& aRv); + void EndOfStream(const MediaResult& aError); void SetLiveSeekableRange(double aStart, double aEnd, ErrorResult& aRv); void ClearLiveSeekableRange(ErrorResult& aRv); static bool IsTypeSupported(const GlobalObject&, const nsAString& aType); static nsresult IsTypeSupported(const nsAString& aType, DecoderDoctorDiagnostics* aDiagnostics); static bool Enabled(JSContext* cx, JSObject* aGlobal);
--- a/dom/media/mediasource/MediaSourceDemuxer.cpp +++ b/dom/media/mediasource/MediaSourceDemuxer.cpp @@ -242,17 +242,17 @@ MediaSourceDemuxer::GetManager(TrackType return mVideoTrack; default: return nullptr; } } MediaSourceDemuxer::~MediaSourceDemuxer() { - mInitPromise.RejectIfExists(DemuxerFailureReason::SHUTDOWN, __func__); + mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } void MediaSourceDemuxer::GetMozDebugReaderData(nsAString& aString) { MonitorAutoLock mon(mMonitor); nsAutoCString result; result += nsPrintfCString("Dumping data for demuxer %p:\n", this); @@ -374,99 +374,96 @@ MediaSourceTrackDemuxer::BreakCycles() self->mManager = nullptr; } ); mParent->GetTaskQueue()->Dispatch(task.forget()); } RefPtr<MediaSourceTrackDemuxer::SeekPromise> MediaSourceTrackDemuxer::DoSeek(media::TimeUnit aTime) { - typedef TrackBuffersManager::GetSampleResult Result; - TimeIntervals buffered = mManager->Buffered(mType); // Fuzz factor represents a +/- threshold. So when seeking it allows the gap // to be twice as big as the fuzz value. We only want to allow EOS_FUZZ gap. buffered.SetFuzz(MediaSourceDemuxer::EOS_FUZZ / 2); TimeUnit seekTime = std::max(aTime - mPreRoll, TimeUnit::FromMicroseconds(0)); if (mManager->IsEnded() && seekTime >= buffered.GetEnd()) { // We're attempting to seek past the end time. Cap seekTime so that we seek // to the last sample instead. seekTime = std::max(mManager->HighestStartTime(mType) - mPreRoll, TimeUnit::FromMicroseconds(0)); } if (!buffered.ContainsWithStrictEnd(seekTime)) { if (!buffered.ContainsWithStrictEnd(aTime)) { // We don't have the data to seek to. - return SeekPromise::CreateAndReject(DemuxerFailureReason::WAITING_FOR_DATA, + return SeekPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA, __func__); } // Theoretically we should reject the promise with WAITING_FOR_DATA, // however, to avoid unwanted regressions we assume that if at this time // we don't have the wanted data it won't come later. // Instead of using the pre-rolled time, use the earliest time available in // the interval. TimeIntervals::IndexType index = buffered.Find(aTime); MOZ_ASSERT(index != TimeIntervals::NoIndex); seekTime = buffered[index].mStart; } seekTime = mManager->Seek(mType, seekTime, MediaSourceDemuxer::EOS_FUZZ); - Result result; + MediaResult result = NS_OK; RefPtr<MediaRawData> sample = mManager->GetSample(mType, media::TimeUnit(), result); - MOZ_ASSERT(result != Result::ERROR && sample); + MOZ_ASSERT(NS_SUCCEEDED(result) && sample); mNextSample = Some(sample); mReset = false; { MonitorAutoLock mon(mMonitor); mNextRandomAccessPoint = mManager->GetNextRandomAccessPoint(mType, MediaSourceDemuxer::EOS_FUZZ); } return SeekPromise::CreateAndResolve(seekTime, __func__); } RefPtr<MediaSourceTrackDemuxer::SamplesPromise> MediaSourceTrackDemuxer::DoGetSamples(int32_t aNumSamples) { - typedef TrackBuffersManager::GetSampleResult Result; - if (mReset) { // If a seek (or reset) was recently performed, we ensure that the data // we are about to retrieve is still available. 
TimeIntervals buffered = mManager->Buffered(mType); buffered.SetFuzz(MediaSourceDemuxer::EOS_FUZZ / 2); if (!buffered.Length() && mManager->IsEnded()) { - return SamplesPromise::CreateAndReject(DemuxerFailureReason::END_OF_STREAM, + return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); } if (!buffered.ContainsWithStrictEnd(TimeUnit::FromMicroseconds(0))) { - return SamplesPromise::CreateAndReject(DemuxerFailureReason::WAITING_FOR_DATA, + return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA, __func__); } mReset = false; } RefPtr<MediaRawData> sample; if (mNextSample) { sample = mNextSample.ref(); mNextSample.reset(); } else { - Result result; + MediaResult result = NS_OK; sample = mManager->GetSample(mType, MediaSourceDemuxer::EOS_FUZZ, result); if (!sample) { - if (result == Result::ERROR) { - return SamplesPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__); + if (result == NS_ERROR_DOM_MEDIA_END_OF_STREAM || + result == NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA) { + return SamplesPromise::CreateAndReject( + (result == NS_ERROR_DOM_MEDIA_END_OF_STREAM && mManager->IsEnded()) + ? NS_ERROR_DOM_MEDIA_END_OF_STREAM + : NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA, __func__); } - return SamplesPromise::CreateAndReject( - (result == Result::EOS && mManager->IsEnded()) - ? DemuxerFailureReason::END_OF_STREAM - : DemuxerFailureReason::WAITING_FOR_DATA, __func__); + return SamplesPromise::CreateAndReject(result, __func__); } } RefPtr<SamplesHolder> samples = new SamplesHolder; samples->mSamples.AppendElement(sample); if (mNextRandomAccessPoint.ToMicroseconds() <= sample->mTime) { MonitorAutoLock mon(mMonitor); mNextRandomAccessPoint = mManager->GetNextRandomAccessPoint(mType, MediaSourceDemuxer::EOS_FUZZ); @@ -487,14 +484,14 @@ MediaSourceTrackDemuxer::DoSkipToNextRan aTimeThreadshold, MediaSourceDemuxer::EOS_FUZZ, found); if (found) { return SkipAccessPointPromise::CreateAndResolve(parsed, __func__); } } SkipFailureHolder holder( - mManager->IsEnded() ? DemuxerFailureReason::END_OF_STREAM : - DemuxerFailureReason::WAITING_FOR_DATA, parsed); + mManager->IsEnded() ? NS_ERROR_DOM_MEDIA_END_OF_STREAM : + NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA, parsed); return SkipAccessPointPromise::CreateAndReject(holder, __func__); } } // namespace mozilla
--- a/dom/media/mediasource/MediaSourceDemuxer.h +++ b/dom/media/mediasource/MediaSourceDemuxer.h @@ -15,16 +15,17 @@ #include "MediaDataDemuxer.h" #include "MediaDecoderReader.h" #include "MediaResource.h" #include "MediaSource.h" #include "TrackBuffersManager.h" namespace mozilla { +class MediaResult; class MediaSourceTrackDemuxer; class MediaSourceDemuxer : public MediaDataDemuxer { public: explicit MediaSourceDemuxer(); RefPtr<InitPromise> Init() override; @@ -114,17 +115,17 @@ public: { return false; } private: RefPtr<SeekPromise> DoSeek(media::TimeUnit aTime); RefPtr<SamplesPromise> DoGetSamples(int32_t aNumSamples); RefPtr<SkipAccessPointPromise> DoSkipToNextRandomAccessPoint(media::TimeUnit aTimeThreadshold); - already_AddRefed<MediaRawData> GetSample(DemuxerFailureReason& aFailure); + already_AddRefed<MediaRawData> GetSample(MediaResult& aError); // Return the timestamp of the next keyframe after mLastSampleIndex. media::TimeUnit GetNextRandomAccessPoint(); RefPtr<MediaSourceDemuxer> mParent; RefPtr<TrackBuffersManager> mManager; TrackInfo::TrackType mType; // Monitor protecting members below accessed from multiple threads. Monitor mMonitor;
--- a/dom/media/mediasource/SourceBuffer.cpp +++ b/dom/media/mediasource/SourceBuffer.cpp @@ -440,50 +440,47 @@ SourceBuffer::AppendDataCompletedWithSuc mCurrentAttributes = aResult.second(); CheckEndTime(); StopUpdating(); } void -SourceBuffer::AppendDataErrored(nsresult aError) +SourceBuffer::AppendDataErrored(const MediaResult& aError) { MOZ_ASSERT(mUpdating); mPendingAppend.Complete(); - switch (aError) { - case NS_ERROR_ABORT: + switch (aError.Code()) { + case NS_ERROR_DOM_MEDIA_CANCELED: // Nothing further to do as the trackbuffer has been shutdown. // or append was aborted and abort() has handled all the events. break; default: - AppendError(true); + AppendError(aError); break; } } void -SourceBuffer::AppendError(bool aDecoderError) +SourceBuffer::AppendError(const MediaResult& aDecodeError) { MOZ_ASSERT(NS_IsMainThread()); ResetParserState(); mUpdating = false; QueueAsyncSimpleEvent("error"); QueueAsyncSimpleEvent("updateend"); - if (aDecoderError) { - Optional<MediaSourceEndOfStreamError> decodeError( - MediaSourceEndOfStreamError::Decode); - ErrorResult dummy; - mMediaSource->EndOfStream(decodeError, dummy); - } + MOZ_ASSERT(NS_FAILED(aDecodeError)); + + mMediaSource->EndOfStream(aDecodeError); } already_AddRefed<MediaByteBuffer> SourceBuffer::PrepareAppend(const uint8_t* aData, uint32_t aLength, ErrorResult& aRv) { typedef TrackBuffersManager::EvictDataResult Result; if (!IsAttached() || mUpdating) {
--- a/dom/media/mediasource/SourceBuffer.h +++ b/dom/media/mediasource/SourceBuffer.h @@ -150,26 +150,26 @@ private: // Shared implementation of AppendBuffer overloads. void AppendData(const uint8_t* aData, uint32_t aLength, ErrorResult& aRv); // Implement the "Append Error Algorithm". // Will call endOfStream() with "decode" error if aDecodeError is true. // 3.5.3 Append Error Algorithm // http://w3c.github.io/media-source/#sourcebuffer-append-error - void AppendError(bool aDecoderError); + void AppendError(const MediaResult& aDecodeError); // Implements the "Prepare Append Algorithm". Returns MediaByteBuffer object // on success or nullptr (with aRv set) on error. already_AddRefed<MediaByteBuffer> PrepareAppend(const uint8_t* aData, uint32_t aLength, ErrorResult& aRv); void AppendDataCompletedWithSuccess(SourceBufferTask::AppendBufferResult aResult); - void AppendDataErrored(nsresult aError); + void AppendDataErrored(const MediaResult& aError); RefPtr<MediaSource> mMediaSource; RefPtr<TrackBuffersManager> mTrackBuffersManager; SourceBufferAttributes mCurrentAttributes; bool mUpdating;
--- a/dom/media/mediasource/SourceBufferTask.h +++ b/dom/media/mediasource/SourceBufferTask.h @@ -7,33 +7,34 @@ #ifndef MOZILLA_SOURCEBUFFERTASK_H_ #define MOZILLA_SOURCEBUFFERTASK_H_ #include "mozilla/MozPromise.h" #include "mozilla/Pair.h" #include "mozilla/RefPtr.h" #include "SourceBufferAttributes.h" #include "TimeUnits.h" +#include "MediaResult.h" namespace mozilla { class SourceBufferTask { public: NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SourceBufferTask); enum class Type { AppendBuffer, Abort, Reset, RangeRemoval, EvictData, Detach }; typedef Pair<bool, SourceBufferAttributes> AppendBufferResult; - typedef MozPromise<AppendBufferResult, nsresult, /* IsExclusive = */ true> AppendPromise; + typedef MozPromise<AppendBufferResult, MediaResult, /* IsExclusive = */ true> AppendPromise; typedef MozPromise<bool, nsresult, /* IsExclusive = */ true> RangeRemovalPromise; virtual Type GetType() const = 0; template<typename ReturnType> ReturnType* As() { MOZ_ASSERT(this->GetType() == ReturnType::sType);
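With AppendPromise now rejecting with a MediaResult, the append pipeline can attach a diagnostic message to the failure it hands back to SourceBuffer. A minimal producer-side sketch under that typedef; the helper and the message string are hypothetical, and MozPromiseHolder::RejectIfExists is used the same way elsewhere in this patch:

#include "SourceBufferTask.h" // in-tree header providing AppendPromise

// Hypothetical producer-side helper, simplified from TrackBuffersManager.
static void
RejectAppendExample(
  mozilla::MozPromiseHolder<mozilla::SourceBufferTask::AppendPromise>& aHolder)
{
  aHolder.RejectIfExists(
    mozilla::MediaResult(NS_ERROR_DOM_MEDIA_DEMUXER_ERR,
                         "Could not parse the appended media segment"),
    __func__);
}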
--- a/dom/media/mediasource/TrackBuffersManager.cpp +++ b/dom/media/mediasource/TrackBuffersManager.cpp @@ -282,27 +282,31 @@ TrackBuffersManager::EvictData(const Tim return EvictDataResult::NO_DATA_EVICTED; } if (toEvict <= 512*1024) { // Don't bother evicting less than 512KB. mEvictionState = EvictionState::NO_EVICTION_NEEDED; return EvictDataResult::CANT_EVICT; } - if (mBufferFull && mEvictionState == EvictionState::EVICTION_COMPLETED) { - return EvictDataResult::BUFFER_FULL; - } + EvictDataResult result; - MSE_DEBUG("Reaching our size limit, schedule eviction of %lld bytes", toEvict); - - mEvictionState = EvictionState::EVICTION_NEEDED; - + if (mBufferFull && mEvictionState == EvictionState::EVICTION_COMPLETED) { + // Our buffer is currently full. We will make another eviction attempt. + // However, the current appendBuffer will fail as we can't know ahead of + // time if the eviction will later succeed. + result = EvictDataResult::BUFFER_FULL; + } else { + mEvictionState = EvictionState::EVICTION_NEEDED; + result = EvictDataResult::NO_DATA_EVICTED; + } + MSE_DEBUG("Reached our size limit, schedule eviction of %lld bytes", toEvict); QueueTask(new EvictDataTask(aPlaybackTime, toEvict)); - return EvictDataResult::NO_DATA_EVICTED; + return result; } TimeIntervals TrackBuffersManager::Buffered() { MSE_DEBUG(""); // http://w3c.github.io/media-source/index.html#widl-SourceBuffer-buffered // 2. Let highest end time be the largest track buffer ranges end time across all the track buffers managed by this SourceBuffer object. @@ -712,17 +716,17 @@ TrackBuffersManager::SegmentParserLoop() [self] (bool aNeedMoreData) { self->mProcessingRequest.Complete(); if (aNeedMoreData) { self->NeedMoreData(); } else { self->ScheduleSegmentParserLoop(); } }, - [self] (nsresult aRejectValue) { + [self] (const MediaResult& aRejectValue) { self->mProcessingRequest.Complete(); self->RejectAppend(aRejectValue, __func__); })); return; } } } @@ -738,19 +742,19 @@ TrackBuffersManager::NeedMoreData() *mSourceBufferAttributes), __func__); mSourceBufferAttributes = nullptr; mCurrentTask = nullptr; ProcessTasks(); } void -TrackBuffersManager::RejectAppend(nsresult aRejectValue, const char* aName) +TrackBuffersManager::RejectAppend(const MediaResult& aRejectValue, const char* aName) { - MSE_DEBUG("rv=%d", aRejectValue); + MSE_DEBUG("rv=%u", aRejectValue.Code()); MOZ_DIAGNOSTIC_ASSERT(mCurrentTask && mCurrentTask->GetType() == SourceBufferTask::Type::AppendBuffer); mCurrentTask->As<AppendBufferTask>()->mPromise.Reject(aRejectValue, __func__); mSourceBufferAttributes = nullptr; mCurrentTask = nullptr; ProcessTasks(); } @@ -1090,22 +1094,22 @@ TrackBuffersManager::OnDemuxerInitDone(n // 4. Set append state to WAITING_FOR_SEGMENT. SetAppendState(AppendState::WAITING_FOR_SEGMENT); // 5. Jump to the loop top step above. 
ScheduleSegmentParserLoop(); } void -TrackBuffersManager::OnDemuxerInitFailed(DemuxerFailureReason aFailure) +TrackBuffersManager::OnDemuxerInitFailed(const MediaResult& aError) { - MOZ_ASSERT(aFailure != DemuxerFailureReason::WAITING_FOR_DATA); + MOZ_ASSERT(aError != NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA); mDemuxerInitRequest.Complete(); - RejectAppend(NS_ERROR_FAILURE, __func__); + RejectAppend(aError, __func__); } RefPtr<TrackBuffersManager::CodedFrameProcessingPromise> TrackBuffersManager::CodedFrameProcessing() { MOZ_ASSERT(OnTaskQueue()); MOZ_ASSERT(mProcessingPromise.IsEmpty()); @@ -1144,39 +1148,32 @@ TrackBuffersManager::CodedFrameProcessin DoDemuxVideo(); return p; } void TrackBuffersManager::OnDemuxFailed(TrackType aTrack, - DemuxerFailureReason aFailure) + const MediaResult& aError) { MOZ_ASSERT(OnTaskQueue()); - MSE_DEBUG("Failed to demux %s, failure:%d", - aTrack == TrackType::kVideoTrack ? "video" : "audio", aFailure); - switch (aFailure) { - case DemuxerFailureReason::END_OF_STREAM: - case DemuxerFailureReason::WAITING_FOR_DATA: + MSE_DEBUG("Failed to demux %s, failure:%u", + aTrack == TrackType::kVideoTrack ? "video" : "audio", aError.Code()); + switch (aError.Code()) { + case NS_ERROR_DOM_MEDIA_END_OF_STREAM: + case NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA: if (aTrack == TrackType::kVideoTrack) { DoDemuxAudio(); } else { CompleteCodedFrameProcessing(); } break; - case DemuxerFailureReason::DEMUXER_ERROR: - RejectProcessing(NS_ERROR_FAILURE, __func__); - break; - case DemuxerFailureReason::CANCELED: - case DemuxerFailureReason::SHUTDOWN: - RejectProcessing(NS_ERROR_ABORT, __func__); - break; default: - MOZ_ASSERT(false); + RejectProcessing(aError, __func__); break; } } void TrackBuffersManager::DoDemuxVideo() { MOZ_ASSERT(OnTaskQueue()); @@ -1318,17 +1315,17 @@ TrackBuffersManager::CompleteCodedFrameP // 7. Set append state to WAITING_FOR_SEGMENT. SetAppendState(AppendState::WAITING_FOR_SEGMENT); // 8. Jump to the loop top step above. ResolveProcessing(false, __func__); } void -TrackBuffersManager::RejectProcessing(nsresult aRejectValue, const char* aName) +TrackBuffersManager::RejectProcessing(const MediaResult& aRejectValue, const char* aName) { mProcessingPromise.RejectIfExists(aRejectValue, __func__); } void TrackBuffersManager::ResolveProcessing(bool aResolveValue, const char* aName) { mProcessingPromise.ResolveIfExists(aResolveValue, __func__); @@ -2152,53 +2149,53 @@ TrackBuffersManager::GetSample(TrackInfo // TODO, check that we have continuous data based on the sanitized buffered // range instead. return nullptr; } already_AddRefed<MediaRawData> TrackBuffersManager::GetSample(TrackInfo::TrackType aTrack, const TimeUnit& aFuzz, - GetSampleResult& aResult) + MediaResult& aResult) { MOZ_ASSERT(OnTaskQueue()); auto& trackData = GetTracksData(aTrack); const TrackBuffer& track = GetTrackBuffer(aTrack); - aResult = GetSampleResult::WAITING_FOR_DATA; + aResult = NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA; if (!track.Length()) { - aResult = GetSampleResult::EOS; + aResult = NS_ERROR_DOM_MEDIA_END_OF_STREAM; return nullptr; } if (trackData.mNextGetSampleIndex.isNothing() && trackData.mNextSampleTimecode == TimeUnit()) { // First demux, get first sample. 
trackData.mNextGetSampleIndex = Some(0u); } if (trackData.mNextGetSampleIndex.isSome()) { if (trackData.mNextGetSampleIndex.ref() >= track.Length()) { - aResult = GetSampleResult::EOS; + aResult = NS_ERROR_DOM_MEDIA_END_OF_STREAM; return nullptr; } const MediaRawData* sample = GetSample(aTrack, trackData.mNextGetSampleIndex.ref(), trackData.mNextSampleTimecode, trackData.mNextSampleTime, aFuzz); if (!sample) { return nullptr; } RefPtr<MediaRawData> p = sample->Clone(); if (!p) { - aResult = GetSampleResult::ERROR; + aResult = MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__); return nullptr; } trackData.mNextGetSampleIndex.ref()++; // Estimate decode timestamp and timestamp of the next sample. TimeUnit nextSampleTimecode = TimeUnit::FromMicroseconds(sample->mTimecode + sample->mDuration); TimeUnit nextSampleTime = TimeUnit::FromMicroseconds(sample->GetEndTime()); @@ -2214,50 +2211,50 @@ TrackBuffersManager::GetSample(TrackInfo TimeUnit::FromMicroseconds(nextSample->mTimecode); trackData.mNextSampleTime = TimeUnit::FromMicroseconds(nextSample->mTime); } else { // Next sample isn't available yet. Use estimates. trackData.mNextSampleTimecode = nextSampleTimecode; trackData.mNextSampleTime = nextSampleTime; } - aResult = GetSampleResult::NO_ERROR; + aResult = NS_OK; return p.forget(); } if (trackData.mNextSampleTimecode.ToMicroseconds() > track.LastElement()->mTimecode + track.LastElement()->mDuration) { // The next element is past our last sample. We're done. trackData.mNextGetSampleIndex = Some(uint32_t(track.Length())); - aResult = GetSampleResult::EOS; + aResult = NS_ERROR_DOM_MEDIA_END_OF_STREAM; return nullptr; } // Our previous index has been overwritten, attempt to find the new one. int32_t pos = FindCurrentPosition(aTrack, aFuzz); if (pos < 0) { MSE_DEBUG("Couldn't find sample (pts:%lld dts:%lld)", trackData.mNextSampleTime.ToMicroseconds(), trackData.mNextSampleTimecode.ToMicroseconds()); return nullptr; } const RefPtr<MediaRawData>& sample = track[pos]; RefPtr<MediaRawData> p = sample->Clone(); if (!p) { // OOM - aResult = GetSampleResult::ERROR; + aResult = MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__); return nullptr; } trackData.mNextGetSampleIndex = Some(uint32_t(pos)+1); trackData.mNextSampleTimecode = TimeUnit::FromMicroseconds(sample->mTimecode + sample->mDuration); trackData.mNextSampleTime = TimeUnit::FromMicroseconds(sample->GetEndTime()); - aResult = GetSampleResult::NO_ERROR; + aResult = NS_OK; return p.forget(); } int32_t TrackBuffersManager::FindCurrentPosition(TrackInfo::TrackType aTrack, const TimeUnit& aFuzz) { MOZ_ASSERT(OnTaskQueue());
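TrackBuffersManager::GetSample() now reports its status through a MediaResult out-parameter instead of the removed GetSampleResult enum. A minimal sketch of the calling pattern, simplified from MediaSourceTrackDemuxer::DoGetSamples() earlier in this changeset; the wrapper function is hypothetical and the in-tree TrackBuffersManager.h / MediaSourceDemuxer.h headers are assumed:

#include "MediaSourceDemuxer.h"
#include "TrackBuffersManager.h"

// Hypothetical caller illustrating the out-parameter pattern.
static already_AddRefed<mozilla::MediaRawData>
TryGetSample(mozilla::TrackBuffersManager* aManager,
             mozilla::TrackInfo::TrackType aTrack,
             mozilla::MediaResult& aResult)
{
  aResult = NS_OK;
  RefPtr<mozilla::MediaRawData> sample =
    aManager->GetSample(aTrack, mozilla::MediaSourceDemuxer::EOS_FUZZ, aResult);
  if (!sample) {
    // NS_ERROR_DOM_MEDIA_END_OF_STREAM and NS_ERROR_DOM_MEDIA_WAITING_FOR_DATA
    // are expected conditions here; anything else (e.g. the OOM result set
    // above) is a genuine failure.
    return nullptr;
  }
  return sample.forget();
}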
--- a/dom/media/mediasource/TrackBuffersManager.h +++ b/dom/media/mediasource/TrackBuffersManager.h @@ -10,16 +10,17 @@ #include "mozilla/Atomics.h" #include "mozilla/Maybe.h" #include "mozilla/Monitor.h" #include "AutoTaskQueue.h" #include "mozilla/dom/SourceBufferBinding.h" #include "MediaData.h" #include "MediaDataDemuxer.h" +#include "MediaResult.h" #include "MediaSourceDecoder.h" #include "SourceBufferTask.h" #include "TimeUnits.h" #include "nsAutoPtr.h" #include "nsProxyRelease.h" #include "nsString.h" #include "nsTArray.h" @@ -149,51 +150,43 @@ public: media::TimeUnit Seek(TrackInfo::TrackType aTrack, const media::TimeUnit& aTime, const media::TimeUnit& aFuzz); uint32_t SkipToNextRandomAccessPoint(TrackInfo::TrackType aTrack, const media::TimeUnit& aTimeThreadshold, const media::TimeUnit& aFuzz, bool& aFound); - enum class GetSampleResult - { - NO_ERROR, - ERROR, - WAITING_FOR_DATA, - EOS - }; - already_AddRefed<MediaRawData> GetSample(TrackInfo::TrackType aTrack, const media::TimeUnit& aFuzz, - GetSampleResult& aResult); + MediaResult& aResult); int32_t FindCurrentPosition(TrackInfo::TrackType aTrack, const media::TimeUnit& aFuzz); media::TimeUnit GetNextRandomAccessPoint(TrackInfo::TrackType aTrack, const media::TimeUnit& aFuzz); void AddSizeOfResources(MediaSourceDecoder::ResourceSizes* aSizes); private: - typedef MozPromise<bool, nsresult, /* IsExclusive = */ true> CodedFrameProcessingPromise; + typedef MozPromise<bool, MediaResult, /* IsExclusive = */ true> CodedFrameProcessingPromise; // for MediaSourceDemuxer::GetMozDebugReaderData friend class MediaSourceDemuxer; ~TrackBuffersManager(); // All following functions run on the taskqueue. RefPtr<AppendPromise> DoAppendData(RefPtr<MediaByteBuffer> aData, SourceBufferAttributes aAttributes); void ScheduleSegmentParserLoop(); void SegmentParserLoop(); void InitializationSegmentReceived(); void ShutdownDemuxers(); void CreateDemuxerforMIMEType(); void ResetDemuxingState(); void NeedMoreData(); - void RejectAppend(nsresult aRejectValue, const char* aName); + void RejectAppend(const MediaResult& aRejectValue, const char* aName); // Will return a promise that will be resolved once all frames of the current // media segment have been processed. RefPtr<CodedFrameProcessingPromise> CodedFrameProcessing(); void CompleteCodedFrameProcessing(); // Called by ResetParserState. void CompleteResetParserState(); RefPtr<RangeRemovalPromise> CodedFrameRemovalWithPromise(media::TimeInterval aInterval); @@ -238,35 +231,35 @@ private: RefPtr<MediaByteBuffer> mPendingInputBuffer; RefPtr<SourceBufferResource> mCurrentInputBuffer; RefPtr<MediaDataDemuxer> mInputDemuxer; // Length already processed in current media segment. 
uint32_t mProcessedInput; Maybe<media::TimeUnit> mLastParsedEndTime; void OnDemuxerInitDone(nsresult); - void OnDemuxerInitFailed(DemuxerFailureReason aFailure); + void OnDemuxerInitFailed(const MediaResult& aFailure); void OnDemuxerResetDone(nsresult); MozPromiseRequestHolder<MediaDataDemuxer::InitPromise> mDemuxerInitRequest; bool mEncrypted; - void OnDemuxFailed(TrackType aTrack, DemuxerFailureReason aFailure); + void OnDemuxFailed(TrackType aTrack, const MediaResult& aError); void DoDemuxVideo(); void OnVideoDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples); - void OnVideoDemuxFailed(DemuxerFailureReason aFailure) + void OnVideoDemuxFailed(const MediaResult& aError) { mVideoTracks.mDemuxRequest.Complete(); - OnDemuxFailed(TrackType::kVideoTrack, aFailure); + OnDemuxFailed(TrackType::kVideoTrack, aError); } void DoDemuxAudio(); void OnAudioDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples); - void OnAudioDemuxFailed(DemuxerFailureReason aFailure) + void OnAudioDemuxFailed(const MediaResult& aError) { mAudioTracks.mDemuxRequest.Complete(); - OnDemuxFailed(TrackType::kAudioTrack, aFailure); + OnDemuxFailed(TrackType::kAudioTrack, aError); } void DoEvictData(const media::TimeUnit& aPlaybackTime, int64_t aSizeToEvict); struct TrackData { TrackData() : mNumTracks(0) @@ -374,17 +367,17 @@ private: uint32_t FindSampleIndex(const TrackBuffer& aTrackBuffer, const media::TimeInterval& aInterval); const MediaRawData* GetSample(TrackInfo::TrackType aTrack, size_t aIndex, const media::TimeUnit& aExpectedDts, const media::TimeUnit& aExpectedPts, const media::TimeUnit& aFuzz); void UpdateBufferedRanges(); - void RejectProcessing(nsresult aRejectValue, const char* aName); + void RejectProcessing(const MediaResult& aRejectValue, const char* aName); void ResolveProcessing(bool aResolveValue, const char* aName); MozPromiseRequestHolder<CodedFrameProcessingPromise> mProcessingRequest; MozPromiseHolder<CodedFrameProcessingPromise> mProcessingPromise; // Trackbuffers definition. nsTArray<TrackData*> GetTracksList(); TrackData& GetTracksData(TrackType aTrack) {
--- a/dom/media/moz.build +++ b/dom/media/moz.build @@ -116,16 +116,17 @@ EXPORTS += [ 'MediaFormatReader.h', 'MediaInfo.h', 'MediaMetadataManager.h', 'MediaPrefs.h', 'MediaQueue.h', 'MediaRecorder.h', 'MediaResource.h', 'MediaResourceCallback.h', + 'MediaResult.h', 'MediaSegment.h', 'MediaStatistics.h', 'MediaStreamGraph.h', 'MediaStreamListener.h', 'MediaStreamVideoSink.h', 'MediaTimer.h', 'MediaTrack.h', 'MediaTrackList.h',
--- a/dom/media/ogg/OggDemuxer.cpp +++ b/dom/media/ogg/OggDemuxer.cpp @@ -210,34 +210,29 @@ OggDemuxer::StartTime(TrackInfo::TrackTy return OggState(aType).mStartTime.refOr(TimeUnit::FromMicroseconds(0)).ToMicroseconds(); } RefPtr<OggDemuxer::InitPromise> OggDemuxer::Init() { int ret = ogg_sync_init(OggSyncState(TrackInfo::kAudioTrack)); if (ret != 0) { - return InitPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_OUT_OF_MEMORY, __func__); } ret = ogg_sync_init(OggSyncState(TrackInfo::kVideoTrack)); if (ret != 0) { - return InitPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_OUT_OF_MEMORY, __func__); } - /* - if (InitBufferedState() != NS_OK) { - return InitPromise::CreateAndReject(DemuxerFailureReason::WAITING_FOR_DATA, __func__); - } - */ if (ReadMetadata() != NS_OK) { - return InitPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); } if (!GetNumberTracks(TrackInfo::kAudioTrack) && !GetNumberTracks(TrackInfo::kVideoTrack)) { - return InitPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); } return InitPromise::CreateAndResolve(NS_OK, __func__); } bool OggDemuxer::HasTrackType(TrackInfo::TrackType aType) const { @@ -1478,17 +1473,17 @@ OggTrackDemuxer::Seek(TimeUnit aTime) if (sample != nullptr) { seekTime = TimeUnit::FromMicroseconds(sample->mTime); OGG_DEBUG("%p seeked to time %lld", this, seekTime.ToMicroseconds()); } mQueuedSample = sample; return SeekPromise::CreateAndResolve(seekTime, __func__); } else { - return SeekPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__); + return SeekPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, __func__); } } RefPtr<MediaRawData> OggTrackDemuxer::NextSample() { if (mQueuedSample) { RefPtr<MediaRawData> nextSample = mQueuedSample; @@ -1511,30 +1506,30 @@ OggTrackDemuxer::NextSample() return data; } RefPtr<OggTrackDemuxer::SamplesPromise> OggTrackDemuxer::GetSamples(int32_t aNumSamples) { RefPtr<SamplesHolder> samples = new SamplesHolder; if (!aNumSamples) { - return SamplesPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__); + return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, __func__); } while (aNumSamples) { RefPtr<MediaRawData> sample(NextSample()); if (!sample) { break; } samples->mSamples.AppendElement(sample); aNumSamples--; } if (samples->mSamples.IsEmpty()) { - return SamplesPromise::CreateAndReject(DemuxerFailureReason::END_OF_STREAM, __func__); + return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); } else { return SamplesPromise::CreateAndResolve(samples, __func__); } } void OggTrackDemuxer::Reset() { @@ -1558,17 +1553,17 @@ OggTrackDemuxer::SkipToNextRandomAccessP } } if (found) { OGG_DEBUG("next sample: %f (parsed: %d)", TimeUnit::FromMicroseconds(sample->mTime).ToSeconds(), parsed); return SkipAccessPointPromise::CreateAndResolve(parsed, __func__); } else { - SkipFailureHolder failure(DemuxerFailureReason::END_OF_STREAM, parsed); + SkipFailureHolder failure(NS_ERROR_DOM_MEDIA_END_OF_STREAM, parsed); return SkipAccessPointPromise::CreateAndReject(Move(failure), __func__); } } TimeIntervals OggTrackDemuxer::GetBuffered() { return mParent->GetBuffered(mType);
--- a/dom/media/omx/MediaOmxReader.cpp +++ b/dom/media/omx/MediaOmxReader.cpp @@ -170,17 +170,17 @@ MediaOmxReader::Shutdown() p->Then(AbstractThread::MainThread(), __func__, this, &MediaOmxReader::ReleaseDecoder, &MediaOmxReader::ReleaseDecoder); return p; } void MediaOmxReader::ReleaseResources() { mMediaResourceRequest.DisconnectIfExists(); - mMetadataPromise.RejectIfExists(ReadMetadataFailureReason::METADATA_ERROR, __func__); + mMetadataPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); ResetDecode(); // Before freeing a video codec, all video buffers needed to be released // even from graphics pipeline. VideoFrameContainer* container = mDecoder->GetVideoFrameContainer(); if (container) { container->ClearCurrentFrame(); } @@ -216,17 +216,17 @@ MediaOmxReader::AsyncReadMetadata() { MOZ_ASSERT(OnTaskQueue()); EnsureActive(); // Initialize the internal OMX Decoder. nsresult rv = InitOmxDecoder(); if (NS_FAILED(rv)) { return MediaDecoderReader::MetadataPromise::CreateAndReject( - ReadMetadataFailureReason::METADATA_ERROR, __func__); + NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); } bool isMP3 = mDecoder->GetResource()->GetContentType().EqualsASCII(AUDIO_MP3); if (isMP3) { // When read sdcard's file on b2g platform at constructor, // the mDecoder->GetResource()->GetLength() would return -1. // Delay set the total duration on this function. mMP3FrameParser.SetLength(mDecoder->GetResource()->GetLength()); @@ -238,29 +238,29 @@ MediaOmxReader::AsyncReadMetadata() RefPtr<MediaOmxReader> self = this; mMediaResourceRequest.Begin(mOmxDecoder->AllocateMediaResources() ->Then(OwnerThread(), __func__, [self] (bool) -> void { self->mMediaResourceRequest.Complete(); self->HandleResourceAllocated(); }, [self] (bool) -> void { self->mMediaResourceRequest.Complete(); - self->mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__); + self->mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); })); return p; } void MediaOmxReader::HandleResourceAllocated() { EnsureActive(); // After resources are available, set the metadata. if (!mOmxDecoder->EnsureMetadata()) { - mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__); + mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); return; } bool isMP3 = mDecoder->GetResource()->GetContentType().EqualsASCII(AUDIO_MP3); if (isMP3 && mMP3FrameParser.IsMP3()) { // Check if the MP3 frame parser found a duration. mLastParserDuration = mMP3FrameParser.GetDuration(); } @@ -284,17 +284,17 @@ void MediaOmxReader::HandleResourceAlloc &width, &height); nsIntRect pictureRect(0, 0, width, height); // Validate the container-reported frame and pictureRect sizes. This ensures // that our video frame creation code doesn't overflow. nsIntSize displaySize(displayWidth, displayHeight); nsIntSize frameSize(width, height); if (!IsValidVideoRegion(frameSize, pictureRect, displaySize)) { - mMetadataPromise.Reject(ReadMetadataFailureReason::METADATA_ERROR, __func__); + mMetadataPromise.Reject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); return; } // Video track's frame sizes will not overflow. Activate the video track. mHasVideo = true; mInfo.mVideo.mDisplay = displaySize; mPicture = pictureRect; mInitialFrame = frameSize;
--- a/dom/media/platforms/PDMFactory.cpp +++ b/dom/media/platforms/PDMFactory.cpp @@ -262,17 +262,17 @@ PDMFactory::CreateDecoderWithPDM(Platfor TimeDuration::FromMilliseconds(MediaPrefs::PDMFuzzingInterval())); callbackWrapper->SetDontDelayInputExhausted(!MediaPrefs::PDMFuzzingDelayInputExhausted()); callback = callbackWrapper.get(); } CreateDecoderParams params = aParams; params.mCallback = callback; - if (MP4Decoder::IsH264(config.mMimeType)) { + if (MP4Decoder::IsH264(config.mMimeType) && !aParams.mUseBlankDecoder) { RefPtr<H264Converter> h = new H264Converter(aPDM, params); const nsresult rv = h->GetLastError(); if (NS_SUCCEEDED(rv) || rv == NS_ERROR_NOT_INITIALIZED) { // The H264Converter either successfully created the wrapped decoder, // or there wasn't enough AVCC data to do so. Otherwise, there was some // problem, for example WMF DLLs were missing. m = h.forget(); }
--- a/dom/media/platforms/PlatformDecoderModule.h +++ b/dom/media/platforms/PlatformDecoderModule.h @@ -9,16 +9,17 @@ #include "MediaDecoderReader.h" #include "mozilla/MozPromise.h" #include "mozilla/layers/LayersTypes.h" #include "nsTArray.h" #include "mozilla/RefPtr.h" #include "GMPService.h" #include <queue> +#include "MediaResult.h" namespace mozilla { class TrackInfo; class AudioInfo; class VideoInfo; class MediaRawData; class DecoderDoctorDiagnostics; @@ -149,34 +150,29 @@ protected: // On Windows the task queue's threads in have MSCOM initialized with // COINIT_MULTITHREADED. // It is safe to store a reference to aConfig. // This is called on the decode task queue. virtual already_AddRefed<MediaDataDecoder> CreateAudioDecoder(const CreateDecoderParams& aParams) = 0; }; -enum class MediaDataDecoderError : uint8_t{ - FATAL_ERROR, - DECODE_ERROR -}; - // A callback used by MediaDataDecoder to return output/errors to the // MediaFormatReader. // Implementation is threadsafe, and can be called on any thread. class MediaDataDecoderCallback { public: virtual ~MediaDataDecoderCallback() {} // Called by MediaDataDecoder when a sample has been decoded. virtual void Output(MediaData* aData) = 0; // Denotes an error in the decoding process. The reader will stop calling // the decoder. - virtual void Error(MediaDataDecoderError aError) = 0; + virtual void Error(const MediaResult& aError) = 0; // Denotes that the last input sample has been inserted into the decoder, // and no more output can be produced unless more input is sent. // A frame decoding session is completed once InputExhausted has been called. // MediaDataDecoder::Input will not be called again until InputExhausted has // been called. virtual void InputExhausted() = 0; @@ -214,23 +210,18 @@ public: // If an error occurs at any point after the Init promise has been // completed, then Error() must be called on the associated // MediaDataDecoderCallback. class MediaDataDecoder { protected: virtual ~MediaDataDecoder() {}; public: - enum class DecoderFailureReason : uint8_t { - INIT_ERROR, - CANCELED - }; - typedef TrackInfo::TrackType TrackType; - typedef MozPromise<TrackType, DecoderFailureReason, /* IsExclusive = */ true> InitPromise; + typedef MozPromise<TrackType, MediaResult, /* IsExclusive = */ true> InitPromise; NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDataDecoder) // Initialize the decoder. The decoder should be ready to decode once // promise resolves. The decoder should do any initialization here, rather // than in its constructor or PlatformDecoderModule::Create*Decoder(), // so that if the MediaFormatReader needs to shutdown during initialization, // it can call Shutdown() to cancel this operation. Any initialization
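Because InitPromise now rejects with a MediaResult, a decoder's Init() can report why initialization failed rather than just that it did. A minimal sketch of the decoder side under the new typedef, following the same pattern OpusDataDecoder uses later in this patch; the free function and its failure condition are hypothetical:

#include "PlatformDecoderModule.h"

// Hypothetical helper showing the two outcomes of the new InitPromise.
static RefPtr<mozilla::MediaDataDecoder::InitPromise>
InitExample(bool aLibraryLoaded)
{
  using mozilla::MediaDataDecoder;
  using mozilla::MediaResult;
  using mozilla::TrackInfo;

  if (!aLibraryLoaded) { // hypothetical failure condition
    return MediaDataDecoder::InitPromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  "Platform decoder library failed to load"),
      __func__);
  }
  return MediaDataDecoder::InitPromise::CreateAndResolve(TrackInfo::kAudioTrack,
                                                         __func__);
}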
--- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp +++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp @@ -6,16 +6,17 @@ #include "ImageContainer.h" #include "MediaDecoderReader.h" #include "MediaInfo.h" #include "mozilla/CheckedInt.h" #include "mozilla/mozalloc.h" // for operator new, and new (fallible) #include "mozilla/RefPtr.h" #include "mozilla/TaskQueue.h" +#include "mp4_demuxer/AnnexB.h" #include "mp4_demuxer/H264.h" #include "MP4Decoder.h" #include "nsAutoPtr.h" #include "nsRect.h" #include "PlatformDecoderModule.h" #include "ReorderQueue.h" #include "TimeUnits.h" #include "VideoUtils.h" @@ -29,17 +30,19 @@ class BlankMediaDataDecoder : public Med public: BlankMediaDataDecoder(BlankMediaDataCreator* aCreator, const CreateDecoderParams& aParams) : mCreator(aCreator) , mCallback(aParams.mCallback) , mMaxRefFrames(aParams.mConfig.GetType() == TrackInfo::kVideoTrack && MP4Decoder::IsH264(aParams.mConfig.mMimeType) - ? mp4_demuxer::H264::ComputeMaxRefFrames(aParams.VideoConfig().mExtraData) + ? mp4_demuxer::AnnexB::HasSPS(aParams.VideoConfig().mExtraData) + ? mp4_demuxer::H264::ComputeMaxRefFrames(aParams.VideoConfig().mExtraData) + : 16 : 0) , mType(aParams.mConfig.GetType()) { } RefPtr<InitPromise> Init() override { return InitPromise::CreateAndResolve(mType, __func__); } @@ -74,17 +77,17 @@ public: { return "blank media data decoder"; } private: void OutputFrame(MediaData* aData) { if (!aData) { - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)); return; } // Frames come out in DTS order but we need to output them in PTS order. mReorderQueue.Push(aData); while (mReorderQueue.Length() > mMaxRefFrames) { mCallback->Output(mReorderQueue.Pop().get());
--- a/dom/media/platforms/agnostic/OpusDecoder.cpp +++ b/dom/media/platforms/agnostic/OpusDecoder.cpp @@ -56,51 +56,51 @@ OpusDataDecoder::AppendCodecDelay(MediaB RefPtr<MediaDataDecoder::InitPromise> OpusDataDecoder::Init() { size_t length = mInfo.mCodecSpecificConfig->Length(); uint8_t *p = mInfo.mCodecSpecificConfig->Elements(); if (length < sizeof(uint64_t)) { OPUS_DEBUG("CodecSpecificConfig too short to read codecDelay!"); - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } int64_t codecDelay = BigEndian::readUint64(p); length -= sizeof(uint64_t); p += sizeof(uint64_t); if (NS_FAILED(DecodeHeader(p, length))) { OPUS_DEBUG("Error decoding header!"); - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } int r; mOpusDecoder = opus_multistream_decoder_create(mOpusParser->mRate, mOpusParser->mChannels, mOpusParser->mStreams, mOpusParser->mCoupledStreams, mMappingTable, &r); mSkip = mOpusParser->mPreSkip; mPaddingDiscarded = false; if (codecDelay != FramesToUsecs(mOpusParser->mPreSkip, mOpusParser->mRate).value()) { NS_WARNING("Invalid Opus header: CodecDelay and pre-skip do not match!"); - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } if (mInfo.mRate != (uint32_t)mOpusParser->mRate) { NS_WARNING("Invalid Opus header: container and codec rate do not match!"); } if (mInfo.mChannels != (uint32_t)mOpusParser->mChannels) { NS_WARNING("Invalid Opus header: container and codec channels do not match!"); } return r == OPUS_OK ? InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__) - : InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + : InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } nsresult OpusDataDecoder::DecodeHeader(const unsigned char* aData, size_t aLength) { MOZ_ASSERT(!mOpusParser); MOZ_ASSERT(!mOpusDecoder); MOZ_ASSERT(!mDecodedHeader); @@ -146,89 +146,83 @@ OpusDataDecoder::Input(MediaRawData* aSa void OpusDataDecoder::ProcessDecode(MediaRawData* aSample) { if (mIsFlushing) { return; } - DecodeError err = DoDecode(aSample); - switch (err) { - case DecodeError::FATAL_ERROR: - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); - return; - case DecodeError::DECODE_ERROR: - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); - break; - case DecodeError::DECODE_SUCCESS: - mCallback->InputExhausted(); - break; + MediaResult rv = DoDecode(aSample); + if (NS_FAILED(rv)) { + mCallback->Error(rv); + return; } + mCallback->InputExhausted(); } -OpusDataDecoder::DecodeError +MediaResult OpusDataDecoder::DoDecode(MediaRawData* aSample) { int64_t aDiscardPadding = 0; if (aSample->mExtraData) { aDiscardPadding = BigEndian::readInt64(aSample->mExtraData->Elements()); } uint32_t channels = mOpusParser->mChannels; if (mPaddingDiscarded) { // Discard padding should be used only on the final packet, so // decoding after a padding discard is invalid. OPUS_DEBUG("Opus error, discard padding on interstitial packet"); - return FATAL_ERROR; + return NS_ERROR_DOM_MEDIA_FATAL_ERR; } if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) { // We are starting a new block. mFrames = 0; mLastFrameTime = Some(aSample->mTime); } // Maximum value is 63*2880, so there's no chance of overflow. 
int32_t frames_number = opus_packet_get_nb_frames(aSample->Data(), aSample->Size()); if (frames_number <= 0) { OPUS_DEBUG("Invalid packet header: r=%ld length=%ld", frames_number, aSample->Size()); - return FATAL_ERROR; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } int32_t samples = opus_packet_get_samples_per_frame(aSample->Data(), opus_int32(mOpusParser->mRate)); // A valid Opus packet must be between 2.5 and 120 ms long (48kHz). int32_t frames = frames_number*samples; if (frames < 120 || frames > 5760) { OPUS_DEBUG("Invalid packet frames: %ld", frames); - return FATAL_ERROR; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } AlignedAudioBuffer buffer(frames * channels); if (!buffer) { - return FATAL_ERROR; + return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__); } // Decode to the appropriate sample type. #ifdef MOZ_SAMPLE_TYPE_FLOAT32 int ret = opus_multistream_decode_float(mOpusDecoder, aSample->Data(), aSample->Size(), buffer.get(), frames, false); #else int ret = opus_multistream_decode(mOpusDecoder, aSample->Data(), aSample->Size(), buffer.get(), frames, false); #endif if (ret < 0) { - return DECODE_ERROR; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } NS_ASSERTION(ret == frames, "Opus decoded too few audio samples"); CheckedInt64 startTime = aSample->mTime; // Trim the initial frames while the decoder is settling. if (mSkip > 0) { int32_t skipFrames = std::min<int32_t>(mSkip, frames); int32_t keepFrames = frames - skipFrames; @@ -239,31 +233,31 @@ OpusDataDecoder::DoDecode(MediaRawData* startTime = startTime + FramesToUsecs(skipFrames, mOpusParser->mRate); frames = keepFrames; mSkip -= skipFrames; } if (aDiscardPadding < 0) { // Negative discard padding is invalid. OPUS_DEBUG("Opus error, negative discard padding"); - return FATAL_ERROR; + return NS_ERROR_DOM_MEDIA_FATAL_ERR; } if (aDiscardPadding > 0) { OPUS_DEBUG("OpusDecoder discardpadding %" PRId64 "", aDiscardPadding); CheckedInt64 discardFrames = TimeUnitToFrames(media::TimeUnit::FromNanoseconds(aDiscardPadding), mOpusParser->mRate); if (!discardFrames.isValid()) { NS_WARNING("Int overflow in DiscardPadding"); - return FATAL_ERROR; + return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; } if (discardFrames.value() > frames) { // Discarding more than the entire packet is invalid. OPUS_DEBUG("Opus error, discard padding larger than packet"); - return FATAL_ERROR; + return NS_ERROR_DOM_MEDIA_FATAL_ERR; } OPUS_DEBUG("Opus decoder discarding %d of %d frames", int32_t(discardFrames.value()), frames); // Padding discard is only supposed to happen on the final packet. // Record the discard so we can return an error if another packet is // decoded. 
mPaddingDiscarded = true; int32_t keepFrames = frames - discardFrames.value(); @@ -288,35 +282,35 @@ OpusDataDecoder::DoDecode(MediaRawData* buffer[i] = static_cast<AudioDataValue>(MOZ_CLIP_TO_15(val)); } } #endif CheckedInt64 duration = FramesToUsecs(frames, mOpusParser->mRate); if (!duration.isValid()) { NS_WARNING("OpusDataDecoder: Int overflow converting WebM audio duration"); - return FATAL_ERROR; + return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; } CheckedInt64 time = startTime - FramesToUsecs(mOpusParser->mPreSkip, mOpusParser->mRate) + FramesToUsecs(mFrames, mOpusParser->mRate); if (!time.isValid()) { NS_WARNING("OpusDataDecoder: Int overflow shifting tstamp by codec delay"); - return FATAL_ERROR; + return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; }; mCallback->Output(new AudioData(aSample->mOffset, time.value(), duration.value(), frames, Move(buffer), mOpusParser->mChannels, mOpusParser->mRate)); mFrames += frames; - return DECODE_SUCCESS; + return NS_OK; } void OpusDataDecoder::ProcessDrain() { mCallback->DrainComplete(); }
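A recurring pattern in the Opus changes above (and in the Vorbis, GMP and Apple decoders later in this changeset) is that CheckedInt64 overflow now maps to NS_ERROR_DOM_MEDIA_OVERFLOW_ERR rather than a generic fatal error, while malformed packets map to NS_ERROR_DOM_MEDIA_DECODE_ERR and allocation failures to NS_ERROR_OUT_OF_MEMORY. A minimal sketch of the overflow case (ComputeTimestampUs is an illustrative helper, not a function in the tree):

static MediaResult
ComputeTimestampUs(int64_t aFrames, uint32_t aRate, int64_t* aOutUs)
{
  CheckedInt64 usecs = FramesToUsecs(aFrames, aRate);
  if (!usecs.isValid()) {
    // Report overflow as its own, distinguishable error code.
    return MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR, __func__);
  }
  *aOutUs = usecs.value();
  return NS_OK;
}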
--- a/dom/media/platforms/agnostic/OpusDecoder.h +++ b/dom/media/platforms/agnostic/OpusDecoder.h @@ -36,26 +36,20 @@ public: // Pack pre-skip/CodecDelay, given in microseconds, into a // MediaByteBuffer. The decoder expects this value to come // from the container (if any) and to precede the OpusHead // block in the CodecSpecificConfig buffer to verify the // values match. static void AppendCodecDelay(MediaByteBuffer* config, uint64_t codecDelayUS); private: - enum DecodeError { - DECODE_SUCCESS, - DECODE_ERROR, - FATAL_ERROR - }; - nsresult DecodeHeader(const unsigned char* aData, size_t aLength); void ProcessDecode(MediaRawData* aSample); - DecodeError DoDecode(MediaRawData* aSample); + MediaResult DoDecode(MediaRawData* aSample); void ProcessDrain(); const AudioInfo& mInfo; const RefPtr<TaskQueue> mTaskQueue; MediaDataDecoderCallback* mCallback; // Opus decoder state nsAutoPtr<OpusParser> mOpusParser;
--- a/dom/media/platforms/agnostic/TheoraDecoder.cpp +++ b/dom/media/platforms/agnostic/TheoraDecoder.cpp @@ -73,32 +73,32 @@ TheoraDecoder::Init() th_comment_init(&mTheoraComment); th_info_init(&mTheoraInfo); nsTArray<unsigned char*> headers; nsTArray<size_t> headerLens; if (!XiphExtradataToHeaders(headers, headerLens, mInfo.mCodecSpecificConfig->Elements(), mInfo.mCodecSpecificConfig->Length())) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } for (size_t i = 0; i < headers.Length(); i++) { if (NS_FAILED(DoDecodeHeader(headers[i], headerLens[i]))) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } } if (mPacketCount != 3) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } mTheoraDecoderContext = th_decode_alloc(&mTheoraInfo, mTheoraSetupInfo); if (mTheoraDecoderContext) { return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__); } else { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } } void TheoraDecoder::Flush() { MOZ_ASSERT(mCallback->OnReaderTaskQueue()); @@ -118,17 +118,17 @@ TheoraDecoder::DoDecodeHeader(const unsi int r = th_decode_headerin(&mTheoraInfo, &mTheoraComment, &mTheoraSetupInfo, &pkt); return r > 0 ? NS_OK : NS_ERROR_FAILURE; } -int +MediaResult TheoraDecoder::DoDecode(MediaRawData* aSample) { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); const unsigned char* aData = aSample->Data(); size_t aLength = aSample->Size(); bool bos = mPacketCount == 0; @@ -176,35 +176,36 @@ TheoraDecoder::DoDecode(MediaRawData* aS aSample->mKeyframe, aSample->mTimecode, mInfo.ScaledImageRect(mTheoraInfo.frame_width, mTheoraInfo.frame_height)); if (!v) { LOG("Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld", mTheoraInfo.frame_width, mTheoraInfo.frame_height, mInfo.mDisplay.width, mInfo.mDisplay.height, mInfo.mImage.width, mInfo.mImage.height); - return -1; + return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__); } mCallback->Output(v); - return 0; + return NS_OK; } else { LOG("Theora Decode error: %d", ret); - return -1; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } } void TheoraDecoder::ProcessDecode(MediaRawData* aSample) { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); if (mIsFlushing) { return; } - if (DoDecode(aSample) == -1) { - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); + MediaResult rv = DoDecode(aSample); + if (NS_FAILED(rv)) { + mCallback->Error(rv); } else { mCallback->InputExhausted(); } } void TheoraDecoder::Input(MediaRawData* aSample) {
--- a/dom/media/platforms/agnostic/TheoraDecoder.h +++ b/dom/media/platforms/agnostic/TheoraDecoder.h @@ -36,17 +36,17 @@ public: { return "theora video decoder"; } private: nsresult DoDecodeHeader(const unsigned char* aData, size_t aLength); void ProcessDecode(MediaRawData* aSample); - int DoDecode(MediaRawData* aSample); + MediaResult DoDecode(MediaRawData* aSample); void ProcessDrain(); RefPtr<ImageContainer> mImageContainer; RefPtr<TaskQueue> mTaskQueue; MediaDataDecoderCallback* mCallback; Atomic<bool> mIsFlushing; // Theora header & decoder state
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp +++ b/dom/media/platforms/agnostic/VPXDecoder.cpp @@ -75,34 +75,34 @@ VPXDecoder::Init() } decode_threads = std::min(decode_threads, PR_GetNumberOfProcessors()); vpx_codec_dec_cfg_t config; config.threads = decode_threads; config.w = config.h = 0; // set after decode if (!dx || vpx_codec_dec_init(&mVPX, dx, &config, 0)) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__); } void VPXDecoder::Flush() { MOZ_ASSERT(mCallback->OnReaderTaskQueue()); mIsFlushing = true; nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([this] () { // nothing to do for now. }); SyncRunnable::DispatchToThread(mTaskQueue, r); mIsFlushing = false; } -int +MediaResult VPXDecoder::DoDecode(MediaRawData* aSample) { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); #if defined(DEBUG) vpx_codec_stream_info_t si; PodZero(&si); si.sz = sizeof(si); if (mCodec == Codec::VP8) { @@ -111,17 +111,17 @@ VPXDecoder::DoDecode(MediaRawData* aSamp vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), aSample->Data(), aSample->Size(), &si); } NS_ASSERTION(bool(si.is_kf) == aSample->mKeyframe, "VPX Decode Keyframe error sample->mKeyframe and si.si_kf out of sync"); #endif if (vpx_codec_err_t r = vpx_codec_decode(&mVPX, aSample->Data(), aSample->Size(), nullptr, 0)) { LOG("VPX Decode error: %s", vpx_codec_err_to_string(r)); - return -1; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } vpx_codec_iter_t iter = nullptr; vpx_image_t *img; while ((img = vpx_codec_get_frame(&mVPX, &iter))) { NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420 || img->fmt == VPX_IMG_FMT_I444, @@ -152,17 +152,17 @@ VPXDecoder::DoDecode(MediaRawData* aSamp } else if (img->fmt == VPX_IMG_FMT_I444) { b.mPlanes[1].mHeight = img->d_h; b.mPlanes[1].mWidth = img->d_w; b.mPlanes[2].mHeight = img->d_h; b.mPlanes[2].mWidth = img->d_w; } else { LOG("VPX Unknown image format"); - return -1; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } RefPtr<VideoData> v = VideoData::CreateAndCopyData(mInfo, mImageContainer, aSample->mOffset, aSample->mTime, aSample->mDuration, @@ -171,32 +171,33 @@ VPXDecoder::DoDecode(MediaRawData* aSamp aSample->mTimecode, mInfo.ScaledImageRect(img->d_w, img->d_h)); if (!v) { LOG("Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld", img->d_w, img->d_h, mInfo.mDisplay.width, mInfo.mDisplay.height, mInfo.mImage.width, mInfo.mImage.height); - return -1; + return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__); } mCallback->Output(v); } - return 0; + return NS_OK; } void VPXDecoder::ProcessDecode(MediaRawData* aSample) { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); if (mIsFlushing) { return; } - if (DoDecode(aSample) == -1) { - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); + MediaResult rv = DoDecode(aSample); + if (NS_FAILED(rv)) { + mCallback->Error(rv); } else { mCallback->InputExhausted(); } } void VPXDecoder::Input(MediaRawData* aSample) {
--- a/dom/media/platforms/agnostic/VPXDecoder.h +++ b/dom/media/platforms/agnostic/VPXDecoder.h @@ -43,17 +43,17 @@ public: // identify VPX of the specified type. Does not parse general content type // strings, i.e. white space matters. static bool IsVPX(const nsACString& aMimeType, uint8_t aCodecMask=VP8|VP9); static bool IsVP8(const nsACString& aMimeType); static bool IsVP9(const nsACString& aMimeType); private: void ProcessDecode(MediaRawData* aSample); - int DoDecode(MediaRawData* aSample); + MediaResult DoDecode(MediaRawData* aSample); void ProcessDrain(); const RefPtr<ImageContainer> mImageContainer; const RefPtr<TaskQueue> mTaskQueue; MediaDataDecoderCallback* mCallback; Atomic<bool> mIsFlushing; // VPx decoder state
--- a/dom/media/platforms/agnostic/VorbisDecoder.cpp +++ b/dom/media/platforms/agnostic/VorbisDecoder.cpp @@ -67,48 +67,48 @@ VorbisDataDecoder::Init() PodZero(&mVorbisDsp); PodZero(&mVorbisBlock); AutoTArray<unsigned char*,4> headers; AutoTArray<size_t,4> headerLens; if (!XiphExtradataToHeaders(headers, headerLens, mInfo.mCodecSpecificConfig->Elements(), mInfo.mCodecSpecificConfig->Length())) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } for (size_t i = 0; i < headers.Length(); i++) { if (NS_FAILED(DecodeHeader(headers[i], headerLens[i]))) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } } MOZ_ASSERT(mPacketCount == 3); int r = vorbis_synthesis_init(&mVorbisDsp, &mVorbisInfo); if (r) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } r = vorbis_block_init(&mVorbisDsp, &mVorbisBlock); if (r) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } if (mInfo.mRate != (uint32_t)mVorbisDsp.vi->rate) { LOG(LogLevel::Warning, ("Invalid Vorbis header: container and codec rate do not match!")); } if (mInfo.mChannels != (uint32_t)mVorbisDsp.vi->channels) { LOG(LogLevel::Warning, ("Invalid Vorbis header: container and codec channels do not match!")); } AudioConfig::ChannelLayout layout(mVorbisDsp.vi->channels); if (!layout.IsValid()) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__); } nsresult VorbisDataDecoder::DecodeHeader(const unsigned char* aData, size_t aLength) { @@ -132,24 +132,26 @@ VorbisDataDecoder::Input(MediaRawData* a void VorbisDataDecoder::ProcessDecode(MediaRawData* aSample) { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); if (mIsFlushing) { return; } - if (DoDecode(aSample) == -1) { - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); + + MediaResult rv = DoDecode(aSample); + if (NS_FAILED(rv)) { + mCallback->Error(rv); } else { mCallback->InputExhausted(); } } -int +MediaResult VorbisDataDecoder::DoDecode(MediaRawData* aSample) { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); const unsigned char* aData = aSample->Data(); size_t aLength = aSample->Size(); int64_t aOffset = aSample->mOffset; uint64_t aTstampUsecs = aSample->mTime; @@ -162,67 +164,66 @@ VorbisDataDecoder::DoDecode(MediaRawData mFrames = 0; mLastFrameTime = Some(aSample->mTime); } ogg_packet pkt = InitVorbisPacket(aData, aLength, false, aSample->mEOS, aSample->mTimecode, mPacketCount++); if (vorbis_synthesis(&mVorbisBlock, &pkt) != 0) { - return -1; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } if (vorbis_synthesis_blockin(&mVorbisDsp, &mVorbisBlock) != 0) { - return -1; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } VorbisPCMValue** pcm = 0; int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm); if (frames == 0) { - mCallback->InputExhausted(); - return 0; + return NS_OK; } while (frames > 0) { uint32_t channels = mVorbisDsp.vi->channels; uint32_t rate = mVorbisDsp.vi->rate; AlignedAudioBuffer buffer(frames*channels); if (!buffer) { - return -1; + return 
MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__); } for (uint32_t j = 0; j < channels; ++j) { VorbisPCMValue* channel = pcm[j]; for (uint32_t i = 0; i < uint32_t(frames); ++i) { buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]); } } CheckedInt64 duration = FramesToUsecs(frames, rate); if (!duration.isValid()) { NS_WARNING("Int overflow converting WebM audio duration"); - return -1; + return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; } CheckedInt64 total_duration = FramesToUsecs(mFrames, rate); if (!total_duration.isValid()) { NS_WARNING("Int overflow converting WebM audio total_duration"); - return -1; + return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; } CheckedInt64 time = total_duration + aTstampUsecs; if (!time.isValid()) { NS_WARNING("Int overflow adding total_duration and aTstampUsecs"); - return -1; + return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; }; if (!mAudioConverter) { AudioConfig in(AudioConfig::ChannelLayout(channels, VorbisLayout(channels)), rate); AudioConfig out(channels, rate); if (!in.IsValid() || !out.IsValid()) { - return -1; + return NS_ERROR_DOM_MEDIA_FATAL_ERR; } mAudioConverter = MakeUnique<AudioConverter>(in, out); } MOZ_ASSERT(mAudioConverter->CanWorkInPlace()); AudioSampleBuffer data(Move(buffer)); data = mAudioConverter->Process(Move(data)); aTotalFrames += frames; @@ -230,23 +231,23 @@ VorbisDataDecoder::DoDecode(MediaRawData time.value(), duration.value(), frames, data.Forget(), channels, rate)); mFrames += frames; if (vorbis_synthesis_read(&mVorbisDsp, frames) != 0) { - return -1; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm); } - return aTotalFrames > 0 ? 1 : 0; + return NS_OK; } void VorbisDataDecoder::ProcessDrain() { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); mCallback->DrainComplete(); }
--- a/dom/media/platforms/agnostic/VorbisDecoder.h +++ b/dom/media/platforms/agnostic/VorbisDecoder.h @@ -37,17 +37,17 @@ public: // Return true if mimetype is Vorbis static bool IsVorbis(const nsACString& aMimeType); static const AudioConfig::Channel* VorbisLayout(uint32_t aChannels); private: nsresult DecodeHeader(const unsigned char* aData, size_t aLength); void ProcessDecode(MediaRawData* aSample); - int DoDecode(MediaRawData* aSample); + MediaResult DoDecode(MediaRawData* aSample); void ProcessDrain(); const AudioInfo& mInfo; const RefPtr<TaskQueue> mTaskQueue; MediaDataDecoderCallback* mCallback; // Vorbis decoder state vorbis_info mVorbisInfo;
--- a/dom/media/platforms/agnostic/WAVDecoder.cpp +++ b/dom/media/platforms/agnostic/WAVDecoder.cpp @@ -60,36 +60,37 @@ RefPtr<MediaDataDecoder::InitPromise> WaveDataDecoder::Init() { return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__); } void WaveDataDecoder::Input(MediaRawData* aSample) { - if (!DoDecode(aSample)) { - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); + MediaResult rv = DoDecode(aSample); + if (NS_FAILED(rv)) { + mCallback->Error(rv); } else { mCallback->InputExhausted(); } } -bool +MediaResult WaveDataDecoder::DoDecode(MediaRawData* aSample) { size_t aLength = aSample->Size(); ByteReader aReader(aSample->Data(), aLength); int64_t aOffset = aSample->mOffset; uint64_t aTstampUsecs = aSample->mTime; int32_t frames = aLength * 8 / mInfo.mBitDepth / mInfo.mChannels; AlignedAudioBuffer buffer(frames * mInfo.mChannels); if (!buffer) { - return false; + return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__); } for (int i = 0; i < frames; ++i) { for (unsigned int j = 0; j < mInfo.mChannels; ++j) { if (mInfo.mProfile == 6) { //ALAW Data uint8_t v = aReader.ReadU8(); int16_t decoded = DecodeALawSample(v); buffer[i * mInfo.mChannels + j] = IntegerToAudioSample<AudioDataValue>(decoded); @@ -121,17 +122,17 @@ WaveDataDecoder::DoDecode(MediaRawData* mCallback->Output(new AudioData(aOffset, aTstampUsecs, duration, frames, Move(buffer), mInfo.mChannels, mInfo.mRate)); - return true; + return NS_OK; } void WaveDataDecoder::Drain() { mCallback->DrainComplete(); }
--- a/dom/media/platforms/agnostic/WAVDecoder.h +++ b/dom/media/platforms/agnostic/WAVDecoder.h @@ -26,16 +26,16 @@ public: void Drain() override; void Shutdown() override; const char* GetDescriptionName() const override { return "wave audio decoder"; } private: - bool DoDecode(MediaRawData* aSample); + MediaResult DoDecode(MediaRawData* aSample); const AudioInfo& mInfo; MediaDataDecoderCallback* mCallback; }; } // namespace mozilla #endif
--- a/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp +++ b/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp @@ -88,17 +88,18 @@ public: if (aDecrypted.mStatus == NoKeyErr) { // Key became unusable after we sent the sample to CDM to decrypt. // Call Input() again, so that the sample is enqueued for decryption // if the key becomes usable again. Input(aDecrypted.mSample); } else if (aDecrypted.mStatus != Ok) { if (mCallback) { - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + __func__)); } } else { MOZ_ASSERT(!mIsShutdown); // The Adobe GMP AAC decoder gets confused if we pass it non-encrypted // samples with valid crypto data. So clear the crypto data, since the // sample should be decrypted now anyway. If we don't do this and we're // using the Adobe GMP for unencrypted decoding of data that is decrypted // by gmp-clearkey, decoding will fail.
--- a/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.cpp +++ b/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.cpp @@ -3,16 +3,17 @@ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "GMPAudioDecoder.h" #include "nsServiceManagerUtils.h" #include "MediaInfo.h" #include "GMPDecoderModule.h" +#include "nsPrintfCString.h" namespace mozilla { #if defined(DEBUG) bool IsOnGMPThread() { nsCOMPtr<mozIGeckoMediaPluginService> mps = do_GetService("@mozilla.org/gecko-media-plugin-service;1"); MOZ_ASSERT(mps); @@ -26,56 +27,59 @@ bool IsOnGMPThread() void AudioCallbackAdapter::Decoded(const nsTArray<int16_t>& aPCM, uint64_t aTimeStamp, uint32_t aChannels, uint32_t aRate) { MOZ_ASSERT(IsOnGMPThread()); if (aRate == 0 || aChannels == 0) { NS_WARNING("Invalid rate or num channels returned on GMP audio samples"); - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)); return; } size_t numFrames = aPCM.Length() / aChannels; MOZ_ASSERT((aPCM.Length() % aChannels) == 0); AlignedAudioBuffer audioData(aPCM.Length()); if (!audioData) { - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__)); return; } for (size_t i = 0; i < aPCM.Length(); ++i) { audioData[i] = AudioSampleToFloat(aPCM[i]); } if (mMustRecaptureAudioPosition) { mAudioFrameSum = 0; auto timestamp = UsecsToFrames(aTimeStamp, aRate); if (!timestamp.isValid()) { NS_WARNING("Invalid timestamp"); - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR, + __func__)); return; } mAudioFrameOffset = timestamp.value(); mMustRecaptureAudioPosition = false; } auto timestamp = FramesToUsecs(mAudioFrameOffset + mAudioFrameSum, aRate); if (!timestamp.isValid()) { NS_WARNING("Invalid timestamp on audio samples"); - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR, + __func__)); return; } mAudioFrameSum += numFrames; auto duration = FramesToUsecs(numFrames, aRate); if (!duration.isValid()) { NS_WARNING("Invalid duration on audio samples"); - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR, + __func__)); return; } RefPtr<AudioData> audio(new AudioData(mLastStreamOffset, timestamp.value(), duration.value(), numFrames, Move(audioData), @@ -111,24 +115,26 @@ AudioCallbackAdapter::ResetComplete() mMustRecaptureAudioPosition = true; mCallback->FlushComplete(); } void AudioCallbackAdapter::Error(GMPErr aErr) { MOZ_ASSERT(IsOnGMPThread()); - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + nsPrintfCString("%s: %d", __func__, aErr))); } void AudioCallbackAdapter::Terminated() { NS_WARNING("AAC GMP decoder terminated."); - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + __func__)); } GMPAudioDecoderParams::GMPAudioDecoderParams(const CreateDecoderParams& aParams) : mConfig(aParams.AudioConfig()) , mTaskQueue(aParams.mTaskQueue) , mCallback(nullptr) , mAdapter(nullptr) , mCrashHelper(aParams.mCrashHelper) @@ -185,17 +191,17 @@ GMPAudioDecoder::GetNodeId() } void GMPAudioDecoder::GMPInitDone(GMPAudioDecoderProxy* aGMP) { 
MOZ_ASSERT(IsOnGMPThread()); if (!aGMP) { - mInitPromise.RejectIfExists(MediaDataDecoder::DecoderFailureReason::INIT_ERROR, __func__); + mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); return; } if (mInitPromise.IsEmpty()) { // GMP must have been shutdown while we were waiting for Init operation // to complete. aGMP->Close(); return; } @@ -206,17 +212,17 @@ GMPAudioDecoder::GMPInitDone(GMPAudioDec nsresult rv = aGMP->InitDecode(kGMPAudioCodecAAC, mConfig.mChannels, mConfig.mBitDepth, mConfig.mRate, codecSpecific, mAdapter); if (NS_FAILED(rv)) { aGMP->Close(); - mInitPromise.Reject(MediaDataDecoder::DecoderFailureReason::INIT_ERROR, __func__); + mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); return; } mGMP = aGMP; mInitPromise.Resolve(TrackInfo::kAudioTrack, __func__); } RefPtr<MediaDataDecoder::InitPromise> @@ -228,39 +234,41 @@ GMPAudioDecoder::Init() MOZ_ASSERT(mMPS); RefPtr<InitPromise> promise(mInitPromise.Ensure(__func__)); nsTArray<nsCString> tags; InitTags(tags); UniquePtr<GetGMPAudioDecoderCallback> callback(new GMPInitDoneCallback(this)); if (NS_FAILED(mMPS->GetGMPAudioDecoder(mCrashHelper, &tags, GetNodeId(), Move(callback)))) { - mInitPromise.Reject(MediaDataDecoder::DecoderFailureReason::INIT_ERROR, __func__); + mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } return promise; } void GMPAudioDecoder::Input(MediaRawData* aSample) { MOZ_ASSERT(IsOnGMPThread()); RefPtr<MediaRawData> sample(aSample); if (!mGMP) { - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)); return; } mAdapter->SetLastStreamOffset(sample->mOffset); gmp::GMPAudioSamplesImpl samples(sample, mConfig.mChannels, mConfig.mRate); nsresult rv = mGMP->Decode(samples); if (NS_FAILED(rv)) { - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); + mCallback->Error( + MediaResult(rv, nsPrintfCString("%s: decode error (%d)", + __func__, rv))); } } void GMPAudioDecoder::Flush() { MOZ_ASSERT(IsOnGMPThread()); @@ -278,17 +286,17 @@ GMPAudioDecoder::Drain() if (!mGMP || NS_FAILED(mGMP->Drain())) { mCallback->DrainComplete(); } } void GMPAudioDecoder::Shutdown() { - mInitPromise.RejectIfExists(MediaDataDecoder::DecoderFailureReason::CANCELED, __func__); + mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__); if (!mGMP) { return; } // Note this unblocks flush and drain operations waiting for callbacks. mGMP->Close(); mGMP = nullptr; }
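Several GMP call sites above attach a human-readable description to the error via nsPrintfCString, e.g. to record the GMP error code alongside the function name. A minimal sketch of that pattern, assuming the described-MediaResult constructor used throughout this patch (GmpErrToMediaResult is a hypothetical helper):

#include "MediaResult.h"
#include "nsPrintfCString.h"

static MediaResult
GmpErrToMediaResult(const char* aWhere, uint32_t aErr) // aErr: a GMPErr value
{
  // Wrap the platform error code in a described MediaResult.
  return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                     nsPrintfCString("%s: GMP error %u", aWhere, aErr));
}

// Usage, mirroring AudioCallbackAdapter::Error() above:
//   mCallback->Error(GmpErrToMediaResult(__func__, aErr));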
--- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp +++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp @@ -49,17 +49,17 @@ VideoCallbackAdapter::Decoded(GMPVideoi4 decodedFrame->Duration(), b, false, -1, pictureRegion); if (v) { mCallback->Output(v); } else { - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__)); } } void VideoCallbackAdapter::ReceivedDecodedReferenceFrame(const uint64_t aPictureId) { MOZ_ASSERT(IsOnGMPThread()); } @@ -90,25 +90,26 @@ VideoCallbackAdapter::ResetComplete() MOZ_ASSERT(IsOnGMPThread()); mCallback->FlushComplete(); } void VideoCallbackAdapter::Error(GMPErr aErr) { MOZ_ASSERT(IsOnGMPThread()); - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + nsPrintfCString("%s: %d", __func__, aErr))); } void VideoCallbackAdapter::Terminated() { // Note that this *may* be called from the proxy thread also. NS_WARNING("GMP decoder terminated."); - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)); } GMPVideoDecoderParams::GMPVideoDecoderParams(const CreateDecoderParams& aParams) : mConfig(aParams.VideoConfig()) , mTaskQueue(aParams.mTaskQueue) , mCallback(nullptr) , mAdapter(nullptr) , mImageContainer(aParams.mImageContainer) @@ -178,24 +179,24 @@ GMPVideoDecoder::GetNodeId() } GMPUniquePtr<GMPVideoEncodedFrame> GMPVideoDecoder::CreateFrame(MediaRawData* aSample) { GMPVideoFrame* ftmp = nullptr; GMPErr err = mHost->CreateFrame(kGMPEncodedVideoFrame, &ftmp); if (GMP_FAILED(err)) { - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__)); return nullptr; } GMPUniquePtr<GMPVideoEncodedFrame> frame(static_cast<GMPVideoEncodedFrame*>(ftmp)); err = frame->CreateEmptyFrame(aSample->Size()); if (GMP_FAILED(err)) { - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__)); return nullptr; } memcpy(frame->Buffer(), aSample->Data(), frame->Size()); // Convert 4-byte NAL unit lengths to host-endian 4-byte buffer lengths to // suit the GMP API. if (mConvertNALUnitLengths) { @@ -227,17 +228,17 @@ GMPVideoDecoder::GetConfig() const } void GMPVideoDecoder::GMPInitDone(GMPVideoDecoderProxy* aGMP, GMPVideoHost* aHost) { MOZ_ASSERT(IsOnGMPThread()); if (!aGMP) { - mInitPromise.RejectIfExists(MediaDataDecoder::DecoderFailureReason::INIT_ERROR, __func__); + mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); return; } MOZ_ASSERT(aHost); if (mInitPromise.IsEmpty()) { // GMP must have been shutdown while we were waiting for Init operation // to complete. 
aGMP->Close(); @@ -256,29 +257,29 @@ GMPVideoDecoder::GMPInitDone(GMPVideoDec mConfig.mExtraData->Length()); } else if (VPXDecoder::IsVP8(mConfig.mMimeType)) { codec.mCodecType = kGMPVideoCodecVP8; } else if (VPXDecoder::IsVP9(mConfig.mMimeType)) { codec.mCodecType = kGMPVideoCodecVP9; } else { // Unrecognized mime type aGMP->Close(); - mInitPromise.Reject(MediaDataDecoder::DecoderFailureReason::INIT_ERROR, __func__); + mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); return; } codec.mWidth = mConfig.mImage.width; codec.mHeight = mConfig.mImage.height; nsresult rv = aGMP->InitDecode(codec, codecSpecific, mAdapter, PR_GetNumberOfProcessors()); if (NS_FAILED(rv)) { aGMP->Close(); - mInitPromise.Reject(MediaDataDecoder::DecoderFailureReason::INIT_ERROR, __func__); + mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); return; } mGMP = aGMP; mHost = aHost; // GMP implementations have interpreted the meaning of GMP_BufferLength32 // differently. The OpenH264 GMP expects GMP_BufferLength32 to behave as @@ -302,44 +303,45 @@ GMPVideoDecoder::Init() MOZ_ASSERT(mMPS); RefPtr<InitPromise> promise(mInitPromise.Ensure(__func__)); nsTArray<nsCString> tags; InitTags(tags); UniquePtr<GetGMPVideoDecoderCallback> callback(new GMPInitDoneCallback(this)); if (NS_FAILED(mMPS->GetGMPVideoDecoder(mCrashHelper, &tags, GetNodeId(), Move(callback)))) { - mInitPromise.Reject(MediaDataDecoder::DecoderFailureReason::INIT_ERROR, __func__); + mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } return promise; } void GMPVideoDecoder::Input(MediaRawData* aSample) { MOZ_ASSERT(IsOnGMPThread()); RefPtr<MediaRawData> sample(aSample); if (!mGMP) { - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + __func__)); return; } mAdapter->SetLastStreamOffset(sample->mOffset); GMPUniquePtr<GMPVideoEncodedFrame> frame = CreateFrame(sample); if (!frame) { - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__)); return; } nsTArray<uint8_t> info; // No codec specific per-frame info to pass. nsresult rv = mGMP->Decode(Move(frame), false, info, 0); if (NS_FAILED(rv)) { - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__)); } } void GMPVideoDecoder::Flush() { MOZ_ASSERT(IsOnGMPThread()); @@ -357,17 +359,17 @@ GMPVideoDecoder::Drain() if (!mGMP || NS_FAILED(mGMP->Drain())) { mCallback->DrainComplete(); } } void GMPVideoDecoder::Shutdown() { - mInitPromise.RejectIfExists(MediaDataDecoder::DecoderFailureReason::CANCELED, __func__); + mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__); // Note that this *may* be called from the proxy thread also. if (!mGMP) { return; } // Note this unblocks flush and drain operations waiting for callbacks. mGMP->Close(); mGMP = nullptr; }
--- a/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.cpp +++ b/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.cpp @@ -5,17 +5,17 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "MediaDataDecoderProxy.h" #include "MediaData.h" namespace mozilla { void -MediaDataDecoderCallbackProxy::Error(MediaDataDecoderError aError) +MediaDataDecoderCallbackProxy::Error(const MediaResult& aError) { mProxyCallback->Error(aError); } void MediaDataDecoderCallbackProxy::FlushComplete() { mProxyDecoder->FlushComplete();
--- a/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h +++ b/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h @@ -69,17 +69,17 @@ public: , mProxyCallback(aCallback) { } void Output(MediaData* aData) override { mProxyCallback->Output(aData); } - void Error(MediaDataDecoderError aError) override; + void Error(const MediaResult& aError) override; void InputExhausted() override { mProxyCallback->InputExhausted(); } void DrainComplete() override { mProxyCallback->DrainComplete(); }
--- a/dom/media/platforms/android/MediaCodecDataDecoder.cpp +++ b/dom/media/platforms/android/MediaCodecDataDecoder.cpp @@ -70,21 +70,21 @@ public: return "Android MediaCodec video decoder"; } RefPtr<InitPromise> Init() override { mSurfaceTexture = AndroidSurfaceTexture::Create(); if (!mSurfaceTexture) { NS_WARNING("Failed to create SurfaceTexture for video decode\n"); - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } if (NS_FAILED(InitDecoder(mSurfaceTexture->JavaSurface()))) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__); } void Cleanup() override { } @@ -255,25 +255,26 @@ MediaCodecDataDecoder::Init() TrackInfo::TrackType type = (mType == MediaData::AUDIO_DATA ? TrackInfo::TrackType::kAudioTrack : TrackInfo::TrackType::kVideoTrack); return NS_SUCCEEDED(rv) ? InitPromise::CreateAndResolve(type, __func__) : InitPromise::CreateAndReject( - MediaDataDecoder::DecoderFailureReason::INIT_ERROR, __func__); + NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } nsresult MediaCodecDataDecoder::InitDecoder(Surface::Param aSurface) { mDecoder = CreateDecoder(mMimeType); if (!mDecoder) { - INVOKE_CALLBACK(Error, MediaDataDecoderError::FATAL_ERROR); + INVOKE_CALLBACK(Error, + MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)); return NS_ERROR_FAILURE; } nsresult rv; NS_ENSURE_SUCCESS(rv = mDecoder->Configure(mFormat, aSurface, nullptr, 0), rv); NS_ENSURE_SUCCESS(rv = mDecoder->Start(), rv); NS_ENSURE_SUCCESS(rv = ResetInputBuffers(), rv); @@ -290,17 +291,17 @@ static const int64_t kDecoderTimeout = 1 #define BREAK_ON_DECODER_ERROR() \ if (NS_FAILED(res)) { \ NS_WARNING("Exiting decoder loop due to exception"); \ if (mState == ModuleState::kDrainDecoder) { \ INVOKE_CALLBACK(DrainComplete); \ SetState(ModuleState::kDecoding); \ } \ - INVOKE_CALLBACK(Error, MediaDataDecoderError::FATAL_ERROR); \ + INVOKE_CALLBACK(Error, MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)); \ break; \ } nsresult MediaCodecDataDecoder::GetInputBuffer( JNIEnv* aEnv, int aIndex, jni::Object::LocalRef* aBuffer) { MOZ_ASSERT(aEnv); @@ -530,17 +531,19 @@ MediaCodecDataDecoder::DecoderLoop() } else if (outputStatus == MediaCodec::INFO_OUTPUT_BUFFERS_CHANGED) { res = ResetOutputBuffers(); BREAK_ON_DECODER_ERROR(); } else if (outputStatus == MediaCodec::INFO_OUTPUT_FORMAT_CHANGED) { res = mDecoder->GetOutputFormat(ReturnTo(&outputFormat)); BREAK_ON_DECODER_ERROR(); } else if (outputStatus < 0) { NS_WARNING("Unknown error from decoder!"); - INVOKE_CALLBACK(Error, MediaDataDecoderError::DECODE_ERROR); + INVOKE_CALLBACK(Error, + MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, + __func__)); // Don't break here just in case it's recoverable. If it's not, other // stuff will fail later and we'll bail out. } else { // We have a valid buffer index >= 0 here. int32_t flags; nsresult res = bufferInfo->Flags(&flags); BREAK_ON_DECODER_ERROR();
--- a/dom/media/platforms/android/RemoteDataDecoder.cpp +++ b/dom/media/platforms/android/RemoteDataDecoder.cpp @@ -75,18 +75,18 @@ public: HandleOutputFormatChanged(MediaFormat::Ref::From(aFormat)); } } void OnError(bool aIsFatal) { if (mDecoderCallback) { mDecoderCallback->Error(aIsFatal ? - MediaDataDecoderError::FATAL_ERROR : - MediaDataDecoderError::DECODE_ERROR); + MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__) : + MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__)); } } void DisposeNative() { // TODO } @@ -191,29 +191,29 @@ public: { } RefPtr<InitPromise> Init() override { mSurfaceTexture = AndroidSurfaceTexture::Create(); if (!mSurfaceTexture) { NS_WARNING("Failed to create SurfaceTexture for video decode\n"); - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } // Register native methods. JavaCallbacksSupport::Init(); mJavaCallbacks = CodecProxy::NativeCallbacks::New(); JavaCallbacksSupport::AttachNative(mJavaCallbacks, mozilla::MakeUnique<CallbacksSupport>(this, mCallback)); mJavaDecoder = CodecProxy::Create(mFormat, mSurfaceTexture->JavaSurface(), mJavaCallbacks); if (mJavaDecoder == nullptr) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } mInputDurations.Clear(); return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__); } void Flush() override @@ -301,17 +301,17 @@ public: JavaCallbacksSupport::Init(); mJavaCallbacks = CodecProxy::NativeCallbacks::New(); JavaCallbacksSupport::AttachNative(mJavaCallbacks, mozilla::MakeUnique<CallbacksSupport>(this, mCallback)); mJavaDecoder = CodecProxy::Create(mFormat, nullptr, mJavaCallbacks); if (mJavaDecoder == nullptr) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__); } private: class CallbacksSupport final : public JavaCallbacksSupport { @@ -377,17 +377,18 @@ private: } } void HandleOutputFormatChanged(MediaFormat::Param aFormat) override { aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &mOutputChannels); AudioConfig::ChannelLayout layout(mOutputChannels); if (!layout.IsValid()) { - mDecoderCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mDecoderCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + __func__)); return; } aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &mOutputSampleRate); LOG("Audio output format changed: channels:%d sample rate:%d", mOutputChannels, mOutputSampleRate); } private: RemoteAudioDecoder* mDecoder; @@ -471,17 +472,17 @@ RemoteDataDecoder::Input(MediaRawData* a env->SetByteArrayRegion(data, 0, length, reinterpret_cast<const jbyte*>(aSample->Data())); jni::ByteArray::LocalRef bytes(env); bytes = jni::Object::LocalRef::Adopt(env, data); BufferInfo::LocalRef bufferInfo; nsresult rv = BufferInfo::New(&bufferInfo); if (NS_FAILED(rv)) { - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__)); return; } bufferInfo->Set(0, aSample->Size(), aSample->mTime, 0); mJavaDecoder->Input(bytes, bufferInfo); } } // mozilla
--- a/dom/media/platforms/apple/AppleATDecoder.cpp +++ b/dom/media/platforms/apple/AppleATDecoder.cpp @@ -52,17 +52,17 @@ AppleATDecoder::~AppleATDecoder() MOZ_ASSERT(!mConverter); } RefPtr<MediaDataDecoder::InitPromise> AppleATDecoder::Init() { if (!mFormatID) { NS_ERROR("Non recognised format"); - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } return InitPromise::CreateAndResolve(TrackType::kAudioTrack, __func__); } void AppleATDecoder::Input(MediaRawData* aSample) { @@ -189,28 +189,30 @@ AppleATDecoder::SubmitSample(MediaRawDat if (mIsFlushing) { return; } nsresult rv = NS_OK; if (!mConverter) { rv = SetupDecoder(aSample); if (rv != NS_OK && rv != NS_ERROR_NOT_INITIALIZED) { - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + __func__)); return; } } mQueuedSamples.AppendElement(aSample); if (rv == NS_OK) { for (size_t i = 0; i < mQueuedSamples.Length(); i++) { - if (NS_FAILED(DecodeSample(mQueuedSamples[i]))) { + rv = DecodeSample(mQueuedSamples[i]); + if (NS_FAILED(rv)) { mQueuedSamples.Clear(); - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); + mCallback->Error(MediaResult(rv, __func__)); return; } } mQueuedSamples.Clear(); } mCallback->InputExhausted(); } @@ -257,17 +259,17 @@ AppleATDecoder::DecodeSample(MediaRawDat _PassthroughInputDataCallback, &userData, &numFrames /* in/out */, &decBuffer, packets.get()); if (rv && rv != kNoMoreDataErr) { LOG("Error decoding audio stream: %d\n", rv); - return NS_ERROR_FAILURE; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } if (numFrames) { outputData.AppendElements(decoded.get(), numFrames * channels); } if (rv == kNoMoreDataErr) { break; @@ -278,34 +280,34 @@ AppleATDecoder::DecodeSample(MediaRawDat return NS_OK; } size_t numFrames = outputData.Length() / channels; int rate = mOutputFormat.mSampleRate; media::TimeUnit duration = FramesToTimeUnit(numFrames, rate); if (!duration.IsValid()) { NS_WARNING("Invalid count of accumulated audio samples"); - return NS_ERROR_FAILURE; + return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; } #ifdef LOG_SAMPLE_DECODE LOG("pushed audio at time %lfs; duration %lfs\n", (double)aSample->mTime / USECS_PER_S, duration.ToSeconds()); #endif AudioSampleBuffer data(outputData.Elements(), outputData.Length()); if (!data.Data()) { return NS_ERROR_OUT_OF_MEMORY; } if (mChannelLayout && !mAudioConverter) { AudioConfig in(*mChannelLayout.get(), rate); AudioConfig out(channels, rate); if (!in.IsValid() || !out.IsValid()) { - return NS_ERROR_FAILURE; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } mAudioConverter = MakeUnique<AudioConverter>(in, out); } if (mAudioConverter) { MOZ_ASSERT(mAudioConverter->CanWorkInPlace()); data = mAudioConverter->Process(Move(data)); }
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp +++ b/dom/media/platforms/apple/AppleVTDecoder.cpp @@ -66,17 +66,17 @@ RefPtr<MediaDataDecoder::InitPromise> AppleVTDecoder::Init() { nsresult rv = InitializeSession(); if (NS_SUCCEEDED(rv)) { return InitPromise::CreateAndResolve(TrackType::kVideoTrack, __func__); } - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } void AppleVTDecoder::Input(MediaRawData* aSample) { MOZ_ASSERT(mCallback->OnReaderTaskQueue()); LOG("mp4 input sample %p pts %lld duration %lld us%s %d bytes", @@ -305,18 +305,18 @@ AppleVTDecoder::OutputFrame(CVPixelBuffe MOZ_ASSERT(planes == 2, "Likely not NV12 format and it must be."); VideoData::YCbCrBuffer buffer; // Lock the returned image data. CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly); if (rv != kCVReturnSuccess) { NS_ERROR("error locking pixel data"); - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); - return NS_ERROR_FAILURE; + mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__)); + return NS_ERROR_OUT_OF_MEMORY; } // Y plane. buffer.mPlanes[0].mData = static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0)); buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0); buffer.mPlanes[0].mWidth = width; buffer.mPlanes[0].mHeight = height; buffer.mPlanes[0].mOffset = 0; @@ -371,18 +371,18 @@ AppleVTDecoder::OutputFrame(CVPixelBuffe visible); #else MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS"); #endif } if (!data) { NS_ERROR("Couldn't create VideoData for frame"); - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); - return NS_ERROR_FAILURE; + mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__)); + return NS_ERROR_OUT_OF_MEMORY; } // Frames come out in DTS order but we need to output them // in composition order. MonitorAutoLock mon(mMonitor); mReorderQueue.Push(data); if (mReorderQueue.Length() > mMaxRefFrames) { mCallback->Output(mReorderQueue.Pop().get()); @@ -415,17 +415,17 @@ TimingInfoFromSample(MediaRawData* aSamp timestamp.presentationTimeStamp = CMTimeMake(aSample->mTime, USECS_PER_S); timestamp.decodeTimeStamp = CMTimeMake(aSample->mTimecode, USECS_PER_S); return timestamp; } -nsresult +MediaResult AppleVTDecoder::DoDecode(MediaRawData* aSample) { AssertOnTaskQueueThread(); // For some reason this gives me a double-free error with stagefright. AutoCFRelease<CMBlockBufferRef> block = nullptr; AutoCFRelease<CMSampleBufferRef> sample = nullptr; VTDecodeInfoFlags infoFlags; @@ -441,37 +441,39 @@ AppleVTDecoder::DoDecode(MediaRawData* a kCFAllocatorNull, // Block allocator. NULL, // Block source. 0, // Data offset. 
aSample->Size(), false, block.receive()); if (rv != noErr) { NS_ERROR("Couldn't create CMBlockBuffer"); - return NS_ERROR_FAILURE; + mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__)); + return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__); } CMSampleTimingInfo timestamp = TimingInfoFromSample(aSample); rv = CMSampleBufferCreate(kCFAllocatorDefault, block, true, 0, 0, mFormat, 1, 1, ×tamp, 0, NULL, sample.receive()); if (rv != noErr) { NS_ERROR("Couldn't create CMSampleBuffer"); - return NS_ERROR_FAILURE; + mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__)); + return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__); } VTDecodeFrameFlags decodeFlags = kVTDecodeFrame_EnableAsynchronousDecompression; rv = VTDecompressionSessionDecodeFrame(mSession, sample, decodeFlags, CreateAppleFrameRef(aSample), &infoFlags); if (rv != noErr && !(infoFlags & kVTDecodeInfo_FrameDropped)) { LOG("AppleVTDecoder: Error %d VTDecompressionSessionDecodeFrame", rv); NS_WARNING("Couldn't pass frame to decoder"); - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); - return NS_ERROR_FAILURE; + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__)); + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } return NS_OK; } nsresult AppleVTDecoder::InitializeSession() {
--- a/dom/media/platforms/apple/AppleVTDecoder.h +++ b/dom/media/platforms/apple/AppleVTDecoder.h @@ -91,17 +91,17 @@ private: const uint32_t mDisplayHeight; // Method to set up the decompression session. nsresult InitializeSession(); nsresult WaitForAsynchronousFrames(); CFDictionaryRef CreateDecoderSpecification(); CFDictionaryRef CreateDecoderExtensions(); // Method to pass a frame to VideoToolbox for decoding. - nsresult DoDecode(MediaRawData* aSample); + MediaResult DoDecode(MediaRawData* aSample); const RefPtr<TaskQueue> mTaskQueue; const uint32_t mMaxRefFrames; const RefPtr<layers::ImageContainer> mImageContainer; Atomic<bool> mIsShutDown; const bool mUseSoftwareImages; // Set on reader/decode thread calling Flush() to indicate that output is
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp +++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp @@ -28,17 +28,17 @@ FFmpegAudioDecoder<LIBAV_VER>::FFmpegAud } RefPtr<MediaDataDecoder::InitPromise> FFmpegAudioDecoder<LIBAV_VER>::Init() { nsresult rv = InitDecoder(); return rv == NS_OK ? InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__) - : InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + : InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } void FFmpegAudioDecoder<LIBAV_VER>::InitCodecContext() { MOZ_ASSERT(mCodecContext); // We do not want to set this value to 0 as FFmpeg by default will // use the number of cores, which with our mozlibavutil get_cpu_count @@ -112,94 +112,91 @@ CopyAndPackAudio(AVFrame* aFrame, uint32 *tmp++ = AudioSampleToFloat(data[channel][frame]); } } } return audio; } -FFmpegAudioDecoder<LIBAV_VER>::DecodeResult +MediaResult FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample) { AVPacket packet; mLib->av_init_packet(&packet); packet.data = const_cast<uint8_t*>(aSample->Data()); packet.size = aSample->Size(); if (!PrepareFrame()) { NS_WARNING("FFmpeg audio decoder failed to allocate frame."); - return DecodeResult::FATAL_ERROR; + return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__); } int64_t samplePosition = aSample->mOffset; media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime); - bool didOutput = false; while (packet.size > 0) { int decoded; int bytesConsumed = mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet); if (bytesConsumed < 0) { NS_WARNING("FFmpeg audio decoder error."); - return DecodeResult::DECODE_ERROR; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } if (mFrame->format != AV_SAMPLE_FMT_FLT && mFrame->format != AV_SAMPLE_FMT_FLTP && mFrame->format != AV_SAMPLE_FMT_S16 && mFrame->format != AV_SAMPLE_FMT_S16P && mFrame->format != AV_SAMPLE_FMT_S32 && mFrame->format != AV_SAMPLE_FMT_S32P) { NS_WARNING("FFmpeg audio decoder outputs unsupported audio format."); - return DecodeResult::DECODE_ERROR; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } if (decoded) { uint32_t numChannels = mCodecContext->channels; AudioConfig::ChannelLayout layout(numChannels); if (!layout.IsValid()) { - return DecodeResult::FATAL_ERROR; + return NS_ERROR_DOM_MEDIA_FATAL_ERR; } uint32_t samplingRate = mCodecContext->sample_rate; AlignedAudioBuffer audio = CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples); media::TimeUnit duration = FramesToTimeUnit(mFrame->nb_samples, samplingRate); if (!audio || !duration.IsValid()) { NS_WARNING("Invalid count of accumulated audio samples"); - return DecodeResult::DECODE_ERROR; + return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; } RefPtr<AudioData> data = new AudioData(samplePosition, pts.ToMicroseconds(), duration.ToMicroseconds(), mFrame->nb_samples, Move(audio), numChannels, samplingRate); mCallback->Output(data); - didOutput = true; pts += duration; if (!pts.IsValid()) { NS_WARNING("Invalid count of accumulated audio samples"); - return DecodeResult::DECODE_ERROR; + return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; } } packet.data += bytesConsumed; packet.size -= bytesConsumed; samplePosition += bytesConsumed; } - - return didOutput ? DecodeResult::DECODE_FRAME : DecodeResult::DECODE_NO_FRAME; + return NS_OK; } void FFmpegAudioDecoder<LIBAV_VER>::ProcessDrain() { ProcessFlush(); mCallback->DrainComplete(); }
--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h +++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h @@ -30,15 +30,15 @@ public: void InitCodecContext() override; static AVCodecID GetCodecId(const nsACString& aMimeType); const char* GetDescriptionName() const override { return "ffmpeg audio decoder"; } private: - DecodeResult DoDecode(MediaRawData* aSample) override; + MediaResult DoDecode(MediaRawData* aSample) override; void ProcessDrain() override; }; } // namespace mozilla #endif // __FFmpegAACDecoder_h__
--- a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp +++ b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp @@ -104,29 +104,21 @@ FFmpegDataDecoder<LIBAV_VER>::Shutdown() void FFmpegDataDecoder<LIBAV_VER>::ProcessDecode(MediaRawData* aSample) { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); if (mIsFlushing) { return; } - switch (DoDecode(aSample)) { - case DecodeResult::DECODE_ERROR: - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); - break; - case DecodeResult::FATAL_ERROR: - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); - break; - case DecodeResult::DECODE_NO_FRAME: - case DecodeResult::DECODE_FRAME: - mCallback->InputExhausted(); - break; - default: - break; + MediaResult rv = DoDecode(aSample); + if (NS_FAILED(rv)) { + mCallback->Error(rv); + } else { + mCallback->InputExhausted(); } } void FFmpegDataDecoder<LIBAV_VER>::Input(MediaRawData* aSample) { mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>( this, &FFmpegDataDecoder::ProcessDecode, aSample));
--- a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.h +++ b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.h @@ -35,23 +35,16 @@ public: void Input(MediaRawData* aSample) override; void Flush() override; void Drain() override; void Shutdown() override; static AVCodec* FindAVCodec(FFmpegLibWrapper* aLib, AVCodecID aCodec); protected: - enum DecodeResult { - DECODE_FRAME, - DECODE_NO_FRAME, - DECODE_ERROR, - FATAL_ERROR - }; - // Flush and Drain operation, always run virtual void ProcessFlush(); virtual void ProcessShutdown(); virtual void InitCodecContext() {} AVFrame* PrepareFrame(); nsresult InitDecoder(); FFmpegLibWrapper* mLib; @@ -59,17 +52,17 @@ protected: AVCodecContext* mCodecContext; AVFrame* mFrame; RefPtr<MediaByteBuffer> mExtraData; AVCodecID mCodecID; private: void ProcessDecode(MediaRawData* aSample); - virtual DecodeResult DoDecode(MediaRawData* aSample) = 0; + virtual MediaResult DoDecode(MediaRawData* aSample) = 0; virtual void ProcessDrain() = 0; static StaticMutex sMonitor; const RefPtr<TaskQueue> mTaskQueue; // Set/cleared on reader thread calling Flush() to indicate that output is // not required and so input samples on mTaskQueue need not be processed. Atomic<bool> mIsFlushing; };
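For reference, the DecodeResult values removed above map onto the new scheme roughly as follows (inferred from this changeset, not a general rule):

// DECODE_ERROR    -> NS_ERROR_DOM_MEDIA_DECODE_ERR
// FATAL_ERROR     -> NS_ERROR_DOM_MEDIA_FATAL_ERR, or NS_ERROR_OUT_OF_MEMORY
//                    where the failure was an allocation
// DECODE_FRAME /
// DECODE_NO_FRAME -> NS_OK in both cases; whether a frame was produced is now
//                    signalled separately (FFmpegVideoDecoder gains a
//                    bool* aGotFrame out-parameter in the next hunk).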
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp +++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp @@ -116,17 +116,17 @@ FFmpegVideoDecoder<LIBAV_VER>::FFmpegVid mExtraData = new MediaByteBuffer; mExtraData->AppendElements(*aConfig.mExtraData); } RefPtr<MediaDataDecoder::InitPromise> FFmpegVideoDecoder<LIBAV_VER>::Init() { if (NS_FAILED(InitDecoder())) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__); } void FFmpegVideoDecoder<LIBAV_VER>::InitCodecContext() { @@ -156,62 +156,68 @@ FFmpegVideoDecoder<LIBAV_VER>::InitCodec mCodecContext->get_format = ChoosePixelFormat; mCodecParser = mLib->av_parser_init(mCodecID); if (mCodecParser) { mCodecParser->flags |= PARSER_FLAG_COMPLETE_FRAMES; } } -FFmpegVideoDecoder<LIBAV_VER>::DecodeResult +MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample) { + bool gotFrame = false; + return DoDecode(aSample, &gotFrame); +} + +MediaResult +FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame) +{ uint8_t* inputData = const_cast<uint8_t*>(aSample->Data()); size_t inputSize = aSample->Size(); #if LIBAVCODEC_VERSION_MAJOR >= 54 if (inputSize && mCodecParser && (mCodecID == AV_CODEC_ID_VP8 #if LIBAVCODEC_VERSION_MAJOR >= 55 || mCodecID == AV_CODEC_ID_VP9 #endif )) { - bool gotFrame = false; while (inputSize) { uint8_t* data; int size; int len = mLib->av_parser_parse2(mCodecParser, mCodecContext, &data, &size, inputData, inputSize, aSample->mTime, aSample->mTimecode, aSample->mOffset); if (size_t(len) > inputSize) { - return DecodeResult::DECODE_ERROR; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; } inputData += len; inputSize -= len; if (size) { - switch (DoDecode(aSample, data, size)) { - case DecodeResult::DECODE_ERROR: - return DecodeResult::DECODE_ERROR; - case DecodeResult::DECODE_FRAME: - gotFrame = true; - break; - default: - break; + bool gotFrame = false; + MediaResult rv = DoDecode(aSample, data, size, &gotFrame); + if (NS_FAILED(rv)) { + return rv; + } + if (gotFrame && aGotFrame) { + *aGotFrame = true; } } } - return gotFrame ? DecodeResult::DECODE_FRAME : DecodeResult::DECODE_NO_FRAME; + return NS_OK; } #endif - return DoDecode(aSample, inputData, inputSize); + return DoDecode(aSample, inputData, inputSize, aGotFrame); } -FFmpegVideoDecoder<LIBAV_VER>::DecodeResult +MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, - uint8_t* aData, int aSize) + uint8_t* aData, int aSize, + bool* aGotFrame) { AVPacket packet; mLib->av_init_packet(&packet); packet.data = aData; packet.size = aSize; packet.dts = mLastInputDts = aSample->mTimecode; packet.pts = aSample->mTime; @@ -222,17 +228,17 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode( // (FFmpeg >= 1.0 provides av_frame_get_pkt_duration) // As such we instead use a map using the dts as key that we will retrieve // later. // The map will have a typical size of 16 entry. 
mDurationMap.Insert(aSample->mTimecode, aSample->mDuration); if (!PrepareFrame()) { NS_WARNING("FFmpeg h264 decoder failed to allocate frame."); - return DecodeResult::FATAL_ERROR; + return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__); } // Required with old version of FFmpeg/LibAV mFrame->reordered_opaque = AV_NOPTS_VALUE; int decoded; int bytesConsumed = mLib->avcodec_decode_video2(mCodecContext, mFrame, &decoded, &packet); @@ -240,90 +246,97 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode( FFMPEG_LOG("DoDecodeFrame:decode_video: rv=%d decoded=%d " "(Input: pts(%lld) dts(%lld) Output: pts(%lld) " "opaque(%lld) pkt_pts(%lld) pkt_dts(%lld))", bytesConsumed, decoded, packet.pts, packet.dts, mFrame->pts, mFrame->reordered_opaque, mFrame->pkt_pts, mFrame->pkt_dts); if (bytesConsumed < 0) { NS_WARNING("FFmpeg video decoder error."); - return DecodeResult::DECODE_ERROR; + return NS_ERROR_DOM_MEDIA_DECODE_ERR; + } + + if (!decoded) { + if (aGotFrame) { + *aGotFrame = false; + } + return NS_OK; } // If we've decoded a frame then we need to output it - if (decoded) { - int64_t pts = mPtsContext.GuessCorrectPts(mFrame->pkt_pts, mFrame->pkt_dts); - // Retrieve duration from dts. - // We use the first entry found matching this dts (this is done to - // handle damaged file with multiple frames with the same dts) + int64_t pts = mPtsContext.GuessCorrectPts(mFrame->pkt_pts, mFrame->pkt_dts); + // Retrieve duration from dts. + // We use the first entry found matching this dts (this is done to + // handle damaged file with multiple frames with the same dts) - int64_t duration; - if (!mDurationMap.Find(mFrame->pkt_dts, duration)) { - NS_WARNING("Unable to retrieve duration from map"); - duration = aSample->mDuration; - // dts are probably incorrectly reported ; so clear the map as we're - // unlikely to find them in the future anyway. This also guards - // against the map becoming extremely big. - mDurationMap.Clear(); - } - FFMPEG_LOG("Got one frame output with pts=%lld dts=%lld duration=%lld opaque=%lld", - pts, mFrame->pkt_dts, duration, mCodecContext->reordered_opaque); + int64_t duration; + if (!mDurationMap.Find(mFrame->pkt_dts, duration)) { + NS_WARNING("Unable to retrieve duration from map"); + duration = aSample->mDuration; + // dts are probably incorrectly reported ; so clear the map as we're + // unlikely to find them in the future anyway. This also guards + // against the map becoming extremely big. 
+ mDurationMap.Clear(); + } + FFMPEG_LOG("Got one frame output with pts=%lld dts=%lld duration=%lld opaque=%lld", + pts, mFrame->pkt_dts, duration, mCodecContext->reordered_opaque); - VideoData::YCbCrBuffer b; - b.mPlanes[0].mData = mFrame->data[0]; - b.mPlanes[1].mData = mFrame->data[1]; - b.mPlanes[2].mData = mFrame->data[2]; + VideoData::YCbCrBuffer b; + b.mPlanes[0].mData = mFrame->data[0]; + b.mPlanes[1].mData = mFrame->data[1]; + b.mPlanes[2].mData = mFrame->data[2]; - b.mPlanes[0].mStride = mFrame->linesize[0]; - b.mPlanes[1].mStride = mFrame->linesize[1]; - b.mPlanes[2].mStride = mFrame->linesize[2]; + b.mPlanes[0].mStride = mFrame->linesize[0]; + b.mPlanes[1].mStride = mFrame->linesize[1]; + b.mPlanes[2].mStride = mFrame->linesize[2]; - b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0; - b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0; - b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0; + b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0; + b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0; + b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0; - b.mPlanes[0].mWidth = mFrame->width; - b.mPlanes[0].mHeight = mFrame->height; - if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P) { - b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = mFrame->width; - b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = mFrame->height; - } else { - b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1; - b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1; - } + b.mPlanes[0].mWidth = mFrame->width; + b.mPlanes[0].mHeight = mFrame->height; + if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P) { + b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = mFrame->width; + b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = mFrame->height; + } else { + b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1; + b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1; + } - RefPtr<VideoData> v = - VideoData::CreateAndCopyData(mInfo, - mImageContainer, - aSample->mOffset, - pts, - duration, - b, - !!mFrame->key_frame, - -1, - mInfo.ScaledImageRect(mFrame->width, - mFrame->height)); + RefPtr<VideoData> v = + VideoData::CreateAndCopyData(mInfo, + mImageContainer, + aSample->mOffset, + pts, + duration, + b, + !!mFrame->key_frame, + -1, + mInfo.ScaledImageRect(mFrame->width, + mFrame->height)); - if (!v) { - NS_WARNING("image allocation error."); - return DecodeResult::FATAL_ERROR; - } - mCallback->Output(v); - return DecodeResult::DECODE_FRAME; + if (!v) { + NS_WARNING("image allocation error."); + return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__); } - return DecodeResult::DECODE_NO_FRAME; + mCallback->Output(v); + if (aGotFrame) { + *aGotFrame = true; + } + return NS_OK; } void FFmpegVideoDecoder<LIBAV_VER>::ProcessDrain() { RefPtr<MediaRawData> empty(new MediaRawData()); empty->mTimecode = mLastInputDts; - while (DoDecode(empty) == DecodeResult::DECODE_FRAME) { - } + bool gotFrame = false; + while (NS_SUCCEEDED(DoDecode(empty, &gotFrame)) && gotFrame); mCallback->DrainComplete(); } void FFmpegVideoDecoder<LIBAV_VER>::ProcessFlush() { mPtsContext.Reset(); mDurationMap.Clear();
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h +++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h @@ -41,18 +41,19 @@ public: return "ffvpx video decoder"; #else return "ffmpeg video decoder"; #endif } static AVCodecID GetCodecId(const nsACString& aMimeType); private: - DecodeResult DoDecode(MediaRawData* aSample) override; - DecodeResult DoDecode(MediaRawData* aSample, uint8_t* aData, int aSize); + MediaResult DoDecode(MediaRawData* aSample) override; + MediaResult DoDecode(MediaRawData* aSample, bool* aGotFrame); + MediaResult DoDecode(MediaRawData* aSample, uint8_t* aData, int aSize, bool* aGotFrame); void ProcessDrain() override; void ProcessFlush() override; void OutputDelayedFrames(); /** * This method allocates a buffer for FFmpeg's decoder, wrapped in an Image. * Currently it only supports Planar YUV420, which appears to be the only * non-hardware accelerated image format that FFmpeg's H264 decoder is
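With the enum gone, "did this call output a frame" is reported through the optional bool* aGotFrame out-parameter added above, and ProcessDrain() keeps feeding empty packets while the call succeeds and still yields frames. A short sketch of that drain-loop shape follows; the types and the buffered frame count are hypothetical stand-ins for the real libav-backed decoder.

// Sketch of the aGotFrame out-parameter pattern used for draining above.
#include <iostream>

enum class Status { Ok, Error };
static bool Succeeded(Status s) { return s == Status::Ok; }

struct VideoDecoder {
  int mBufferedFrames = 3;  // pretend three frames are still queued internally

  Status DoDecode(bool* aGotFrame) {
    bool produced = mBufferedFrames > 0;
    if (produced) {
      --mBufferedFrames;
      std::cout << "output delayed frame\n";
    }
    if (aGotFrame) {
      *aGotFrame = produced;  // callers may pass nullptr when they don't care
    }
    return Status::Ok;
  }

  void ProcessDrain() {
    bool gotFrame = false;
    // Mirrors: while (NS_SUCCEEDED(DoDecode(empty, &gotFrame)) && gotFrame);
    while (Succeeded(DoDecode(&gotFrame)) && gotFrame) {
    }
    std::cout << "drain complete\n";
  }
};

int main() {
  VideoDecoder d;
  d.ProcessDrain();  // prints three delayed frames, then "drain complete"
}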
--- a/dom/media/platforms/gonk/GonkAudioDecoderManager.cpp +++ b/dom/media/platforms/gonk/GonkAudioDecoderManager.cpp @@ -52,17 +52,17 @@ GonkAudioDecoderManager::~GonkAudioDecod } RefPtr<MediaDataDecoder::InitPromise> GonkAudioDecoderManager::Init() { if (InitMediaCodecProxy()) { return InitPromise::CreateAndResolve(TrackType::kAudioTrack, __func__); } else { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } } bool GonkAudioDecoderManager::InitMediaCodecProxy() { status_t rv = OK; if (!InitLoopers(MediaData::AUDIO_DATA)) {
--- a/dom/media/platforms/gonk/GonkMediaDataDecoder.cpp +++ b/dom/media/platforms/gonk/GonkMediaDataDecoder.cpp @@ -136,17 +136,17 @@ nsresult GonkDecoderManager::Shutdown() { if (mDecoder.get()) { mDecoder->stop(); mDecoder->ReleaseMediaResources(); mDecoder = nullptr; } - mInitPromise.RejectIfExists(DecoderFailureReason::CANCELED, __func__); + mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__); return NS_OK; } size_t GonkDecoderManager::NumQueuedSamples() { MutexAutoLock lock(mMutex); @@ -170,31 +170,33 @@ GonkDecoderManager::ProcessInput(bool aE mToDo->setInt32("input-eos", 1); } mDecoder->requestActivityNotification(mToDo); } else if (aEndOfStream) { mToDo->setInt32("input-eos", 1); } } else { GMDD_LOG("input processed: error#%d", rv); - mDecodeCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + __func__)); } } void GonkDecoderManager::ProcessFlush() { MOZ_ASSERT(OnTaskLooper()); mLastTime = INT64_MIN; MonitorAutoLock lock(mFlushMonitor); mWaitOutput.Clear(); if (mDecoder->flush() != OK) { GMDD_LOG("flush error"); - mDecodeCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + __func__)); } mIsFlushing = false; lock.NotifyAll(); } // Use output timestamp to determine which output buffer is already returned // and remove corresponding info, except for EOS, from the waiting list. // This method handles the cases that audio decoder sends multiple output @@ -220,17 +222,18 @@ void GonkDecoderManager::ProcessToDo(bool aEndOfStream) { MOZ_ASSERT(OnTaskLooper()); MOZ_ASSERT(mToDo.get() != nullptr); mToDo.clear(); if (NumQueuedSamples() > 0 && ProcessQueuedSamples() < 0) { - mDecodeCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + __func__)); return; } while (mWaitOutput.Length() > 0) { RefPtr<MediaData> output; WaitOutputInfo wait = mWaitOutput.ElementAt(0); nsresult rv = Output(wait.mOffset, output); if (rv == NS_OK) { @@ -247,17 +250,18 @@ GonkDecoderManager::ProcessToDo(bool aEn MOZ_ASSERT(mWaitOutput.Length() == 1); mWaitOutput.RemoveElementAt(0); mDecodeCallback->DrainComplete(); ResetEOS(); return; } else if (rv == NS_ERROR_NOT_AVAILABLE) { break; } else { - mDecodeCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + __func__)); return; } } if (!aEndOfStream && NumQueuedSamples() <= MIN_QUEUED_SAMPLES) { mDecodeCallback->InputExhausted(); // No need to shedule todo task this time because InputExhausted() will // cause Input() to be invoked and do it for us. @@ -275,17 +279,18 @@ GonkDecoderManager::ProcessToDo(bool aEn void GonkDecoderManager::ResetEOS() { // After eos, android::MediaCodec needs to be flushed to receive next input mWaitOutput.Clear(); if (mDecoder->flush() != OK) { GMDD_LOG("flush error"); - mDecodeCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + __func__)); } } void GonkDecoderManager::onMessageReceived(const sp<AMessage> &aMessage) { switch (aMessage->what()) { case kNotifyProcessInput:
--- a/dom/media/platforms/gonk/GonkMediaDataDecoder.h +++ b/dom/media/platforms/gonk/GonkMediaDataDecoder.h @@ -18,17 +18,16 @@ class MediaCodecProxy; namespace mozilla { class MediaRawData; // Manage the data flow from inputting encoded data and outputting decode data. class GonkDecoderManager : public android::AHandler { public: typedef TrackInfo::TrackType TrackType; typedef MediaDataDecoder::InitPromise InitPromise; - typedef MediaDataDecoder::DecoderFailureReason DecoderFailureReason; virtual ~GonkDecoderManager() {} virtual RefPtr<InitPromise> Init() = 0; virtual const char* GetDescriptionName() const = 0; // Asynchronously send sample into mDecoder. If out of input buffer, aSample // will be queued for later re-send.
--- a/dom/media/platforms/gonk/GonkVideoDecoderManager.cpp +++ b/dom/media/platforms/gonk/GonkVideoDecoderManager.cpp @@ -125,35 +125,35 @@ GonkVideoDecoderManager::Init() char propValue[PROPERTY_VALUE_MAX]; property_get("ro.moz.omx.hw.max_width", propValue, "-1"); maxWidth = -1 == atoi(propValue) ? MAX_VIDEO_WIDTH : atoi(propValue); property_get("ro.moz.omx.hw.max_height", propValue, "-1"); maxHeight = -1 == atoi(propValue) ? MAX_VIDEO_HEIGHT : atoi(propValue) ; if (uint32_t(mConfig.mImage.width * mConfig.mImage.height) > maxWidth * maxHeight) { GVDM_LOG("Video resolution exceeds hw codec capability"); - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } // Validate the container-reported frame and pictureRect sizes. This ensures // that our video frame creation code doesn't overflow. if (!IsValidVideoRegion(mConfig.mImage, mConfig.ImageRect(), mConfig.mDisplay)) { GVDM_LOG("It is not a valid region"); - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } mReaderTaskQueue = AbstractThread::GetCurrent()->AsTaskQueue(); MOZ_ASSERT(mReaderTaskQueue); if (mDecodeLooper.get() != nullptr) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } if (!InitLoopers(MediaData::VIDEO_DATA)) { - return InitPromise::CreateAndReject(DecoderFailureReason::INIT_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); } RefPtr<InitPromise> p = mInitPromise.Ensure(__func__); android::sp<GonkVideoDecoderManager> self = this; mDecoder = MediaCodecProxy::CreateByType(mDecodeLooper, mConfig.mMimeType.get(), false); @@ -667,28 +667,28 @@ GonkVideoDecoderManager::codecReserved() rv = mDecoder->Input(mConfig.mCodecSpecificConfig->Elements(), mConfig.mCodecSpecificConfig->Length(), 0, android::MediaCodec::BUFFER_FLAG_CODECCONFIG, CODECCONFIG_TIMEOUT_US); } if (rv != OK) { GVDM_LOG("Failed to configure codec!!!!"); - mInitPromise.Reject(DecoderFailureReason::INIT_ERROR, __func__); + mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); return; } mInitPromise.Resolve(TrackType::kVideoTrack, __func__); } void GonkVideoDecoderManager::codecCanceled() { GVDM_LOG("codecCanceled"); - mInitPromise.RejectIfExists(DecoderFailureReason::CANCELED, __func__); + mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__); } // Called on GonkDecoderManager::mTaskLooper thread. void GonkVideoDecoderManager::onMessageReceived(const sp<AMessage> &aMessage) { switch (aMessage->what()) { case kNotifyPostReleaseBuffer:
--- a/dom/media/platforms/omx/OmxDataDecoder.cpp +++ b/dom/media/platforms/omx/OmxDataDecoder.cpp @@ -164,17 +164,17 @@ OmxDataDecoder::Init() mTrackInfo.get()) ->Then(mOmxTaskQueue, __func__, [self] () { // Omx state should be OMX_StateIdle. self->mOmxState = self->mOmxLayer->GetState(); MOZ_ASSERT(self->mOmxState != OMX_StateIdle); }, [self] () { - self->RejectInitPromise(DecoderFailureReason::INIT_ERROR, __func__); + self->RejectInitPromise(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); }); return p; } void OmxDataDecoder::Input(MediaRawData* aSample) { @@ -425,19 +425,19 @@ OmxDataDecoder::EmptyBufferDone(BufferDa void OmxDataDecoder::EmptyBufferFailure(OmxBufferFailureHolder aFailureHolder) { NotifyError(aFailureHolder.mError, __func__); } void -OmxDataDecoder::NotifyError(OMX_ERRORTYPE aOmxError, const char* aLine, MediaDataDecoderError aError) +OmxDataDecoder::NotifyError(OMX_ERRORTYPE aOmxError, const char* aLine, const MediaResult& aError) { - LOG("NotifyError %d (%d) at %s", aOmxError, aError, aLine); + LOG("NotifyError %d (%d) at %s", aOmxError, aError.Code(), aLine); mCallback->Error(aError); } void OmxDataDecoder::FillAndEmptyBuffers() { MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn()); MOZ_ASSERT(mOmxState == OMX_StateExecuting); @@ -546,23 +546,23 @@ OmxDataDecoder::ResolveInitPromise(const NS_NewRunnableFunction([self, aMethodName] () { MOZ_ASSERT(self->mReaderTaskQueue->IsCurrentThreadIn()); self->mInitPromise.ResolveIfExists(self->mTrackInfo->GetType(), aMethodName); }); mReaderTaskQueue->Dispatch(r.forget()); } void -OmxDataDecoder::RejectInitPromise(DecoderFailureReason aReason, const char* aMethodName) +OmxDataDecoder::RejectInitPromise(MediaResult aError, const char* aMethodName) { RefPtr<OmxDataDecoder> self = this; nsCOMPtr<nsIRunnable> r = - NS_NewRunnableFunction([self, aReason, aMethodName] () { + NS_NewRunnableFunction([self, aError, aMethodName] () { MOZ_ASSERT(self->mReaderTaskQueue->IsCurrentThreadIn()); - self->mInitPromise.RejectIfExists(aReason, aMethodName); + self->mInitPromise.RejectIfExists(aError, aMethodName); }); mReaderTaskQueue->Dispatch(r.forget()); } void OmxDataDecoder::OmxStateRunner() { MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn()); @@ -578,40 +578,40 @@ OmxDataDecoder::OmxStateRunner() mOmxLayer->SendCommand(OMX_CommandStateSet, OMX_StateIdle, nullptr) ->Then(mOmxTaskQueue, __func__, [self] () { // Current state should be OMX_StateIdle. self->mOmxState = self->mOmxLayer->GetState(); MOZ_ASSERT(self->mOmxState == OMX_StateIdle); }, [self] () { - self->RejectInitPromise(DecoderFailureReason::INIT_ERROR, __func__); + self->RejectInitPromise(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); }); // Allocate input and output buffers. 
OMX_DIRTYPE types[] = {OMX_DIRTYPE::OMX_DirInput, OMX_DIRTYPE::OMX_DirOutput}; for(const auto id : types) { if (NS_FAILED(AllocateBuffers(id))) { LOG("Failed to allocate buffer on port %d", id); - RejectInitPromise(DecoderFailureReason::INIT_ERROR, __func__); + RejectInitPromise(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); break; } } } else if (mOmxState == OMX_StateIdle) { RefPtr<OmxDataDecoder> self = this; mOmxLayer->SendCommand(OMX_CommandStateSet, OMX_StateExecuting, nullptr) ->Then(mOmxTaskQueue, __func__, [self] () { self->mOmxState = self->mOmxLayer->GetState(); MOZ_ASSERT(self->mOmxState == OMX_StateExecuting); self->ResolveInitPromise(__func__); }, [self] () { - self->RejectInitPromise(DecoderFailureReason::INIT_ERROR, __func__); + self->RejectInitPromise(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); }); } else if (mOmxState == OMX_StateExecuting) { // Configure codec once it gets OMX_StateExecuting state. FillCodecConfigDataToOmx(); } else { MOZ_ASSERT(0); } } @@ -685,17 +685,18 @@ OmxDataDecoder::Event(OMX_EVENTTYPE aEve } LOG("Got OMX_EventPortSettingsChanged event"); break; } default: { // Got error during decoding, send msg to MFR skipping to next key frame. if (aEvent == OMX_EventError && mOmxState == OMX_StateExecuting) { - NotifyError((OMX_ERRORTYPE)aData1, __func__, MediaDataDecoderError::DECODE_ERROR); + NotifyError((OMX_ERRORTYPE)aData1, __func__, + MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__)); return true; } LOG("WARNING: got none handle event: %d, aData1: %d, aData2: %d", aEvent, aData1, aData2); return false; } }
--- a/dom/media/platforms/omx/OmxDataDecoder.h +++ b/dom/media/platforms/omx/OmxDataDecoder.h @@ -82,33 +82,33 @@ public: // Return true if event is handled. bool Event(OMX_EVENTTYPE aEvent, OMX_U32 aData1, OMX_U32 aData2); protected: void InitializationTask(); void ResolveInitPromise(const char* aMethodName); - void RejectInitPromise(DecoderFailureReason aReason, const char* aMethodName); + void RejectInitPromise(MediaResult aError, const char* aMethodName); void OmxStateRunner(); void FillAndEmptyBuffers(); void FillBufferDone(BufferData* aData); void FillBufferFailure(OmxBufferFailureHolder aFailureHolder); void EmptyBufferDone(BufferData* aData); void EmptyBufferFailure(OmxBufferFailureHolder aFailureHolder); void NotifyError(OMX_ERRORTYPE aOmxError, const char* aLine, - MediaDataDecoderError aError = MediaDataDecoderError::FATAL_ERROR); + const MediaResult& aError = MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR)); // Configure audio/video codec. // Some codec may just ignore this and rely on codec specific data in // FillCodecConfigDataToOmx(). void ConfigCodec(); // Sending codec specific data to OMX component. OMX component could send a // OMX_EventPortSettingsChanged back to client. And then client needs to
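NotifyError() above gains a const MediaResult& parameter with a default value, so call sites with nothing specific to say still report a generic fatal error while sites with more context (the OMX_EventError case) pass an explicit code. A minimal sketch of that defaulted const-reference shape, with a stand-in Result type and made-up codes:

// Sketch of the defaulted error parameter used by NotifyError above.
#include <iostream>
#include <string>

struct Result {
  int code;
  std::string where;
};

// Callers that know nothing specific get the generic fatal default;
// callers with more context supply their own Result.
void NotifyError(const char* aLine,
                 const Result& aError = Result{1 /* fatal */, ""}) {
  std::cout << "error " << aError.code << " reported from " << aLine << "\n";
}

int main() {
  NotifyError("EmptyBufferFailure");                            // default: fatal
  NotifyError("Event", Result{2 /* decode error */, "Event"});  // explicit code
}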
--- a/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp +++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp @@ -118,17 +118,17 @@ WMFMediaDataDecoder::ProcessDecode(Media if (mIsFlushing) { // Skip sample, to be released by runnable. return; } HRESULT hr = mMFTManager->Input(aSample); if (FAILED(hr)) { NS_WARNING("MFTManager rejected sample"); - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__)); if (!mRecordedError) { SendTelemetry(hr); mRecordedError = true; } return; } mLastStreamOffset = aSample->mOffset; @@ -145,17 +145,17 @@ WMFMediaDataDecoder::ProcessOutput() output) { mHasSuccessfulOutput = true; mCallback->Output(output); } if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) { mCallback->InputExhausted(); } else if (FAILED(hr)) { NS_WARNING("WMFMediaDataDecoder failed to output data"); - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__)); if (!mRecordedError) { SendTelemetry(hr); mRecordedError = true; } } } void
--- a/dom/media/platforms/wrappers/FuzzingWrapper.cpp +++ b/dom/media/platforms/wrappers/FuzzingWrapper.cpp @@ -166,29 +166,27 @@ DecoderCallbackFuzzingWrapper::Output(Me } // Passing the data straight through, no need to dispatch to another queue, // callback should deal with that. mCallback->Output(aData); } void -DecoderCallbackFuzzingWrapper::Error(MediaDataDecoderError aError) +DecoderCallbackFuzzingWrapper::Error(const MediaResult& aError) { if (!mTaskQueue->IsCurrentThreadIn()) { - mTaskQueue->Dispatch( - NewRunnableMethod<MediaDataDecoderError>(this, - &DecoderCallbackFuzzingWrapper::Error, - aError)); + mTaskQueue->Dispatch(NewRunnableMethod<MediaResult>( + this, &DecoderCallbackFuzzingWrapper::Error, aError)); return; } CFW_LOGV(""); MOZ_ASSERT(mCallback); ClearDelayedOutput(); - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(aError); } void DecoderCallbackFuzzingWrapper::InputExhausted() { if (!mTaskQueue->IsCurrentThreadIn()) { mTaskQueue->Dispatch(NewRunnableMethod(this, &DecoderCallbackFuzzingWrapper::InputExhausted)); return;
--- a/dom/media/platforms/wrappers/FuzzingWrapper.h +++ b/dom/media/platforms/wrappers/FuzzingWrapper.h @@ -55,17 +55,17 @@ public: // in lots of frames being decoded and queued for delayed output! void SetDontDelayInputExhausted(bool aDontDelayInputExhausted); private: virtual ~DecoderCallbackFuzzingWrapper(); // MediaDataDecoderCallback implementation. void Output(MediaData* aData) override; - void Error(MediaDataDecoderError aError) override; + void Error(const MediaResult& aError) override; void InputExhausted() override; void DrainComplete() override; void ReleaseMediaResources() override; bool OnReaderTaskQueue() override; MediaDataDecoderCallback* mCallback; // Settings for minimum frame output interval & InputExhausted,
--- a/dom/media/platforms/wrappers/H264Converter.cpp +++ b/dom/media/platforms/wrappers/H264Converter.cpp @@ -50,17 +50,17 @@ H264Converter::Init() } void H264Converter::Input(MediaRawData* aSample) { if (!mp4_demuxer::AnnexB::ConvertSampleToAVCC(aSample)) { // We need AVCC content to be able to later parse the SPS. // This is a no-op if the data is already AVCC. - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)); return; } if (mInitPromiseRequest.Exists()) { if (mNeedKeyframe) { if (!aSample->mKeyframe) { // Frames dropped, we need a new one. mCallback->InputExhausted(); @@ -83,28 +83,28 @@ H264Converter::Input(MediaRawData* aSamp // Ignore for the time being, the MediaRawData will be dropped. mCallback->InputExhausted(); return; } } else { rv = CheckForSPSChange(aSample); } if (NS_FAILED(rv)) { - mCallback->Error(MediaDataDecoderError::DECODE_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)); return; } if (mNeedKeyframe && !aSample->mKeyframe) { mCallback->InputExhausted(); return; } if (!mNeedAVCC && !mp4_demuxer::AnnexB::ConvertSampleToAnnexB(aSample)) { - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)); return; } mNeedKeyframe = false; aSample->mExtraData = mCurrentConfig.mExtraData; mDecoder->Input(aSample); @@ -244,20 +244,21 @@ H264Converter::OnDecoderInitDone(const T } if (!gotInput) { mCallback->InputExhausted(); } mMediaRawSamples.Clear(); } void -H264Converter::OnDecoderInitFailed(MediaDataDecoder::DecoderFailureReason aReason) +H264Converter::OnDecoderInitFailed(MediaResult aError) { mInitPromiseRequest.Complete(); - mCallback->Error(MediaDataDecoderError::FATAL_ERROR); + mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + __func__)); } nsresult H264Converter::CheckForSPSChange(MediaRawData* aSample) { RefPtr<MediaByteBuffer> extra_data = mp4_demuxer::AnnexB::ExtractExtraData(aSample); if (!mp4_demuxer::AnnexB::HasSPS(extra_data) ||
--- a/dom/media/platforms/wrappers/H264Converter.h +++ b/dom/media/platforms/wrappers/H264Converter.h @@ -46,17 +46,17 @@ private: // Returns NS_ERROR_FAILURE if error is permanent and can't be recovered and // will set mError accordingly. nsresult CreateDecoder(DecoderDoctorDiagnostics* aDiagnostics); nsresult CreateDecoderAndInit(MediaRawData* aSample); nsresult CheckForSPSChange(MediaRawData* aSample); void UpdateConfigFromExtraData(MediaByteBuffer* aExtraData); void OnDecoderInitDone(const TrackType aTrackType); - void OnDecoderInitFailed(MediaDataDecoder::DecoderFailureReason aReason); + void OnDecoderInitFailed(MediaResult aError); RefPtr<PlatformDecoderModule> mPDM; VideoInfo mOriginalConfig; VideoInfo mCurrentConfig; layers::LayersBackend mLayersBackend; RefPtr<layers::ImageContainer> mImageContainer; const RefPtr<TaskQueue> mTaskQueue; nsTArray<RefPtr<MediaRawData>> mMediaRawSamples;
--- a/dom/media/test/test_decode_error.html +++ b/dom/media/test/test_decode_error.html @@ -22,16 +22,18 @@ function startTest(test, token) { var v = document.createElement("video"); manager.started(token); v.addEventListener("error", function (event) { var el = event.currentTarget; is(event.type, "error", "Expected event of type 'error'"); ok(el.error, "Element 'error' attr expected to have a value"); ok(el.error instanceof MediaError, "Element 'error' attr expected to be MediaError"); is(el.error.code, MediaError.MEDIA_ERR_DECODE, "Expected a decode error"); + ok(typeof el.error.message === 'string' || el.error.message instanceof String, "Element 'message' attr expected to be a string"); + ok(el.error.message.length > 0, "Element 'message' attr has content"); el._sawError = true; manager.finished(token); }, false); v.addEventListener("loadeddata", function () { ok(false, "Unexpected loadeddata event"); manager.finished(token); }, false); @@ -41,16 +43,21 @@ function startTest(test, token) { ok(false, "Unexpected ended event"); manager.finished(token); }, false); v.src = test.name; // implicitly starts a load. } SimpleTest.waitForExplicitFinish(); -SpecialPowers.pushPrefEnv({"set": [["media.cache_size", 40000]]}, beginTest); +SpecialPowers.pushPrefEnv({ + "set": [ + ["media.cache_size", 40000], + ["dom.MediaError.message.enabled", true] + ] +}, beginTest); function beginTest() { manager.runTests(gDecodeErrorTests, startTest); } </script> </pre> </body> </html>
--- a/dom/media/wave/WaveDemuxer.cpp +++ b/dom/media/wave/WaveDemuxer.cpp @@ -38,17 +38,17 @@ WAVDemuxer::InitInternal() return mTrackDemuxer->Init(); } RefPtr<WAVDemuxer::InitPromise> WAVDemuxer::Init() { if (!InitInternal()) { return InitPromise::CreateAndReject( - DemuxerFailureReason::DEMUXER_ERROR, __func__); + NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); } return InitPromise::CreateAndResolve(NS_OK, __func__); } bool WAVDemuxer::HasTrackType(TrackInfo::TrackType aType) const { return aType == TrackInfo::kAudioTrack; @@ -335,34 +335,31 @@ WAVTrackDemuxer::ScanUntil(const TimeUni } return SeekPosition(); } RefPtr<WAVTrackDemuxer::SamplesPromise> WAVTrackDemuxer::GetSamples(int32_t aNumSamples) { - if (!aNumSamples) { - return SamplesPromise::CreateAndReject( - DemuxerFailureReason::DEMUXER_ERROR, __func__); - } + MOZ_ASSERT(aNumSamples); RefPtr<SamplesHolder> datachunks = new SamplesHolder(); while (aNumSamples--) { RefPtr<MediaRawData> datachunk = GetNextChunk(FindNextChunk()); if (!datachunk) { break; } datachunks->mSamples.AppendElement(datachunk); } if (datachunks->mSamples.IsEmpty()) { return SamplesPromise::CreateAndReject( - DemuxerFailureReason::END_OF_STREAM, __func__); + NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); } return SamplesPromise::CreateAndResolve(datachunks, __func__); } void WAVTrackDemuxer::Reset() { @@ -372,17 +369,17 @@ WAVTrackDemuxer::Reset() mRIFFParser.Reset(); mFmtParser.Reset(); } RefPtr<WAVTrackDemuxer::SkipAccessPointPromise> WAVTrackDemuxer::SkipToNextRandomAccessPoint(TimeUnit aTimeThreshold) { return SkipAccessPointPromise::CreateAndReject( - SkipFailureHolder(DemuxerFailureReason::DEMUXER_ERROR, 0), __func__); + SkipFailureHolder(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, 0), __func__); } int64_t WAVTrackDemuxer::GetResourceOffset() const { return mOffset; }
--- a/dom/media/webaudio/MediaBufferDecoder.cpp +++ b/dom/media/webaudio/MediaBufferDecoder.cpp @@ -127,20 +127,20 @@ private: nsCOMPtr<nsIRunnable> event = new ReportResultTask(mDecodeJob, &WebAudioDecodeJob::OnFailure, aErrorCode); NS_DispatchToMainThread(event); } } void Decode(); void OnMetadataRead(MetadataHolder* aMetadata); - void OnMetadataNotRead(ReadMetadataFailureReason aReason); + void OnMetadataNotRead(const MediaResult& aError); void RequestSample(); void SampleDecoded(MediaData* aData); - void SampleNotDecoded(MediaDecoderReader::NotDecodedReason aReason); + void SampleNotDecoded(const MediaResult& aError); void FinishDecode(); void AllocateBuffer(); void CallbackTheResult(); void Cleanup() { MOZ_ASSERT(NS_IsMainThread()); // MediaDecoderReader expects that BufferDecoder is alive. @@ -305,17 +305,17 @@ MediaDecodeTask::OnMetadataRead(Metadata Telemetry::Accumulate(Telemetry::ID::MEDIA_CODEC_USED, codec); }); AbstractThread::MainThread()->Dispatch(task.forget()); RequestSample(); } void -MediaDecodeTask::OnMetadataNotRead(ReadMetadataFailureReason aReason) +MediaDecodeTask::OnMetadataNotRead(const MediaResult& aReason) { mDecoderReader->Shutdown(); ReportFailureOnMainThread(WebAudioDecodeJob::InvalidContent); } void MediaDecodeTask::RequestSample() { @@ -332,25 +332,24 @@ MediaDecodeTask::SampleDecoded(MediaData if (!mFirstFrameDecoded) { mDecoderReader->ReadUpdatedMetadata(&mMediaInfo); mFirstFrameDecoded = true; } RequestSample(); } void -MediaDecodeTask::SampleNotDecoded(MediaDecoderReader::NotDecodedReason aReason) +MediaDecodeTask::SampleNotDecoded(const MediaResult& aError) { MOZ_ASSERT(!NS_IsMainThread()); - if (aReason == MediaDecoderReader::DECODE_ERROR) { + if (aError == NS_ERROR_DOM_MEDIA_END_OF_STREAM) { + FinishDecode(); + } else { mDecoderReader->Shutdown(); ReportFailureOnMainThread(WebAudioDecodeJob::InvalidContent); - } else { - MOZ_ASSERT(aReason == MediaDecoderReader::END_OF_STREAM); - FinishDecode(); } } void MediaDecodeTask::FinishDecode() { mDecoderReader->Shutdown();
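SampleNotDecoded() above now receives the whole MediaResult and treats end-of-stream as the normal completion path, while any other code aborts the Web Audio decode job as invalid content. A tiny sketch of that branch, using stand-in codes rather than the real nsresult comparison:

// Sketch of the SampleNotDecoded branching above.
#include <iostream>

enum class Code { EndOfStream, DecodeError, OutOfMemory };

void FinishDecode() { std::cout << "decoded buffer complete\n"; }
void ReportFailure() { std::cout << "report InvalidContent to the caller\n"; }

void SampleNotDecoded(Code aError) {
  if (aError == Code::EndOfStream) {
    FinishDecode();   // running out of input is the success path here
  } else {
    ReportFailure();  // anything else is treated as invalid content
  }
}

int main() {
  SampleNotDecoded(Code::EndOfStream);
  SampleNotDecoded(Code::DecodeError);
}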
--- a/dom/media/webm/WebMDemuxer.cpp +++ b/dom/media/webm/WebMDemuxer.cpp @@ -183,22 +183,22 @@ WebMDemuxer::~WebMDemuxer() } RefPtr<WebMDemuxer::InitPromise> WebMDemuxer::Init() { InitBufferedState(); if (NS_FAILED(ReadMetadata())) { - return InitPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); } if (!GetNumberTracks(TrackInfo::kAudioTrack) && !GetNumberTracks(TrackInfo::kVideoTrack)) { - return InitPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__); + return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__); } return InitPromise::CreateAndResolve(NS_OK, __func__); } void WebMDemuxer::InitBufferedState() { @@ -950,35 +950,33 @@ WebMTrackDemuxer::NextSample() } return nullptr; } RefPtr<WebMTrackDemuxer::SamplesPromise> WebMTrackDemuxer::GetSamples(int32_t aNumSamples) { RefPtr<SamplesHolder> samples = new SamplesHolder; - if (!aNumSamples) { - return SamplesPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__); - } + MOZ_ASSERT(aNumSamples); while (aNumSamples) { RefPtr<MediaRawData> sample(NextSample()); if (!sample) { break; } if (mNeedKeyframe && !sample->mKeyframe) { continue; } mNeedKeyframe = false; samples->mSamples.AppendElement(sample); aNumSamples--; } if (samples->mSamples.IsEmpty()) { - return SamplesPromise::CreateAndReject(DemuxerFailureReason::END_OF_STREAM, __func__); + return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__); } else { UpdateSamples(samples->mSamples); return SamplesPromise::CreateAndResolve(samples, __func__); } } void WebMTrackDemuxer::SetNextKeyFrameTime() @@ -1104,17 +1102,17 @@ WebMTrackDemuxer::SkipToNextRandomAccess } SetNextKeyFrameTime(); if (found) { WEBM_DEBUG("next sample: %f (parsed: %d)", media::TimeUnit::FromMicroseconds(sampleTime).ToSeconds(), parsed); return SkipAccessPointPromise::CreateAndResolve(parsed, __func__); } else { - SkipFailureHolder failure(DemuxerFailureReason::END_OF_STREAM, parsed); + SkipFailureHolder failure(NS_ERROR_DOM_MEDIA_END_OF_STREAM, parsed); return SkipAccessPointPromise::CreateAndReject(Move(failure), __func__); } } media::TimeIntervals WebMTrackDemuxer::GetBuffered() { return mParent->GetBuffered();
--- a/dom/webidl/MediaError.webidl +++ b/dom/webidl/MediaError.webidl @@ -14,9 +14,11 @@ interface MediaError { const unsigned short MEDIA_ERR_ABORTED = 1; const unsigned short MEDIA_ERR_NETWORK = 2; const unsigned short MEDIA_ERR_DECODE = 3; const unsigned short MEDIA_ERR_SRC_NOT_SUPPORTED = 4; [Constant] readonly attribute unsigned short code; + [Pref="dom.MediaError.message.enabled"] + readonly attribute DOMString message; };
--- a/ipc/glue/GeckoChildProcessHost.cpp +++ b/ipc/glue/GeckoChildProcessHost.cpp @@ -18,16 +18,20 @@ #include "SharedMemoryBasic.h" #endif #include "MainThreadUtils.h" #include "mozilla/Sprintf.h" #include "prenv.h" #include "nsXPCOMPrivate.h" +#if defined(XP_MACOSX) && defined(MOZ_CONTENT_SANDBOX) +#include "nsAppDirectoryServiceDefs.h" +#endif + #include "nsExceptionHandler.h" #include "nsDirectoryServiceDefs.h" #include "nsIFile.h" #include "nsPrintfCString.h" #include "mozilla/ClearOnShutdown.h" #include "mozilla/ipc/BrowserProcessSubThread.h" @@ -603,16 +607,30 @@ AddAppDirToCommandLine(std::vector<std:: aCmdLine.AppendLooseValue(wpath); #else nsAutoCString path; MOZ_ALWAYS_SUCCEEDS(appDir->GetNativePath(path)); aCmdLine.push_back("-appdir"); aCmdLine.push_back(path.get()); #endif } + +#if defined(XP_MACOSX) && defined(MOZ_CONTENT_SANDBOX) + // Full path to the profile dir + nsCOMPtr<nsIFile> profileDir; + rv = directoryService->Get(NS_APP_USER_PROFILE_50_DIR, + NS_GET_IID(nsIFile), + getter_AddRefs(profileDir)); + if (NS_SUCCEEDED(rv)) { + nsAutoCString path; + MOZ_ALWAYS_SUCCEEDS(profileDir->GetNativePath(path)); + aCmdLine.push_back("-profile"); + aCmdLine.push_back(path.get()); + } +#endif } } } #if defined(XP_WIN) && defined(MOZ_SANDBOX) static void MaybeAddNsprLogFileAccess(std::vector<std::wstring>& aAllowedFilesReadWrite) {
--- a/js/src/devtools/automation/winbuildenv.sh +++ b/js/src/devtools/automation/winbuildenv.sh @@ -5,17 +5,17 @@ mk_add_options() { echo "$@" } topsrcdir="$SOURCE" # Tooltool installs in parent of topsrcdir for spidermonkey builds. # Resolve that path since the mozconfigs assume tooltool installs in # topsrcdir. -VSPATH="$(cd ${topsrcdir}/.. && pwd)/vs2015u2" +VSPATH="$(cd ${topsrcdir}/.. && pwd)/vs2015u3" # When running on a developer machine, several variables will already # have the right settings and we will need to keep them since the # Windows mozconfigs overwrite them. echo "export ORIGINAL_INCLUDE=$INCLUDE" echo "export ORIGINAL_LIB=$LIB" echo "export ORIGINAL_LIBPATH=$LIBPATH"
--- a/js/src/old-configure.in +++ b/js/src/old-configure.in @@ -786,16 +786,20 @@ case "$target" in # behavior doesn't seem useful, so we turn it off. CXXFLAGS="$CXXFLAGS -Wno-microsoft-include" # We normally error out on unknown pragmas, but since clang-cl # claims to be MSVC, it would be difficult to add # #if defined(_MSC_VER) && !defined(__clang__) everywhere we # use such pragmas, so just ignore them. CFLAGS="$CFLAGS -Wno-unknown-pragmas" CXXFLAGS="$CXXFLAGS -Wno-unknown-pragmas" + # We get errors about various #pragma intrinsic directives from + # clang-cl, and we don't need to hear about those. + CFLAGS="$CFLAGS -Wno-ignored-pragmas" + CXXFLAGS="$CXXFLAGS -Wno-ignored-pragmas" # clang-cl's Intrin.h marks things like _ReadWriteBarrier # as __attribute((__deprecated__)). This is nice to know, # but since we don't get the equivalent warning from MSVC, # let's just ignore it. CFLAGS="$CFLAGS -Wno-deprecated-declarations" CXXFLAGS="$CXXFLAGS -Wno-deprecated-declarations" # We use a function like: # __declspec(noreturn) __inline void f() {}
--- a/layout/generic/nsSubDocumentFrame.cpp +++ b/layout/generic/nsSubDocumentFrame.cpp @@ -36,20 +36,23 @@ #include "nsIObjectLoadingContent.h" #include "nsLayoutUtils.h" #include "FrameLayerBuilder.h" #include "nsPluginFrame.h" #include "nsContentUtils.h" #include "nsIPermissionManager.h" #include "nsServiceManagerUtils.h" #include "nsIDOMMutationEvent.h" +#include "mozilla/Preferences.h" using namespace mozilla; using mozilla::layout::RenderFrameParent; +static bool sShowPreviousPage = true; + static nsIDocument* GetDocumentFromView(nsView* aView) { NS_PRECONDITION(aView, ""); nsViewManager* vm = aView->GetViewManager(); nsIPresShell* ps = vm ? vm->GetPresShell() : nullptr; return ps ? ps->GetDocument() : nullptr; @@ -102,16 +105,22 @@ void nsSubDocumentFrame::Init(nsIContent* aContent, nsContainerFrame* aParent, nsIFrame* aPrevInFlow) { // determine if we are a <frame> or <iframe> nsCOMPtr<nsIDOMHTMLFrameElement> frameElem = do_QueryInterface(aContent); mIsInline = frameElem ? false : true; + static bool addedShowPreviousPage = false; + if (!addedShowPreviousPage) { + Preferences::AddBoolVarCache(&sShowPreviousPage, "layout.show_previous_page", true); + addedShowPreviousPage = true; + } + nsAtomicContainerFrame::Init(aContent, aParent, aPrevInFlow); // We are going to create an inner view. If we need a view for the // OuterFrame but we wait for the normal view creation path in // nsCSSFrameConstructor, then we will lose because the inner view's // parent will already have been set to some outer view (e.g., the // canvas) when it really needs to have this frame's view as its // parent. So, create this frame's view right away, whether we @@ -222,17 +231,17 @@ nsSubDocumentFrame::GetSubdocumentPresSh // being the old page that will probably have a frame. nsView* nextView = subdocView->GetNextSibling(); nsIFrame* frame = nullptr; if (nextView) { frame = nextView->GetFrame(); } if (frame) { nsIPresShell* ps = frame->PresContext()->PresShell(); - if (!presShell || (ps && !ps->IsPaintingSuppressed())) { + if (!presShell || (ps && !ps->IsPaintingSuppressed() && sShowPreviousPage)) { subdocView = nextView; subdocRootFrame = frame; presShell = ps; } } if (!presShell) { // If we don't have a frame we use this roundabout way to get the pres shell. if (!mFrameLoader)
--- a/media/mtransport/test/ice_unittest.cpp +++ b/media/mtransport/test/ice_unittest.cpp @@ -2875,16 +2875,34 @@ TEST_F(WebRtcIceConnectTest, TestConnect p2_->SetExpectedTypes(NrIceCandidate::Type::ICE_RELAYED, NrIceCandidate::Type::ICE_RELAYED); SetTurnServer(turn_server_, kDefaultStunServerPort, turn_user_, turn_password_); ASSERT_TRUE(Gather()); Connect(); } +TEST_F(WebRtcIceConnectTest, TestConnectSymmetricNatAndNoNat) { + p1_ = MakeUnique<IceTestPeer>("P1", test_utils_, true, false, false); + p1_->UseNat(); + p1_->SetFilteringType(TestNat::PORT_DEPENDENT); + p1_->SetMappingType(TestNat::PORT_DEPENDENT); + + p2_ = MakeUnique<IceTestPeer>("P2", test_utils_, false, false, false); + initted_ = true; + + AddStream(1); + p1_->SetExpectedTypes(NrIceCandidate::Type::ICE_PEER_REFLEXIVE, + NrIceCandidate::Type::ICE_HOST); + p2_->SetExpectedTypes(NrIceCandidate::Type::ICE_HOST, + NrIceCandidate::Type::ICE_PEER_REFLEXIVE); + ASSERT_TRUE(Gather()); + Connect(); +} + TEST_F(WebRtcIceConnectTest, TestGatherNatBlocksUDP) { if (turn_server_.empty()) return; UseNat(); BlockUdp(); AddStream(1); std::vector<NrIceTurnServer> turn_servers;
--- a/mobile/android/base/java/org/mozilla/gecko/GeckoApp.java +++ b/mobile/android/base/java/org/mozilla/gecko/GeckoApp.java @@ -2516,16 +2516,21 @@ public abstract class GeckoApp @Override public void run() { if (tab.doBack()) { return; } if (tab.isExternal()) { moveTaskToBack(true); + Tab nextSelectedTab = Tabs.getInstance().getNextTab(tab); + if (nextSelectedTab != null) { + int nextSelectedTabId = nextSelectedTab.getId(); + GeckoAppShell.notifyObservers("Tab:KeepZombified", Integer.toString(nextSelectedTabId)); + } tabs.closeTab(tab); return; } final int parentId = tab.getParentId(); final Tab parent = tabs.getTab(parentId); if (parent != null) { // The back button should always return to the parent (not a sibling).
--- a/mobile/android/base/java/org/mozilla/gecko/tabqueue/TabReceivedService.java +++ b/mobile/android/base/java/org/mozilla/gecko/tabqueue/TabReceivedService.java @@ -11,16 +11,18 @@ import org.mozilla.gecko.R; import org.mozilla.gecko.db.BrowserContract; import android.app.IntentService; import android.app.PendingIntent; import android.content.Intent; import android.content.SharedPreferences; import android.content.res.Resources; import android.database.Cursor; +import android.media.RingtoneManager; +import android.net.Uri; import android.support.annotation.Nullable; import android.support.annotation.WorkerThread; import android.support.v4.app.NotificationCompat; import android.support.v4.app.NotificationManagerCompat; import android.util.Log; /** * An IntentService that displays a notification for a tab sent to this device. @@ -62,16 +64,23 @@ public class TabReceivedService extends final NotificationCompat.Builder builder = new NotificationCompat.Builder(this); builder.setSmallIcon(R.drawable.flat_icon); builder.setContentTitle(notificationTitle); builder.setWhen(System.currentTimeMillis()); builder.setAutoCancel(true); builder.setContentText(uri); builder.setContentIntent(contentIntent); + // Trigger "heads-up" notification mode on supported Android versions. + builder.setPriority(NotificationCompat.PRIORITY_HIGH); + final Uri notificationSoundUri = RingtoneManager.getDefaultUri(RingtoneManager.TYPE_NOTIFICATION); + if (notificationSoundUri != null) { + builder.setSound(notificationSoundUri); + } + final SharedPreferences prefs = GeckoSharedPrefs.forApp(this); final int notificationId = getNextNotificationId(prefs.getInt(PREF_NOTIFICATION_ID, 0)); final NotificationManagerCompat notificationManager = NotificationManagerCompat.from(this); notificationManager.notify(notificationId, builder.build()); // Save the ID last so if the Service is killed and the Intent is redelivered, // the ID is unlikely to have been updated and we would re-use the the old one. // This would prevent two identical notifications from appearing if the
--- a/mobile/android/components/LoginManagerPrompter.js +++ b/mobile/android/components/LoginManagerPrompter.js @@ -124,16 +124,17 @@ LoginManagerPrompter.prototype = { /* * promptToSavePassword * */ promptToSavePassword : function (aLogin) { this._showSaveLoginNotification(aLogin); Services.telemetry.getHistogramById("PWMGR_PROMPT_REMEMBER_ACTION").add(PROMPT_DISPLAYED); + Services.obs.notifyObservers(aLogin, "passwordmgr-prompt-save", null); }, /* * _showLoginNotification * * Displays a notification doorhanger. * @param aBody * String message to be displayed in the doorhanger @@ -223,16 +224,18 @@ LoginManagerPrompter.prototype = { * Called when we think we detect a password change for an existing * login, when the form being submitted contains multiple password * fields. * */ promptToChangePassword : function (aOldLogin, aNewLogin) { this._showChangeLoginNotification(aOldLogin, aNewLogin.password); Services.telemetry.getHistogramById("PWMGR_PROMPT_UPDATE_ACTION").add(PROMPT_DISPLAYED); + let oldGUID = aOldLogin.QueryInterface(Ci.nsILoginMetaInfo).guid; + Services.obs.notifyObservers(aNewLogin, "passwordmgr-prompt-change", oldGUID); }, /* * _showChangeLoginNotification * * Shows the Change Password notification doorhanger. * */
--- a/mobile/android/components/SessionStore.js +++ b/mobile/android/components/SessionStore.js @@ -81,16 +81,21 @@ SessionStore.prototype = { // The index where the most recently closed tab was in the tabs array // when it was closed. _lastClosedTabIndex: -1, // Whether or not to send notifications for changes to the closed tabs. _notifyClosedTabs: false, + // If we're simultaneously closing both a tab and Firefox, we don't want + // to bother reloading the newly selected tab if it is zombified. + // The Java UI will tell us which tab to watch out for. + _keepAsZombieTabId: -1, + init: function ss_init() { loggingEnabled = Services.prefs.getBoolPref("browser.sessionstore.debug_logging"); // Get file references this._sessionFile = Services.dirsvc.get("ProfD", Ci.nsILocalFile); this._sessionFileBackup = this._sessionFile.clone(); this._sessionFilePrevious = this._sessionFile.clone(); this._sessionFileTemp = this._sessionFile.clone(); @@ -136,16 +141,17 @@ SessionStore.prototype = { observerService.addObserver(this, "domwindowopened", true); observerService.addObserver(this, "domwindowclosed", true); observerService.addObserver(this, "browser:purge-session-history", true); observerService.addObserver(this, "quit-application-requested", true); observerService.addObserver(this, "quit-application-proceeding", true); observerService.addObserver(this, "quit-application", true); observerService.addObserver(this, "Session:Restore", true); observerService.addObserver(this, "Session:NotifyLocationChange", true); + observerService.addObserver(this, "Tab:KeepZombified", true); observerService.addObserver(this, "application-background", true); observerService.addObserver(this, "application-foreground", true); observerService.addObserver(this, "ClosedTabs:StartNotifications", true); observerService.addObserver(this, "ClosedTabs:StopNotifications", true); observerService.addObserver(this, "last-pb-context-exited", true); observerService.addObserver(this, "Session:RestoreRecentTabs", true); observerService.addObserver(this, "Tabs:OpenMultiple", true); break; @@ -279,16 +285,23 @@ SessionStore.prototype = { if (data.shouldNotifyTabsOpenedToJava) { Messaging.sendRequest({ type: "Tabs:TabsOpened" }); } break; } + case "Tab:KeepZombified": { + if (aData >= 0) { + this._keepAsZombieTabId = aData; + log("Tab:KeepZombified " + aData); + } + break; + } case "application-background": // We receive this notification when Android's onPause callback is // executed. After onPause, the application may be terminated at any // point without notice; therefore, we must synchronously write out any // pending save state to ensure that this data does not get lost. log("application-background"); // Tab events dispatched immediately before the application was backgrounded // might actually arrive after this point, therefore save them without delay. @@ -296,16 +309,25 @@ SessionStore.prototype = { this._minSaveDelay = MINIMUM_SAVE_DELAY_BACKGROUND; // A small delay allows successive tab events to be batched together. this.flushPendingState(); break; case "application-foreground": // Reset minimum interval between session store writes back to default. log("application-foreground"); this._interval = Services.prefs.getIntPref("browser.sessionstore.interval"); this._minSaveDelay = MINIMUM_SAVE_DELAY; + + // If we skipped restoring a zombified tab before backgrounding, + // we might have to do it now instead. 
+ let window = Services.wm.getMostRecentWindow("navigator:browser"); + let tab = window.BrowserApp.selectedTab; + + if (tab.browser.__SS_restore) { + this._restoreZombieTab(tab.browser, tab.id); + } break; case "ClosedTabs:StartNotifications": this._notifyClosedTabs = true; log("ClosedTabs:StartNotifications"); this._sendClosedTabsToJava(Services.wm.getMostRecentWindow("navigator:browser")); break; case "ClosedTabs:StopNotifications": this._notifyClosedTabs = false; @@ -383,30 +405,38 @@ SessionStore.prototype = { // before trying to restore this data. log("load for tab " + window.BrowserApp.getTabForBrowser(browser).id); if (browser.__SS_restoreDataOnLoad) { delete browser.__SS_restoreDataOnLoad; this._restoreTextData(browser.__SS_data.formdata, browser); } break; } - case "pageshow": { + case "pageshow": + case "AboutReaderContentReady": { let browser = aEvent.currentTarget; // Skip subframe pageshows. if (browser.contentDocument !== aEvent.originalTarget) { return; } + if (browser.currentURI.spec.startsWith("about:reader") && + !browser.contentDocument.body.classList.contains("loaded")) { + // Don't restore the scroll position of an about:reader page at this point; + // wait for the custom event dispatched from AboutReader.jsm instead. + return; + } + // Restoring the scroll position needs to happen after the zoom level has been // restored, which is done by the MobileViewportManager either on first paint // or on load, whichever comes first. // In the latter case, our load handler runs before the MVM's one, which is the // wrong way around, so we have to use a later event instead. - log("pageshow for tab " + window.BrowserApp.getTabForBrowser(browser).id); + log(aEvent.type + " for tab " + window.BrowserApp.getTabForBrowser(browser).id); if (browser.__SS_restoreDataOnPageshow) { delete browser.__SS_restoreDataOnPageshow; this._restoreScrollPosition(browser.__SS_data.scrolldata, browser); } else { // We're not restoring, capture the initial scroll position on pageshow. this.onTabScroll(window, browser); } break; @@ -509,16 +539,17 @@ SessionStore.prototype = { aBrowser.addEventListener("DOMTitleChanged", this, true); // Use load to restore text data aBrowser.addEventListener("load", this, true); // Gecko might set the initial zoom level after the JS "load" event, // so we have to restore zoom and scroll position after that. aBrowser.addEventListener("pageshow", this, true); + aBrowser.addEventListener("AboutReaderContentReady", this, true); // Use a combination of events to watch for text data changes aBrowser.addEventListener("change", this, true); aBrowser.addEventListener("input", this, true); aBrowser.addEventListener("DOMAutoComplete", this, true); // Record the current scroll position and zoom level. aBrowser.addEventListener("scroll", this, true); @@ -532,16 +563,17 @@ SessionStore.prototype = { this._updateCrashReportURL(aWindow); }, onTabRemove: function ss_onTabRemove(aWindow, aBrowser, aNoNotification) { //