--- a/browser/components/loop/standalone/content/js/webapp.js
+++ b/browser/components/loop/standalone/content/js/webapp.js
@@ -112,18 +112,17 @@ loop.webapp = (function($, _, OT, mozL10
);
}
});
var ConversationBranding = React.createClass({displayName: 'ConversationBranding',
render: function() {
return (
React.DOM.h1({className: "standalone-header-title"},
- React.DOM.strong(null, mozL10n.get("brandShortname")),
- mozL10n.get("clientShortname2")
+ React.DOM.strong(null, mozL10n.get("clientShortname2"))
)
);
}
});
/**
* The Firefox Marketplace exposes a web page that contains a postMesssage
* based API that wraps a small set of functionality from the WebApps API
@@ -452,16 +451,17 @@ loop.webapp = (function($, _, OT, mozL10
}
},
render: function() {
var tosLinkName = mozL10n.get("terms_of_use_link_text");
var privacyNoticeName = mozL10n.get("privacy_notice_link_text");
var tosHTML = mozL10n.get("legal_text_and_links", {
+ "clientShortname": mozL10n.get("clientShortname2"),
"terms_of_use_url": "<a target=_blank href='" +
mozL10n.get("legal_website") + "'>" +
tosLinkName + "</a>",
"privacy_notice_url": "<a target=_blank href='" +
mozL10n.get("privacy_website") + "'>" + privacyNoticeName + "</a>"
});
var tosClasses = React.addons.classSet({
--- a/browser/components/loop/standalone/content/js/webapp.jsx
+++ b/browser/components/loop/standalone/content/js/webapp.jsx
@@ -112,18 +112,17 @@ loop.webapp = (function($, _, OT, mozL10
);
}
});
var ConversationBranding = React.createClass({
render: function() {
return (
<h1 className="standalone-header-title">
- <strong>{mozL10n.get("brandShortname")}</strong>
- {mozL10n.get("clientShortname2")}
+ <strong>{mozL10n.get("clientShortname2")}</strong>
</h1>
);
}
});
/**
* The Firefox Marketplace exposes a web page that contains a postMesssage
* based API that wraps a small set of functionality from the WebApps API
@@ -452,16 +451,17 @@ loop.webapp = (function($, _, OT, mozL10
}
},
render: function() {
var tosLinkName = mozL10n.get("terms_of_use_link_text");
var privacyNoticeName = mozL10n.get("privacy_notice_link_text");
var tosHTML = mozL10n.get("legal_text_and_links", {
+ "clientShortname": mozL10n.get("clientShortname2"),
"terms_of_use_url": "<a target=_blank href='" +
mozL10n.get("legal_website") + "'>" +
tosLinkName + "</a>",
"privacy_notice_url": "<a target=_blank href='" +
mozL10n.get("privacy_website") + "'>" + privacyNoticeName + "</a>"
});
var tosClasses = React.addons.classSet({
--- a/browser/components/loop/standalone/content/l10n/loop.en-US.properties
+++ b/browser/components/loop/standalone/content/l10n/loop.en-US.properties
@@ -33,17 +33,17 @@ call_url_unavailable_notification_headin
call_url_unavailable_notification_message2=Sorry, this URL is not available. It may be expired or entered incorrectly.
promote_firefox_hello_heading=Download {{brandShortname}} to make free audio and video calls!
get_firefox_button=Get {{brandShortname}}
initiate_call_button_label2=Ready to start your conversation?
initiate_audio_video_call_button2=Start
initiate_audio_video_call_tooltip2=Start a video conversation
initiate_audio_call_button2=Voice conversation
initiate_call_cancel_button=Cancel
-legal_text_and_links=By using this product you agree to the {{terms_of_use_url}} and {{privacy_notice_url}}
+legal_text_and_links=By using {{clientShortname}} you agree to the {{terms_of_use_url}} and {{privacy_notice_url}}
terms_of_use_link_text=Terms of use
privacy_notice_link_text=Privacy notice
invite_header_text=Invite someone to join you.
## LOCALIZATION NOTE(brandShortname): This should not be localized and
## should remain "Firefox" for all locales.
brandShortname=Firefox
## LOCALIZATION NOTE(clientShortname2): This should not be localized and
--- a/content/html/content/public/HTMLMediaElement.h
+++ b/content/html/content/public/HTMLMediaElement.h
@@ -280,16 +280,23 @@ public:
/**
* Called when there's been an error fetching the resource. This decides
* whether it's appropriate to fire an error event.
*/
void NotifyLoadError();
void NotifyMediaTrackEnabled(MediaTrack* aTrack);
+ /**
+ * Called by a DOMMediaStream when it has tracks available.
+ * This allows us to setup audio and video outputs after the stream
+ * has already reported that playback started, in case they are added late.
+ */
+ void NotifyMediaStreamTracksAvailable(DOMMediaStream* aStream);
+
virtual bool IsNodeOfType(uint32_t aFlags) const MOZ_OVERRIDE;
/**
* Returns the current load ID. Asynchronous events store the ID that was
* current when they were enqueued, and if it has changed when they come to
* fire, they consider themselves cancelled, and don't fire.
*/
uint32_t GetCurrentLoadID() { return mCurrentLoadID; }
@@ -611,16 +618,17 @@ public:
return FinishDecoderSetup(aDecoder, aStream, nullptr, nullptr);
}
protected:
virtual ~HTMLMediaElement();
class MediaLoadListener;
class StreamListener;
+ class MediaStreamTracksAvailableCallback;
virtual void GetItemValueText(nsAString& text) MOZ_OVERRIDE;
virtual void SetItemValueText(const nsAString& text) MOZ_OVERRIDE;
class WakeLockBoolWrapper {
public:
explicit WakeLockBoolWrapper(bool val = false)
: mValue(val), mCanPlay(true), mOuter(nullptr) {}
@@ -1013,16 +1021,19 @@ protected:
// These events get re-dispatched when the bfcache is exited.
nsTArray<nsString> mPendingEvents;
// Media loading flags. See:
// http://www.whatwg.org/specs/web-apps/current-work/#video)
nsMediaNetworkState mNetworkState;
nsMediaReadyState mReadyState;
+ // Last value passed from codec or stream source to UpdateReadyStateForData.
+ NextFrameStatus mLastNextFrameStatus;
+
enum LoadAlgorithmState {
// No load algorithm instance is waiting for a source to be added to the
// media in order to continue loading.
NOT_WAITING,
// We've run the load algorithm, and we tried all source children of the
// media element, and failed to load any successfully. We're waiting for
// another source element to be added to the media element, and will try
// to load any such element when its added.
--- a/content/html/content/src/HTMLMediaElement.cpp
+++ b/content/html/content/src/HTMLMediaElement.cpp
@@ -66,16 +66,18 @@
#include "nsURIHashKey.h"
#include "nsJSUtils.h"
#include "MediaStreamGraph.h"
#include "nsIScriptError.h"
#include "nsHostObjectProtocolHandler.h"
#include "mozilla/dom/MediaSource.h"
#include "MediaMetadataManager.h"
#include "MediaSourceDecoder.h"
+#include "AudioStreamTrack.h"
+#include "VideoStreamTrack.h"
#include "AudioChannelService.h"
#include "mozilla/dom/power/PowerManagerService.h"
#include "mozilla/dom/WakeLock.h"
#include "mozilla/dom/AudioTrack.h"
#include "mozilla/dom/AudioTrackList.h"
@@ -659,17 +661,20 @@ void HTMLMediaElement::AbortExistingLoad
mLoadedDataFired = false;
mAutoplaying = true;
mIsLoadingFromSourceChildren = false;
mSuspendedAfterFirstFrame = false;
mAllowSuspendAfterFirstFrame = true;
mHaveQueuedSelectResource = false;
mSuspendedForPreloadNone = false;
mDownloadSuspendedByCache = false;
+ mHasAudio = false;
+ mHasVideo = false;
mSourcePointer = nullptr;
+ mLastNextFrameStatus = NEXT_FRAME_UNINITIALIZED;
mTags = nullptr;
if (mNetworkState != nsIDOMHTMLMediaElement::NETWORK_EMPTY) {
NS_ASSERTION(!mDecoder && !mSrcStream, "How did someone setup a new stream/decoder already?");
ChangeNetworkState(nsIDOMHTMLMediaElement::NETWORK_EMPTY);
ChangeReadyState(nsIDOMHTMLMediaElement::HAVE_NOTHING);
mPaused = true;
@@ -893,16 +898,49 @@ void HTMLMediaElement::NotifyMediaTrackE
} else {
SetMutedInternal(mMuted & ~MUTED_BY_AUDIO_TRACK);
}
} else if (VideoTrack* track = aTrack->AsVideoTrack()) {
mDisableVideo = !track->Selected();
}
}
+void HTMLMediaElement::NotifyMediaStreamTracksAvailable(DOMMediaStream* aStream)
+{
+ if (!mSrcStream || mSrcStream != aStream) {
+ return;
+ }
+
+ bool oldHasAudio = mHasAudio;
+ bool oldHasVideo = mHasVideo;
+
+ nsAutoTArray<nsRefPtr<AudioStreamTrack>,1> audioTracks;
+ aStream->GetAudioTracks(audioTracks);
+ nsAutoTArray<nsRefPtr<VideoStreamTrack>,1> videoTracks;
+ aStream->GetVideoTracks(videoTracks);
+
+ mHasAudio = !audioTracks.IsEmpty();
+ mHasVideo = !videoTracks.IsEmpty();
+
+ if (!oldHasAudio && mHasAudio) {
+ GetSrcMediaStream()->AddAudioOutput(this);
+ GetSrcMediaStream()->SetAudioOutputVolume(this, float(mMuted ? 0.0 : mVolume));
+ }
+  if (!oldHasVideo && mHasVideo) {
+ VideoFrameContainer* container = GetVideoFrameContainer();
+ if (container) {
+ GetSrcMediaStream()->AddVideoOutput(container);
+ }
+ // mHasVideo changed so make sure the screen wakelock is updated
+ NotifyOwnerDocumentActivityChanged();
+ }
+
+ CheckAutoplayDataReady();
+}
+
void HTMLMediaElement::LoadFromSourceChildren()
{
NS_ASSERTION(mDelayingLoadEvent,
"Should delay load event (if in document) during load");
NS_ASSERTION(mIsLoadingFromSourceChildren,
"Must remember we're loading from source children");
nsIDocument* parentDoc = OwnerDoc()->GetParentDocument();
@@ -1982,16 +2020,17 @@ HTMLMediaElement::LookupMediaElementURIT
}
HTMLMediaElement::HTMLMediaElement(already_AddRefed<mozilla::dom::NodeInfo>& aNodeInfo)
: nsGenericHTMLElement(aNodeInfo),
mSrcStreamListener(nullptr),
mCurrentLoadID(0),
mNetworkState(nsIDOMHTMLMediaElement::NETWORK_EMPTY),
mReadyState(nsIDOMHTMLMediaElement::HAVE_NOTHING),
+ mLastNextFrameStatus(NEXT_FRAME_UNINITIALIZED),
mLoadWaitStatus(NOT_WAITING),
mVolume(1.0),
mPreloadAction(PRELOAD_UNDEFINED),
mMediaSize(-1,-1),
mLastCurrentTime(0.0),
mFragmentStart(-1.0),
mFragmentEnd(-1.0),
mDefaultPlaybackRate(1.0),
@@ -2476,25 +2515,33 @@ nsresult HTMLMediaElement::BindToTree(ns
if (aDocument) {
mAutoplayEnabled =
IsAutoplayEnabled() && (!aDocument || !aDocument->IsStaticDocument()) &&
!IsEditable();
// The preload action depends on the value of the autoplay attribute.
// It's value may have changed, so update it.
UpdatePreloadAction();
}
+ if (mDecoder) {
+ mDecoder->SetDormantIfNecessary(false);
+ }
return rv;
}
void HTMLMediaElement::UnbindFromTree(bool aDeep,
bool aNullParent)
{
if (!mPaused && mNetworkState != nsIDOMHTMLMediaElement::NETWORK_EMPTY)
Pause();
+
+ if (mDecoder) {
+ mDecoder->SetDormantIfNecessary(true);
+ }
+
nsGenericHTMLElement::UnbindFromTree(aDeep, aNullParent);
}
/* static */
CanPlayStatus
HTMLMediaElement::GetCanPlay(const nsAString& aType)
{
nsContentTypeParser parser(aType);
@@ -2802,16 +2849,38 @@ private:
bool mHaveCurrentData;
bool mBlocked;
// mMutex protects the fields below; they can be accessed on any thread
Mutex mMutex;
bool mPendingNotifyOutput;
};
+class HTMLMediaElement::MediaStreamTracksAvailableCallback:
+ public DOMMediaStream::OnTracksAvailableCallback
+{
+public:
+ explicit MediaStreamTracksAvailableCallback(HTMLMediaElement* aElement,
+ DOMMediaStream::TrackTypeHints aExpectedTracks = 0):
+ DOMMediaStream::OnTracksAvailableCallback(aExpectedTracks),
+ mElement(aElement)
+ {}
+  virtual void NotifyTracksAvailable(DOMMediaStream* aStream) MOZ_OVERRIDE
+ {
+ NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
+
+ if (!mElement) {
+ return;
+ }
+ mElement->NotifyMediaStreamTracksAvailable(aStream);
+ }
+private:
+ HTMLMediaElement* mElement;
+};
+
void HTMLMediaElement::SetupSrcMediaStreamPlayback(DOMMediaStream* aStream)
{
NS_ASSERTION(!mSrcStream && !mSrcStreamListener, "Should have been ended already");
mSrcStream = aStream;
nsRefPtr<MediaStream> stream = mSrcStream->GetStream();
if (stream) {
@@ -2823,53 +2892,55 @@ void HTMLMediaElement::SetupSrcMediaStre
mSrcStreamListener = new StreamListener(this);
GetSrcMediaStream()->AddListener(mSrcStreamListener);
if (mPaused) {
GetSrcMediaStream()->ChangeExplicitBlockerCount(1);
}
if (mPausedForInactiveDocumentOrChannel) {
GetSrcMediaStream()->ChangeExplicitBlockerCount(1);
}
+
+ mSrcStream->OnTracksAvailable(new MediaStreamTracksAvailableCallback(this, DOMMediaStream::HINT_CONTENTS_AUDIO));
+ mSrcStream->OnTracksAvailable(new MediaStreamTracksAvailableCallback(this, DOMMediaStream::HINT_CONTENTS_VIDEO));
+
+ MediaInfo mediaInfo;
+ mediaInfo.mAudio.mHasAudio = mHasAudio;
+ mediaInfo.mVideo.mHasVideo = mHasVideo;
+ MetadataLoaded(&mediaInfo, nullptr);
+
+ DispatchAsyncEvent(NS_LITERAL_STRING("suspend"));
+ mNetworkState = nsIDOMHTMLMediaElement::NETWORK_IDLE;
+
ChangeDelayLoadStatus(false);
- GetSrcMediaStream()->AddAudioOutput(this);
- GetSrcMediaStream()->SetAudioOutputVolume(this, float(mMuted ? 0.0 : mVolume));
- VideoFrameContainer* container = GetVideoFrameContainer();
- if (container) {
- GetSrcMediaStream()->AddVideoOutput(container);
- }
// Note: we must call DisconnectTrackListListeners(...) before dropping
// mSrcStream
mSrcStream->ConstructMediaTracks(AudioTracks(), VideoTracks());
- ChangeReadyState(nsIDOMHTMLMediaElement::HAVE_METADATA);
- DispatchAsyncEvent(NS_LITERAL_STRING("durationchange"));
- DispatchAsyncEvent(NS_LITERAL_STRING("loadedmetadata"));
- ChangeNetworkState(nsIDOMHTMLMediaElement::NETWORK_IDLE);
AddRemoveSelfReference();
// FirstFrameLoaded() will be called when the stream has current data.
}
void HTMLMediaElement::EndSrcMediaStreamPlayback()
{
MediaStream* stream = GetSrcMediaStream();
if (stream) {
stream->RemoveListener(mSrcStreamListener);
}
mSrcStream->DisconnectTrackListListeners(AudioTracks(), VideoTracks());
// Kill its reference to this element
mSrcStreamListener->Forget();
mSrcStreamListener = nullptr;
- if (stream) {
+ if (stream && mHasAudio) {
stream->RemoveAudioOutput(this);
}
VideoFrameContainer* container = GetVideoFrameContainer();
if (container) {
- if (stream) {
+ if (stream && mHasVideo) {
stream->RemoveVideoOutput(container);
}
container->ClearCurrentFrame();
}
if (mPaused && stream) {
stream->ChangeExplicitBlockerCount(-1);
}
if (mPausedForInactiveDocumentOrChannel && stream) {
@@ -2911,23 +2982,29 @@ void HTMLMediaElement::MetadataLoaded(co
// delete the VideoFrameContainer. This happens when the src is changed to an
// audio only file.
if (!aInfo->HasVideo() && mVideoFrameContainer) {
// call ForgetElement() such that callbacks from |mVideoFrameContainer|
// won't reach us anymore.
mVideoFrameContainer->ForgetElement();
mVideoFrameContainer = nullptr;
}
+
+ if (IsVideo()) {
+ // Update the screen wakelock in case mHasVideo changed
+ NotifyOwnerDocumentActivityChanged();
+ }
}
void HTMLMediaElement::FirstFrameLoaded()
{
NS_ASSERTION(!mSuspendedAfterFirstFrame, "Should not have already suspended");
ChangeDelayLoadStatus(false);
+ UpdateReadyStateForData(NEXT_FRAME_UNAVAILABLE);
if (mDecoder && mAllowSuspendAfterFirstFrame && mPaused &&
!HasAttr(kNameSpaceID_None, nsGkAtoms::autoplay) &&
mPreloadAction == HTMLMediaElement::PRELOAD_METADATA) {
mSuspendedAfterFirstFrame = true;
mDecoder->Suspend();
}
}
@@ -3080,20 +3157,28 @@ void HTMLMediaElement::DownloadStalled()
bool HTMLMediaElement::ShouldCheckAllowOrigin()
{
return mCORSMode != CORS_NONE;
}
void HTMLMediaElement::UpdateReadyStateForData(MediaDecoderOwner::NextFrameStatus aNextFrame)
{
+ mLastNextFrameStatus = aNextFrame;
+
if (mReadyState < nsIDOMHTMLMediaElement::HAVE_METADATA) {
// aNextFrame might have a next frame because the decoder can advance
// on its own thread before MetadataLoaded gets a chance to run.
// The arrival of more data can't change us out of this readyState.
+
+ return;
+ }
+
+ if (!mHasAudio && !mHasVideo) {
+ // No tracks available yet, don't advance from HAVE_METADATA
return;
}
if (aNextFrame == MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_SEEKING) {
ChangeReadyState(nsIDOMHTMLMediaElement::HAVE_METADATA);
return;
}
@@ -3106,16 +3191,24 @@ void HTMLMediaElement::UpdateReadyStateF
// this transition if the decoder is in ended state; the readyState
// should remain at HAVE_CURRENT_DATA in this case.
// Note that this state transition includes the case where we finished
// downloaded the whole data stream.
ChangeReadyState(nsIDOMHTMLMediaElement::HAVE_ENOUGH_DATA);
return;
}
+ if (mReadyState < nsIDOMHTMLMediaElement::HAVE_CURRENT_DATA && mHasVideo) {
+ VideoFrameContainer* container = GetVideoFrameContainer();
+ if (container && mMediaSize == nsIntSize(-1,-1)) {
+ // No frame has been set yet. Don't advance.
+ return;
+ }
+ }
+
if (aNextFrame != MediaDecoderOwner::NEXT_FRAME_AVAILABLE) {
ChangeReadyState(nsIDOMHTMLMediaElement::HAVE_CURRENT_DATA);
if (!mWaitingFired && aNextFrame == MediaDecoderOwner::NEXT_FRAME_UNAVAILABLE_BUFFERING) {
FireTimeUpdate(false);
DispatchAsyncEvent(NS_LITERAL_STRING("waiting"));
mWaitingFired = true;
}
return;
@@ -3246,24 +3339,25 @@ void HTMLMediaElement::ChangeNetworkStat
} else if (mNetworkState == nsIDOMHTMLMediaElement::NETWORK_IDLE && !mError) {
// Fire 'suspend' event when entering NETWORK_IDLE and no error presented.
DispatchAsyncEvent(NS_LITERAL_STRING("suspend"));
}
}
bool HTMLMediaElement::CanActivateAutoplay()
{
- // For stream inputs, we activate autoplay on HAVE_CURRENT_DATA because
+ // For stream inputs, we activate autoplay on HAVE_METADATA because
// this element itself might be blocking the stream from making progress by
// being paused.
return !mPausedForInactiveDocumentOrChannel &&
mAutoplaying &&
mPaused &&
((mDecoder && mReadyState >= nsIDOMHTMLMediaElement::HAVE_ENOUGH_DATA) ||
- (mSrcStream && mReadyState >= nsIDOMHTMLMediaElement::HAVE_CURRENT_DATA)) &&
+ (mSrcStream && mReadyState >= nsIDOMHTMLMediaElement::HAVE_METADATA)) &&
+ (mHasAudio || mHasVideo) &&
HasAttr(kNameSpaceID_None, nsGkAtoms::autoplay) &&
mAutoplayEnabled &&
!IsEditable();
}
void HTMLMediaElement::CheckAutoplayDataReady()
{
if (CanActivateAutoplay()) {
@@ -3282,34 +3376,24 @@ void HTMLMediaElement::CheckAutoplayData
GetSrcMediaStream()->ChangeExplicitBlockerCount(-1);
}
DispatchAsyncEvent(NS_LITERAL_STRING("play"));
}
}
VideoFrameContainer* HTMLMediaElement::GetVideoFrameContainer()
{
- // If we have loaded the metadata, and the size of the video is still
- // (-1, -1), the media has no video. Don't go a create a video frame
- // container.
- if (mReadyState >= nsIDOMHTMLMediaElement::HAVE_METADATA &&
- mMediaSize == nsIntSize(-1, -1)) {
- return nullptr;
- }
+ if (mVideoFrameContainer)
+ return mVideoFrameContainer;
// Only video frames need an image container.
if (!IsVideo()) {
return nullptr;
}
- mHasVideo = true;
-
- if (mVideoFrameContainer)
- return mVideoFrameContainer;
-
mVideoFrameContainer =
new VideoFrameContainer(this, LayerManager::CreateAsynchronousImageContainer());
return mVideoFrameContainer;
}
nsresult HTMLMediaElement::DispatchEvent(const nsAString& aName)
{
@@ -3412,16 +3496,17 @@ void HTMLMediaElement::NotifyDecoderPrin
mMediaKeys->Shutdown();
}
#endif
}
void HTMLMediaElement::UpdateMediaSize(nsIntSize size)
{
mMediaSize = size;
+ UpdateReadyStateForData(mLastNextFrameStatus);
}
void HTMLMediaElement::SuspendOrResumeElement(bool aPauseElement, bool aSuspendEvents)
{
if (aPauseElement != mPausedForInactiveDocumentOrChannel) {
mPausedForInactiveDocumentOrChannel = aPauseElement;
if (aPauseElement) {
#ifdef MOZ_EME
--- a/content/media/DOMMediaStream.cpp
+++ b/content/media/DOMMediaStream.cpp
@@ -10,16 +10,17 @@
#include "mozilla/dom/AudioNode.h"
#include "mozilla/dom/AudioTrack.h"
#include "mozilla/dom/AudioTrackList.h"
#include "mozilla/dom/VideoTrack.h"
#include "mozilla/dom/VideoTrackList.h"
#include "MediaStreamGraph.h"
#include "AudioStreamTrack.h"
#include "VideoStreamTrack.h"
+#include "MediaEngine.h"
using namespace mozilla;
using namespace mozilla::dom;
NS_IMPL_CYCLE_COLLECTION_INHERITED(DOMMediaStream,
DOMEventTargetHelper,
mWindow,
mTracks,
@@ -68,18 +69,22 @@ public:
DOMMediaStream* stream = mListener->GetStream();
if (!stream) {
return NS_OK;
}
nsRefPtr<MediaStreamTrack> track;
if (mEvents & MediaStreamListener::TRACK_EVENT_CREATED) {
- track = stream->CreateDOMTrack(mID, mType);
- stream->NotifyMediaStreamTrackCreated(track);
+ track = stream->BindDOMTrack(mID, mType);
+ if (!track) {
+ stream->CreateDOMTrack(mID, mType);
+ track = stream->BindDOMTrack(mID, mType);
+ stream->NotifyMediaStreamTrackCreated(track);
+ }
} else {
track = stream->GetDOMTrackFor(mID);
}
if (mEvents & MediaStreamListener::TRACK_EVENT_ENDED) {
if (track) {
track->NotifyEnded();
stream->NotifyMediaStreamTrackEnded(track);
} else {
@@ -297,16 +302,28 @@ DOMMediaStream::AddPrincipalChangeObserv
}
bool
DOMMediaStream::RemovePrincipalChangeObserver(PrincipalChangeObserver* aObserver)
{
return mPrincipalChangeObservers.RemoveElement(aObserver);
}
+void
+DOMMediaStream::SetHintContents(TrackTypeHints aHintContents)
+{
+ mHintContents = aHintContents;
+ if (aHintContents & HINT_CONTENTS_AUDIO) {
+ CreateDOMTrack(kAudioTrack, MediaSegment::AUDIO);
+ }
+ if (aHintContents & HINT_CONTENTS_VIDEO) {
+ CreateDOMTrack(kVideoTrack, MediaSegment::VIDEO);
+ }
+}
+
MediaStreamTrack*
DOMMediaStream::CreateDOMTrack(TrackID aTrackID, MediaSegment::Type aType)
{
MediaStreamTrack* track;
switch (aType) {
case MediaSegment::AUDIO:
track = new AudioStreamTrack(this, aTrackID);
mTrackTypesAvailable |= HINT_CONTENTS_AUDIO;
@@ -315,18 +332,52 @@ DOMMediaStream::CreateDOMTrack(TrackID a
track = new VideoStreamTrack(this, aTrackID);
mTrackTypesAvailable |= HINT_CONTENTS_VIDEO;
break;
default:
MOZ_CRASH("Unhandled track type");
}
mTracks.AppendElement(track);
- CheckTracksAvailable();
+ return track;
+}
+MediaStreamTrack*
+DOMMediaStream::BindDOMTrack(TrackID aTrackID, MediaSegment::Type aType)
+{
+ MediaStreamTrack* track = nullptr;
+ switch (aType) {
+ case MediaSegment::AUDIO: {
+ for (size_t i = 0; i < mTracks.Length(); ++i) {
+ track = mTracks[i]->AsAudioStreamTrack();
+ if (track) {
+ track->BindTrackID(aTrackID);
+ MOZ_ASSERT(mTrackTypesAvailable & HINT_CONTENTS_AUDIO);
+ break;
+ }
+ }
+ break;
+ }
+ case MediaSegment::VIDEO: {
+ for (size_t i = 0; i < mTracks.Length(); ++i) {
+ track = mTracks[i]->AsVideoStreamTrack();
+ if (track) {
+ track->BindTrackID(aTrackID);
+ MOZ_ASSERT(mTrackTypesAvailable & HINT_CONTENTS_VIDEO);
+ break;
+ }
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Unhandled track type");
+ }
+ if (track) {
+ CheckTracksAvailable();
+ }
return track;
}
MediaStreamTrack*
DOMMediaStream::GetDOMTrackFor(TrackID aTrackID)
{
for (uint32_t i = 0; i < mTracks.Length(); ++i) {
MediaStreamTrack* t = mTracks[i];
--- a/content/media/DOMMediaStream.h
+++ b/content/media/DOMMediaStream.h
@@ -170,17 +170,17 @@ public:
// Indicate what track types we eventually expect to add to this stream
enum {
HINT_CONTENTS_AUDIO = 1 << 0,
HINT_CONTENTS_VIDEO = 1 << 1,
HINT_CONTENTS_UNKNOWN = 1 << 2
};
TrackTypeHints GetHintContents() const { return mHintContents; }
- void SetHintContents(TrackTypeHints aHintContents) { mHintContents = aHintContents; }
+ void SetHintContents(TrackTypeHints aHintContents);
TrackTypeHints GetTrackTypesAvailable() const { return mTrackTypesAvailable; }
/**
* Create an nsDOMMediaStream whose underlying stream is a SourceMediaStream.
*/
static already_AddRefed<DOMMediaStream>
CreateSourceStream(nsIDOMWindow* aWindow, TrackTypeHints aHintContents);
@@ -192,17 +192,18 @@ public:
CreateTrackUnionStream(nsIDOMWindow* aWindow, TrackTypeHints aHintContents = 0);
void SetLogicalStreamStartTime(StreamTime aTime)
{
mLogicalStreamStartTime = aTime;
}
// Notifications from StreamListener.
- // CreateDOMTrack should only be called when it's safe to run script.
+ // BindDOMTrack should only be called when it's safe to run script.
+ MediaStreamTrack* BindDOMTrack(TrackID aTrackID, MediaSegment::Type aType);
MediaStreamTrack* CreateDOMTrack(TrackID aTrackID, MediaSegment::Type aType);
MediaStreamTrack* GetDOMTrackFor(TrackID aTrackID);
class OnTracksAvailableCallback {
public:
explicit OnTracksAvailableCallback(uint8_t aExpectedTracks = 0)
: mExpectedTracks(aExpectedTracks) {}
virtual ~OnTracksAvailableCallback() {}
--- a/content/media/MediaStreamTrack.h
+++ b/content/media/MediaStreamTrack.h
@@ -34,16 +34,17 @@ public:
NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(MediaStreamTrack,
DOMEventTargetHelper)
DOMMediaStream* GetParentObject() const { return mStream; }
virtual JSObject* WrapObject(JSContext* aCx) MOZ_OVERRIDE = 0;
DOMMediaStream* GetStream() const { return mStream; }
TrackID GetTrackID() const { return mTrackID; }
+ void BindTrackID(TrackID aTrackID) { mTrackID = aTrackID; }
virtual AudioStreamTrack* AsAudioStreamTrack() { return nullptr; }
virtual VideoStreamTrack* AsVideoStreamTrack() { return nullptr; }
// WebIDL
virtual void GetKind(nsAString& aKind) = 0;
void GetId(nsAString& aID);
void GetLabel(nsAString& aLabel) { aLabel.Truncate(); }
bool Enabled() { return mEnabled; }
copy from content/media/TrackUnionStream.h
copy to content/media/TrackUnionStream.cpp
--- a/content/media/TrackUnionStream.h
+++ b/content/media/TrackUnionStream.cpp
@@ -1,50 +1,77 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
-#ifndef MOZILLA_TRACKUNIONSTREAM_H_
-#define MOZILLA_TRACKUNIONSTREAM_H_
+#include "MediaStreamGraphImpl.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/unused.h"
-#include "MediaStreamGraph.h"
+#include "AudioSegment.h"
+#include "VideoSegment.h"
+#include "nsContentUtils.h"
+#include "nsIAppShell.h"
+#include "nsIObserver.h"
+#include "nsPrintfCString.h"
+#include "nsServiceManagerUtils.h"
+#include "nsWidgetsCID.h"
+#include "prerror.h"
+#include "prlog.h"
+#include "mozilla/Attributes.h"
+#include "TrackUnionStream.h"
+#include "ImageContainer.h"
+#include "AudioChannelService.h"
+#include "AudioNodeEngine.h"
+#include "AudioNodeStream.h"
+#include "AudioNodeExternalInputStream.h"
#include <algorithm>
+#include "DOMMediaStream.h"
+#include "GeckoProfiler.h"
+#include "mozilla/unused.h"
+#ifdef MOZ_WEBRTC
+#include "AudioOutputObserver.h"
+#endif
+
+using namespace mozilla::layers;
+using namespace mozilla::dom;
+using namespace mozilla::gfx;
namespace mozilla {
#ifdef PR_LOGGING
-#define STREAM_LOG(type, msg) PR_LOG(gMediaStreamGraphLog, type, msg)
+PRLogModuleInfo* gTrackUnionStreamLog;
+#define STREAM_LOG(type, msg) PR_LOG(gTrackUnionStreamLog, type, msg)
#else
#define STREAM_LOG(type, msg)
#endif
-/**
- * See MediaStreamGraph::CreateTrackUnionStream.
- * This file is only included by MediaStreamGraph.cpp so it's OK to put the
- * entire implementation in this header file.
- */
-class TrackUnionStream : public ProcessedMediaStream {
-public:
- explicit TrackUnionStream(DOMMediaStream* aWrapper) :
- ProcessedMediaStream(aWrapper),
- mFilterCallback(nullptr)
- {}
+TrackUnionStream::TrackUnionStream(DOMMediaStream* aWrapper) :
+ ProcessedMediaStream(aWrapper),
+ mFilterCallback(nullptr)
+{
+#ifdef PR_LOGGING
+ if (!gTrackUnionStreamLog) {
+ gTrackUnionStreamLog = PR_NewLogModule("TrackUnionStream");
+ }
+#endif
+}
- virtual void RemoveInput(MediaInputPort* aPort) MOZ_OVERRIDE
+ void TrackUnionStream::RemoveInput(MediaInputPort* aPort)
{
for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
if (mTrackMap[i].mInputPort == aPort) {
EndTrack(i);
mTrackMap.RemoveElementAt(i);
}
}
ProcessedMediaStream::RemoveInput(aPort);
}
- virtual void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) MOZ_OVERRIDE
+ void TrackUnionStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
{
if (IsFinishedOnGraphThread()) {
return;
}
nsAutoTArray<bool,8> mappedTracksFinished;
nsAutoTArray<bool,8> mappedTracksWithMatchingInputTracks;
for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
mappedTracksFinished.AppendElement(true);
@@ -112,60 +139,35 @@ public:
if (allHaveCurrentData) {
// We can make progress if we're not blocked
mHasCurrentData = true;
}
}
// Consumers may specify a filtering callback to apply to every input track.
// Returns true to allow the track to act as an input; false to reject it entirely.
- typedef bool (*TrackIDFilterCallback)(StreamBuffer::Track*);
- void SetTrackIDFilter(TrackIDFilterCallback aCallback) {
+
+ void TrackUnionStream::SetTrackIDFilter(TrackIDFilterCallback aCallback)
+ {
mFilterCallback = aCallback;
}
// Forward SetTrackEnabled(output_track_id, enabled) to the Source MediaStream,
// translating the output track ID into the correct ID in the source.
- virtual void ForwardTrackEnabled(TrackID aOutputID, bool aEnabled) MOZ_OVERRIDE {
+ void TrackUnionStream::ForwardTrackEnabled(TrackID aOutputID, bool aEnabled)
+ {
for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
if (mTrackMap[i].mOutputTrackID == aOutputID) {
mTrackMap[i].mInputPort->GetSource()->
SetTrackEnabled(mTrackMap[i].mInputTrackID, aEnabled);
}
}
}
-protected:
- TrackIDFilterCallback mFilterCallback;
-
- // Only non-ended tracks are allowed to persist in this map.
- struct TrackMapEntry {
- // mEndOfConsumedInputTicks is the end of the input ticks that we've consumed.
- // 0 if we haven't consumed any yet.
- TrackTicks mEndOfConsumedInputTicks;
- // mEndOfLastInputIntervalInInputStream is the timestamp for the end of the
- // previous interval which was unblocked for both the input and output
- // stream, in the input stream's timeline, or -1 if there wasn't one.
- StreamTime mEndOfLastInputIntervalInInputStream;
- // mEndOfLastInputIntervalInOutputStream is the timestamp for the end of the
- // previous interval which was unblocked for both the input and output
- // stream, in the output stream's timeline, or -1 if there wasn't one.
- StreamTime mEndOfLastInputIntervalInOutputStream;
- MediaInputPort* mInputPort;
- // We keep track IDs instead of track pointers because
- // tracks can be removed without us being notified (e.g.
- // when a finished track is forgotten.) When we need a Track*,
- // we call StreamBuffer::FindTrack, which will return null if
- // the track has been deleted.
- TrackID mInputTrackID;
- TrackID mOutputTrackID;
- nsAutoPtr<MediaSegment> mSegment;
- };
-
- uint32_t AddTrack(MediaInputPort* aPort, StreamBuffer::Track* aTrack,
+ uint32_t TrackUnionStream::AddTrack(MediaInputPort* aPort, StreamBuffer::Track* aTrack,
GraphTime aFrom)
{
// Use the ID of the source track if it's not already assigned to a track,
// otherwise allocate a new unique ID.
TrackID id = aTrack->GetID();
TrackID maxTrackID = 0;
for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
TrackID outID = mTrackMap[i].mOutputTrackID;
@@ -208,34 +210,36 @@ protected:
map->mEndOfLastInputIntervalInInputStream = -1;
map->mEndOfLastInputIntervalInOutputStream = -1;
map->mInputPort = aPort;
map->mInputTrackID = aTrack->GetID();
map->mOutputTrackID = track->GetID();
map->mSegment = aTrack->GetSegment()->CreateEmptyClone();
return mTrackMap.Length() - 1;
}
- void EndTrack(uint32_t aIndex)
+
+ void TrackUnionStream::EndTrack(uint32_t aIndex)
{
StreamBuffer::Track* outputTrack = mBuffer.FindTrack(mTrackMap[aIndex].mOutputTrackID);
if (!outputTrack || outputTrack->IsEnded())
return;
for (uint32_t j = 0; j < mListeners.Length(); ++j) {
MediaStreamListener* l = mListeners[j];
TrackTicks offset = outputTrack->GetSegment()->GetDuration();
nsAutoPtr<MediaSegment> segment;
segment = outputTrack->GetSegment()->CreateEmptyClone();
l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(),
outputTrack->GetRate(), offset,
MediaStreamListener::TRACK_EVENT_ENDED,
*segment);
}
outputTrack->SetEnded();
}
- void CopyTrackData(StreamBuffer::Track* aInputTrack,
+
+ void TrackUnionStream::CopyTrackData(StreamBuffer::Track* aInputTrack,
uint32_t aMapIndex, GraphTime aFrom, GraphTime aTo,
bool* aOutputTrackFinished)
{
TrackMapEntry* map = &mTrackMap[aMapIndex];
StreamBuffer::Track* outputTrack = mBuffer.FindTrack(map->mOutputTrackID);
MOZ_ASSERT(outputTrack && !outputTrack->IsEnded(), "Can't copy to ended track");
TrackRate rate = outputTrack->GetRate();
@@ -360,15 +364,9 @@ protected:
MediaStreamListener* l = mListeners[j];
l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(),
outputTrack->GetRate(), startTicks, 0,
*segment);
}
outputTrack->GetSegment()->AppendFrom(segment);
}
}
-
- nsTArray<TrackMapEntry> mTrackMap;
-};
-
}
-
-#endif /* MOZILLA_MEDIASTREAMGRAPH_H_ */
--- a/content/media/TrackUnionStream.h
+++ b/content/media/TrackUnionStream.h
@@ -6,137 +6,35 @@
#ifndef MOZILLA_TRACKUNIONSTREAM_H_
#define MOZILLA_TRACKUNIONSTREAM_H_
#include "MediaStreamGraph.h"
#include <algorithm>
namespace mozilla {
-#ifdef PR_LOGGING
-#define STREAM_LOG(type, msg) PR_LOG(gMediaStreamGraphLog, type, msg)
-#else
-#define STREAM_LOG(type, msg)
-#endif
-
/**
* See MediaStreamGraph::CreateTrackUnionStream.
- * This file is only included by MediaStreamGraph.cpp so it's OK to put the
- * entire implementation in this header file.
*/
class TrackUnionStream : public ProcessedMediaStream {
public:
- explicit TrackUnionStream(DOMMediaStream* aWrapper) :
- ProcessedMediaStream(aWrapper),
- mFilterCallback(nullptr)
- {}
+ explicit TrackUnionStream(DOMMediaStream* aWrapper);
- virtual void RemoveInput(MediaInputPort* aPort) MOZ_OVERRIDE
- {
- for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
- if (mTrackMap[i].mInputPort == aPort) {
- EndTrack(i);
- mTrackMap.RemoveElementAt(i);
- }
- }
- ProcessedMediaStream::RemoveInput(aPort);
- }
- virtual void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) MOZ_OVERRIDE
- {
- if (IsFinishedOnGraphThread()) {
- return;
- }
- nsAutoTArray<bool,8> mappedTracksFinished;
- nsAutoTArray<bool,8> mappedTracksWithMatchingInputTracks;
- for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
- mappedTracksFinished.AppendElement(true);
- mappedTracksWithMatchingInputTracks.AppendElement(false);
- }
- bool allFinished = true;
- bool allHaveCurrentData = true;
- for (uint32_t i = 0; i < mInputs.Length(); ++i) {
- MediaStream* stream = mInputs[i]->GetSource();
- if (!stream->IsFinishedOnGraphThread()) {
- // XXX we really should check whether 'stream' has finished within time aTo,
- // not just that it's finishing when all its queued data eventually runs
- // out.
- allFinished = false;
- }
- if (!stream->HasCurrentData()) {
- allHaveCurrentData = false;
- }
- for (StreamBuffer::TrackIter tracks(stream->GetStreamBuffer());
- !tracks.IsEnded(); tracks.Next()) {
- bool found = false;
- for (uint32_t j = 0; j < mTrackMap.Length(); ++j) {
- TrackMapEntry* map = &mTrackMap[j];
- if (map->mInputPort == mInputs[i] && map->mInputTrackID == tracks->GetID()) {
- bool trackFinished;
- StreamBuffer::Track* outputTrack = mBuffer.FindTrack(map->mOutputTrackID);
- if (!outputTrack || outputTrack->IsEnded()) {
- trackFinished = true;
- } else {
- CopyTrackData(tracks.get(), j, aFrom, aTo, &trackFinished);
- }
- mappedTracksFinished[j] = trackFinished;
- mappedTracksWithMatchingInputTracks[j] = true;
- found = true;
- break;
- }
- }
- if (!found && (!mFilterCallback || mFilterCallback(tracks.get()))) {
- bool trackFinished = false;
- uint32_t mapIndex = AddTrack(mInputs[i], tracks.get(), aFrom);
- CopyTrackData(tracks.get(), mapIndex, aFrom, aTo, &trackFinished);
- mappedTracksFinished.AppendElement(trackFinished);
- mappedTracksWithMatchingInputTracks.AppendElement(true);
- }
- }
- }
- for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
- if (mappedTracksFinished[i]) {
- EndTrack(i);
- } else {
- allFinished = false;
- }
- if (!mappedTracksWithMatchingInputTracks[i]) {
- mTrackMap.RemoveElementAt(i);
- }
- }
- if (allFinished && mAutofinish && (aFlags & ALLOW_FINISH)) {
- // All streams have finished and won't add any more tracks, and
- // all our tracks have actually finished and been removed from our map,
- // so we're finished now.
- FinishOnGraphThread();
- } else {
- mBuffer.AdvanceKnownTracksTime(GraphTimeToStreamTime(aTo));
- }
- if (allHaveCurrentData) {
- // We can make progress if we're not blocked
- mHasCurrentData = true;
- }
- }
+ virtual void RemoveInput(MediaInputPort* aPort) MOZ_OVERRIDE;
+ virtual void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) MOZ_OVERRIDE;
// Consumers may specify a filtering callback to apply to every input track.
// Returns true to allow the track to act as an input; false to reject it entirely.
typedef bool (*TrackIDFilterCallback)(StreamBuffer::Track*);
- void SetTrackIDFilter(TrackIDFilterCallback aCallback) {
- mFilterCallback = aCallback;
- }
+
+ void SetTrackIDFilter(TrackIDFilterCallback aCallback);
// Forward SetTrackEnabled(output_track_id, enabled) to the Source MediaStream,
// translating the output track ID into the correct ID in the source.
- virtual void ForwardTrackEnabled(TrackID aOutputID, bool aEnabled) MOZ_OVERRIDE {
- for (int32_t i = mTrackMap.Length() - 1; i >= 0; --i) {
- if (mTrackMap[i].mOutputTrackID == aOutputID) {
- mTrackMap[i].mInputPort->GetSource()->
- SetTrackEnabled(mTrackMap[i].mInputTrackID, aEnabled);
- }
- }
- }
+ virtual void ForwardTrackEnabled(TrackID aOutputID, bool aEnabled) MOZ_OVERRIDE;
protected:
TrackIDFilterCallback mFilterCallback;
// Only non-ended tracks are allowed to persist in this map.
struct TrackMapEntry {
// mEndOfConsumedInputTicks is the end of the input ticks that we've consumed.
// 0 if we haven't consumed any yet.
@@ -156,219 +54,20 @@ protected:
// we call StreamBuffer::FindTrack, which will return null if
// the track has been deleted.
TrackID mInputTrackID;
TrackID mOutputTrackID;
nsAutoPtr<MediaSegment> mSegment;
};
uint32_t AddTrack(MediaInputPort* aPort, StreamBuffer::Track* aTrack,
- GraphTime aFrom)
- {
- // Use the ID of the source track if it's not already assigned to a track,
- // otherwise allocate a new unique ID.
- TrackID id = aTrack->GetID();
- TrackID maxTrackID = 0;
- for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
- TrackID outID = mTrackMap[i].mOutputTrackID;
- maxTrackID = std::max(maxTrackID, outID);
- }
- // Note: we might have removed it here, but it might still be in the
- // StreamBuffer if the TrackUnionStream sees its input stream flip from
- // A to B, where both A and B have a track with the same ID
- while (1) {
- // search until we find one not in use here, and not in mBuffer
- if (!mBuffer.FindTrack(id)) {
- break;
- }
- id = ++maxTrackID;
- }
-
- TrackRate rate = aTrack->GetRate();
- // Round up the track start time so the track, if anything, starts a
- // little later than the true time. This means we'll have enough
- // samples in our input stream to go just beyond the destination time.
- TrackTicks outputStart = TimeToTicksRoundUp(rate, GraphTimeToStreamTime(aFrom));
-
- nsAutoPtr<MediaSegment> segment;
- segment = aTrack->GetSegment()->CreateEmptyClone();
- for (uint32_t j = 0; j < mListeners.Length(); ++j) {
- MediaStreamListener* l = mListeners[j];
- l->NotifyQueuedTrackChanges(Graph(), id, rate, outputStart,
- MediaStreamListener::TRACK_EVENT_CREATED,
- *segment);
- }
- segment->AppendNullData(outputStart);
- StreamBuffer::Track* track =
- &mBuffer.AddTrack(id, rate, outputStart, segment.forget());
- STREAM_LOG(PR_LOG_DEBUG, ("TrackUnionStream %p adding track %d for input stream %p track %d, start ticks %lld",
- this, id, aPort->GetSource(), aTrack->GetID(),
- (long long)outputStart));
-
- TrackMapEntry* map = mTrackMap.AppendElement();
- map->mEndOfConsumedInputTicks = 0;
- map->mEndOfLastInputIntervalInInputStream = -1;
- map->mEndOfLastInputIntervalInOutputStream = -1;
- map->mInputPort = aPort;
- map->mInputTrackID = aTrack->GetID();
- map->mOutputTrackID = track->GetID();
- map->mSegment = aTrack->GetSegment()->CreateEmptyClone();
- return mTrackMap.Length() - 1;
- }
- void EndTrack(uint32_t aIndex)
- {
- StreamBuffer::Track* outputTrack = mBuffer.FindTrack(mTrackMap[aIndex].mOutputTrackID);
- if (!outputTrack || outputTrack->IsEnded())
- return;
- for (uint32_t j = 0; j < mListeners.Length(); ++j) {
- MediaStreamListener* l = mListeners[j];
- TrackTicks offset = outputTrack->GetSegment()->GetDuration();
- nsAutoPtr<MediaSegment> segment;
- segment = outputTrack->GetSegment()->CreateEmptyClone();
- l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(),
- outputTrack->GetRate(), offset,
- MediaStreamListener::TRACK_EVENT_ENDED,
- *segment);
- }
- outputTrack->SetEnded();
- }
+ GraphTime aFrom);
+ void EndTrack(uint32_t aIndex);
void CopyTrackData(StreamBuffer::Track* aInputTrack,
uint32_t aMapIndex, GraphTime aFrom, GraphTime aTo,
- bool* aOutputTrackFinished)
- {
- TrackMapEntry* map = &mTrackMap[aMapIndex];
- StreamBuffer::Track* outputTrack = mBuffer.FindTrack(map->mOutputTrackID);
- MOZ_ASSERT(outputTrack && !outputTrack->IsEnded(), "Can't copy to ended track");
-
- TrackRate rate = outputTrack->GetRate();
- MediaSegment* segment = map->mSegment;
- MediaStream* source = map->mInputPort->GetSource();
-
- GraphTime next;
- *aOutputTrackFinished = false;
- for (GraphTime t = aFrom; t < aTo; t = next) {
- MediaInputPort::InputInterval interval = map->mInputPort->GetNextInputInterval(t);
- interval.mEnd = std::min(interval.mEnd, aTo);
- StreamTime inputEnd = source->GraphTimeToStreamTime(interval.mEnd);
- TrackTicks inputTrackEndPoint = aInputTrack->GetEnd();
-
- if (aInputTrack->IsEnded() &&
- aInputTrack->GetEndTimeRoundDown() <= inputEnd) {
- *aOutputTrackFinished = true;
- }
-
- if (interval.mStart >= interval.mEnd)
- break;
- next = interval.mEnd;
-
- // Ticks >= startTicks and < endTicks are in the interval
- StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
- TrackTicks startTicks = outputTrack->GetEnd();
- StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
- MOZ_ASSERT(startTicks == TimeToTicksRoundUp(rate, outputStart), "Samples missing");
- TrackTicks endTicks = TimeToTicksRoundUp(rate, outputEnd);
- TrackTicks ticks = endTicks - startTicks;
- StreamTime inputStart = source->GraphTimeToStreamTime(interval.mStart);
-
- if (interval.mInputIsBlocked) {
- // Maybe the input track ended?
- segment->AppendNullData(ticks);
- STREAM_LOG(PR_LOG_DEBUG+1, ("TrackUnionStream %p appending %lld ticks of null data to track %d",
- this, (long long)ticks, outputTrack->GetID()));
- } else {
- // Figuring out which samples to use from the input stream is tricky
- // because its start time and our start time may differ by a fraction
- // of a tick. Assuming the input track hasn't ended, we have to ensure
- // that 'ticks' samples are gathered, even though a tick boundary may
- // occur between outputStart and outputEnd but not between inputStart
- // and inputEnd.
- // These are the properties we need to ensure:
- // 1) Exactly 'ticks' ticks of output are produced, i.e.
- // inputEndTicks - inputStartTicks = ticks.
- // 2) inputEndTicks <= aInputTrack->GetSegment()->GetDuration().
- // 3) In any sequence of intervals where neither stream is blocked,
- // the content of the input track we use is a contiguous sequence of
- // ticks with no gaps or overlaps.
- if (map->mEndOfLastInputIntervalInInputStream != inputStart ||
- map->mEndOfLastInputIntervalInOutputStream != outputStart) {
- // Start of a new series of intervals where neither stream is blocked.
- map->mEndOfConsumedInputTicks = TimeToTicksRoundDown(rate, inputStart) - 1;
- }
- TrackTicks inputStartTicks = map->mEndOfConsumedInputTicks;
- TrackTicks inputEndTicks = inputStartTicks + ticks;
- map->mEndOfConsumedInputTicks = inputEndTicks;
- map->mEndOfLastInputIntervalInInputStream = inputEnd;
- map->mEndOfLastInputIntervalInOutputStream = outputEnd;
- // Now we prove that the above properties hold:
- // Property #1: trivial by construction.
- // Property #3: trivial by construction. Between every two
- // intervals where both streams are not blocked, the above if condition
- // is false and mEndOfConsumedInputTicks advances exactly to match
- // the ticks that were consumed.
- // Property #2:
- // Let originalOutputStart be the value of outputStart and originalInputStart
- // be the value of inputStart when the body of the "if" block was last
- // executed.
- // Let allTicks be the sum of the values of 'ticks' computed since then.
- // The interval [originalInputStart/rate, inputEnd/rate) is the
- // same length as the interval [originalOutputStart/rate, outputEnd/rate),
- // so the latter interval can have at most one more integer in it. Thus
- // TimeToTicksRoundUp(rate, outputEnd) - TimeToTicksRoundUp(rate, originalOutputStart)
- // <= TimeToTicksRoundDown(rate, inputEnd) - TimeToTicksRoundDown(rate, originalInputStart) + 1
- // Then
- // inputEndTicks = TimeToTicksRoundDown(rate, originalInputStart) - 1 + allTicks
- // = TimeToTicksRoundDown(rate, originalInputStart) - 1 + TimeToTicksRoundUp(rate, outputEnd) - TimeToTicksRoundUp(rate, originalOutputStart)
- // <= TimeToTicksRoundDown(rate, originalInputStart) - 1 + TimeToTicksRoundDown(rate, inputEnd) - TimeToTicksRoundDown(rate, originalInputStart) + 1
- // = TimeToTicksRoundDown(rate, inputEnd)
- // <= inputEnd/rate
- // (now using the fact that inputEnd <= track->GetEndTimeRoundDown() for a non-ended track)
- // <= TicksToTimeRoundDown(rate, aInputTrack->GetSegment()->GetDuration())/rate
- // <= rate*aInputTrack->GetSegment()->GetDuration()/rate
- // = aInputTrack->GetSegment()->GetDuration()
- // as required.
- // Note that while the above proof appears to be generally right, if we are suffering
- // from a lot of underrun, then in rare cases inputStartTicks >> inputTrackEndPoint.
- // As such, we still need to verify the sanity of property #2 and use null data as
- // appropriate.
-
- if (inputStartTicks < 0) {
- // Data before the start of the track is just null.
- // We have to add a small amount of delay to ensure that there is
- // always a sample available if we see an interval that contains a
- // tick boundary on the output stream's timeline but does not contain
- // a tick boundary on the input stream's timeline. 1 tick delay is
- // necessary and sufficient.
- segment->AppendNullData(-inputStartTicks);
- inputStartTicks = 0;
- }
- if (inputEndTicks > inputStartTicks) {
- if (inputEndTicks <= inputTrackEndPoint) {
- segment->AppendSlice(*aInputTrack->GetSegment(), inputStartTicks, inputEndTicks);
- STREAM_LOG(PR_LOG_DEBUG+1, ("TrackUnionStream %p appending %lld ticks of input data to track %d",
- this, ticks, outputTrack->GetID()));
- } else {
- if (inputStartTicks < inputTrackEndPoint) {
- segment->AppendSlice(*aInputTrack->GetSegment(), inputStartTicks, inputTrackEndPoint);
- ticks -= inputTrackEndPoint - inputStartTicks;
- }
- segment->AppendNullData(ticks);
- STREAM_LOG(PR_LOG_DEBUG+1, ("TrackUnionStream %p appending %lld ticks of input data and %lld of null data to track %d",
- this, inputTrackEndPoint - inputStartTicks, ticks, outputTrack->GetID()));
- }
- }
- }
- ApplyTrackDisabling(outputTrack->GetID(), segment);
- for (uint32_t j = 0; j < mListeners.Length(); ++j) {
- MediaStreamListener* l = mListeners[j];
- l->NotifyQueuedTrackChanges(Graph(), outputTrack->GetID(),
- outputTrack->GetRate(), startTicks, 0,
- *segment);
- }
- outputTrack->GetSegment()->AppendFrom(segment);
- }
- }
+ bool* aOutputTrackFinished);
nsTArray<TrackMapEntry> mTrackMap;
};
}
#endif /* MOZILLA_MEDIASTREAMGRAPH_H_ */
--- a/content/media/mediasource/TrackBuffer.cpp
+++ b/content/media/mediasource/TrackBuffer.cpp
@@ -70,24 +70,28 @@ public:
private:
nsTArray<nsRefPtr<SourceBufferDecoder>> mDecoders;
};
void
TrackBuffer::Shutdown()
{
+ // End the SourceBufferResource associated with mCurrentDecoder, which will
+ // unblock any decoder initialization in ReadMetadata().
+ DiscardDecoder();
+
+ // Finish any decoder initialization, which may add to mInitializedDecoders.
// Shutdown waits for any pending events, which may require the monitor,
// so we must not hold the monitor during this call.
mParentDecoder->GetReentrantMonitor().AssertNotCurrentThreadIn();
mTaskQueue->Shutdown();
mTaskQueue = nullptr;
ReentrantMonitorAutoEnter mon(mParentDecoder->GetReentrantMonitor());
- DiscardDecoder();
for (uint32_t i = 0; i < mDecoders.Length(); ++i) {
mDecoders[i]->GetReader()->Shutdown();
}
mInitializedDecoders.Clear();
NS_DispatchToMainThread(new ReleaseDecoderTask(mDecoders));
MOZ_ASSERT(mDecoders.IsEmpty());
mParentDecoder = nullptr;
}
--- a/content/media/moz.build
+++ b/content/media/moz.build
@@ -159,16 +159,17 @@ UNIFIED_SOURCES += [
'RtspMediaResource.cpp',
'SharedThreadPool.cpp',
'StreamBuffer.cpp',
'TextTrack.cpp',
'TextTrackCue.cpp',
'TextTrackCueList.cpp',
'TextTrackList.cpp',
'TextTrackRegion.cpp',
+ 'TrackUnionStream.cpp',
'VideoFrameContainer.cpp',
'VideoPlaybackQuality.cpp',
'VideoSegment.cpp',
'VideoStreamTrack.cpp',
'VideoTrack.cpp',
'VideoTrackList.cpp',
'VideoUtils.cpp',
'WebVTTListener.cpp',
--- a/content/media/test/mochitest.ini
+++ b/content/media/test/mochitest.ini
@@ -314,16 +314,17 @@ skip-if = buildapp == 'mulet' || os == '
[test_bug465498.html]
[test_bug493187.html]
[test_bug495145.html]
[test_bug495300.html]
[test_bug654550.html]
[test_bug686942.html]
[test_bug726904.html]
[test_bug874897.html]
+[test_bug879717.html]
[test_bug883173.html]
[test_bug895091.html]
[test_bug895305.html]
[test_bug919265.html]
[test_bug957847.html]
[test_bug1018933.html]
[test_can_play_type.html]
[test_can_play_type_mpeg.html]
new file mode 100644
--- /dev/null
+++ b/content/media/test/test_bug879717.html
@@ -0,0 +1,65 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test for bug 879717, check that a video element can be drawn into a canvas directly on 'play' event</title>
+ <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+ <script type="text/javascript" src="manifest.js"></script>
+</head>
+<body>
+<video id="v1" autoplay />
+<video id="v2" autoplay />
+<canvas id="c1" />
+<canvas id="c2" />
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+var media = getPlayableVideo(gSmallTests);
+
+var checkDrawImage = function(video, canvas, name) {
+ var exception = null;
+ var exceptionName = "nothing";
+ try {
+ var ctx = canvas.getContext('2d');
+ ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
+ } catch (e) {
+ exception = e;
+ exceptionName = e.name;
+ }
+ ok(exception === null || video.ended,
+ "drawImage shouldn't throw an exception on play of " + name + ", got " + exceptionName);
+};
+
+if (media == null) {
+ todo(false, "No media supported.");
+ SimpleTest.finish();
+} else {
+ v1.src = media.name;
+ v2.mozSrcObject = v1.mozCaptureStream();
+
+ var v1Tested = false;
+ var v2Tested = false;
+
+ v1.addEventListener('play', function() {
+ checkDrawImage(v1, c1, media.name);
+
+ v1Tested = true;
+ if (v2Tested) {
+ SimpleTest.finish();
+ }
+ });
+
+ v2.addEventListener('play', function() {
+ checkDrawImage(v2, c2, "stream of " + media.name);
+
+ v2Tested = true;
+ if (v1Tested) {
+ SimpleTest.finish();
+ }
+ });
+}
+</script>
+</pre>
+</body>
+</html>
--- a/content/media/webrtc/MediaEngine.h
+++ b/content/media/webrtc/MediaEngine.h
@@ -107,22 +107,16 @@ public:
/* Start the device and add the track to the provided SourceMediaStream, with
* the provided TrackID. You may start appending data to the track
* immediately after. */
virtual nsresult Start(SourceMediaStream*, TrackID) = 0;
/* tell the source if there are any direct listeners attached */
virtual void SetDirectListeners(bool) = 0;
- /* Take a snapshot from this source. In the case of video this is a single
- * image, and for audio, it is a snippet lasting aDuration milliseconds. The
- * duration argument is ignored for a MediaEngineVideoSource.
- */
- virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile) = 0;
-
/* Called when the stream wants more data */
virtual void NotifyPull(MediaStreamGraph* aGraph,
SourceMediaStream *aSource,
TrackID aId,
StreamTime aDesiredTime,
TrackTicks &aLastEndTime) = 0;
/* Stop the device and release the corresponding MediaStream */
@@ -173,16 +167,18 @@ public:
return true;
}
}
/* It is an error to call Start() before an Allocate(), and Stop() before
* a Start(). Only Allocate() may be called after a Deallocate(). */
protected:
+ // Only class' own members can be initialized in constructor initializer list.
+ explicit MediaEngineSource(MediaEngineState aState) : mState(aState) {}
MediaEngineState mState;
};
/**
* Video source and friends.
*/
class MediaEnginePrefs {
public:
@@ -226,33 +222,40 @@ private:
}
};
class MediaEngineVideoSource : public MediaEngineSource
{
public:
virtual ~MediaEngineVideoSource() {}
- virtual const MediaSourceType GetMediaSource() {
- return MediaSourceType::Camera;
- }
/* This call reserves but does not start the device. */
virtual nsresult Allocate(const VideoTrackConstraintsN &aConstraints,
const MediaEnginePrefs &aPrefs) = 0;
+protected:
+ explicit MediaEngineVideoSource(MediaEngineState aState)
+ : MediaEngineSource(aState) {}
+ MediaEngineVideoSource()
+ : MediaEngineSource(kReleased) {}
};
/**
* Audio source and friends.
*/
class MediaEngineAudioSource : public MediaEngineSource
{
public:
virtual ~MediaEngineAudioSource() {}
/* This call reserves but does not start the device. */
virtual nsresult Allocate(const AudioTrackConstraintsN &aConstraints,
const MediaEnginePrefs &aPrefs) = 0;
+protected:
+ explicit MediaEngineAudioSource(MediaEngineState aState)
+ : MediaEngineSource(aState) {}
+ MediaEngineAudioSource()
+ : MediaEngineSource(kReleased) {}
};
}
#endif /* MEDIAENGINE_H_ */
copy from content/media/webrtc/MediaEngineWebRTCVideo.cpp
copy to content/media/webrtc/MediaEngineCameraVideoSource.cpp
--- a/content/media/webrtc/MediaEngineWebRTCVideo.cpp
+++ b/content/media/webrtc/MediaEngineCameraVideoSource.cpp
@@ -1,350 +1,62 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
-#include "MediaEngineWebRTC.h"
-#include "Layers.h"
-#include "ImageTypes.h"
-#include "ImageContainer.h"
-#include "mozilla/layers/GrallocTextureClient.h"
-#include "nsMemory.h"
-#include "mtransport/runnable_utils.h"
-#include "MediaTrackConstraints.h"
+#include "MediaEngineCameraVideoSource.h"
-#ifdef MOZ_B2G_CAMERA
-#include "GrallocImages.h"
-#include "libyuv.h"
-#include "mozilla/Hal.h"
-#include "ScreenOrientation.h"
-using namespace mozilla::dom;
-#endif
namespace mozilla {
-using namespace mozilla::gfx;
using dom::ConstrainLongRange;
using dom::ConstrainDoubleRange;
using dom::MediaTrackConstraintSet;
#ifdef PR_LOGGING
extern PRLogModuleInfo* GetMediaManagerLog();
#define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
#define LOGFRAME(msg) PR_LOG(GetMediaManagerLog(), 6, msg)
#else
#define LOG(msg)
#define LOGFRAME(msg)
#endif
-/**
- * Webrtc video source.
- */
-#ifndef MOZ_B2G_CAMERA
-NS_IMPL_ISUPPORTS(MediaEngineWebRTCVideoSource, nsIRunnable)
-#else
-NS_IMPL_QUERY_INTERFACE(MediaEngineWebRTCVideoSource, nsIRunnable)
-NS_IMPL_ADDREF_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
-NS_IMPL_RELEASE_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
-#endif
-
-// ViEExternalRenderer Callback.
-#ifndef MOZ_B2G_CAMERA
-int
-MediaEngineWebRTCVideoSource::FrameSizeChange(
- unsigned int w, unsigned int h, unsigned int streams)
-{
- mWidth = w;
- mHeight = h;
- LOG(("Video FrameSizeChange: %ux%u", w, h));
- return 0;
+/* static */ bool
+MediaEngineCameraVideoSource::IsWithin(int32_t n, const ConstrainLongRange& aRange) {
+ return aRange.mMin <= n && n <= aRange.mMax;
}
-// ViEExternalRenderer Callback. Process every incoming frame here.
-int
-MediaEngineWebRTCVideoSource::DeliverFrame(
- unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time,
- void *handle)
-{
- // mInSnapshotMode can only be set before the camera is turned on and
- // the renderer is started, so this amounts to a 1-shot
- if (mInSnapshotMode) {
- // Set the condition variable to false and notify Snapshot().
- MonitorAutoLock lock(mMonitor);
- mInSnapshotMode = false;
- lock.Notify();
- return 0;
- }
-
- // Check for proper state.
- if (mState != kStarted) {
- LOG(("DeliverFrame: video not started"));
- return 0;
- }
-
- if (mWidth*mHeight + 2*(((mWidth+1)/2)*((mHeight+1)/2)) != size) {
- MOZ_ASSERT(false, "Wrong size frame in DeliverFrame!");
- return 0;
- }
-
- // Create a video frame and append it to the track.
- nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
-
- layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
-
- uint8_t* frame = static_cast<uint8_t*> (buffer);
- const uint8_t lumaBpp = 8;
- const uint8_t chromaBpp = 4;
-
- // Take lots of care to round up!
- layers::PlanarYCbCrData data;
- data.mYChannel = frame;
- data.mYSize = IntSize(mWidth, mHeight);
- data.mYStride = (mWidth * lumaBpp + 7)/ 8;
- data.mCbCrStride = (mWidth * chromaBpp + 7) / 8;
- data.mCbChannel = frame + mHeight * data.mYStride;
- data.mCrChannel = data.mCbChannel + ((mHeight+1)/2) * data.mCbCrStride;
- data.mCbCrSize = IntSize((mWidth+1)/ 2, (mHeight+1)/ 2);
- data.mPicX = 0;
- data.mPicY = 0;
- data.mPicSize = IntSize(mWidth, mHeight);
- data.mStereoMode = StereoMode::MONO;
-
- videoImage->SetData(data);
-
-#ifdef DEBUG
- static uint32_t frame_num = 0;
- LOGFRAME(("frame %d (%dx%d); timestamp %u, render_time %lu", frame_num++,
- mWidth, mHeight, time_stamp, render_time));
-#endif
-
- // we don't touch anything in 'this' until here (except for snapshot,
- // which has it's own lock)
- MonitorAutoLock lock(mMonitor);
-
- // implicitly releases last image
- mImage = image.forget();
-
- return 0;
-}
-#endif
-
-// Called if the graph thinks it's running out of buffered video; repeat
-// the last frame for whatever minimum period it think it needs. Note that
-// this means that no *real* frame can be inserted during this period.
-void
-MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
- SourceMediaStream *aSource,
- TrackID aID,
- StreamTime aDesiredTime,
- TrackTicks &aLastEndTime)
-{
- VideoSegment segment;
-
- MonitorAutoLock lock(mMonitor);
- // B2G does AddTrack, but holds kStarted until the hardware changes state.
- // So mState could be kReleased here. We really don't care about the state,
- // though.
-
- // Note: we're not giving up mImage here
- nsRefPtr<layers::Image> image = mImage;
- TrackTicks target = aSource->TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
- TrackTicks delta = target - aLastEndTime;
- LOGFRAME(("NotifyPull, desired = %ld, target = %ld, delta = %ld %s", (int64_t) aDesiredTime,
- (int64_t) target, (int64_t) delta, image ? "" : "<null>"));
-
- // Bug 846188 We may want to limit incoming frames to the requested frame rate
- // mFps - if you want 30FPS, and the camera gives you 60FPS, this could
- // cause issues.
- // We may want to signal if the actual frame rate is below mMinFPS -
- // cameras often don't return the requested frame rate especially in low
- // light; we should consider surfacing this so that we can switch to a
- // lower resolution (which may up the frame rate)
-
- // Don't append if we've already provided a frame that supposedly goes past the current aDesiredTime
- // Doing so means a negative delta and thus messes up handling of the graph
- if (delta > 0) {
- // nullptr images are allowed
- IntSize size(image ? mWidth : 0, image ? mHeight : 0);
- segment.AppendFrame(image.forget(), delta, size);
- // This can fail if either a) we haven't added the track yet, or b)
- // we've removed or finished the track.
- if (aSource->AppendToTrack(aID, &(segment))) {
- aLastEndTime = target;
- }
- }
-}
-
-static bool IsWithin(int32_t n, const ConstrainLongRange& aRange) {
+/* static */ bool
+MediaEngineCameraVideoSource::IsWithin(double n, const ConstrainDoubleRange& aRange) {
return aRange.mMin <= n && n <= aRange.mMax;
}
-static bool IsWithin(double n, const ConstrainDoubleRange& aRange) {
- return aRange.mMin <= n && n <= aRange.mMax;
-}
-
-static int32_t Clamp(int32_t n, const ConstrainLongRange& aRange) {
+/* static */ int32_t
+MediaEngineCameraVideoSource::Clamp(int32_t n, const ConstrainLongRange& aRange) {
return std::max(aRange.mMin, std::min(n, aRange.mMax));
}
-static bool
-AreIntersecting(const ConstrainLongRange& aA, const ConstrainLongRange& aB) {
+/* static */ bool
+MediaEngineCameraVideoSource::AreIntersecting(const ConstrainLongRange& aA, const ConstrainLongRange& aB) {
return aA.mMax >= aB.mMin && aA.mMin <= aB.mMax;
}
-static bool
-Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
+/* static */ bool
+MediaEngineCameraVideoSource::Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
MOZ_ASSERT(AreIntersecting(aA, aB));
aA.mMin = std::max(aA.mMin, aB.mMin);
aA.mMax = std::min(aA.mMax, aB.mMax);
return true;
}
-static bool SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
- const webrtc::CaptureCapability& aCandidate) {
- if (!IsWithin(aCandidate.width, aConstraints.mWidth) ||
- !IsWithin(aCandidate.height, aConstraints.mHeight)) {
- return false;
- }
- if (!IsWithin(aCandidate.maxFPS, aConstraints.mFrameRate)) {
- return false;
- }
- return true;
-}
-
+// A special version of the algorithm for cameras that don't list capabilities.
void
-MediaEngineWebRTCVideoSource::ChooseCapability(
- const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs)
-{
-#ifdef MOZ_B2G_CAMERA
- return GuessCapability(aConstraints, aPrefs);
-#else
- NS_ConvertUTF16toUTF8 uniqueId(mUniqueId);
- int num = mViECapture->NumberOfCapabilities(uniqueId.get(), kMaxUniqueIdLength);
- if (num <= 0) {
- // Mac doesn't support capabilities.
- return GuessCapability(aConstraints, aPrefs);
- }
-
- // The rest is the full algorithm for cameras that can list their capabilities.
-
- LOG(("ChooseCapability: prefs: %dx%d @%d-%dfps",
- aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
-
- typedef nsTArray<uint8_t> SourceSet;
-
- SourceSet candidateSet;
- for (int i = 0; i < num; i++) {
- candidateSet.AppendElement(i);
- }
-
- // Pick among capabilities: First apply required constraints.
-
- for (uint32_t i = 0; i < candidateSet.Length();) {
- webrtc::CaptureCapability cap;
- mViECapture->GetCaptureCapability(uniqueId.get(), kMaxUniqueIdLength,
- candidateSet[i], cap);
- if (!SatisfyConstraintSet(aConstraints.mRequired, cap)) {
- candidateSet.RemoveElementAt(i);
- } else {
- ++i;
- }
- }
-
- SourceSet tailSet;
-
- // Then apply advanced (formerly known as optional) constraints.
-
- if (aConstraints.mAdvanced.WasPassed()) {
- auto &array = aConstraints.mAdvanced.Value();
-
- for (uint32_t i = 0; i < array.Length(); i++) {
- SourceSet rejects;
- for (uint32_t j = 0; j < candidateSet.Length();) {
- webrtc::CaptureCapability cap;
- mViECapture->GetCaptureCapability(uniqueId.get(), kMaxUniqueIdLength,
- candidateSet[j], cap);
- if (!SatisfyConstraintSet(array[i], cap)) {
- rejects.AppendElement(candidateSet[j]);
- candidateSet.RemoveElementAt(j);
- } else {
- ++j;
- }
- }
- (candidateSet.Length()? tailSet : candidateSet).MoveElementsFrom(rejects);
- }
- }
-
- if (!candidateSet.Length()) {
- candidateSet.AppendElement(0);
- }
-
- int prefWidth = aPrefs.GetWidth();
- int prefHeight = aPrefs.GetHeight();
-
- // Default is closest to available capability but equal to or below;
- // otherwise closest above. Since we handle the num=0 case above and
- // take the first entry always, we can never exit uninitialized.
-
- webrtc::CaptureCapability cap;
- bool higher = true;
- for (uint32_t i = 0; i < candidateSet.Length(); i++) {
- mViECapture->GetCaptureCapability(NS_ConvertUTF16toUTF8(mUniqueId).get(),
- kMaxUniqueIdLength, candidateSet[i], cap);
- if (higher) {
- if (i == 0 ||
- (mCapability.width > cap.width && mCapability.height > cap.height)) {
- // closer than the current choice
- mCapability = cap;
- // FIXME: expose expected capture delay?
- }
- if (cap.width <= (uint32_t) prefWidth && cap.height <= (uint32_t) prefHeight) {
- higher = false;
- }
- } else {
- if (cap.width > (uint32_t) prefWidth || cap.height > (uint32_t) prefHeight ||
- cap.maxFPS < (uint32_t) aPrefs.mMinFPS) {
- continue;
- }
- if (mCapability.width < cap.width && mCapability.height < cap.height) {
- mCapability = cap;
- // FIXME: expose expected capture delay?
- }
- }
- // Same resolution, maybe better format or FPS match
- if (mCapability.width == cap.width && mCapability.height == cap.height) {
- // FPS too low
- if (cap.maxFPS < (uint32_t) aPrefs.mMinFPS) {
- continue;
- }
- // Better match
- if (cap.maxFPS < mCapability.maxFPS) {
- mCapability = cap;
- } else if (cap.maxFPS == mCapability.maxFPS) {
- // Resolution and FPS the same, check format
- if (cap.rawType == webrtc::RawVideoType::kVideoI420
- || cap.rawType == webrtc::RawVideoType::kVideoYUY2
- || cap.rawType == webrtc::RawVideoType::kVideoYV12) {
- mCapability = cap;
- }
- }
- }
- }
- LOG(("chose cap %dx%d @%dfps codec %d raw %d",
- mCapability.width, mCapability.height, mCapability.maxFPS,
- mCapability.codecType, mCapability.rawType));
-#endif
-}
-
-// A special version of the algorithm for cameras that don't list capabilities.
-
-void
-MediaEngineWebRTCVideoSource::GuessCapability(
- const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs)
+MediaEngineCameraVideoSource::GuessCapability(
+ const VideoTrackConstraintsN& aConstraints,
+ const MediaEnginePrefs& aPrefs)
{
LOG(("GuessCapability: prefs: %dx%d @%d-%dfps",
aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
// In short: compound constraint-ranges and use pref as ideal.
ConstrainLongRange cWidth(aConstraints.mRequired.mWidth);
ConstrainLongRange cHeight(aConstraints.mRequired.mHeight);
@@ -411,731 +123,27 @@ MediaEngineWebRTCVideoSource::GuessCapab
}
}
mCapability.maxFPS = MediaEngine::DEFAULT_VIDEO_FPS;
LOG(("chose cap %dx%d @%dfps",
mCapability.width, mCapability.height, mCapability.maxFPS));
}
void
-MediaEngineWebRTCVideoSource::GetName(nsAString& aName)
+MediaEngineCameraVideoSource::GetName(nsAString& aName)
{
aName = mDeviceName;
}
void
-MediaEngineWebRTCVideoSource::GetUUID(nsAString& aUUID)
+MediaEngineCameraVideoSource::GetUUID(nsAString& aUUID)
{
aUUID = mUniqueId;
}
-nsresult
-MediaEngineWebRTCVideoSource::Allocate(const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs)
-{
- LOG((__FUNCTION__));
-#ifdef MOZ_B2G_CAMERA
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
- if (mState == kReleased && mInitDone) {
- ChooseCapability(aConstraints, aPrefs);
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::AllocImpl));
- mCallbackMonitor.Wait();
- if (mState != kAllocated) {
- return NS_ERROR_FAILURE;
- }
- }
-#else
- if (mState == kReleased && mInitDone) {
- // Note: if shared, we don't allow a later opener to affect the resolution.
- // (This may change depending on spec changes for Constraints/settings)
-
- ChooseCapability(aConstraints, aPrefs);
-
- if (mViECapture->AllocateCaptureDevice(NS_ConvertUTF16toUTF8(mUniqueId).get(),
- kMaxUniqueIdLength, mCaptureIndex)) {
- return NS_ERROR_FAILURE;
- }
- mState = kAllocated;
- LOG(("Video device %d allocated", mCaptureIndex));
- } else if (mSources.IsEmpty()) {
- LOG(("Video device %d reallocated", mCaptureIndex));
- } else {
- LOG(("Video device %d allocated shared", mCaptureIndex));
- }
-#endif
-
- return NS_OK;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::Deallocate()
-{
- LOG((__FUNCTION__));
- if (mSources.IsEmpty()) {
-#ifdef MOZ_B2G_CAMERA
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
- if (mState != kStopped && mState != kAllocated) {
- return NS_ERROR_FAILURE;
- }
-#ifdef MOZ_B2G_CAMERA
- // We do not register success callback here
-
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::DeallocImpl));
- mCallbackMonitor.Wait();
- if (mState != kReleased) {
- return NS_ERROR_FAILURE;
- }
-#elif XP_MACOSX
- // Bug 829907 - on mac, in shutdown, the mainthread stops processing
- // 'native' events, and the QTKit code uses events to the main native CFRunLoop
- // in order to provide thread safety. In order to avoid this locking us up,
- // release the ViE capture device synchronously on MainThread (so the native
- // event isn't needed).
- // XXX Note if MainThread Dispatch()es NS_DISPATCH_SYNC to us we can deadlock.
- // XXX It might be nice to only do this if we're in shutdown... Hard to be
- // sure when that is though.
- // Thread safety: a) we call this synchronously, and don't use ViECapture from
- // another thread anywhere else, b) ViEInputManager::DestroyCaptureDevice() grabs
- // an exclusive object lock and deletes it in a critical section, so all in all
- // this should be safe threadwise.
- NS_DispatchToMainThread(WrapRunnable(mViECapture,
- &webrtc::ViECapture::ReleaseCaptureDevice,
- mCaptureIndex),
- NS_DISPATCH_SYNC);
-#else
- mViECapture->ReleaseCaptureDevice(mCaptureIndex);
-#endif
- mState = kReleased;
- LOG(("Video device %d deallocated", mCaptureIndex));
- } else {
- LOG(("Video device %d deallocated but still in use", mCaptureIndex));
- }
- return NS_OK;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
-{
- LOG((__FUNCTION__));
-#ifndef MOZ_B2G_CAMERA
- int error = 0;
-#endif
- if (!mInitDone || !aStream) {
- return NS_ERROR_FAILURE;
- }
-
- mSources.AppendElement(aStream);
-
- aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
- aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
-
-#ifdef MOZ_B2G_CAMERA
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
-
- if (mState == kStarted) {
- return NS_OK;
- }
- mImageContainer = layers::LayerManager::CreateImageContainer();
-
-#ifdef MOZ_B2G_CAMERA
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::StartImpl,
- mCapability));
- mCallbackMonitor.Wait();
- if (mState != kStarted) {
- return NS_ERROR_FAILURE;
- }
-#else
- mState = kStarted;
- error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this);
- if (error == -1) {
- return NS_ERROR_FAILURE;
- }
-
- error = mViERender->StartRender(mCaptureIndex);
- if (error == -1) {
- return NS_ERROR_FAILURE;
- }
-
- if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
- return NS_ERROR_FAILURE;
- }
-#endif
-
- return NS_OK;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
-{
- LOG((__FUNCTION__));
- if (!mSources.RemoveElement(aSource)) {
- // Already stopped - this is allowed
- return NS_OK;
- }
- if (!mSources.IsEmpty()) {
- return NS_OK;
- }
-#ifdef MOZ_B2G_CAMERA
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
- if (mState != kStarted) {
- return NS_ERROR_FAILURE;
- }
-
- {
- MonitorAutoLock lock(mMonitor);
- mState = kStopped;
- aSource->EndTrack(aID);
- // Drop any cached image so we don't start with a stale image on next
- // usage
- mImage = nullptr;
- }
-#ifdef MOZ_B2G_CAMERA
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::StopImpl));
-#else
- mViERender->StopRender(mCaptureIndex);
- mViERender->RemoveRenderer(mCaptureIndex);
- mViECapture->StopCapture(mCaptureIndex);
-#endif
-
- return NS_OK;
-}
-
void
-MediaEngineWebRTCVideoSource::SetDirectListeners(bool aHasDirectListeners)
+MediaEngineCameraVideoSource::SetDirectListeners(bool aHasDirectListeners)
{
LOG((__FUNCTION__));
mHasDirectListeners = aHasDirectListeners;
}
-nsresult
-MediaEngineWebRTCVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
-{
- return NS_ERROR_NOT_IMPLEMENTED;
-}
-
-/**
- * Initialization and Shutdown functions for the video source, called by the
- * constructor and destructor respectively.
- */
-
-void
-MediaEngineWebRTCVideoSource::Init()
-{
-#ifdef MOZ_B2G_CAMERA
- nsAutoCString deviceName;
- ICameraControl::GetCameraName(mCaptureIndex, deviceName);
- CopyUTF8toUTF16(deviceName, mDeviceName);
- CopyUTF8toUTF16(deviceName, mUniqueId);
-#else
- // fix compile warning for these being unused. (remove once used)
- (void) mFps;
- (void) mMinFps;
-
- LOG((__FUNCTION__));
- if (mVideoEngine == nullptr) {
- return;
- }
-
- mViEBase = webrtc::ViEBase::GetInterface(mVideoEngine);
- if (mViEBase == nullptr) {
- return;
- }
-
- // Get interfaces for capture, render for now
- mViECapture = webrtc::ViECapture::GetInterface(mVideoEngine);
- mViERender = webrtc::ViERender::GetInterface(mVideoEngine);
-
- if (mViECapture == nullptr || mViERender == nullptr) {
- return;
- }
-
- char deviceName[kMaxDeviceNameLength];
- char uniqueId[kMaxUniqueIdLength];
- if (mViECapture->GetCaptureDevice(mCaptureIndex,
- deviceName, kMaxDeviceNameLength,
- uniqueId, kMaxUniqueIdLength)) {
- return;
- }
-
- CopyUTF8toUTF16(deviceName, mDeviceName);
- CopyUTF8toUTF16(uniqueId, mUniqueId);
-#endif
-
- mInitDone = true;
-}
-
-void
-MediaEngineWebRTCVideoSource::Shutdown()
-{
- LOG((__FUNCTION__));
- if (!mInitDone) {
- return;
- }
-#ifdef MOZ_B2G_CAMERA
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
- if (mState == kStarted) {
- while (!mSources.IsEmpty()) {
- Stop(mSources[0], kVideoTrack); // XXX change to support multiple tracks
- }
- MOZ_ASSERT(mState == kStopped);
- }
-
- if (mState == kAllocated || mState == kStopped) {
- Deallocate();
- }
-#ifndef MOZ_B2G_CAMERA
- mViECapture->Release();
- mViERender->Release();
- mViEBase->Release();
-#endif
- mState = kReleased;
- mInitDone = false;
-}
-
-void MediaEngineWebRTCVideoSource::Refresh(int aIndex) {
- // NOTE: mCaptureIndex might have changed when allocated!
- // Use aIndex to update information, but don't change mCaptureIndex!!
-#ifdef MOZ_B2G_CAMERA
- // Caller looked up this source by uniqueId; since deviceName == uniqueId nothing else changes
-#else
- // Caller looked up this source by uniqueId, so it shouldn't change
- char deviceName[kMaxDeviceNameLength];
- char uniqueId[kMaxUniqueIdLength];
-
- if (mViECapture->GetCaptureDevice(aIndex,
- deviceName, sizeof(deviceName),
- uniqueId, sizeof(uniqueId))) {
- return;
- }
-
- CopyUTF8toUTF16(deviceName, mDeviceName);
-#ifdef DEBUG
- nsString temp;
- CopyUTF8toUTF16(uniqueId, temp);
- MOZ_ASSERT(temp.Equals(mUniqueId));
-#endif
-#endif
-}
-
-#ifdef MOZ_B2G_CAMERA
-
-// All these functions must be run on MainThread!
-void
-MediaEngineWebRTCVideoSource::AllocImpl() {
- MOZ_ASSERT(NS_IsMainThread());
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-
- mCameraControl = ICameraControl::Create(mCaptureIndex);
- if (mCameraControl) {
- mState = kAllocated;
- // Add this as a listener for CameraControl events. We don't need
- // to explicitly remove this--destroying the CameraControl object
- // in DeallocImpl() will do that for us.
- mCameraControl->AddListener(this);
- }
-
- mCallbackMonitor.Notify();
-}
-
-void
-MediaEngineWebRTCVideoSource::DeallocImpl() {
- MOZ_ASSERT(NS_IsMainThread());
-
- mCameraControl = nullptr;
-}
-
-// The same algorithm from bug 840244
-static int
-GetRotateAmount(ScreenOrientation aScreen, int aCameraMountAngle, bool aBackCamera) {
- int screenAngle = 0;
- switch (aScreen) {
- case eScreenOrientation_PortraitPrimary:
- screenAngle = 0;
- break;
- case eScreenOrientation_PortraitSecondary:
- screenAngle = 180;
- break;
- case eScreenOrientation_LandscapePrimary:
- screenAngle = 90;
- break;
- case eScreenOrientation_LandscapeSecondary:
- screenAngle = 270;
- break;
- default:
- MOZ_ASSERT(false);
- break;
- }
-
- int result;
-
- if (aBackCamera) {
- //back camera
- result = (aCameraMountAngle - screenAngle + 360) % 360;
- } else {
- //front camera
- result = (aCameraMountAngle + screenAngle) % 360;
- }
- return result;
-}
-
-// undefine to remove on-the-fly rotation support
-#define DYNAMIC_GUM_ROTATION
-
-void
-MediaEngineWebRTCVideoSource::Notify(const hal::ScreenConfiguration& aConfiguration) {
-#ifdef DYNAMIC_GUM_ROTATION
- if (mHasDirectListeners) {
- // aka hooked to PeerConnection
- MonitorAutoLock enter(mMonitor);
- mRotation = GetRotateAmount(aConfiguration.orientation(), mCameraAngle, mBackCamera);
-
- LOG(("*** New orientation: %d (Camera %d Back %d MountAngle: %d)",
- mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
- }
-#endif
-
- mOrientationChanged = true;
-}
-
-void
-MediaEngineWebRTCVideoSource::StartImpl(webrtc::CaptureCapability aCapability) {
- MOZ_ASSERT(NS_IsMainThread());
-
- ICameraControl::Configuration config;
- config.mMode = ICameraControl::kPictureMode;
- config.mPreviewSize.width = aCapability.width;
- config.mPreviewSize.height = aCapability.height;
- mCameraControl->Start(&config);
- mCameraControl->Set(CAMERA_PARAM_PICTURE_SIZE, config.mPreviewSize);
-
- hal::RegisterScreenConfigurationObserver(this);
-}
-
-void
-MediaEngineWebRTCVideoSource::StopImpl() {
- MOZ_ASSERT(NS_IsMainThread());
-
- hal::UnregisterScreenConfigurationObserver(this);
- mCameraControl->Stop();
-}
-
-void
-MediaEngineWebRTCVideoSource::SnapshotImpl() {
- MOZ_ASSERT(NS_IsMainThread());
- mCameraControl->TakePicture();
-}
-
-void
-MediaEngineWebRTCVideoSource::OnHardwareStateChange(HardwareState aState)
-{
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
- if (aState == CameraControlListener::kHardwareClosed) {
- // When the first CameraControl listener is added, it gets pushed
- // the current state of the camera--normally 'closed'. We only
- // pay attention to that state if we've progressed out of the
- // allocated state.
- if (mState != kAllocated) {
- mState = kReleased;
- mCallbackMonitor.Notify();
- }
- } else {
- // Can't read this except on MainThread (ugh)
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::GetRotation));
- mState = kStarted;
- mCallbackMonitor.Notify();
- }
-}
-
-void
-MediaEngineWebRTCVideoSource::GetRotation()
-{
- MOZ_ASSERT(NS_IsMainThread());
- MonitorAutoLock enter(mMonitor);
-
- mCameraControl->Get(CAMERA_PARAM_SENSORANGLE, mCameraAngle);
- MOZ_ASSERT(mCameraAngle == 0 || mCameraAngle == 90 || mCameraAngle == 180 ||
- mCameraAngle == 270);
- hal::ScreenConfiguration config;
- hal::GetCurrentScreenConfiguration(&config);
-
- nsCString deviceName;
- ICameraControl::GetCameraName(mCaptureIndex, deviceName);
- if (deviceName.EqualsASCII("back")) {
- mBackCamera = true;
- }
-
- mRotation = GetRotateAmount(config.orientation(), mCameraAngle, mBackCamera);
- LOG(("*** Initial orientation: %d (Camera %d Back %d MountAngle: %d)",
- mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
-}
-
-void
-MediaEngineWebRTCVideoSource::OnUserError(UserContext aContext, nsresult aError)
-{
- {
- // Scope the monitor, since there is another monitor below and we don't want
- // unexpected deadlock.
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
- mCallbackMonitor.Notify();
- }
-
- // A main thread runnable to send error code to all queued PhotoCallbacks.
- class TakePhotoError : public nsRunnable {
- public:
- TakePhotoError(nsTArray<nsRefPtr<PhotoCallback>>& aCallbacks,
- nsresult aRv)
- : mRv(aRv)
- {
- mCallbacks.SwapElements(aCallbacks);
- }
-
- NS_IMETHOD Run()
- {
- uint32_t callbackNumbers = mCallbacks.Length();
- for (uint8_t i = 0; i < callbackNumbers; i++) {
- mCallbacks[i]->PhotoError(mRv);
- }
- // PhotoCallback needs to dereference on main thread.
- mCallbacks.Clear();
- return NS_OK;
- }
-
- protected:
- nsTArray<nsRefPtr<PhotoCallback>> mCallbacks;
- nsresult mRv;
- };
-
- if (aContext == UserContext::kInTakePicture) {
- MonitorAutoLock lock(mMonitor);
- if (mPhotoCallbacks.Length()) {
- NS_DispatchToMainThread(new TakePhotoError(mPhotoCallbacks, aError));
- }
- }
-}
-
-void
-MediaEngineWebRTCVideoSource::OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType)
-{
- // It needs to start preview because Gonk camera will stop preview while
- // taking picture.
- mCameraControl->StartPreview();
-
- // Create a main thread runnable to generate a blob and call all current queued
- // PhotoCallbacks.
- class GenerateBlobRunnable : public nsRunnable {
- public:
- GenerateBlobRunnable(nsTArray<nsRefPtr<PhotoCallback>>& aCallbacks,
- uint8_t* aData,
- uint32_t aLength,
- const nsAString& aMimeType)
- {
- mCallbacks.SwapElements(aCallbacks);
- mPhoto.AppendElements(aData, aLength);
- mMimeType = aMimeType;
- }
-
- NS_IMETHOD Run()
- {
- nsRefPtr<dom::File> blob =
- dom::File::CreateMemoryFile(nullptr, mPhoto.Elements(), mPhoto.Length(), mMimeType);
- uint32_t callbackCounts = mCallbacks.Length();
- for (uint8_t i = 0; i < callbackCounts; i++) {
- nsRefPtr<dom::File> tempBlob = blob;
- mCallbacks[i]->PhotoComplete(tempBlob.forget());
- }
- // PhotoCallback needs to dereference on main thread.
- mCallbacks.Clear();
- return NS_OK;
- }
-
- nsTArray<nsRefPtr<PhotoCallback>> mCallbacks;
- nsTArray<uint8_t> mPhoto;
- nsString mMimeType;
- };
-
- // All elements in mPhotoCallbacks will be swapped in GenerateBlobRunnable
- // constructor. This captured image will be sent to all the queued
- // PhotoCallbacks in this runnable.
- MonitorAutoLock lock(mMonitor);
- if (mPhotoCallbacks.Length()) {
- NS_DispatchToMainThread(
- new GenerateBlobRunnable(mPhotoCallbacks, aData, aLength, aMimeType));
- }
-}
-
-uint32_t
-MediaEngineWebRTCVideoSource::ConvertPixelFormatToFOURCC(int aFormat)
-{
- switch (aFormat) {
- case HAL_PIXEL_FORMAT_RGBA_8888:
- return libyuv::FOURCC_BGRA;
- case HAL_PIXEL_FORMAT_YCrCb_420_SP:
- return libyuv::FOURCC_NV21;
- case HAL_PIXEL_FORMAT_YV12:
- return libyuv::FOURCC_YV12;
- default: {
- LOG((" xxxxx Unknown pixel format %d", aFormat));
- MOZ_ASSERT(false, "Unknown pixel format.");
- return libyuv::FOURCC_ANY;
- }
- }
-}
-
-void
-MediaEngineWebRTCVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
- layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(aImage);
- android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
- void *pMem = nullptr;
- uint32_t size = aWidth * aHeight * 3 / 2;
-
- graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &pMem);
-
- uint8_t* srcPtr = static_cast<uint8_t*>(pMem);
- // Create a video frame and append it to the track.
- nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
- layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
-
- uint32_t dstWidth;
- uint32_t dstHeight;
-
- if (mRotation == 90 || mRotation == 270) {
- dstWidth = aHeight;
- dstHeight = aWidth;
- } else {
- dstWidth = aWidth;
- dstHeight = aHeight;
- }
-
- uint32_t half_width = dstWidth / 2;
- uint8_t* dstPtr = videoImage->AllocateAndGetNewBuffer(size);
- libyuv::ConvertToI420(srcPtr, size,
- dstPtr, dstWidth,
- dstPtr + (dstWidth * dstHeight), half_width,
- dstPtr + (dstWidth * dstHeight * 5 / 4), half_width,
- 0, 0,
- aWidth, aHeight,
- aWidth, aHeight,
- static_cast<libyuv::RotationMode>(mRotation),
- ConvertPixelFormatToFOURCC(graphicBuffer->getPixelFormat()));
- graphicBuffer->unlock();
-
- const uint8_t lumaBpp = 8;
- const uint8_t chromaBpp = 4;
-
- layers::PlanarYCbCrData data;
- data.mYChannel = dstPtr;
- data.mYSize = IntSize(dstWidth, dstHeight);
- data.mYStride = dstWidth * lumaBpp / 8;
- data.mCbCrStride = dstWidth * chromaBpp / 8;
- data.mCbChannel = dstPtr + dstHeight * data.mYStride;
- data.mCrChannel = data.mCbChannel +( dstHeight * data.mCbCrStride / 2);
- data.mCbCrSize = IntSize(dstWidth / 2, dstHeight / 2);
- data.mPicX = 0;
- data.mPicY = 0;
- data.mPicSize = IntSize(dstWidth, dstHeight);
- data.mStereoMode = StereoMode::MONO;
-
- videoImage->SetDataNoCopy(data);
-
- // implicitly releases last image
- mImage = image.forget();
-}
-
-bool
-MediaEngineWebRTCVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
- {
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
- if (mState == kStopped) {
- return false;
- }
- }
-
- MonitorAutoLock enter(mMonitor);
- // Bug XXX we'd prefer to avoid converting if mRotation == 0, but that causes problems in UpdateImage()
- RotateImage(aImage, aWidth, aHeight);
- if (mRotation != 0 && mRotation != 180) {
- uint32_t temp = aWidth;
- aWidth = aHeight;
- aHeight = temp;
- }
- if (mWidth != static_cast<int>(aWidth) || mHeight != static_cast<int>(aHeight)) {
- mWidth = aWidth;
- mHeight = aHeight;
- LOG(("Video FrameSizeChange: %ux%u", mWidth, mHeight));
- }
-
- return true; // return true because we're accepting the frame
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::TakePhoto(PhotoCallback* aCallback)
-{
- MOZ_ASSERT(NS_IsMainThread());
-
- MonitorAutoLock lock(mMonitor);
-
- // If other callback exists, that means there is a captured picture on the way,
- // it doesn't need to TakePicture() again.
- if (!mPhotoCallbacks.Length()) {
- nsresult rv;
- if (mOrientationChanged) {
- UpdatePhotoOrientation();
- }
- rv = mCameraControl->TakePicture();
- if (NS_FAILED(rv)) {
- return rv;
- }
- }
-
- mPhotoCallbacks.AppendElement(aCallback);
-
- return NS_OK;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::UpdatePhotoOrientation()
-{
- MOZ_ASSERT(NS_IsMainThread());
-
- hal::ScreenConfiguration config;
- hal::GetCurrentScreenConfiguration(&config);
-
- // The rotation angle is clockwise.
- int orientation = 0;
- switch (config.orientation()) {
- case eScreenOrientation_PortraitPrimary:
- orientation = 0;
- break;
- case eScreenOrientation_PortraitSecondary:
- orientation = 180;
- break;
- case eScreenOrientation_LandscapePrimary:
- orientation = 270;
- break;
- case eScreenOrientation_LandscapeSecondary:
- orientation = 90;
- break;
- }
-
- // Front camera is inverse angle comparing to back camera.
- orientation = (mBackCamera ? orientation : (-orientation));
-
- ICameraControlParameterSetAutoEnter batch(mCameraControl);
- // It changes the orientation value in EXIF information only.
- mCameraControl->Set(CAMERA_PARAM_PICTURE_ROTATION, orientation);
-
- mOrientationChanged = false;
-
- return NS_OK;
-}
-
-#endif
-
-}
+} // namespace mozilla
copy from content/media/webrtc/MediaEngineWebRTC.h
copy to content/media/webrtc/MediaEngineCameraVideoSource.h
--- a/content/media/webrtc/MediaEngineWebRTC.h
+++ b/content/media/webrtc/MediaEngineCameraVideoSource.h
@@ -1,462 +1,100 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
-#ifndef MEDIAENGINEWEBRTC_H_
-#define MEDIAENGINEWEBRTC_H_
-
-#include "prcvar.h"
-#include "prthread.h"
-#include "nsIThread.h"
-#include "nsIRunnable.h"
+#ifndef MediaEngineCameraVideoSource_h
+#define MediaEngineCameraVideoSource_h
-#include "mozilla/dom/File.h"
-#include "mozilla/Mutex.h"
-#include "mozilla/Monitor.h"
-#include "nsCOMPtr.h"
-#include "nsThreadUtils.h"
-#include "DOMMediaStream.h"
-#include "nsDirectoryServiceDefs.h"
-#include "nsComponentManagerUtils.h"
-#include "nsRefPtrHashtable.h"
-
-#include "VideoUtils.h"
#include "MediaEngine.h"
-#include "VideoSegment.h"
-#include "AudioSegment.h"
-#include "StreamBuffer.h"
-#include "MediaStreamGraph.h"
+#include "MediaTrackConstraints.h"
-#include "MediaEngineWrapper.h"
-#include "mozilla/dom/MediaStreamTrackBinding.h"
-// WebRTC library includes follow
-#include "webrtc/common.h"
-// Audio Engine
-#include "webrtc/voice_engine/include/voe_base.h"
-#include "webrtc/voice_engine/include/voe_codec.h"
-#include "webrtc/voice_engine/include/voe_hardware.h"
-#include "webrtc/voice_engine/include/voe_network.h"
-#include "webrtc/voice_engine/include/voe_audio_processing.h"
-#include "webrtc/voice_engine/include/voe_volume_control.h"
-#include "webrtc/voice_engine/include/voe_external_media.h"
-#include "webrtc/voice_engine/include/voe_audio_processing.h"
-#include "webrtc/voice_engine/include/voe_call_report.h"
+#include "nsDirectoryServiceDefs.h"
-// Video Engine
// conflicts with #include of scoped_ptr.h
#undef FF
-#include "webrtc/video_engine/include/vie_base.h"
-#include "webrtc/video_engine/include/vie_codec.h"
-#include "webrtc/video_engine/include/vie_render.h"
#include "webrtc/video_engine/include/vie_capture.h"
-#ifdef MOZ_B2G_CAMERA
-#include "CameraControlListener.h"
-#include "ICameraControl.h"
-#include "ImageContainer.h"
-#include "nsGlobalWindow.h"
-#include "prprf.h"
-#include "mozilla/Hal.h"
-#endif
-
-#include "NullTransport.h"
-#include "AudioOutputObserver.h"
namespace mozilla {
-#ifdef MOZ_B2G_CAMERA
-class CameraAllocateRunnable;
-class GetCameraNameRunnable;
-#endif
-
-/**
- * The WebRTC implementation of the MediaEngine interface.
- *
- * On B2G platform, member data may accessed from different thread after construction:
- *
- * MediaThread:
- * mState, mImage, mWidth, mHeight, mCapability, mPrefs, mDeviceName, mUniqueId, mInitDone,
- * mImageContainer, mSources, mState, mImage
- *
- * MainThread:
- * mCaptureIndex, mLastCapture, mState, mWidth, mHeight,
- *
- * Where mWidth, mHeight, mImage, mPhotoCallbacks are protected by mMonitor
- * mState is protected by mCallbackMonitor
- * Other variable is accessed only from single thread
- */
-class MediaEngineWebRTCVideoSource : public MediaEngineVideoSource
- , public nsRunnable
-#ifdef MOZ_B2G_CAMERA
- , public CameraControlListener
- , public mozilla::hal::ScreenConfigurationObserver
-#else
- , public webrtc::ExternalRenderer
-#endif
+class MediaEngineCameraVideoSource : public MediaEngineVideoSource
{
public:
-#ifdef MOZ_B2G_CAMERA
- MediaEngineWebRTCVideoSource(int aIndex,
- MediaSourceType aMediaSource = MediaSourceType::Camera)
- : mCameraControl(nullptr)
- , mCallbackMonitor("WebRTCCamera.CallbackMonitor")
- , mRotation(0)
- , mBackCamera(false)
- , mOrientationChanged(true) // Correct the orientation at first time takePhoto.
- , mCaptureIndex(aIndex)
- , mMediaSource(aMediaSource)
- , mMonitor("WebRTCCamera.Monitor")
+ MediaEngineCameraVideoSource(int aIndex,
+ const char* aMonitorName = "Camera.Monitor")
+ : MediaEngineVideoSource(kReleased)
+ , mMonitor(aMonitorName)
, mWidth(0)
, mHeight(0)
+ , mInitDone(false)
, mHasDirectListeners(false)
- , mInitDone(false)
- , mInSnapshotMode(false)
- , mSnapshotPath(nullptr)
- {
- mState = kReleased;
- Init();
- }
-#else
- // ViEExternalRenderer.
- virtual int FrameSizeChange(unsigned int, unsigned int, unsigned int);
- virtual int DeliverFrame(unsigned char*,int, uint32_t , int64_t,
- void *handle);
- /**
- * Does DeliverFrame() support a null buffer and non-null handle
- * (video texture)?
- * XXX Investigate! Especially for Android/B2G
- */
- virtual bool IsTextureSupported() { return false; }
-
- MediaEngineWebRTCVideoSource(webrtc::VideoEngine* aVideoEnginePtr, int aIndex,
- MediaSourceType aMediaSource = MediaSourceType::Camera)
- : mVideoEngine(aVideoEnginePtr)
, mCaptureIndex(aIndex)
, mFps(-1)
- , mMinFps(-1)
- , mMediaSource(aMediaSource)
- , mMonitor("WebRTCCamera.Monitor")
- , mWidth(0)
- , mHeight(0)
- , mHasDirectListeners(false)
- , mInitDone(false)
- , mInSnapshotMode(false)
- , mSnapshotPath(nullptr) {
- MOZ_ASSERT(aVideoEnginePtr);
- mState = kReleased;
- Init();
- }
-#endif
+ {}
+
- virtual void GetName(nsAString&);
- virtual void GetUUID(nsAString&);
- virtual nsresult Allocate(const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs);
- virtual nsresult Deallocate();
- virtual nsresult Start(SourceMediaStream*, TrackID);
- virtual nsresult Stop(SourceMediaStream*, TrackID);
- virtual void SetDirectListeners(bool aHasListeners);
- virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
+ virtual void GetName(nsAString& aName) MOZ_OVERRIDE;
+ virtual void GetUUID(nsAString& aUUID) MOZ_OVERRIDE;
+ virtual void SetDirectListeners(bool aHasListeners) MOZ_OVERRIDE;
virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
bool aAgcOn, uint32_t aAGC,
bool aNoiseOn, uint32_t aNoise,
- int32_t aPlayoutDelay) { return NS_OK; };
- virtual void NotifyPull(MediaStreamGraph* aGraph,
- SourceMediaStream *aSource,
- TrackID aId,
- StreamTime aDesiredTime,
- TrackTicks &aLastEndTime);
+ int32_t aPlayoutDelay) MOZ_OVERRIDE
+ {
+ return NS_OK;
+ };
- virtual bool IsFake() {
+ virtual bool IsFake() MOZ_OVERRIDE
+ {
return false;
}
virtual const MediaSourceType GetMediaSource() {
- return mMediaSource;
+ return MediaSourceType::Camera;
}
-#ifndef MOZ_B2G_CAMERA
- NS_DECL_THREADSAFE_ISUPPORTS
-
- nsresult TakePhoto(PhotoCallback* aCallback)
+ virtual nsresult TakePhoto(PhotoCallback* aCallback) MOZ_OVERRIDE
{
return NS_ERROR_NOT_IMPLEMENTED;
}
-#else
- // We are subclassed from CameraControlListener, which implements a
- // threadsafe reference-count for us.
- NS_DECL_ISUPPORTS_INHERITED
-
- void OnHardwareStateChange(HardwareState aState);
- void GetRotation();
- bool OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
- void OnUserError(UserContext aContext, nsresult aError);
- void OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType);
-
- void AllocImpl();
- void DeallocImpl();
- void StartImpl(webrtc::CaptureCapability aCapability);
- void StopImpl();
- void SnapshotImpl();
- void RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
- uint32_t ConvertPixelFormatToFOURCC(int aFormat);
- void Notify(const mozilla::hal::ScreenConfiguration& aConfiguration);
-
- nsresult TakePhoto(PhotoCallback* aCallback) MOZ_OVERRIDE;
-
- // It sets the correct photo orientation via camera parameter according to
- // current screen orientation.
- nsresult UpdatePhotoOrientation();
-
-#endif
-
- // This runnable is for creating a temporary file on the main thread.
- NS_IMETHODIMP
- Run()
- {
- nsCOMPtr<nsIFile> tmp;
- nsresult rv = NS_GetSpecialDirectory(NS_OS_TEMP_DIR, getter_AddRefs(tmp));
- NS_ENSURE_SUCCESS(rv, rv);
-
- tmp->Append(NS_LITERAL_STRING("webrtc_snapshot.jpeg"));
- rv = tmp->CreateUnique(nsIFile::NORMAL_FILE_TYPE, 0600);
- NS_ENSURE_SUCCESS(rv, rv);
-
- mSnapshotPath = new nsString();
- rv = tmp->GetPath(*mSnapshotPath);
- NS_ENSURE_SUCCESS(rv, rv);
-
- return NS_OK;
- }
-
- void Refresh(int aIndex);
protected:
- ~MediaEngineWebRTCVideoSource() { Shutdown(); }
+ ~MediaEngineCameraVideoSource() {}
-private:
- // Initialize the needed Video engine interfaces.
- void Init();
- void Shutdown();
+ static bool IsWithin(int32_t n, const dom::ConstrainLongRange& aRange);
+ static bool IsWithin(double n, const dom::ConstrainDoubleRange& aRange);
+ static int32_t Clamp(int32_t n, const dom::ConstrainLongRange& aRange);
+ static bool AreIntersecting(const dom::ConstrainLongRange& aA,
+ const dom::ConstrainLongRange& aB);
+ static bool Intersect(dom::ConstrainLongRange& aA, const dom::ConstrainLongRange& aB);
+ void GuessCapability(const VideoTrackConstraintsN& aConstraints,
+ const MediaEnginePrefs& aPrefs);
// Engine variables.
-#ifdef MOZ_B2G_CAMERA
- mozilla::ReentrantMonitor mCallbackMonitor; // Monitor for camera callback handling
- // This is only modified on MainThread (AllocImpl and DeallocImpl)
- nsRefPtr<ICameraControl> mCameraControl;
- nsCOMPtr<nsIDOMFile> mLastCapture;
- nsTArray<nsRefPtr<PhotoCallback>> mPhotoCallbacks;
-
- // These are protected by mMonitor below
- int mRotation;
- int mCameraAngle; // See dom/base/ScreenOrientation.h
- bool mBackCamera;
- bool mOrientationChanged; // True when screen rotates.
-#else
- webrtc::VideoEngine* mVideoEngine; // Weak reference, don't free.
- webrtc::ViEBase* mViEBase;
- webrtc::ViECapture* mViECapture;
- webrtc::ViERender* mViERender;
-#endif
- webrtc::CaptureCapability mCapability; // Doesn't work on OS X.
-
- int mCaptureIndex;
- int mFps; // Track rate (30 fps by default)
- int mMinFps; // Min rate we want to accept
- MediaSourceType mMediaSource; // source of media (camera | application | screen)
// mMonitor protects mImage access/changes, and transitions of mState
// from kStarted to kStopped (which are combined with EndTrack() and
// image changes). Note that mSources is not accessed from other threads
// for video and is not protected.
- Monitor mMonitor; // Monitor for processing WebRTC frames.
- int mWidth, mHeight;
+ // All the mMonitor accesses are from the child classes.
+ Monitor mMonitor; // Monitor for processing Camera frames.
nsRefPtr<layers::Image> mImage;
nsRefPtr<layers::ImageContainer> mImageContainer;
- bool mHasDirectListeners;
+ int mWidth, mHeight; // protected with mMonitor on Gonk due to different threading
+ // end of data protected by mMonitor
- nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW
+ nsTArray<SourceMediaStream*> mSources; // When this goes empty, we shut down HW
bool mInitDone;
- bool mInSnapshotMode;
- nsString* mSnapshotPath;
+ bool mHasDirectListeners;
+ int mCaptureIndex;
+ int mFps; // Track rate (30 fps by default)
+
+ webrtc::CaptureCapability mCapability; // Doesn't work on OS X.
nsString mDeviceName;
nsString mUniqueId;
-
- void ChooseCapability(const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs);
-
- void GuessCapability(const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs);
};
-class MediaEngineWebRTCAudioSource : public MediaEngineAudioSource,
- public webrtc::VoEMediaProcess
-{
-public:
- MediaEngineWebRTCAudioSource(nsIThread *aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
- int aIndex, const char* name, const char* uuid)
- : mSamples(0)
- , mVoiceEngine(aVoiceEnginePtr)
- , mMonitor("WebRTCMic.Monitor")
- , mThread(aThread)
- , mCapIndex(aIndex)
- , mChannel(-1)
- , mInitDone(false)
- , mStarted(false)
- , mEchoOn(false), mAgcOn(false), mNoiseOn(false)
- , mEchoCancel(webrtc::kEcDefault)
- , mAGC(webrtc::kAgcDefault)
- , mNoiseSuppress(webrtc::kNsDefault)
- , mPlayoutDelay(0)
- , mNullTransport(nullptr) {
- MOZ_ASSERT(aVoiceEnginePtr);
- mState = kReleased;
- mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
- mDeviceUUID.Assign(NS_ConvertUTF8toUTF16(uuid));
- Init();
- }
- virtual void GetName(nsAString&);
- virtual void GetUUID(nsAString&);
-
- virtual nsresult Allocate(const AudioTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs);
- virtual nsresult Deallocate();
- virtual nsresult Start(SourceMediaStream*, TrackID);
- virtual nsresult Stop(SourceMediaStream*, TrackID);
- virtual void SetDirectListeners(bool aHasDirectListeners) {};
- virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
- virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
- bool aAgcOn, uint32_t aAGC,
- bool aNoiseOn, uint32_t aNoise,
- int32_t aPlayoutDelay);
-
- virtual void NotifyPull(MediaStreamGraph* aGraph,
- SourceMediaStream *aSource,
- TrackID aId,
- StreamTime aDesiredTime,
- TrackTicks &aLastEndTime);
-
- virtual bool IsFake() {
- return false;
- }
-
- virtual const MediaSourceType GetMediaSource() {
- return MediaSourceType::Microphone;
- }
-
- virtual nsresult TakePhoto(PhotoCallback* aCallback)
- {
- return NS_ERROR_NOT_IMPLEMENTED;
- }
-
- // VoEMediaProcess.
- void Process(int channel, webrtc::ProcessingTypes type,
- int16_t audio10ms[], int length,
- int samplingFreq, bool isStereo);
-
- NS_DECL_THREADSAFE_ISUPPORTS
-
-protected:
- ~MediaEngineWebRTCAudioSource() { Shutdown(); }
-
- // mSamples is an int to avoid conversions when comparing/etc to
- // samplingFreq & length. Making mSamples protected instead of private is a
- // silly way to avoid -Wunused-private-field warnings when PR_LOGGING is not
- // #defined. mSamples is not actually expected to be used by a derived class.
- int mSamples;
-
-private:
- void Init();
- void Shutdown();
-
- webrtc::VoiceEngine* mVoiceEngine;
- ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
- ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
- ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
- ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
- ScopedCustomReleasePtr<webrtc::VoECallReport> mVoECallReport;
-
- // mMonitor protects mSources[] access/changes, and transitions of mState
- // from kStarted to kStopped (which are combined with EndTrack()).
- // mSources[] is accessed from webrtc threads.
- Monitor mMonitor;
- nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW
- nsCOMPtr<nsIThread> mThread;
- int mCapIndex;
- int mChannel;
- TrackID mTrackID;
- bool mInitDone;
- bool mStarted;
-
- nsString mDeviceName;
- nsString mDeviceUUID;
-
- bool mEchoOn, mAgcOn, mNoiseOn;
- webrtc::EcModes mEchoCancel;
- webrtc::AgcModes mAGC;
- webrtc::NsModes mNoiseSuppress;
- int32_t mPlayoutDelay;
-
- NullTransport *mNullTransport;
-};
-
-class MediaEngineWebRTC : public MediaEngine
-{
-public:
- explicit MediaEngineWebRTC(MediaEnginePrefs &aPrefs);
-
- // Clients should ensure to clean-up sources video/audio sources
- // before invoking Shutdown on this class.
- void Shutdown();
-
- virtual void EnumerateVideoDevices(MediaSourceType,
- nsTArray<nsRefPtr<MediaEngineVideoSource> >*);
- virtual void EnumerateAudioDevices(MediaSourceType,
- nsTArray<nsRefPtr<MediaEngineAudioSource> >*);
-private:
- ~MediaEngineWebRTC() {
- Shutdown();
-#ifdef MOZ_B2G_CAMERA
- AsyncLatencyLogger::Get()->Release();
-#endif
- gFarendObserver = nullptr;
- }
-
- nsCOMPtr<nsIThread> mThread;
-
- Mutex mMutex;
-
- // protected with mMutex:
- webrtc::VideoEngine* mScreenEngine;
- webrtc::VideoEngine* mBrowserEngine;
- webrtc::VideoEngine* mWinEngine;
- webrtc::VideoEngine* mAppEngine;
- webrtc::VideoEngine* mVideoEngine;
- webrtc::VoiceEngine* mVoiceEngine;
-
- // specialized configurations
- webrtc::Config mAppEngineConfig;
- webrtc::Config mWinEngineConfig;
- webrtc::Config mScreenEngineConfig;
- webrtc::Config mBrowserEngineConfig;
-
- // Need this to avoid unneccesary WebRTC calls while enumerating.
- bool mVideoEngineInit;
- bool mAudioEngineInit;
- bool mScreenEngineInit;
- bool mBrowserEngineInit;
- bool mWinEngineInit;
- bool mAppEngineInit;
- bool mHasTabVideoSource;
-
- // Store devices we've already seen in a hashtable for quick return.
- // Maps UUID to MediaEngineSource (one set for audio, one for video).
- nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCVideoSource > mVideoSources;
- nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource > mAudioSources;
-};
-
-}
-
-#endif /* NSMEDIAENGINEWEBRTC_H_ */
+} // namespace mozilla
+#endif // MediaEngineCameraVideoSource_h
--- a/content/media/webrtc/MediaEngineDefault.cpp
+++ b/content/media/webrtc/MediaEngineDefault.cpp
@@ -34,20 +34,22 @@ namespace mozilla {
using namespace mozilla::gfx;
NS_IMPL_ISUPPORTS(MediaEngineDefaultVideoSource, nsITimerCallback)
/**
* Default video source.
*/
MediaEngineDefaultVideoSource::MediaEngineDefaultVideoSource()
- : mTimer(nullptr), mMonitor("Fake video"), mCb(16), mCr(16)
+ : MediaEngineVideoSource(kReleased)
+ , mTimer(nullptr)
+ , mMonitor("Fake video")
+ , mCb(16), mCr(16)
{
mImageContainer = layers::LayerManager::CreateImageContainer();
- mState = kReleased;
}
MediaEngineDefaultVideoSource::~MediaEngineDefaultVideoSource()
{}
void
MediaEngineDefaultVideoSource::GetName(nsAString& aName)
{
@@ -165,60 +167,16 @@ MediaEngineDefaultVideoSource::Stop(Sour
aSource->EndTrack(aID);
aSource->Finish();
mState = kStopped;
return NS_OK;
}
-nsresult
-MediaEngineDefaultVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
-{
- *aFile = nullptr;
-
-#ifndef MOZ_WIDGET_ANDROID
- return NS_ERROR_NOT_IMPLEMENTED;
-#else
- nsAutoString filePath;
- nsCOMPtr<nsIFilePicker> filePicker = do_CreateInstance("@mozilla.org/filepicker;1");
- if (!filePicker)
- return NS_ERROR_FAILURE;
-
- nsXPIDLString title;
- nsContentUtils::GetLocalizedString(nsContentUtils::eFORMS_PROPERTIES, "Browse", title);
- int16_t mode = static_cast<int16_t>(nsIFilePicker::modeOpen);
-
- nsresult rv = filePicker->Init(nullptr, title, mode);
- NS_ENSURE_SUCCESS(rv, rv);
- filePicker->AppendFilters(nsIFilePicker::filterImages);
-
- // XXX - This API should be made async
- int16_t dialogReturn;
- rv = filePicker->Show(&dialogReturn);
- NS_ENSURE_SUCCESS(rv, rv);
- if (dialogReturn == nsIFilePicker::returnCancel) {
- *aFile = nullptr;
- return NS_OK;
- }
-
- nsCOMPtr<nsIFile> localFile;
- filePicker->GetFile(getter_AddRefs(localFile));
-
- if (!localFile) {
- *aFile = nullptr;
- return NS_OK;
- }
-
- nsCOMPtr<nsIDOMFile> domFile = dom::File::CreateFromFile(nullptr, localFile);
- domFile.forget(aFile);
- return NS_OK;
-#endif
-}
-
NS_IMETHODIMP
MediaEngineDefaultVideoSource::Notify(nsITimer* aTimer)
{
// Update the target color
if (mCr <= 16) {
if (mCb < 240) {
mCb++;
} else {
@@ -346,19 +304,19 @@ private:
};
/**
* Default audio source.
*/
NS_IMPL_ISUPPORTS(MediaEngineDefaultAudioSource, nsITimerCallback)
MediaEngineDefaultAudioSource::MediaEngineDefaultAudioSource()
- : mTimer(nullptr)
+ : MediaEngineAudioSource(kReleased)
+ , mTimer(nullptr)
{
- mState = kReleased;
}
MediaEngineDefaultAudioSource::~MediaEngineDefaultAudioSource()
{}
void
MediaEngineDefaultAudioSource::GetName(nsAString& aName)
{
@@ -450,22 +408,16 @@ MediaEngineDefaultAudioSource::Stop(Sour
aSource->EndTrack(aID);
aSource->Finish();
mState = kStopped;
return NS_OK;
}
-nsresult
-MediaEngineDefaultAudioSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
-{
- return NS_ERROR_NOT_IMPLEMENTED;
-}
-
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{
AudioSegment segment;
nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(AUDIO_FRAME_LENGTH * sizeof(int16_t));
int16_t* dest = static_cast<int16_t*>(buffer->Data());
mSineGenerator->generate(dest, AUDIO_FRAME_LENGTH);
--- a/content/media/webrtc/MediaEngineDefault.h
+++ b/content/media/webrtc/MediaEngineDefault.h
@@ -41,17 +41,16 @@ public:
virtual void GetUUID(nsAString&);
virtual nsresult Allocate(const VideoTrackConstraintsN &aConstraints,
const MediaEnginePrefs &aPrefs);
virtual nsresult Deallocate();
virtual nsresult Start(SourceMediaStream*, TrackID);
virtual nsresult Stop(SourceMediaStream*, TrackID);
virtual void SetDirectListeners(bool aHasDirectListeners) {};
- virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
bool aAgcOn, uint32_t aAGC,
bool aNoiseOn, uint32_t aNoise,
int32_t aPlayoutDelay) { return NS_OK; };
virtual void NotifyPull(MediaStreamGraph* aGraph,
SourceMediaStream *aSource,
TrackID aId,
StreamTime aDesiredTime,
@@ -106,17 +105,16 @@ public:
virtual void GetUUID(nsAString&);
virtual nsresult Allocate(const AudioTrackConstraintsN &aConstraints,
const MediaEnginePrefs &aPrefs);
virtual nsresult Deallocate();
virtual nsresult Start(SourceMediaStream*, TrackID);
virtual nsresult Stop(SourceMediaStream*, TrackID);
virtual void SetDirectListeners(bool aHasDirectListeners) {};
- virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
bool aAgcOn, uint32_t aAGC,
bool aNoiseOn, uint32_t aNoise,
int32_t aPlayoutDelay) { return NS_OK; };
virtual void NotifyPull(MediaStreamGraph* aGraph,
SourceMediaStream *aSource,
TrackID aId,
StreamTime aDesiredTime,
copy from content/media/webrtc/MediaEngineWebRTCVideo.cpp
copy to content/media/webrtc/MediaEngineGonkVideoSource.cpp
--- a/content/media/webrtc/MediaEngineWebRTCVideo.cpp
+++ b/content/media/webrtc/MediaEngineGonkVideoSource.cpp
@@ -1,151 +1,59 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
+#include "MediaEngineGonkVideoSource.h"
-#include "MediaEngineWebRTC.h"
-#include "Layers.h"
-#include "ImageTypes.h"
-#include "ImageContainer.h"
-#include "mozilla/layers/GrallocTextureClient.h"
-#include "nsMemory.h"
+#define LOG_TAG "MediaEngineGonkVideoSource"
+
+#include <utils/Log.h>
+
+#include "GrallocImages.h"
+#include "VideoUtils.h"
+#include "ScreenOrientation.h"
+
+#include "libyuv.h"
#include "mtransport/runnable_utils.h"
-#include "MediaTrackConstraints.h"
-#ifdef MOZ_B2G_CAMERA
-#include "GrallocImages.h"
-#include "libyuv.h"
-#include "mozilla/Hal.h"
-#include "ScreenOrientation.h"
-using namespace mozilla::dom;
-#endif
namespace mozilla {
+using namespace mozilla::dom;
using namespace mozilla::gfx;
-using dom::ConstrainLongRange;
-using dom::ConstrainDoubleRange;
-using dom::MediaTrackConstraintSet;
#ifdef PR_LOGGING
extern PRLogModuleInfo* GetMediaManagerLog();
#define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
#define LOGFRAME(msg) PR_LOG(GetMediaManagerLog(), 6, msg)
#else
#define LOG(msg)
#define LOGFRAME(msg)
#endif
-/**
- * Webrtc video source.
- */
-#ifndef MOZ_B2G_CAMERA
-NS_IMPL_ISUPPORTS(MediaEngineWebRTCVideoSource, nsIRunnable)
-#else
-NS_IMPL_QUERY_INTERFACE(MediaEngineWebRTCVideoSource, nsIRunnable)
-NS_IMPL_ADDREF_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
-NS_IMPL_RELEASE_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
-#endif
-
-// ViEExternalRenderer Callback.
-#ifndef MOZ_B2G_CAMERA
-int
-MediaEngineWebRTCVideoSource::FrameSizeChange(
- unsigned int w, unsigned int h, unsigned int streams)
-{
- mWidth = w;
- mHeight = h;
- LOG(("Video FrameSizeChange: %ux%u", w, h));
- return 0;
-}
-
-// ViEExternalRenderer Callback. Process every incoming frame here.
-int
-MediaEngineWebRTCVideoSource::DeliverFrame(
- unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time,
- void *handle)
-{
- // mInSnapshotMode can only be set before the camera is turned on and
- // the renderer is started, so this amounts to a 1-shot
- if (mInSnapshotMode) {
- // Set the condition variable to false and notify Snapshot().
- MonitorAutoLock lock(mMonitor);
- mInSnapshotMode = false;
- lock.Notify();
- return 0;
- }
-
- // Check for proper state.
- if (mState != kStarted) {
- LOG(("DeliverFrame: video not started"));
- return 0;
- }
-
- if (mWidth*mHeight + 2*(((mWidth+1)/2)*((mHeight+1)/2)) != size) {
- MOZ_ASSERT(false, "Wrong size frame in DeliverFrame!");
- return 0;
- }
-
- // Create a video frame and append it to the track.
- nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
-
- layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
-
- uint8_t* frame = static_cast<uint8_t*> (buffer);
- const uint8_t lumaBpp = 8;
- const uint8_t chromaBpp = 4;
-
- // Take lots of care to round up!
- layers::PlanarYCbCrData data;
- data.mYChannel = frame;
- data.mYSize = IntSize(mWidth, mHeight);
- data.mYStride = (mWidth * lumaBpp + 7)/ 8;
- data.mCbCrStride = (mWidth * chromaBpp + 7) / 8;
- data.mCbChannel = frame + mHeight * data.mYStride;
- data.mCrChannel = data.mCbChannel + ((mHeight+1)/2) * data.mCbCrStride;
- data.mCbCrSize = IntSize((mWidth+1)/ 2, (mHeight+1)/ 2);
- data.mPicX = 0;
- data.mPicY = 0;
- data.mPicSize = IntSize(mWidth, mHeight);
- data.mStereoMode = StereoMode::MONO;
-
- videoImage->SetData(data);
-
-#ifdef DEBUG
- static uint32_t frame_num = 0;
- LOGFRAME(("frame %d (%dx%d); timestamp %u, render_time %lu", frame_num++,
- mWidth, mHeight, time_stamp, render_time));
-#endif
-
- // we don't touch anything in 'this' until here (except for snapshot,
- // which has it's own lock)
- MonitorAutoLock lock(mMonitor);
-
- // implicitly releases last image
- mImage = image.forget();
-
- return 0;
-}
-#endif
+// We are subclassed from CameraControlListener, which implements a
+// threadsafe reference-count for us.
+NS_IMPL_QUERY_INTERFACE(MediaEngineGonkVideoSource, nsISupports)
+NS_IMPL_ADDREF_INHERITED(MediaEngineGonkVideoSource, CameraControlListener)
+NS_IMPL_RELEASE_INHERITED(MediaEngineGonkVideoSource, CameraControlListener)
// Called if the graph thinks it's running out of buffered video; repeat
-// the last frame for whatever minimum period it think it needs. Note that
+// the last frame for whatever minimum period it thinks it needs.  Note that
// this means that no *real* frame can be inserted during this period.
void
-MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
- SourceMediaStream *aSource,
- TrackID aID,
- StreamTime aDesiredTime,
- TrackTicks &aLastEndTime)
+MediaEngineGonkVideoSource::NotifyPull(MediaStreamGraph* aGraph,
+ SourceMediaStream* aSource,
+ TrackID aID,
+ StreamTime aDesiredTime,
+ TrackTicks& aLastEndTime)
{
VideoSegment segment;
MonitorAutoLock lock(mMonitor);
// B2G does AddTrack, but holds kStarted until the hardware changes state.
- // So mState could be kReleased here. We really don't care about the state,
+ // So mState could be kReleased here. We really don't care about the state,
// though.
// Note: we're not giving up mImage here
nsRefPtr<layers::Image> image = mImage;
TrackTicks target = aSource->TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
TrackTicks delta = target - aLastEndTime;
LOGFRAME(("NotifyPull, desired = %ld, target = %ld, delta = %ld %s", (int64_t) aDesiredTime,
(int64_t) target, (int64_t) delta, image ? "" : "<null>"));
@@ -167,589 +75,196 @@ MediaEngineWebRTCVideoSource::NotifyPull
// This can fail if either a) we haven't added the track yet, or b)
// we've removed or finished the track.
if (aSource->AppendToTrack(aID, &(segment))) {
aLastEndTime = target;
}
}
}
-static bool IsWithin(int32_t n, const ConstrainLongRange& aRange) {
- return aRange.mMin <= n && n <= aRange.mMax;
-}
-
-static bool IsWithin(double n, const ConstrainDoubleRange& aRange) {
- return aRange.mMin <= n && n <= aRange.mMax;
-}
-
-static int32_t Clamp(int32_t n, const ConstrainLongRange& aRange) {
- return std::max(aRange.mMin, std::min(n, aRange.mMax));
-}
-
-static bool
-AreIntersecting(const ConstrainLongRange& aA, const ConstrainLongRange& aB) {
- return aA.mMax >= aB.mMin && aA.mMin <= aB.mMax;
-}
-
-static bool
-Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
- MOZ_ASSERT(AreIntersecting(aA, aB));
- aA.mMin = std::max(aA.mMin, aB.mMin);
- aA.mMax = std::min(aA.mMax, aB.mMax);
- return true;
-}
-
-static bool SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
- const webrtc::CaptureCapability& aCandidate) {
- if (!IsWithin(aCandidate.width, aConstraints.mWidth) ||
- !IsWithin(aCandidate.height, aConstraints.mHeight)) {
- return false;
- }
- if (!IsWithin(aCandidate.maxFPS, aConstraints.mFrameRate)) {
- return false;
- }
- return true;
-}
-
void
-MediaEngineWebRTCVideoSource::ChooseCapability(
- const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs)
+MediaEngineGonkVideoSource::ChooseCapability(const VideoTrackConstraintsN& aConstraints,
+ const MediaEnginePrefs& aPrefs)
{
-#ifdef MOZ_B2G_CAMERA
return GuessCapability(aConstraints, aPrefs);
-#else
- NS_ConvertUTF16toUTF8 uniqueId(mUniqueId);
- int num = mViECapture->NumberOfCapabilities(uniqueId.get(), kMaxUniqueIdLength);
- if (num <= 0) {
- // Mac doesn't support capabilities.
- return GuessCapability(aConstraints, aPrefs);
- }
-
- // The rest is the full algorithm for cameras that can list their capabilities.
-
- LOG(("ChooseCapability: prefs: %dx%d @%d-%dfps",
- aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
-
- typedef nsTArray<uint8_t> SourceSet;
-
- SourceSet candidateSet;
- for (int i = 0; i < num; i++) {
- candidateSet.AppendElement(i);
- }
-
- // Pick among capabilities: First apply required constraints.
-
- for (uint32_t i = 0; i < candidateSet.Length();) {
- webrtc::CaptureCapability cap;
- mViECapture->GetCaptureCapability(uniqueId.get(), kMaxUniqueIdLength,
- candidateSet[i], cap);
- if (!SatisfyConstraintSet(aConstraints.mRequired, cap)) {
- candidateSet.RemoveElementAt(i);
- } else {
- ++i;
- }
- }
-
- SourceSet tailSet;
-
- // Then apply advanced (formerly known as optional) constraints.
-
- if (aConstraints.mAdvanced.WasPassed()) {
- auto &array = aConstraints.mAdvanced.Value();
-
- for (uint32_t i = 0; i < array.Length(); i++) {
- SourceSet rejects;
- for (uint32_t j = 0; j < candidateSet.Length();) {
- webrtc::CaptureCapability cap;
- mViECapture->GetCaptureCapability(uniqueId.get(), kMaxUniqueIdLength,
- candidateSet[j], cap);
- if (!SatisfyConstraintSet(array[i], cap)) {
- rejects.AppendElement(candidateSet[j]);
- candidateSet.RemoveElementAt(j);
- } else {
- ++j;
- }
- }
- (candidateSet.Length()? tailSet : candidateSet).MoveElementsFrom(rejects);
- }
- }
-
- if (!candidateSet.Length()) {
- candidateSet.AppendElement(0);
- }
-
- int prefWidth = aPrefs.GetWidth();
- int prefHeight = aPrefs.GetHeight();
-
- // Default is closest to available capability but equal to or below;
- // otherwise closest above. Since we handle the num=0 case above and
- // take the first entry always, we can never exit uninitialized.
-
- webrtc::CaptureCapability cap;
- bool higher = true;
- for (uint32_t i = 0; i < candidateSet.Length(); i++) {
- mViECapture->GetCaptureCapability(NS_ConvertUTF16toUTF8(mUniqueId).get(),
- kMaxUniqueIdLength, candidateSet[i], cap);
- if (higher) {
- if (i == 0 ||
- (mCapability.width > cap.width && mCapability.height > cap.height)) {
- // closer than the current choice
- mCapability = cap;
- // FIXME: expose expected capture delay?
- }
- if (cap.width <= (uint32_t) prefWidth && cap.height <= (uint32_t) prefHeight) {
- higher = false;
- }
- } else {
- if (cap.width > (uint32_t) prefWidth || cap.height > (uint32_t) prefHeight ||
- cap.maxFPS < (uint32_t) aPrefs.mMinFPS) {
- continue;
- }
- if (mCapability.width < cap.width && mCapability.height < cap.height) {
- mCapability = cap;
- // FIXME: expose expected capture delay?
- }
- }
- // Same resolution, maybe better format or FPS match
- if (mCapability.width == cap.width && mCapability.height == cap.height) {
- // FPS too low
- if (cap.maxFPS < (uint32_t) aPrefs.mMinFPS) {
- continue;
- }
- // Better match
- if (cap.maxFPS < mCapability.maxFPS) {
- mCapability = cap;
- } else if (cap.maxFPS == mCapability.maxFPS) {
- // Resolution and FPS the same, check format
- if (cap.rawType == webrtc::RawVideoType::kVideoI420
- || cap.rawType == webrtc::RawVideoType::kVideoYUY2
- || cap.rawType == webrtc::RawVideoType::kVideoYV12) {
- mCapability = cap;
- }
- }
- }
- }
- LOG(("chose cap %dx%d @%dfps codec %d raw %d",
- mCapability.width, mCapability.height, mCapability.maxFPS,
- mCapability.codecType, mCapability.rawType));
-#endif
-}
-
-// A special version of the algorithm for cameras that don't list capabilities.
-
-void
-MediaEngineWebRTCVideoSource::GuessCapability(
- const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs)
-{
- LOG(("GuessCapability: prefs: %dx%d @%d-%dfps",
- aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
-
- // In short: compound constraint-ranges and use pref as ideal.
-
- ConstrainLongRange cWidth(aConstraints.mRequired.mWidth);
- ConstrainLongRange cHeight(aConstraints.mRequired.mHeight);
-
- if (aConstraints.mAdvanced.WasPassed()) {
- const auto& advanced = aConstraints.mAdvanced.Value();
- for (uint32_t i = 0; i < advanced.Length(); i++) {
- if (AreIntersecting(cWidth, advanced[i].mWidth) &&
- AreIntersecting(cHeight, advanced[i].mHeight)) {
- Intersect(cWidth, advanced[i].mWidth);
- Intersect(cHeight, advanced[i].mHeight);
- }
- }
- }
- // Detect Mac HD cams and give them some love in the form of a dynamic default
- // since that hardware switches between 4:3 at low res and 16:9 at higher res.
- //
- // Logic is: if we're relying on defaults in aPrefs, then
- // only use HD pref when non-HD pref is too small and HD pref isn't too big.
-
- bool macHD = ((!aPrefs.mWidth || !aPrefs.mHeight) &&
- mDeviceName.EqualsASCII("FaceTime HD Camera (Built-in)") &&
- (aPrefs.GetWidth() < cWidth.mMin ||
- aPrefs.GetHeight() < cHeight.mMin) &&
- !(aPrefs.GetWidth(true) > cWidth.mMax ||
- aPrefs.GetHeight(true) > cHeight.mMax));
- int prefWidth = aPrefs.GetWidth(macHD);
- int prefHeight = aPrefs.GetHeight(macHD);
-
- // Clamp width and height without distorting inherent aspect too much.
-
- if (IsWithin(prefWidth, cWidth) == IsWithin(prefHeight, cHeight)) {
- // If both are within, we get the default (pref) aspect.
- // If neither are within, we get the aspect of the enclosing constraint.
- // Either are presumably reasonable (presuming constraints are sane).
- mCapability.width = Clamp(prefWidth, cWidth);
- mCapability.height = Clamp(prefHeight, cHeight);
- } else {
- // But if only one clips (e.g. width), the resulting skew is undesirable:
- // .------------.
- // | constraint |
- // .----+------------+----.
- // | | | |
- // |pref| result | | prefAspect != resultAspect
- // | | | |
- // '----+------------+----'
- // '------------'
- // So in this case, preserve prefAspect instead:
- // .------------.
- // | constraint |
- // .------------.
- // |pref | prefAspect is unchanged
- // '------------'
- // | |
- // '------------'
- if (IsWithin(prefWidth, cWidth)) {
- mCapability.height = Clamp(prefHeight, cHeight);
- mCapability.width = Clamp((mCapability.height * prefWidth) /
- prefHeight, cWidth);
- } else {
- mCapability.width = Clamp(prefWidth, cWidth);
- mCapability.height = Clamp((mCapability.width * prefHeight) /
- prefWidth, cHeight);
- }
- }
- mCapability.maxFPS = MediaEngine::DEFAULT_VIDEO_FPS;
- LOG(("chose cap %dx%d @%dfps",
- mCapability.width, mCapability.height, mCapability.maxFPS));
-}
-
-void
-MediaEngineWebRTCVideoSource::GetName(nsAString& aName)
-{
- aName = mDeviceName;
-}
-
-void
-MediaEngineWebRTCVideoSource::GetUUID(nsAString& aUUID)
-{
- aUUID = mUniqueId;
}
nsresult
-MediaEngineWebRTCVideoSource::Allocate(const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs)
+MediaEngineGonkVideoSource::Allocate(const VideoTrackConstraintsN& aConstraints,
+ const MediaEnginePrefs& aPrefs)
{
LOG((__FUNCTION__));
-#ifdef MOZ_B2G_CAMERA
+
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
if (mState == kReleased && mInitDone) {
ChooseCapability(aConstraints, aPrefs);
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::AllocImpl));
+ NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
+ &MediaEngineGonkVideoSource::AllocImpl));
mCallbackMonitor.Wait();
if (mState != kAllocated) {
return NS_ERROR_FAILURE;
}
}
-#else
- if (mState == kReleased && mInitDone) {
- // Note: if shared, we don't allow a later opener to affect the resolution.
- // (This may change depending on spec changes for Constraints/settings)
-
- ChooseCapability(aConstraints, aPrefs);
-
- if (mViECapture->AllocateCaptureDevice(NS_ConvertUTF16toUTF8(mUniqueId).get(),
- kMaxUniqueIdLength, mCaptureIndex)) {
- return NS_ERROR_FAILURE;
- }
- mState = kAllocated;
- LOG(("Video device %d allocated", mCaptureIndex));
- } else if (mSources.IsEmpty()) {
- LOG(("Video device %d reallocated", mCaptureIndex));
- } else {
- LOG(("Video device %d allocated shared", mCaptureIndex));
- }
-#endif
return NS_OK;
}
nsresult
-MediaEngineWebRTCVideoSource::Deallocate()
+MediaEngineGonkVideoSource::Deallocate()
{
LOG((__FUNCTION__));
if (mSources.IsEmpty()) {
-#ifdef MOZ_B2G_CAMERA
+
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
+
if (mState != kStopped && mState != kAllocated) {
return NS_ERROR_FAILURE;
}
-#ifdef MOZ_B2G_CAMERA
+
// We do not register success callback here
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::DeallocImpl));
+ NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
+ &MediaEngineGonkVideoSource::DeallocImpl));
mCallbackMonitor.Wait();
if (mState != kReleased) {
return NS_ERROR_FAILURE;
}
-#elif XP_MACOSX
- // Bug 829907 - on mac, in shutdown, the mainthread stops processing
- // 'native' events, and the QTKit code uses events to the main native CFRunLoop
- // in order to provide thread safety. In order to avoid this locking us up,
- // release the ViE capture device synchronously on MainThread (so the native
- // event isn't needed).
- // XXX Note if MainThread Dispatch()es NS_DISPATCH_SYNC to us we can deadlock.
- // XXX It might be nice to only do this if we're in shutdown... Hard to be
- // sure when that is though.
- // Thread safety: a) we call this synchronously, and don't use ViECapture from
- // another thread anywhere else, b) ViEInputManager::DestroyCaptureDevice() grabs
- // an exclusive object lock and deletes it in a critical section, so all in all
- // this should be safe threadwise.
- NS_DispatchToMainThread(WrapRunnable(mViECapture,
- &webrtc::ViECapture::ReleaseCaptureDevice,
- mCaptureIndex),
- NS_DISPATCH_SYNC);
-#else
- mViECapture->ReleaseCaptureDevice(mCaptureIndex);
-#endif
+
mState = kReleased;
LOG(("Video device %d deallocated", mCaptureIndex));
} else {
LOG(("Video device %d deallocated but still in use", mCaptureIndex));
}
return NS_OK;
}
nsresult
-MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
+MediaEngineGonkVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
{
LOG((__FUNCTION__));
-#ifndef MOZ_B2G_CAMERA
- int error = 0;
-#endif
if (!mInitDone || !aStream) {
return NS_ERROR_FAILURE;
}
mSources.AppendElement(aStream);
aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
-#ifdef MOZ_B2G_CAMERA
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
if (mState == kStarted) {
return NS_OK;
}
mImageContainer = layers::LayerManager::CreateImageContainer();
-#ifdef MOZ_B2G_CAMERA
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::StartImpl,
+ NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
+ &MediaEngineGonkVideoSource::StartImpl,
mCapability));
mCallbackMonitor.Wait();
if (mState != kStarted) {
return NS_ERROR_FAILURE;
}
-#else
- mState = kStarted;
- error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this);
- if (error == -1) {
- return NS_ERROR_FAILURE;
- }
-
- error = mViERender->StartRender(mCaptureIndex);
- if (error == -1) {
- return NS_ERROR_FAILURE;
- }
-
- if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
- return NS_ERROR_FAILURE;
- }
-#endif
return NS_OK;
}
nsresult
-MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
+MediaEngineGonkVideoSource::Stop(SourceMediaStream* aSource, TrackID aID)
{
LOG((__FUNCTION__));
if (!mSources.RemoveElement(aSource)) {
// Already stopped - this is allowed
return NS_OK;
}
if (!mSources.IsEmpty()) {
return NS_OK;
}
-#ifdef MOZ_B2G_CAMERA
+
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
+
if (mState != kStarted) {
return NS_ERROR_FAILURE;
}
{
MonitorAutoLock lock(mMonitor);
mState = kStopped;
aSource->EndTrack(aID);
// Drop any cached image so we don't start with a stale image on next
// usage
mImage = nullptr;
}
-#ifdef MOZ_B2G_CAMERA
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::StopImpl));
-#else
- mViERender->StopRender(mCaptureIndex);
- mViERender->RemoveRenderer(mCaptureIndex);
- mViECapture->StopCapture(mCaptureIndex);
-#endif
+
+ NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
+ &MediaEngineGonkVideoSource::StopImpl));
return NS_OK;
}
-void
-MediaEngineWebRTCVideoSource::SetDirectListeners(bool aHasDirectListeners)
-{
- LOG((__FUNCTION__));
- mHasDirectListeners = aHasDirectListeners;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
-{
- return NS_ERROR_NOT_IMPLEMENTED;
-}
-
/**
- * Initialization and Shutdown functions for the video source, called by the
- * constructor and destructor respectively.
- */
+ * Initialization and Shutdown functions for the video source, called by the
+ * constructor and destructor respectively.
+ */
void
-MediaEngineWebRTCVideoSource::Init()
+MediaEngineGonkVideoSource::Init()
{
-#ifdef MOZ_B2G_CAMERA
nsAutoCString deviceName;
ICameraControl::GetCameraName(mCaptureIndex, deviceName);
CopyUTF8toUTF16(deviceName, mDeviceName);
CopyUTF8toUTF16(deviceName, mUniqueId);
-#else
- // fix compile warning for these being unused. (remove once used)
- (void) mFps;
- (void) mMinFps;
-
- LOG((__FUNCTION__));
- if (mVideoEngine == nullptr) {
- return;
- }
-
- mViEBase = webrtc::ViEBase::GetInterface(mVideoEngine);
- if (mViEBase == nullptr) {
- return;
- }
-
- // Get interfaces for capture, render for now
- mViECapture = webrtc::ViECapture::GetInterface(mVideoEngine);
- mViERender = webrtc::ViERender::GetInterface(mVideoEngine);
-
- if (mViECapture == nullptr || mViERender == nullptr) {
- return;
- }
-
- char deviceName[kMaxDeviceNameLength];
- char uniqueId[kMaxUniqueIdLength];
- if (mViECapture->GetCaptureDevice(mCaptureIndex,
- deviceName, kMaxDeviceNameLength,
- uniqueId, kMaxUniqueIdLength)) {
- return;
- }
-
- CopyUTF8toUTF16(deviceName, mDeviceName);
- CopyUTF8toUTF16(uniqueId, mUniqueId);
-#endif
mInitDone = true;
}
void
-MediaEngineWebRTCVideoSource::Shutdown()
+MediaEngineGonkVideoSource::Shutdown()
{
LOG((__FUNCTION__));
if (!mInitDone) {
return;
}
-#ifdef MOZ_B2G_CAMERA
+
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
+
if (mState == kStarted) {
while (!mSources.IsEmpty()) {
Stop(mSources[0], kVideoTrack); // XXX change to support multiple tracks
}
MOZ_ASSERT(mState == kStopped);
}
if (mState == kAllocated || mState == kStopped) {
Deallocate();
}
-#ifndef MOZ_B2G_CAMERA
- mViECapture->Release();
- mViERender->Release();
- mViEBase->Release();
-#endif
+
mState = kReleased;
mInitDone = false;
}
-void MediaEngineWebRTCVideoSource::Refresh(int aIndex) {
- // NOTE: mCaptureIndex might have changed when allocated!
- // Use aIndex to update information, but don't change mCaptureIndex!!
-#ifdef MOZ_B2G_CAMERA
- // Caller looked up this source by uniqueId; since deviceName == uniqueId nothing else changes
-#else
- // Caller looked up this source by uniqueId, so it shouldn't change
- char deviceName[kMaxDeviceNameLength];
- char uniqueId[kMaxUniqueIdLength];
-
- if (mViECapture->GetCaptureDevice(aIndex,
- deviceName, sizeof(deviceName),
- uniqueId, sizeof(uniqueId))) {
- return;
- }
-
- CopyUTF8toUTF16(deviceName, mDeviceName);
-#ifdef DEBUG
- nsString temp;
- CopyUTF8toUTF16(uniqueId, temp);
- MOZ_ASSERT(temp.Equals(mUniqueId));
-#endif
-#endif
-}
-
-#ifdef MOZ_B2G_CAMERA
-
// All these functions must be run on MainThread!
void
-MediaEngineWebRTCVideoSource::AllocImpl() {
+MediaEngineGonkVideoSource::AllocImpl() {
MOZ_ASSERT(NS_IsMainThread());
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
mCameraControl = ICameraControl::Create(mCaptureIndex);
if (mCameraControl) {
mState = kAllocated;
// Add this as a listener for CameraControl events. We don't need
// to explicitly remove this--destroying the CameraControl object
// in DeallocImpl() will do that for us.
mCameraControl->AddListener(this);
}
-
mCallbackMonitor.Notify();
}
void
-MediaEngineWebRTCVideoSource::DeallocImpl() {
+MediaEngineGonkVideoSource::DeallocImpl() {
MOZ_ASSERT(NS_IsMainThread());
mCameraControl = nullptr;
}
// The same algorithm from bug 840244
static int
GetRotateAmount(ScreenOrientation aScreen, int aCameraMountAngle, bool aBackCamera) {
@@ -770,96 +285,91 @@ GetRotateAmount(ScreenOrientation aScree
default:
MOZ_ASSERT(false);
break;
}
int result;
if (aBackCamera) {
- //back camera
+ // back camera
result = (aCameraMountAngle - screenAngle + 360) % 360;
} else {
- //front camera
+ // front camera
result = (aCameraMountAngle + screenAngle) % 360;
}
return result;
}
// undefine to remove on-the-fly rotation support
#define DYNAMIC_GUM_ROTATION
void
-MediaEngineWebRTCVideoSource::Notify(const hal::ScreenConfiguration& aConfiguration) {
+MediaEngineGonkVideoSource::Notify(const hal::ScreenConfiguration& aConfiguration) {
#ifdef DYNAMIC_GUM_ROTATION
if (mHasDirectListeners) {
// aka hooked to PeerConnection
MonitorAutoLock enter(mMonitor);
mRotation = GetRotateAmount(aConfiguration.orientation(), mCameraAngle, mBackCamera);
LOG(("*** New orientation: %d (Camera %d Back %d MountAngle: %d)",
mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
}
#endif
mOrientationChanged = true;
}
void
-MediaEngineWebRTCVideoSource::StartImpl(webrtc::CaptureCapability aCapability) {
+MediaEngineGonkVideoSource::StartImpl(webrtc::CaptureCapability aCapability) {
MOZ_ASSERT(NS_IsMainThread());
ICameraControl::Configuration config;
config.mMode = ICameraControl::kPictureMode;
config.mPreviewSize.width = aCapability.width;
config.mPreviewSize.height = aCapability.height;
mCameraControl->Start(&config);
mCameraControl->Set(CAMERA_PARAM_PICTURE_SIZE, config.mPreviewSize);
hal::RegisterScreenConfigurationObserver(this);
}
void
-MediaEngineWebRTCVideoSource::StopImpl() {
+MediaEngineGonkVideoSource::StopImpl() {
MOZ_ASSERT(NS_IsMainThread());
hal::UnregisterScreenConfigurationObserver(this);
mCameraControl->Stop();
}
void
-MediaEngineWebRTCVideoSource::SnapshotImpl() {
- MOZ_ASSERT(NS_IsMainThread());
- mCameraControl->TakePicture();
-}
-
-void
-MediaEngineWebRTCVideoSource::OnHardwareStateChange(HardwareState aState)
+MediaEngineGonkVideoSource::OnHardwareStateChange(HardwareState aState)
{
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
if (aState == CameraControlListener::kHardwareClosed) {
// When the first CameraControl listener is added, it gets pushed
// the current state of the camera--normally 'closed'. We only
// pay attention to that state if we've progressed out of the
// allocated state.
if (mState != kAllocated) {
mState = kReleased;
mCallbackMonitor.Notify();
}
} else {
// Can't read this except on MainThread (ugh)
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::GetRotation));
+ NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
+ &MediaEngineGonkVideoSource::GetRotation));
mState = kStarted;
mCallbackMonitor.Notify();
}
}
+
void
-MediaEngineWebRTCVideoSource::GetRotation()
+MediaEngineGonkVideoSource::GetRotation()
{
MOZ_ASSERT(NS_IsMainThread());
MonitorAutoLock enter(mMonitor);
mCameraControl->Get(CAMERA_PARAM_SENSORANGLE, mCameraAngle);
MOZ_ASSERT(mCameraAngle == 0 || mCameraAngle == 90 || mCameraAngle == 180 ||
mCameraAngle == 270);
hal::ScreenConfiguration config;
@@ -872,17 +382,17 @@ MediaEngineWebRTCVideoSource::GetRotatio
}
mRotation = GetRotateAmount(config.orientation(), mCameraAngle, mBackCamera);
LOG(("*** Initial orientation: %d (Camera %d Back %d MountAngle: %d)",
mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
}
void
-MediaEngineWebRTCVideoSource::OnUserError(UserContext aContext, nsresult aError)
+MediaEngineGonkVideoSource::OnUserError(UserContext aContext, nsresult aError)
{
{
// Scope the monitor, since there is another monitor below and we don't want
// unexpected deadlock.
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
mCallbackMonitor.Notify();
}
@@ -916,17 +426,17 @@ MediaEngineWebRTCVideoSource::OnUserErro
MonitorAutoLock lock(mMonitor);
if (mPhotoCallbacks.Length()) {
NS_DispatchToMainThread(new TakePhotoError(mPhotoCallbacks, aError));
}
}
}
void
-MediaEngineWebRTCVideoSource::OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType)
+MediaEngineGonkVideoSource::OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType)
{
// It needs to start preview because Gonk camera will stop preview while
// taking picture.
mCameraControl->StartPreview();
// Create a main thread runnable to generate a blob and call all current queued
// PhotoCallbacks.
class GenerateBlobRunnable : public nsRunnable {
@@ -965,18 +475,80 @@ MediaEngineWebRTCVideoSource::OnTakePict
// PhotoCallbacks in this runnable.
MonitorAutoLock lock(mMonitor);
if (mPhotoCallbacks.Length()) {
NS_DispatchToMainThread(
new GenerateBlobRunnable(mPhotoCallbacks, aData, aLength, aMimeType));
}
}
+nsresult
+MediaEngineGonkVideoSource::TakePhoto(PhotoCallback* aCallback)
+{
+ MOZ_ASSERT(NS_IsMainThread());
+
+ MonitorAutoLock lock(mMonitor);
+
+ // If another callback already exists, a captured picture is on the way,
+ // so there is no need to call TakePicture() again.
+ if (!mPhotoCallbacks.Length()) {
+ nsresult rv;
+ if (mOrientationChanged) {
+ UpdatePhotoOrientation();
+ }
+ rv = mCameraControl->TakePicture();
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ }
+
+ mPhotoCallbacks.AppendElement(aCallback);
+
+ return NS_OK;
+}
+
+nsresult
+MediaEngineGonkVideoSource::UpdatePhotoOrientation()
+{
+ MOZ_ASSERT(NS_IsMainThread());
+
+ hal::ScreenConfiguration config;
+ hal::GetCurrentScreenConfiguration(&config);
+
+ // The rotation angle is clockwise.
+ int orientation = 0;
+ switch (config.orientation()) {
+ case eScreenOrientation_PortraitPrimary:
+ orientation = 0;
+ break;
+ case eScreenOrientation_PortraitSecondary:
+ orientation = 180;
+ break;
+ case eScreenOrientation_LandscapePrimary:
+ orientation = 270;
+ break;
+ case eScreenOrientation_LandscapeSecondary:
+ orientation = 90;
+ break;
+ }
+
+ // The front camera's angle is inverted compared to the back camera's.
+ orientation = (mBackCamera ? orientation : (-orientation));
+
+ ICameraControlParameterSetAutoEnter batch(mCameraControl);
+ // This changes only the orientation value in the EXIF metadata.
+ mCameraControl->Set(CAMERA_PARAM_PICTURE_ROTATION, orientation);
+
+ mOrientationChanged = false;
+
+ return NS_OK;
+}
+
uint32_t
-MediaEngineWebRTCVideoSource::ConvertPixelFormatToFOURCC(int aFormat)
+MediaEngineGonkVideoSource::ConvertPixelFormatToFOURCC(int aFormat)
{
switch (aFormat) {
case HAL_PIXEL_FORMAT_RGBA_8888:
return libyuv::FOURCC_BGRA;
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
return libyuv::FOURCC_NV21;
case HAL_PIXEL_FORMAT_YV12:
return libyuv::FOURCC_YV12;
@@ -984,17 +556,17 @@ MediaEngineWebRTCVideoSource::ConvertPix
LOG((" xxxxx Unknown pixel format %d", aFormat));
MOZ_ASSERT(false, "Unknown pixel format.");
return libyuv::FOURCC_ANY;
}
}
}
void
-MediaEngineWebRTCVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
+MediaEngineGonkVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(aImage);
android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
void *pMem = nullptr;
uint32_t size = aWidth * aHeight * 3 / 2;
graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &pMem);
uint8_t* srcPtr = static_cast<uint8_t*>(pMem);
@@ -1044,17 +616,17 @@ MediaEngineWebRTCVideoSource::RotateImag
videoImage->SetDataNoCopy(data);
// implicitly releases last image
mImage = image.forget();
}
bool
-MediaEngineWebRTCVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
+MediaEngineGonkVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
{
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
if (mState == kStopped) {
return false;
}
}
MonitorAutoLock enter(mMonitor);
@@ -1069,73 +641,9 @@ MediaEngineWebRTCVideoSource::OnNewPrevi
mWidth = aWidth;
mHeight = aHeight;
LOG(("Video FrameSizeChange: %ux%u", mWidth, mHeight));
}
return true; // return true because we're accepting the frame
}
-nsresult
-MediaEngineWebRTCVideoSource::TakePhoto(PhotoCallback* aCallback)
-{
- MOZ_ASSERT(NS_IsMainThread());
-
- MonitorAutoLock lock(mMonitor);
-
- // If other callback exists, that means there is a captured picture on the way,
- // it doesn't need to TakePicture() again.
- if (!mPhotoCallbacks.Length()) {
- nsresult rv;
- if (mOrientationChanged) {
- UpdatePhotoOrientation();
- }
- rv = mCameraControl->TakePicture();
- if (NS_FAILED(rv)) {
- return rv;
- }
- }
-
- mPhotoCallbacks.AppendElement(aCallback);
-
- return NS_OK;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::UpdatePhotoOrientation()
-{
- MOZ_ASSERT(NS_IsMainThread());
-
- hal::ScreenConfiguration config;
- hal::GetCurrentScreenConfiguration(&config);
-
- // The rotation angle is clockwise.
- int orientation = 0;
- switch (config.orientation()) {
- case eScreenOrientation_PortraitPrimary:
- orientation = 0;
- break;
- case eScreenOrientation_PortraitSecondary:
- orientation = 180;
- break;
- case eScreenOrientation_LandscapePrimary:
- orientation = 270;
- break;
- case eScreenOrientation_LandscapeSecondary:
- orientation = 90;
- break;
- }
-
- // Front camera is inverse angle comparing to back camera.
- orientation = (mBackCamera ? orientation : (-orientation));
-
- ICameraControlParameterSetAutoEnter batch(mCameraControl);
- // It changes the orientation value in EXIF information only.
- mCameraControl->Set(CAMERA_PARAM_PICTURE_ROTATION, orientation);
-
- mOrientationChanged = false;
-
- return NS_OK;
-}
-
-#endif
-
-}
+} // namespace mozilla
copy from content/media/webrtc/MediaEngineWebRTC.h
copy to content/media/webrtc/MediaEngineGonkVideoSource.h
--- a/content/media/webrtc/MediaEngineWebRTC.h
+++ b/content/media/webrtc/MediaEngineGonkVideoSource.h
@@ -1,462 +1,114 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
-#ifndef MEDIAENGINEWEBRTC_H_
-#define MEDIAENGINEWEBRTC_H_
-
-#include "prcvar.h"
-#include "prthread.h"
-#include "nsIThread.h"
-#include "nsIRunnable.h"
-
-#include "mozilla/dom/File.h"
-#include "mozilla/Mutex.h"
-#include "mozilla/Monitor.h"
-#include "nsCOMPtr.h"
-#include "nsThreadUtils.h"
-#include "DOMMediaStream.h"
-#include "nsDirectoryServiceDefs.h"
-#include "nsComponentManagerUtils.h"
-#include "nsRefPtrHashtable.h"
-
-#include "VideoUtils.h"
-#include "MediaEngine.h"
-#include "VideoSegment.h"
-#include "AudioSegment.h"
-#include "StreamBuffer.h"
-#include "MediaStreamGraph.h"
+#ifndef MediaEngineGonkVideoSource_h_
+#define MediaEngineGonkVideoSource_h_
-#include "MediaEngineWrapper.h"
-#include "mozilla/dom/MediaStreamTrackBinding.h"
-// WebRTC library includes follow
-#include "webrtc/common.h"
-// Audio Engine
-#include "webrtc/voice_engine/include/voe_base.h"
-#include "webrtc/voice_engine/include/voe_codec.h"
-#include "webrtc/voice_engine/include/voe_hardware.h"
-#include "webrtc/voice_engine/include/voe_network.h"
-#include "webrtc/voice_engine/include/voe_audio_processing.h"
-#include "webrtc/voice_engine/include/voe_volume_control.h"
-#include "webrtc/voice_engine/include/voe_external_media.h"
-#include "webrtc/voice_engine/include/voe_audio_processing.h"
-#include "webrtc/voice_engine/include/voe_call_report.h"
-
-// Video Engine
-// conflicts with #include of scoped_ptr.h
-#undef FF
-#include "webrtc/video_engine/include/vie_base.h"
-#include "webrtc/video_engine/include/vie_codec.h"
-#include "webrtc/video_engine/include/vie_render.h"
-#include "webrtc/video_engine/include/vie_capture.h"
-#ifdef MOZ_B2G_CAMERA
-#include "CameraControlListener.h"
-#include "ICameraControl.h"
-#include "ImageContainer.h"
-#include "nsGlobalWindow.h"
-#include "prprf.h"
-#include "mozilla/Hal.h"
+#ifndef MOZ_B2G_CAMERA
+#error MediaEngineGonkVideoSource is only available when MOZ_B2G_CAMERA is defined.
#endif
-#include "NullTransport.h"
-#include "AudioOutputObserver.h"
+#include "CameraControlListener.h"
+#include "MediaEngineCameraVideoSource.h"
+
+#include "mozilla/Hal.h"
+#include "mozilla/ReentrantMonitor.h"
+#include "mozilla/dom/File.h"
namespace mozilla {
-#ifdef MOZ_B2G_CAMERA
-class CameraAllocateRunnable;
-class GetCameraNameRunnable;
-#endif
-
/**
- * The WebRTC implementation of the MediaEngine interface.
+ * The B2G implementation of the MediaEngine interface.
*
* On B2G platform, member data may accessed from different thread after construction:
*
* MediaThread:
- * mState, mImage, mWidth, mHeight, mCapability, mPrefs, mDeviceName, mUniqueId, mInitDone,
- * mImageContainer, mSources, mState, mImage
+ * mState, mImage, mWidth, mHeight, mCapability, mPrefs, mDeviceName, mUniqueId, mInitDone,
+ * mSources, mImageContainer, mLastCapture.
*
- * MainThread:
- * mCaptureIndex, mLastCapture, mState, mWidth, mHeight,
+ * CameraThread:
+ * mDOMCameraControl, mCaptureIndex, mCameraThread, mWindowId, mCameraManager,
+ * mNativeCameraControl, mPreviewStream, mState, mLastCapture, mWidth, mHeight
*
- * Where mWidth, mHeight, mImage, mPhotoCallbacks are protected by mMonitor
- * mState is protected by mCallbackMonitor
+ * Where mWidth, mHeight, mImage, mPhotoCallbacks, mRotation, mCameraAngle and
+ * mBackCamera are protected by mMonitor (in parent MediaEngineCameraVideoSource)
+ * mState and mLastCapture are protected by mCallbackMonitor
* Other variable is accessed only from single thread
*/
-class MediaEngineWebRTCVideoSource : public MediaEngineVideoSource
- , public nsRunnable
-#ifdef MOZ_B2G_CAMERA
- , public CameraControlListener
- , public mozilla::hal::ScreenConfigurationObserver
-#else
- , public webrtc::ExternalRenderer
-#endif
+class MediaEngineGonkVideoSource : public MediaEngineCameraVideoSource
+ , public mozilla::hal::ScreenConfigurationObserver
+ , public CameraControlListener
{
public:
-#ifdef MOZ_B2G_CAMERA
- MediaEngineWebRTCVideoSource(int aIndex,
- MediaSourceType aMediaSource = MediaSourceType::Camera)
- : mCameraControl(nullptr)
- , mCallbackMonitor("WebRTCCamera.CallbackMonitor")
+ NS_DECL_ISUPPORTS_INHERITED
+
+ MediaEngineGonkVideoSource(int aIndex)
+ : MediaEngineCameraVideoSource(aIndex, "GonkCamera.Monitor")
+ , mCameraControl(nullptr)
+ , mCallbackMonitor("GonkCamera.CallbackMonitor")
, mRotation(0)
, mBackCamera(false)
, mOrientationChanged(true) // Correct the orientation at first time takePhoto.
- , mCaptureIndex(aIndex)
- , mMediaSource(aMediaSource)
- , mMonitor("WebRTCCamera.Monitor")
- , mWidth(0)
- , mHeight(0)
- , mHasDirectListeners(false)
- , mInitDone(false)
- , mInSnapshotMode(false)
- , mSnapshotPath(nullptr)
- {
- mState = kReleased;
- Init();
- }
-#else
- // ViEExternalRenderer.
- virtual int FrameSizeChange(unsigned int, unsigned int, unsigned int);
- virtual int DeliverFrame(unsigned char*,int, uint32_t , int64_t,
- void *handle);
- /**
- * Does DeliverFrame() support a null buffer and non-null handle
- * (video texture)?
- * XXX Investigate! Especially for Android/B2G
- */
- virtual bool IsTextureSupported() { return false; }
+ {
+ Init();
+ }
- MediaEngineWebRTCVideoSource(webrtc::VideoEngine* aVideoEnginePtr, int aIndex,
- MediaSourceType aMediaSource = MediaSourceType::Camera)
- : mVideoEngine(aVideoEnginePtr)
- , mCaptureIndex(aIndex)
- , mFps(-1)
- , mMinFps(-1)
- , mMediaSource(aMediaSource)
- , mMonitor("WebRTCCamera.Monitor")
- , mWidth(0)
- , mHeight(0)
- , mHasDirectListeners(false)
- , mInitDone(false)
- , mInSnapshotMode(false)
- , mSnapshotPath(nullptr) {
- MOZ_ASSERT(aVideoEnginePtr);
- mState = kReleased;
- Init();
- }
-#endif
-
- virtual void GetName(nsAString&);
- virtual void GetUUID(nsAString&);
virtual nsresult Allocate(const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs);
- virtual nsresult Deallocate();
- virtual nsresult Start(SourceMediaStream*, TrackID);
- virtual nsresult Stop(SourceMediaStream*, TrackID);
- virtual void SetDirectListeners(bool aHasListeners);
- virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
- virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
- bool aAgcOn, uint32_t aAGC,
- bool aNoiseOn, uint32_t aNoise,
- int32_t aPlayoutDelay) { return NS_OK; };
+ const MediaEnginePrefs &aPrefs) MOZ_OVERRIDE;
+ virtual nsresult Deallocate() MOZ_OVERRIDE;
+ virtual nsresult Start(SourceMediaStream* aStream, TrackID aID) MOZ_OVERRIDE;
+ virtual nsresult Stop(SourceMediaStream* aSource, TrackID aID) MOZ_OVERRIDE;
virtual void NotifyPull(MediaStreamGraph* aGraph,
- SourceMediaStream *aSource,
+ SourceMediaStream* aSource,
TrackID aId,
StreamTime aDesiredTime,
- TrackTicks &aLastEndTime);
-
- virtual bool IsFake() {
- return false;
- }
-
- virtual const MediaSourceType GetMediaSource() {
- return mMediaSource;
- }
-
-#ifndef MOZ_B2G_CAMERA
- NS_DECL_THREADSAFE_ISUPPORTS
-
- nsresult TakePhoto(PhotoCallback* aCallback)
- {
- return NS_ERROR_NOT_IMPLEMENTED;
- }
-#else
- // We are subclassed from CameraControlListener, which implements a
- // threadsafe reference-count for us.
- NS_DECL_ISUPPORTS_INHERITED
+ TrackTicks& aLastEndTime) MOZ_OVERRIDE;
void OnHardwareStateChange(HardwareState aState);
void GetRotation();
bool OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
void OnUserError(UserContext aContext, nsresult aError);
void OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType);
void AllocImpl();
void DeallocImpl();
void StartImpl(webrtc::CaptureCapability aCapability);
void StopImpl();
- void SnapshotImpl();
+ uint32_t ConvertPixelFormatToFOURCC(int aFormat);
void RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
- uint32_t ConvertPixelFormatToFOURCC(int aFormat);
void Notify(const mozilla::hal::ScreenConfiguration& aConfiguration);
nsresult TakePhoto(PhotoCallback* aCallback) MOZ_OVERRIDE;
// It sets the correct photo orientation via camera parameter according to
// current screen orientation.
nsresult UpdatePhotoOrientation();
-#endif
-
- // This runnable is for creating a temporary file on the main thread.
- NS_IMETHODIMP
- Run()
+protected:
+ ~MediaEngineGonkVideoSource()
{
- nsCOMPtr<nsIFile> tmp;
- nsresult rv = NS_GetSpecialDirectory(NS_OS_TEMP_DIR, getter_AddRefs(tmp));
- NS_ENSURE_SUCCESS(rv, rv);
-
- tmp->Append(NS_LITERAL_STRING("webrtc_snapshot.jpeg"));
- rv = tmp->CreateUnique(nsIFile::NORMAL_FILE_TYPE, 0600);
- NS_ENSURE_SUCCESS(rv, rv);
-
- mSnapshotPath = new nsString();
- rv = tmp->GetPath(*mSnapshotPath);
- NS_ENSURE_SUCCESS(rv, rv);
-
- return NS_OK;
+ Shutdown();
}
-
- void Refresh(int aIndex);
-
-protected:
- ~MediaEngineWebRTCVideoSource() { Shutdown(); }
-
-private:
// Initialize the needed Video engine interfaces.
void Init();
void Shutdown();
+ void ChooseCapability(const VideoTrackConstraintsN& aConstraints,
+ const MediaEnginePrefs& aPrefs);
- // Engine variables.
-#ifdef MOZ_B2G_CAMERA
mozilla::ReentrantMonitor mCallbackMonitor; // Monitor for camera callback handling
// This is only modified on MainThread (AllocImpl and DeallocImpl)
nsRefPtr<ICameraControl> mCameraControl;
nsCOMPtr<nsIDOMFile> mLastCapture;
+
+ // These are protected by mMonitor in parent class
nsTArray<nsRefPtr<PhotoCallback>> mPhotoCallbacks;
-
- // These are protected by mMonitor below
int mRotation;
int mCameraAngle; // See dom/base/ScreenOrientation.h
bool mBackCamera;
bool mOrientationChanged; // True when screen rotates.
-#else
- webrtc::VideoEngine* mVideoEngine; // Weak reference, don't free.
- webrtc::ViEBase* mViEBase;
- webrtc::ViECapture* mViECapture;
- webrtc::ViERender* mViERender;
-#endif
- webrtc::CaptureCapability mCapability; // Doesn't work on OS X.
-
- int mCaptureIndex;
- int mFps; // Track rate (30 fps by default)
- int mMinFps; // Min rate we want to accept
- MediaSourceType mMediaSource; // source of media (camera | application | screen)
-
- // mMonitor protects mImage access/changes, and transitions of mState
- // from kStarted to kStopped (which are combined with EndTrack() and
- // image changes). Note that mSources is not accessed from other threads
- // for video and is not protected.
- Monitor mMonitor; // Monitor for processing WebRTC frames.
- int mWidth, mHeight;
- nsRefPtr<layers::Image> mImage;
- nsRefPtr<layers::ImageContainer> mImageContainer;
- bool mHasDirectListeners;
-
- nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW
-
- bool mInitDone;
- bool mInSnapshotMode;
- nsString* mSnapshotPath;
-
- nsString mDeviceName;
- nsString mUniqueId;
-
- void ChooseCapability(const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs);
-
- void GuessCapability(const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs);
};
-class MediaEngineWebRTCAudioSource : public MediaEngineAudioSource,
- public webrtc::VoEMediaProcess
-{
-public:
- MediaEngineWebRTCAudioSource(nsIThread *aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
- int aIndex, const char* name, const char* uuid)
- : mSamples(0)
- , mVoiceEngine(aVoiceEnginePtr)
- , mMonitor("WebRTCMic.Monitor")
- , mThread(aThread)
- , mCapIndex(aIndex)
- , mChannel(-1)
- , mInitDone(false)
- , mStarted(false)
- , mEchoOn(false), mAgcOn(false), mNoiseOn(false)
- , mEchoCancel(webrtc::kEcDefault)
- , mAGC(webrtc::kAgcDefault)
- , mNoiseSuppress(webrtc::kNsDefault)
- , mPlayoutDelay(0)
- , mNullTransport(nullptr) {
- MOZ_ASSERT(aVoiceEnginePtr);
- mState = kReleased;
- mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
- mDeviceUUID.Assign(NS_ConvertUTF8toUTF16(uuid));
- Init();
- }
-
- virtual void GetName(nsAString&);
- virtual void GetUUID(nsAString&);
-
- virtual nsresult Allocate(const AudioTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs);
- virtual nsresult Deallocate();
- virtual nsresult Start(SourceMediaStream*, TrackID);
- virtual nsresult Stop(SourceMediaStream*, TrackID);
- virtual void SetDirectListeners(bool aHasDirectListeners) {};
- virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
- virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
- bool aAgcOn, uint32_t aAGC,
- bool aNoiseOn, uint32_t aNoise,
- int32_t aPlayoutDelay);
-
- virtual void NotifyPull(MediaStreamGraph* aGraph,
- SourceMediaStream *aSource,
- TrackID aId,
- StreamTime aDesiredTime,
- TrackTicks &aLastEndTime);
-
- virtual bool IsFake() {
- return false;
- }
-
- virtual const MediaSourceType GetMediaSource() {
- return MediaSourceType::Microphone;
- }
-
- virtual nsresult TakePhoto(PhotoCallback* aCallback)
- {
- return NS_ERROR_NOT_IMPLEMENTED;
- }
-
- // VoEMediaProcess.
- void Process(int channel, webrtc::ProcessingTypes type,
- int16_t audio10ms[], int length,
- int samplingFreq, bool isStereo);
-
- NS_DECL_THREADSAFE_ISUPPORTS
-
-protected:
- ~MediaEngineWebRTCAudioSource() { Shutdown(); }
-
- // mSamples is an int to avoid conversions when comparing/etc to
- // samplingFreq & length. Making mSamples protected instead of private is a
- // silly way to avoid -Wunused-private-field warnings when PR_LOGGING is not
- // #defined. mSamples is not actually expected to be used by a derived class.
- int mSamples;
-
-private:
- void Init();
- void Shutdown();
+} // namespace mozilla
- webrtc::VoiceEngine* mVoiceEngine;
- ScopedCustomReleasePtr<webrtc::VoEBase> mVoEBase;
- ScopedCustomReleasePtr<webrtc::VoEExternalMedia> mVoERender;
- ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
- ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
- ScopedCustomReleasePtr<webrtc::VoECallReport> mVoECallReport;
-
- // mMonitor protects mSources[] access/changes, and transitions of mState
- // from kStarted to kStopped (which are combined with EndTrack()).
- // mSources[] is accessed from webrtc threads.
- Monitor mMonitor;
- nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW
- nsCOMPtr<nsIThread> mThread;
- int mCapIndex;
- int mChannel;
- TrackID mTrackID;
- bool mInitDone;
- bool mStarted;
-
- nsString mDeviceName;
- nsString mDeviceUUID;
-
- bool mEchoOn, mAgcOn, mNoiseOn;
- webrtc::EcModes mEchoCancel;
- webrtc::AgcModes mAGC;
- webrtc::NsModes mNoiseSuppress;
- int32_t mPlayoutDelay;
-
- NullTransport *mNullTransport;
-};
-
-class MediaEngineWebRTC : public MediaEngine
-{
-public:
- explicit MediaEngineWebRTC(MediaEnginePrefs &aPrefs);
-
- // Clients should ensure to clean-up sources video/audio sources
- // before invoking Shutdown on this class.
- void Shutdown();
-
- virtual void EnumerateVideoDevices(MediaSourceType,
- nsTArray<nsRefPtr<MediaEngineVideoSource> >*);
- virtual void EnumerateAudioDevices(MediaSourceType,
- nsTArray<nsRefPtr<MediaEngineAudioSource> >*);
-private:
- ~MediaEngineWebRTC() {
- Shutdown();
-#ifdef MOZ_B2G_CAMERA
- AsyncLatencyLogger::Get()->Release();
-#endif
- gFarendObserver = nullptr;
- }
-
- nsCOMPtr<nsIThread> mThread;
-
- Mutex mMutex;
-
- // protected with mMutex:
- webrtc::VideoEngine* mScreenEngine;
- webrtc::VideoEngine* mBrowserEngine;
- webrtc::VideoEngine* mWinEngine;
- webrtc::VideoEngine* mAppEngine;
- webrtc::VideoEngine* mVideoEngine;
- webrtc::VoiceEngine* mVoiceEngine;
-
- // specialized configurations
- webrtc::Config mAppEngineConfig;
- webrtc::Config mWinEngineConfig;
- webrtc::Config mScreenEngineConfig;
- webrtc::Config mBrowserEngineConfig;
-
- // Need this to avoid unneccesary WebRTC calls while enumerating.
- bool mVideoEngineInit;
- bool mAudioEngineInit;
- bool mScreenEngineInit;
- bool mBrowserEngineInit;
- bool mWinEngineInit;
- bool mAppEngineInit;
- bool mHasTabVideoSource;
-
- // Store devices we've already seen in a hashtable for quick return.
- // Maps UUID to MediaEngineSource (one set for audio, one for video).
- nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCVideoSource > mVideoSources;
- nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource > mAudioSources;
-};
-
-}
-
-#endif /* NSMEDIAENGINEWEBRTC_H_ */
+#endif // MediaEngineGonkVideoSource_h_
--- a/content/media/webrtc/MediaEngineTabVideoSource.cpp
+++ b/content/media/webrtc/MediaEngineTabVideoSource.cpp
@@ -184,22 +184,16 @@ MediaEngineTabVideoSource::Start(mozilla
runnable = new StartRunnable(this);
NS_DispatchToMainThread(runnable);
aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
return NS_OK;
}
-nsresult
-MediaEngineTabVideoSource::Snapshot(uint32_t, nsIDOMFile**)
-{
- return NS_OK;
-}
-
void
MediaEngineTabVideoSource::
NotifyPull(MediaStreamGraph*, SourceMediaStream* aSource, mozilla::TrackID aID, mozilla::StreamTime aDesiredTime, mozilla::TrackTicks& aLastEndTime)
{
VideoSegment segment;
MonitorAutoLock mon(mMonitor);
// Note: we're not giving up mImage here
--- a/content/media/webrtc/MediaEngineTabVideoSource.h
+++ b/content/media/webrtc/MediaEngineTabVideoSource.h
@@ -20,17 +20,16 @@ class MediaEngineTabVideoSource : public
virtual void GetName(nsAString_internal&);
virtual void GetUUID(nsAString_internal&);
virtual nsresult Allocate(const VideoTrackConstraintsN &,
const mozilla::MediaEnginePrefs&);
virtual nsresult Deallocate();
virtual nsresult Start(mozilla::SourceMediaStream*, mozilla::TrackID);
virtual void SetDirectListeners(bool aHasDirectListeners) {};
- virtual nsresult Snapshot(uint32_t, nsIDOMFile**);
virtual void NotifyPull(mozilla::MediaStreamGraph*, mozilla::SourceMediaStream*, mozilla::TrackID, mozilla::StreamTime, mozilla::TrackTicks&);
virtual nsresult Stop(mozilla::SourceMediaStream*, mozilla::TrackID);
virtual nsresult Config(bool, uint32_t, bool, uint32_t, bool, uint32_t, int32_t);
virtual bool IsFake();
virtual const MediaSourceType GetMediaSource() {
return MediaSourceType::Browser;
}
--- a/content/media/webrtc/MediaEngineWebRTC.cpp
+++ b/content/media/webrtc/MediaEngineWebRTC.cpp
@@ -26,16 +26,21 @@ GetUserMediaLog()
#include "nsITabSource.h"
#include "MediaTrackConstraints.h"
#ifdef MOZ_WIDGET_ANDROID
#include "AndroidJNIWrapper.h"
#include "AndroidBridge.h"
#endif
+#ifdef MOZ_B2G_CAMERA
+#include "ICameraControl.h"
+#include "MediaEngineGonkVideoSource.h"
+#endif
+
#undef LOG
#define LOG(args) PR_LOG(GetUserMediaLog(), PR_LOG_DEBUG, args)
namespace mozilla {
MediaEngineWebRTC::MediaEngineWebRTC(MediaEnginePrefs &aPrefs)
: mMutex("mozilla::MediaEngineWebRTC")
, mScreenEngine(nullptr)
@@ -68,17 +73,17 @@ MediaEngineWebRTC::MediaEngineWebRTC(Med
void
MediaEngineWebRTC::EnumerateVideoDevices(MediaSourceType aMediaSource,
nsTArray<nsRefPtr<MediaEngineVideoSource> >* aVSources)
{
// We spawn threads to handle gUM runnables, so we must protect the member vars
MutexAutoLock lock(mMutex);
- #ifdef MOZ_B2G_CAMERA
+#ifdef MOZ_B2G_CAMERA
if (aMediaSource != MediaSourceType::Camera) {
// only supports camera sources
return;
}
/**
* We still enumerate every time, in case a new device was plugged in since
* the last call. TODO: Verify that WebRTC actually does deal with hotplugging
@@ -96,23 +101,23 @@ MediaEngineWebRTC::EnumerateVideoDevices
for (int i = 0; i < num; i++) {
nsCString cameraName;
result = ICameraControl::GetCameraName(i, cameraName);
if (result != NS_OK) {
continue;
}
- nsRefPtr<MediaEngineWebRTCVideoSource> vSource;
+ nsRefPtr<MediaEngineVideoSource> vSource;
NS_ConvertUTF8toUTF16 uuid(cameraName);
if (mVideoSources.Get(uuid, getter_AddRefs(vSource))) {
// We've already seen this device, just append.
aVSources->AppendElement(vSource.get());
} else {
- vSource = new MediaEngineWebRTCVideoSource(i, aMediaSource);
+ vSource = new MediaEngineGonkVideoSource(i);
mVideoSources.Put(uuid, vSource); // Hashtable takes ownership.
aVSources->AppendElement(vSource);
}
}
return;
#else
ScopedCustomReleasePtr<webrtc::ViEBase> ptrViEBase;
@@ -251,21 +256,21 @@ MediaEngineWebRTC::EnumerateVideoDevices
#endif
if (uniqueId[0] == '\0') {
// In case a device doesn't set uniqueId!
strncpy(uniqueId, deviceName, sizeof(uniqueId));
uniqueId[sizeof(uniqueId)-1] = '\0'; // strncpy isn't safe
}
- nsRefPtr<MediaEngineWebRTCVideoSource> vSource;
+ nsRefPtr<MediaEngineVideoSource> vSource;
NS_ConvertUTF8toUTF16 uuid(uniqueId);
if (mVideoSources.Get(uuid, getter_AddRefs(vSource))) {
// We've already seen this device, just refresh and append.
- vSource->Refresh(i);
+ static_cast<MediaEngineWebRTCVideoSource*>(vSource.get())->Refresh(i);
aVSources->AppendElement(vSource.get());
} else {
vSource = new MediaEngineWebRTCVideoSource(videoEngine, i, aMediaSource);
mVideoSources.Put(uuid, vSource); // Hashtable takes ownership.
aVSources->AppendElement(vSource);
}
}
--- a/content/media/webrtc/MediaEngineWebRTC.h
+++ b/content/media/webrtc/MediaEngineWebRTC.h
@@ -16,17 +16,17 @@
#include "nsCOMPtr.h"
#include "nsThreadUtils.h"
#include "DOMMediaStream.h"
#include "nsDirectoryServiceDefs.h"
#include "nsComponentManagerUtils.h"
#include "nsRefPtrHashtable.h"
#include "VideoUtils.h"
-#include "MediaEngine.h"
+#include "MediaEngineCameraVideoSource.h"
#include "VideoSegment.h"
#include "AudioSegment.h"
#include "StreamBuffer.h"
#include "MediaStreamGraph.h"
#include "MediaEngineWrapper.h"
#include "mozilla/dom/MediaStreamTrackBinding.h"
// WebRTC library includes follow
@@ -44,304 +44,147 @@
// Video Engine
// conflicts with #include of scoped_ptr.h
#undef FF
#include "webrtc/video_engine/include/vie_base.h"
#include "webrtc/video_engine/include/vie_codec.h"
#include "webrtc/video_engine/include/vie_render.h"
#include "webrtc/video_engine/include/vie_capture.h"
-#ifdef MOZ_B2G_CAMERA
-#include "CameraControlListener.h"
-#include "ICameraControl.h"
-#include "ImageContainer.h"
-#include "nsGlobalWindow.h"
-#include "prprf.h"
-#include "mozilla/Hal.h"
-#endif
#include "NullTransport.h"
#include "AudioOutputObserver.h"
namespace mozilla {
-#ifdef MOZ_B2G_CAMERA
-class CameraAllocateRunnable;
-class GetCameraNameRunnable;
-#endif
-
/**
* The WebRTC implementation of the MediaEngine interface.
- *
- * On B2G platform, member data may accessed from different thread after construction:
- *
- * MediaThread:
- * mState, mImage, mWidth, mHeight, mCapability, mPrefs, mDeviceName, mUniqueId, mInitDone,
- * mImageContainer, mSources, mState, mImage
- *
- * MainThread:
- * mCaptureIndex, mLastCapture, mState, mWidth, mHeight,
- *
- * Where mWidth, mHeight, mImage, mPhotoCallbacks are protected by mMonitor
- * mState is protected by mCallbackMonitor
- * Other variable is accessed only from single thread
*/
-class MediaEngineWebRTCVideoSource : public MediaEngineVideoSource
- , public nsRunnable
-#ifdef MOZ_B2G_CAMERA
- , public CameraControlListener
- , public mozilla::hal::ScreenConfigurationObserver
-#else
+class MediaEngineWebRTCVideoSource : public MediaEngineCameraVideoSource
, public webrtc::ExternalRenderer
-#endif
{
public:
-#ifdef MOZ_B2G_CAMERA
- MediaEngineWebRTCVideoSource(int aIndex,
- MediaSourceType aMediaSource = MediaSourceType::Camera)
- : mCameraControl(nullptr)
- , mCallbackMonitor("WebRTCCamera.CallbackMonitor")
- , mRotation(0)
- , mBackCamera(false)
- , mOrientationChanged(true) // Correct the orientation at first time takePhoto.
- , mCaptureIndex(aIndex)
- , mMediaSource(aMediaSource)
- , mMonitor("WebRTCCamera.Monitor")
- , mWidth(0)
- , mHeight(0)
- , mHasDirectListeners(false)
- , mInitDone(false)
- , mInSnapshotMode(false)
- , mSnapshotPath(nullptr)
- {
- mState = kReleased;
- Init();
- }
-#else
+ NS_DECL_THREADSAFE_ISUPPORTS
+
// ViEExternalRenderer.
- virtual int FrameSizeChange(unsigned int, unsigned int, unsigned int);
- virtual int DeliverFrame(unsigned char*,int, uint32_t , int64_t,
+ virtual int FrameSizeChange(unsigned int w, unsigned int h, unsigned int streams);
+ virtual int DeliverFrame(unsigned char* buffer,
+ int size,
+ uint32_t time_stamp,
+ int64_t render_time,
void *handle);
/**
* Does DeliverFrame() support a null buffer and non-null handle
* (video texture)?
* XXX Investigate! Especially for Android/B2G
*/
virtual bool IsTextureSupported() { return false; }
MediaEngineWebRTCVideoSource(webrtc::VideoEngine* aVideoEnginePtr, int aIndex,
MediaSourceType aMediaSource = MediaSourceType::Camera)
- : mVideoEngine(aVideoEnginePtr)
- , mCaptureIndex(aIndex)
- , mFps(-1)
+ : MediaEngineCameraVideoSource(aIndex, "WebRTCCamera.Monitor")
+ , mVideoEngine(aVideoEnginePtr)
, mMinFps(-1)
, mMediaSource(aMediaSource)
- , mMonitor("WebRTCCamera.Monitor")
- , mWidth(0)
- , mHeight(0)
- , mHasDirectListeners(false)
- , mInitDone(false)
- , mInSnapshotMode(false)
- , mSnapshotPath(nullptr) {
+ {
MOZ_ASSERT(aVideoEnginePtr);
- mState = kReleased;
Init();
}
-#endif
- virtual void GetName(nsAString&);
- virtual void GetUUID(nsAString&);
- virtual nsresult Allocate(const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs);
+ virtual nsresult Allocate(const VideoTrackConstraintsN& aConstraints,
+ const MediaEnginePrefs& aPrefs);
virtual nsresult Deallocate();
virtual nsresult Start(SourceMediaStream*, TrackID);
virtual nsresult Stop(SourceMediaStream*, TrackID);
- virtual void SetDirectListeners(bool aHasListeners);
- virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
- virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
- bool aAgcOn, uint32_t aAGC,
- bool aNoiseOn, uint32_t aNoise,
- int32_t aPlayoutDelay) { return NS_OK; };
virtual void NotifyPull(MediaStreamGraph* aGraph,
- SourceMediaStream *aSource,
+ SourceMediaStream* aSource,
TrackID aId,
StreamTime aDesiredTime,
- TrackTicks &aLastEndTime);
-
- virtual bool IsFake() {
- return false;
- }
+ TrackTicks& aLastEndTime);
virtual const MediaSourceType GetMediaSource() {
return mMediaSource;
}
-
-#ifndef MOZ_B2G_CAMERA
- NS_DECL_THREADSAFE_ISUPPORTS
-
- nsresult TakePhoto(PhotoCallback* aCallback)
+ virtual nsresult TakePhoto(PhotoCallback* aCallback)
{
return NS_ERROR_NOT_IMPLEMENTED;
}
-#else
- // We are subclassed from CameraControlListener, which implements a
- // threadsafe reference-count for us.
- NS_DECL_ISUPPORTS_INHERITED
-
- void OnHardwareStateChange(HardwareState aState);
- void GetRotation();
- bool OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
- void OnUserError(UserContext aContext, nsresult aError);
- void OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType);
-
- void AllocImpl();
- void DeallocImpl();
- void StartImpl(webrtc::CaptureCapability aCapability);
- void StopImpl();
- void SnapshotImpl();
- void RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
- uint32_t ConvertPixelFormatToFOURCC(int aFormat);
- void Notify(const mozilla::hal::ScreenConfiguration& aConfiguration);
-
- nsresult TakePhoto(PhotoCallback* aCallback) MOZ_OVERRIDE;
-
- // It sets the correct photo orientation via camera parameter according to
- // current screen orientation.
- nsresult UpdatePhotoOrientation();
-
-#endif
-
- // This runnable is for creating a temporary file on the main thread.
- NS_IMETHODIMP
- Run()
- {
- nsCOMPtr<nsIFile> tmp;
- nsresult rv = NS_GetSpecialDirectory(NS_OS_TEMP_DIR, getter_AddRefs(tmp));
- NS_ENSURE_SUCCESS(rv, rv);
-
- tmp->Append(NS_LITERAL_STRING("webrtc_snapshot.jpeg"));
- rv = tmp->CreateUnique(nsIFile::NORMAL_FILE_TYPE, 0600);
- NS_ENSURE_SUCCESS(rv, rv);
-
- mSnapshotPath = new nsString();
- rv = tmp->GetPath(*mSnapshotPath);
- NS_ENSURE_SUCCESS(rv, rv);
-
- return NS_OK;
- }
void Refresh(int aIndex);
protected:
~MediaEngineWebRTCVideoSource() { Shutdown(); }
private:
// Initialize the needed Video engine interfaces.
void Init();
void Shutdown();
// Engine variables.
-#ifdef MOZ_B2G_CAMERA
- mozilla::ReentrantMonitor mCallbackMonitor; // Monitor for camera callback handling
- // This is only modified on MainThread (AllocImpl and DeallocImpl)
- nsRefPtr<ICameraControl> mCameraControl;
- nsCOMPtr<nsIDOMFile> mLastCapture;
- nsTArray<nsRefPtr<PhotoCallback>> mPhotoCallbacks;
-
- // These are protected by mMonitor below
- int mRotation;
- int mCameraAngle; // See dom/base/ScreenOrientation.h
- bool mBackCamera;
- bool mOrientationChanged; // True when screen rotates.
-#else
webrtc::VideoEngine* mVideoEngine; // Weak reference, don't free.
webrtc::ViEBase* mViEBase;
webrtc::ViECapture* mViECapture;
webrtc::ViERender* mViERender;
-#endif
webrtc::CaptureCapability mCapability; // Doesn't work on OS X.
- int mCaptureIndex;
- int mFps; // Track rate (30 fps by default)
int mMinFps; // Min rate we want to accept
MediaSourceType mMediaSource; // source of media (camera | application | screen)
- // mMonitor protects mImage access/changes, and transitions of mState
- // from kStarted to kStopped (which are combined with EndTrack() and
- // image changes). Note that mSources is not accessed from other threads
- // for video and is not protected.
- Monitor mMonitor; // Monitor for processing WebRTC frames.
- int mWidth, mHeight;
- nsRefPtr<layers::Image> mImage;
- nsRefPtr<layers::ImageContainer> mImageContainer;
- bool mHasDirectListeners;
-
- nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW
-
- bool mInitDone;
- bool mInSnapshotMode;
- nsString* mSnapshotPath;
-
- nsString mDeviceName;
- nsString mUniqueId;
-
- void ChooseCapability(const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs);
-
- void GuessCapability(const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs);
+ static bool SatisfyConstraintSet(const dom::MediaTrackConstraintSet& aConstraints,
+ const webrtc::CaptureCapability& aCandidate);
+ void ChooseCapability(const VideoTrackConstraintsN& aConstraints,
+ const MediaEnginePrefs& aPrefs);
};
class MediaEngineWebRTCAudioSource : public MediaEngineAudioSource,
public webrtc::VoEMediaProcess
{
public:
- MediaEngineWebRTCAudioSource(nsIThread *aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
+ MediaEngineWebRTCAudioSource(nsIThread* aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
int aIndex, const char* name, const char* uuid)
- : mSamples(0)
+ : MediaEngineAudioSource(kReleased)
+ , mSamples(0)
, mVoiceEngine(aVoiceEnginePtr)
, mMonitor("WebRTCMic.Monitor")
, mThread(aThread)
, mCapIndex(aIndex)
, mChannel(-1)
, mInitDone(false)
, mStarted(false)
, mEchoOn(false), mAgcOn(false), mNoiseOn(false)
, mEchoCancel(webrtc::kEcDefault)
, mAGC(webrtc::kAgcDefault)
, mNoiseSuppress(webrtc::kNsDefault)
, mPlayoutDelay(0)
, mNullTransport(nullptr) {
MOZ_ASSERT(aVoiceEnginePtr);
- mState = kReleased;
mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
mDeviceUUID.Assign(NS_ConvertUTF8toUTF16(uuid));
Init();
}
- virtual void GetName(nsAString&);
- virtual void GetUUID(nsAString&);
+ virtual void GetName(nsAString& aName);
+ virtual void GetUUID(nsAString& aUUID);
- virtual nsresult Allocate(const AudioTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs);
+ virtual nsresult Allocate(const AudioTrackConstraintsN& aConstraints,
+ const MediaEnginePrefs& aPrefs);
virtual nsresult Deallocate();
- virtual nsresult Start(SourceMediaStream*, TrackID);
- virtual nsresult Stop(SourceMediaStream*, TrackID);
+ virtual nsresult Start(SourceMediaStream* aStream, TrackID aID);
+ virtual nsresult Stop(SourceMediaStream* aSource, TrackID aID);
virtual void SetDirectListeners(bool aHasDirectListeners) {};
- virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
bool aAgcOn, uint32_t aAGC,
bool aNoiseOn, uint32_t aNoise,
int32_t aPlayoutDelay);
virtual void NotifyPull(MediaStreamGraph* aGraph,
- SourceMediaStream *aSource,
+ SourceMediaStream* aSource,
TrackID aId,
StreamTime aDesiredTime,
- TrackTicks &aLastEndTime);
+ TrackTicks& aLastEndTime);
virtual bool IsFake() {
return false;
}
virtual const MediaSourceType GetMediaSource() {
return MediaSourceType::Microphone;
}
@@ -377,17 +220,17 @@ private:
ScopedCustomReleasePtr<webrtc::VoENetwork> mVoENetwork;
ScopedCustomReleasePtr<webrtc::VoEAudioProcessing> mVoEProcessing;
ScopedCustomReleasePtr<webrtc::VoECallReport> mVoECallReport;
// mMonitor protects mSources[] access/changes, and transitions of mState
// from kStarted to kStopped (which are combined with EndTrack()).
// mSources[] is accessed from webrtc threads.
Monitor mMonitor;
- nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW
+ nsTArray<SourceMediaStream*> mSources; // When this goes empty, we shut down HW
nsCOMPtr<nsIThread> mThread;
int mCapIndex;
int mChannel;
TrackID mTrackID;
bool mInitDone;
bool mStarted;
nsString mDeviceName;
@@ -400,17 +243,17 @@ private:
int32_t mPlayoutDelay;
NullTransport *mNullTransport;
};
class MediaEngineWebRTC : public MediaEngine
{
public:
- explicit MediaEngineWebRTC(MediaEnginePrefs &aPrefs);
+ explicit MediaEngineWebRTC(MediaEnginePrefs& aPrefs);
// Clients should ensure to clean-up sources video/audio sources
// before invoking Shutdown on this class.
void Shutdown();
virtual void EnumerateVideoDevices(MediaSourceType,
nsTArray<nsRefPtr<MediaEngineVideoSource> >*);
virtual void EnumerateAudioDevices(MediaSourceType,
@@ -448,15 +291,15 @@ private:
bool mScreenEngineInit;
bool mBrowserEngineInit;
bool mWinEngineInit;
bool mAppEngineInit;
bool mHasTabVideoSource;
// Store devices we've already seen in a hashtable for quick return.
// Maps UUID to MediaEngineSource (one set for audio, one for video).
- nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCVideoSource > mVideoSources;
- nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource > mAudioSources;
+ nsRefPtrHashtable<nsStringHashKey, MediaEngineVideoSource> mVideoSources;
+ nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource> mAudioSources;
};
}
#endif /* NSMEDIAENGINEWEBRTC_H_ */
--- a/content/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -401,22 +401,16 @@ MediaEngineWebRTCAudioSource::NotifyPull
#ifdef DEBUG
TrackTicks target = aSource->TimeToTicksRoundUp(SAMPLE_FREQUENCY, aDesiredTime);
TrackTicks delta = target - aLastEndTime;
LOG(("Audio: NotifyPull: aDesiredTime %ld, target %ld, delta %ld",(int64_t) aDesiredTime, (int64_t) target, (int64_t) delta));
aLastEndTime = target;
#endif
}
-nsresult
-MediaEngineWebRTCAudioSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
-{
- return NS_ERROR_NOT_IMPLEMENTED;
-}
-
void
MediaEngineWebRTCAudioSource::Init()
{
mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
mVoEBase->Init();
mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
--- a/content/media/webrtc/MediaEngineWebRTCVideo.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCVideo.cpp
@@ -6,23 +6,16 @@
#include "Layers.h"
#include "ImageTypes.h"
#include "ImageContainer.h"
#include "mozilla/layers/GrallocTextureClient.h"
#include "nsMemory.h"
#include "mtransport/runnable_utils.h"
#include "MediaTrackConstraints.h"
-#ifdef MOZ_B2G_CAMERA
-#include "GrallocImages.h"
-#include "libyuv.h"
-#include "mozilla/Hal.h"
-#include "ScreenOrientation.h"
-using namespace mozilla::dom;
-#endif
namespace mozilla {
using namespace mozilla::gfx;
using dom::ConstrainLongRange;
using dom::ConstrainDoubleRange;
using dom::MediaTrackConstraintSet;
#ifdef PR_LOGGING
@@ -32,52 +25,35 @@ extern PRLogModuleInfo* GetMediaManagerL
#else
#define LOG(msg)
#define LOGFRAME(msg)
#endif
/**
* Webrtc video source.
*/
-#ifndef MOZ_B2G_CAMERA
-NS_IMPL_ISUPPORTS(MediaEngineWebRTCVideoSource, nsIRunnable)
-#else
-NS_IMPL_QUERY_INTERFACE(MediaEngineWebRTCVideoSource, nsIRunnable)
-NS_IMPL_ADDREF_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
-NS_IMPL_RELEASE_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
-#endif
-// ViEExternalRenderer Callback.
-#ifndef MOZ_B2G_CAMERA
+NS_IMPL_ISUPPORTS0(MediaEngineWebRTCVideoSource)
+
int
MediaEngineWebRTCVideoSource::FrameSizeChange(
unsigned int w, unsigned int h, unsigned int streams)
{
mWidth = w;
mHeight = h;
LOG(("Video FrameSizeChange: %ux%u", w, h));
return 0;
}
// ViEExternalRenderer Callback. Process every incoming frame here.
int
MediaEngineWebRTCVideoSource::DeliverFrame(
unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time,
void *handle)
{
- // mInSnapshotMode can only be set before the camera is turned on and
- // the renderer is started, so this amounts to a 1-shot
- if (mInSnapshotMode) {
- // Set the condition variable to false and notify Snapshot().
- MonitorAutoLock lock(mMonitor);
- mInSnapshotMode = false;
- lock.Notify();
- return 0;
- }
-
// Check for proper state.
if (mState != kStarted) {
LOG(("DeliverFrame: video not started"));
return 0;
}
if (mWidth*mHeight + 2*(((mWidth+1)/2)*((mHeight+1)/2)) != size) {
MOZ_ASSERT(false, "Wrong size frame in DeliverFrame!");
@@ -119,17 +95,16 @@ MediaEngineWebRTCVideoSource::DeliverFra
// which has it's own lock)
MonitorAutoLock lock(mMonitor);
// implicitly releases last image
mImage = image.forget();
return 0;
}
-#endif
// Called if the graph thinks it's running out of buffered video; repeat
// the last frame for whatever minimum period it think it needs. Note that
// this means that no *real* frame can be inserted during this period.
void
MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
SourceMediaStream *aSource,
TrackID aID,
@@ -167,61 +142,34 @@ MediaEngineWebRTCVideoSource::NotifyPull
// This can fail if either a) we haven't added the track yet, or b)
// we've removed or finished the track.
if (aSource->AppendToTrack(aID, &(segment))) {
aLastEndTime = target;
}
}
}
-static bool IsWithin(int32_t n, const ConstrainLongRange& aRange) {
- return aRange.mMin <= n && n <= aRange.mMax;
-}
-
-static bool IsWithin(double n, const ConstrainDoubleRange& aRange) {
- return aRange.mMin <= n && n <= aRange.mMax;
-}
-
-static int32_t Clamp(int32_t n, const ConstrainLongRange& aRange) {
- return std::max(aRange.mMin, std::min(n, aRange.mMax));
-}
-
-static bool
-AreIntersecting(const ConstrainLongRange& aA, const ConstrainLongRange& aB) {
- return aA.mMax >= aB.mMin && aA.mMin <= aB.mMax;
-}
-
-static bool
-Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
- MOZ_ASSERT(AreIntersecting(aA, aB));
- aA.mMin = std::max(aA.mMin, aB.mMin);
- aA.mMax = std::min(aA.mMax, aB.mMax);
- return true;
-}
-
-static bool SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
- const webrtc::CaptureCapability& aCandidate) {
- if (!IsWithin(aCandidate.width, aConstraints.mWidth) ||
- !IsWithin(aCandidate.height, aConstraints.mHeight)) {
+/*static*/
+bool MediaEngineWebRTCVideoSource::SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
+ const webrtc::CaptureCapability& aCandidate) {
+ if (!MediaEngineCameraVideoSource::IsWithin(aCandidate.width, aConstraints.mWidth) ||
+ !MediaEngineCameraVideoSource::IsWithin(aCandidate.height, aConstraints.mHeight)) {
return false;
}
- if (!IsWithin(aCandidate.maxFPS, aConstraints.mFrameRate)) {
+ if (!MediaEngineCameraVideoSource::IsWithin(aCandidate.maxFPS, aConstraints.mFrameRate)) {
return false;
}
return true;
}
void
MediaEngineWebRTCVideoSource::ChooseCapability(
const VideoTrackConstraintsN &aConstraints,
const MediaEnginePrefs &aPrefs)
{
-#ifdef MOZ_B2G_CAMERA
- return GuessCapability(aConstraints, aPrefs);
-#else
NS_ConvertUTF16toUTF8 uniqueId(mUniqueId);
int num = mViECapture->NumberOfCapabilities(uniqueId.get(), kMaxUniqueIdLength);
if (num <= 0) {
// Mac doesn't support capabilities.
return GuessCapability(aConstraints, aPrefs);
}
// The rest is the full algorithm for cameras that can list their capabilities.
@@ -326,129 +274,23 @@ MediaEngineWebRTCVideoSource::ChooseCapa
mCapability = cap;
}
}
}
}
LOG(("chose cap %dx%d @%dfps codec %d raw %d",
mCapability.width, mCapability.height, mCapability.maxFPS,
mCapability.codecType, mCapability.rawType));
-#endif
-}
-
-// A special version of the algorithm for cameras that don't list capabilities.
-
-void
-MediaEngineWebRTCVideoSource::GuessCapability(
- const VideoTrackConstraintsN &aConstraints,
- const MediaEnginePrefs &aPrefs)
-{
- LOG(("GuessCapability: prefs: %dx%d @%d-%dfps",
- aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
-
- // In short: compound constraint-ranges and use pref as ideal.
-
- ConstrainLongRange cWidth(aConstraints.mRequired.mWidth);
- ConstrainLongRange cHeight(aConstraints.mRequired.mHeight);
-
- if (aConstraints.mAdvanced.WasPassed()) {
- const auto& advanced = aConstraints.mAdvanced.Value();
- for (uint32_t i = 0; i < advanced.Length(); i++) {
- if (AreIntersecting(cWidth, advanced[i].mWidth) &&
- AreIntersecting(cHeight, advanced[i].mHeight)) {
- Intersect(cWidth, advanced[i].mWidth);
- Intersect(cHeight, advanced[i].mHeight);
- }
- }
- }
- // Detect Mac HD cams and give them some love in the form of a dynamic default
- // since that hardware switches between 4:3 at low res and 16:9 at higher res.
- //
- // Logic is: if we're relying on defaults in aPrefs, then
- // only use HD pref when non-HD pref is too small and HD pref isn't too big.
-
- bool macHD = ((!aPrefs.mWidth || !aPrefs.mHeight) &&
- mDeviceName.EqualsASCII("FaceTime HD Camera (Built-in)") &&
- (aPrefs.GetWidth() < cWidth.mMin ||
- aPrefs.GetHeight() < cHeight.mMin) &&
- !(aPrefs.GetWidth(true) > cWidth.mMax ||
- aPrefs.GetHeight(true) > cHeight.mMax));
- int prefWidth = aPrefs.GetWidth(macHD);
- int prefHeight = aPrefs.GetHeight(macHD);
-
- // Clamp width and height without distorting inherent aspect too much.
-
- if (IsWithin(prefWidth, cWidth) == IsWithin(prefHeight, cHeight)) {
- // If both are within, we get the default (pref) aspect.
- // If neither are within, we get the aspect of the enclosing constraint.
- // Either are presumably reasonable (presuming constraints are sane).
- mCapability.width = Clamp(prefWidth, cWidth);
- mCapability.height = Clamp(prefHeight, cHeight);
- } else {
- // But if only one clips (e.g. width), the resulting skew is undesirable:
- // .------------.
- // | constraint |
- // .----+------------+----.
- // | | | |
- // |pref| result | | prefAspect != resultAspect
- // | | | |
- // '----+------------+----'
- // '------------'
- // So in this case, preserve prefAspect instead:
- // .------------.
- // | constraint |
- // .------------.
- // |pref | prefAspect is unchanged
- // '------------'
- // | |
- // '------------'
- if (IsWithin(prefWidth, cWidth)) {
- mCapability.height = Clamp(prefHeight, cHeight);
- mCapability.width = Clamp((mCapability.height * prefWidth) /
- prefHeight, cWidth);
- } else {
- mCapability.width = Clamp(prefWidth, cWidth);
- mCapability.height = Clamp((mCapability.width * prefHeight) /
- prefWidth, cHeight);
- }
- }
- mCapability.maxFPS = MediaEngine::DEFAULT_VIDEO_FPS;
- LOG(("chose cap %dx%d @%dfps",
- mCapability.width, mCapability.height, mCapability.maxFPS));
-}
-
-void
-MediaEngineWebRTCVideoSource::GetName(nsAString& aName)
-{
- aName = mDeviceName;
-}
-
-void
-MediaEngineWebRTCVideoSource::GetUUID(nsAString& aUUID)
-{
- aUUID = mUniqueId;
}
nsresult
MediaEngineWebRTCVideoSource::Allocate(const VideoTrackConstraintsN &aConstraints,
const MediaEnginePrefs &aPrefs)
{
LOG((__FUNCTION__));
-#ifdef MOZ_B2G_CAMERA
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
- if (mState == kReleased && mInitDone) {
- ChooseCapability(aConstraints, aPrefs);
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::AllocImpl));
- mCallbackMonitor.Wait();
- if (mState != kAllocated) {
- return NS_ERROR_FAILURE;
- }
- }
-#else
if (mState == kReleased && mInitDone) {
// Note: if shared, we don't allow a later opener to affect the resolution.
// (This may change depending on spec changes for Constraints/settings)
ChooseCapability(aConstraints, aPrefs);
if (mViECapture->AllocateCaptureDevice(NS_ConvertUTF16toUTF8(mUniqueId).get(),
kMaxUniqueIdLength, mCaptureIndex)) {
@@ -456,42 +298,29 @@ MediaEngineWebRTCVideoSource::Allocate(c
}
mState = kAllocated;
LOG(("Video device %d allocated", mCaptureIndex));
} else if (mSources.IsEmpty()) {
LOG(("Video device %d reallocated", mCaptureIndex));
} else {
LOG(("Video device %d allocated shared", mCaptureIndex));
}
-#endif
return NS_OK;
}
nsresult
MediaEngineWebRTCVideoSource::Deallocate()
{
LOG((__FUNCTION__));
if (mSources.IsEmpty()) {
-#ifdef MOZ_B2G_CAMERA
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
if (mState != kStopped && mState != kAllocated) {
return NS_ERROR_FAILURE;
}
-#ifdef MOZ_B2G_CAMERA
- // We do not register success callback here
-
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::DeallocImpl));
- mCallbackMonitor.Wait();
- if (mState != kReleased) {
- return NS_ERROR_FAILURE;
- }
-#elif XP_MACOSX
+#ifdef XP_MACOSX
// Bug 829907 - on mac, in shutdown, the mainthread stops processing
// 'native' events, and the QTKit code uses events to the main native CFRunLoop
// in order to provide thread safety. In order to avoid this locking us up,
// release the ViE capture device synchronously on MainThread (so the native
// event isn't needed).
// XXX Note if MainThread Dispatch()es NS_DISPATCH_SYNC to us we can deadlock.
// XXX It might be nice to only do this if we're in shutdown... Hard to be
// sure when that is though.
@@ -513,130 +342,82 @@ MediaEngineWebRTCVideoSource::Deallocate
}
return NS_OK;
}
nsresult
MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
{
LOG((__FUNCTION__));
-#ifndef MOZ_B2G_CAMERA
int error = 0;
-#endif
if (!mInitDone || !aStream) {
return NS_ERROR_FAILURE;
}
mSources.AppendElement(aStream);
aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
-#ifdef MOZ_B2G_CAMERA
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
-
if (mState == kStarted) {
return NS_OK;
}
mImageContainer = layers::LayerManager::CreateImageContainer();
-#ifdef MOZ_B2G_CAMERA
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::StartImpl,
- mCapability));
- mCallbackMonitor.Wait();
- if (mState != kStarted) {
- return NS_ERROR_FAILURE;
- }
-#else
mState = kStarted;
error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this);
if (error == -1) {
return NS_ERROR_FAILURE;
}
error = mViERender->StartRender(mCaptureIndex);
if (error == -1) {
return NS_ERROR_FAILURE;
}
if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
return NS_ERROR_FAILURE;
}
-#endif
return NS_OK;
}
nsresult
MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
{
LOG((__FUNCTION__));
if (!mSources.RemoveElement(aSource)) {
// Already stopped - this is allowed
return NS_OK;
}
if (!mSources.IsEmpty()) {
return NS_OK;
}
-#ifdef MOZ_B2G_CAMERA
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
if (mState != kStarted) {
return NS_ERROR_FAILURE;
}
{
MonitorAutoLock lock(mMonitor);
mState = kStopped;
aSource->EndTrack(aID);
// Drop any cached image so we don't start with a stale image on next
// usage
mImage = nullptr;
}
-#ifdef MOZ_B2G_CAMERA
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::StopImpl));
-#else
mViERender->StopRender(mCaptureIndex);
mViERender->RemoveRenderer(mCaptureIndex);
mViECapture->StopCapture(mCaptureIndex);
-#endif
return NS_OK;
}
void
-MediaEngineWebRTCVideoSource::SetDirectListeners(bool aHasDirectListeners)
-{
- LOG((__FUNCTION__));
- mHasDirectListeners = aHasDirectListeners;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
-{
- return NS_ERROR_NOT_IMPLEMENTED;
-}
-
-/**
- * Initialization and Shutdown functions for the video source, called by the
- * constructor and destructor respectively.
- */
-
-void
MediaEngineWebRTCVideoSource::Init()
{
-#ifdef MOZ_B2G_CAMERA
- nsAutoCString deviceName;
- ICameraControl::GetCameraName(mCaptureIndex, deviceName);
- CopyUTF8toUTF16(deviceName, mDeviceName);
- CopyUTF8toUTF16(deviceName, mUniqueId);
-#else
// fix compile warning for these being unused. (remove once used)
(void) mFps;
(void) mMinFps;
LOG((__FUNCTION__));
if (mVideoEngine == nullptr) {
return;
}
@@ -659,483 +440,58 @@ MediaEngineWebRTCVideoSource::Init()
if (mViECapture->GetCaptureDevice(mCaptureIndex,
deviceName, kMaxDeviceNameLength,
uniqueId, kMaxUniqueIdLength)) {
return;
}
CopyUTF8toUTF16(deviceName, mDeviceName);
CopyUTF8toUTF16(uniqueId, mUniqueId);
-#endif
mInitDone = true;
}
void
MediaEngineWebRTCVideoSource::Shutdown()
{
LOG((__FUNCTION__));
if (!mInitDone) {
return;
}
-#ifdef MOZ_B2G_CAMERA
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-#endif
if (mState == kStarted) {
while (!mSources.IsEmpty()) {
Stop(mSources[0], kVideoTrack); // XXX change to support multiple tracks
}
MOZ_ASSERT(mState == kStopped);
}
if (mState == kAllocated || mState == kStopped) {
Deallocate();
}
-#ifndef MOZ_B2G_CAMERA
mViECapture->Release();
mViERender->Release();
mViEBase->Release();
-#endif
mState = kReleased;
mInitDone = false;
}
void MediaEngineWebRTCVideoSource::Refresh(int aIndex) {
// NOTE: mCaptureIndex might have changed when allocated!
// Use aIndex to update information, but don't change mCaptureIndex!!
-#ifdef MOZ_B2G_CAMERA
- // Caller looked up this source by uniqueId; since deviceName == uniqueId nothing else changes
-#else
// Caller looked up this source by uniqueId, so it shouldn't change
char deviceName[kMaxDeviceNameLength];
char uniqueId[kMaxUniqueIdLength];
if (mViECapture->GetCaptureDevice(aIndex,
deviceName, sizeof(deviceName),
uniqueId, sizeof(uniqueId))) {
return;
}
CopyUTF8toUTF16(deviceName, mDeviceName);
#ifdef DEBUG
nsString temp;
CopyUTF8toUTF16(uniqueId, temp);
MOZ_ASSERT(temp.Equals(mUniqueId));
#endif
-#endif
-}
-
-#ifdef MOZ_B2G_CAMERA
-
-// All these functions must be run on MainThread!
-void
-MediaEngineWebRTCVideoSource::AllocImpl() {
- MOZ_ASSERT(NS_IsMainThread());
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
-
- mCameraControl = ICameraControl::Create(mCaptureIndex);
- if (mCameraControl) {
- mState = kAllocated;
- // Add this as a listener for CameraControl events. We don't need
- // to explicitly remove this--destroying the CameraControl object
- // in DeallocImpl() will do that for us.
- mCameraControl->AddListener(this);
- }
-
- mCallbackMonitor.Notify();
-}
-
-void
-MediaEngineWebRTCVideoSource::DeallocImpl() {
- MOZ_ASSERT(NS_IsMainThread());
-
- mCameraControl = nullptr;
-}
-
-// The same algorithm from bug 840244
-static int
-GetRotateAmount(ScreenOrientation aScreen, int aCameraMountAngle, bool aBackCamera) {
- int screenAngle = 0;
- switch (aScreen) {
- case eScreenOrientation_PortraitPrimary:
- screenAngle = 0;
- break;
- case eScreenOrientation_PortraitSecondary:
- screenAngle = 180;
- break;
- case eScreenOrientation_LandscapePrimary:
- screenAngle = 90;
- break;
- case eScreenOrientation_LandscapeSecondary:
- screenAngle = 270;
- break;
- default:
- MOZ_ASSERT(false);
- break;
- }
-
- int result;
-
- if (aBackCamera) {
- //back camera
- result = (aCameraMountAngle - screenAngle + 360) % 360;
- } else {
- //front camera
- result = (aCameraMountAngle + screenAngle) % 360;
- }
- return result;
-}
-
-// undefine to remove on-the-fly rotation support
-#define DYNAMIC_GUM_ROTATION
-
-void
-MediaEngineWebRTCVideoSource::Notify(const hal::ScreenConfiguration& aConfiguration) {
-#ifdef DYNAMIC_GUM_ROTATION
- if (mHasDirectListeners) {
- // aka hooked to PeerConnection
- MonitorAutoLock enter(mMonitor);
- mRotation = GetRotateAmount(aConfiguration.orientation(), mCameraAngle, mBackCamera);
-
- LOG(("*** New orientation: %d (Camera %d Back %d MountAngle: %d)",
- mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
- }
-#endif
-
- mOrientationChanged = true;
-}
-
-void
-MediaEngineWebRTCVideoSource::StartImpl(webrtc::CaptureCapability aCapability) {
- MOZ_ASSERT(NS_IsMainThread());
-
- ICameraControl::Configuration config;
- config.mMode = ICameraControl::kPictureMode;
- config.mPreviewSize.width = aCapability.width;
- config.mPreviewSize.height = aCapability.height;
- mCameraControl->Start(&config);
- mCameraControl->Set(CAMERA_PARAM_PICTURE_SIZE, config.mPreviewSize);
-
- hal::RegisterScreenConfigurationObserver(this);
-}
-
-void
-MediaEngineWebRTCVideoSource::StopImpl() {
- MOZ_ASSERT(NS_IsMainThread());
-
- hal::UnregisterScreenConfigurationObserver(this);
- mCameraControl->Stop();
-}
-
-void
-MediaEngineWebRTCVideoSource::SnapshotImpl() {
- MOZ_ASSERT(NS_IsMainThread());
- mCameraControl->TakePicture();
-}
-
-void
-MediaEngineWebRTCVideoSource::OnHardwareStateChange(HardwareState aState)
-{
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
- if (aState == CameraControlListener::kHardwareClosed) {
- // When the first CameraControl listener is added, it gets pushed
- // the current state of the camera--normally 'closed'. We only
- // pay attention to that state if we've progressed out of the
- // allocated state.
- if (mState != kAllocated) {
- mState = kReleased;
- mCallbackMonitor.Notify();
- }
- } else {
- // Can't read this except on MainThread (ugh)
- NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
- &MediaEngineWebRTCVideoSource::GetRotation));
- mState = kStarted;
- mCallbackMonitor.Notify();
- }
-}
-
-void
-MediaEngineWebRTCVideoSource::GetRotation()
-{
- MOZ_ASSERT(NS_IsMainThread());
- MonitorAutoLock enter(mMonitor);
-
- mCameraControl->Get(CAMERA_PARAM_SENSORANGLE, mCameraAngle);
- MOZ_ASSERT(mCameraAngle == 0 || mCameraAngle == 90 || mCameraAngle == 180 ||
- mCameraAngle == 270);
- hal::ScreenConfiguration config;
- hal::GetCurrentScreenConfiguration(&config);
-
- nsCString deviceName;
- ICameraControl::GetCameraName(mCaptureIndex, deviceName);
- if (deviceName.EqualsASCII("back")) {
- mBackCamera = true;
- }
-
- mRotation = GetRotateAmount(config.orientation(), mCameraAngle, mBackCamera);
- LOG(("*** Initial orientation: %d (Camera %d Back %d MountAngle: %d)",
- mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
-}
-
-void
-MediaEngineWebRTCVideoSource::OnUserError(UserContext aContext, nsresult aError)
-{
- {
- // Scope the monitor, since there is another monitor below and we don't want
- // unexpected deadlock.
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
- mCallbackMonitor.Notify();
- }
-
- // A main thread runnable to send error code to all queued PhotoCallbacks.
- class TakePhotoError : public nsRunnable {
- public:
- TakePhotoError(nsTArray<nsRefPtr<PhotoCallback>>& aCallbacks,
- nsresult aRv)
- : mRv(aRv)
- {
- mCallbacks.SwapElements(aCallbacks);
- }
-
- NS_IMETHOD Run()
- {
- uint32_t callbackNumbers = mCallbacks.Length();
- for (uint8_t i = 0; i < callbackNumbers; i++) {
- mCallbacks[i]->PhotoError(mRv);
- }
- // PhotoCallback needs to dereference on main thread.
- mCallbacks.Clear();
- return NS_OK;
- }
-
- protected:
- nsTArray<nsRefPtr<PhotoCallback>> mCallbacks;
- nsresult mRv;
- };
-
- if (aContext == UserContext::kInTakePicture) {
- MonitorAutoLock lock(mMonitor);
- if (mPhotoCallbacks.Length()) {
- NS_DispatchToMainThread(new TakePhotoError(mPhotoCallbacks, aError));
- }
- }
}
-void
-MediaEngineWebRTCVideoSource::OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType)
-{
- // It needs to start preview because Gonk camera will stop preview while
- // taking picture.
- mCameraControl->StartPreview();
-
- // Create a main thread runnable to generate a blob and call all current queued
- // PhotoCallbacks.
- class GenerateBlobRunnable : public nsRunnable {
- public:
- GenerateBlobRunnable(nsTArray<nsRefPtr<PhotoCallback>>& aCallbacks,
- uint8_t* aData,
- uint32_t aLength,
- const nsAString& aMimeType)
- {
- mCallbacks.SwapElements(aCallbacks);
- mPhoto.AppendElements(aData, aLength);
- mMimeType = aMimeType;
- }
-
- NS_IMETHOD Run()
- {
- nsRefPtr<dom::File> blob =
- dom::File::CreateMemoryFile(nullptr, mPhoto.Elements(), mPhoto.Length(), mMimeType);
- uint32_t callbackCounts = mCallbacks.Length();
- for (uint8_t i = 0; i < callbackCounts; i++) {
- nsRefPtr<dom::File> tempBlob = blob;
- mCallbacks[i]->PhotoComplete(tempBlob.forget());
- }
- // PhotoCallback needs to dereference on main thread.
- mCallbacks.Clear();
- return NS_OK;
- }
-
- nsTArray<nsRefPtr<PhotoCallback>> mCallbacks;
- nsTArray<uint8_t> mPhoto;
- nsString mMimeType;
- };
-
- // All elements in mPhotoCallbacks will be swapped in GenerateBlobRunnable
- // constructor. This captured image will be sent to all the queued
- // PhotoCallbacks in this runnable.
- MonitorAutoLock lock(mMonitor);
- if (mPhotoCallbacks.Length()) {
- NS_DispatchToMainThread(
- new GenerateBlobRunnable(mPhotoCallbacks, aData, aLength, aMimeType));
- }
-}
-
-uint32_t
-MediaEngineWebRTCVideoSource::ConvertPixelFormatToFOURCC(int aFormat)
-{
- switch (aFormat) {
- case HAL_PIXEL_FORMAT_RGBA_8888:
- return libyuv::FOURCC_BGRA;
- case HAL_PIXEL_FORMAT_YCrCb_420_SP:
- return libyuv::FOURCC_NV21;
- case HAL_PIXEL_FORMAT_YV12:
- return libyuv::FOURCC_YV12;
- default: {
- LOG((" xxxxx Unknown pixel format %d", aFormat));
- MOZ_ASSERT(false, "Unknown pixel format.");
- return libyuv::FOURCC_ANY;
- }
- }
-}
-
-void
-MediaEngineWebRTCVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
- layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(aImage);
- android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
- void *pMem = nullptr;
- uint32_t size = aWidth * aHeight * 3 / 2;
-
- graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &pMem);
-
- uint8_t* srcPtr = static_cast<uint8_t*>(pMem);
- // Create a video frame and append it to the track.
- nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
- layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
-
- uint32_t dstWidth;
- uint32_t dstHeight;
-
- if (mRotation == 90 || mRotation == 270) {
- dstWidth = aHeight;
- dstHeight = aWidth;
- } else {
- dstWidth = aWidth;
- dstHeight = aHeight;
- }
-
- uint32_t half_width = dstWidth / 2;
- uint8_t* dstPtr = videoImage->AllocateAndGetNewBuffer(size);
- libyuv::ConvertToI420(srcPtr, size,
- dstPtr, dstWidth,
- dstPtr + (dstWidth * dstHeight), half_width,
- dstPtr + (dstWidth * dstHeight * 5 / 4), half_width,
- 0, 0,
- aWidth, aHeight,
- aWidth, aHeight,
- static_cast<libyuv::RotationMode>(mRotation),
- ConvertPixelFormatToFOURCC(graphicBuffer->getPixelFormat()));
- graphicBuffer->unlock();
-
- const uint8_t lumaBpp = 8;
- const uint8_t chromaBpp = 4;
-
- layers::PlanarYCbCrData data;
- data.mYChannel = dstPtr;
- data.mYSize = IntSize(dstWidth, dstHeight);
- data.mYStride = dstWidth * lumaBpp / 8;
- data.mCbCrStride = dstWidth * chromaBpp / 8;
- data.mCbChannel = dstPtr + dstHeight * data.mYStride;
- data.mCrChannel = data.mCbChannel +( dstHeight * data.mCbCrStride / 2);
- data.mCbCrSize = IntSize(dstWidth / 2, dstHeight / 2);
- data.mPicX = 0;
- data.mPicY = 0;
- data.mPicSize = IntSize(dstWidth, dstHeight);
- data.mStereoMode = StereoMode::MONO;
-
- videoImage->SetDataNoCopy(data);
-
- // implicitly releases last image
- mImage = image.forget();
-}
-
-bool
-MediaEngineWebRTCVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
- {
- ReentrantMonitorAutoEnter sync(mCallbackMonitor);
- if (mState == kStopped) {
- return false;
- }
- }
-
- MonitorAutoLock enter(mMonitor);
- // Bug XXX we'd prefer to avoid converting if mRotation == 0, but that causes problems in UpdateImage()
- RotateImage(aImage, aWidth, aHeight);
- if (mRotation != 0 && mRotation != 180) {
- uint32_t temp = aWidth;
- aWidth = aHeight;
- aHeight = temp;
- }
- if (mWidth != static_cast<int>(aWidth) || mHeight != static_cast<int>(aHeight)) {
- mWidth = aWidth;
- mHeight = aHeight;
- LOG(("Video FrameSizeChange: %ux%u", mWidth, mHeight));
- }
-
- return true; // return true because we're accepting the frame
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::TakePhoto(PhotoCallback* aCallback)
-{
- MOZ_ASSERT(NS_IsMainThread());
-
- MonitorAutoLock lock(mMonitor);
-
- // If other callback exists, that means there is a captured picture on the way,
- // it doesn't need to TakePicture() again.
- if (!mPhotoCallbacks.Length()) {
- nsresult rv;
- if (mOrientationChanged) {
- UpdatePhotoOrientation();
- }
- rv = mCameraControl->TakePicture();
- if (NS_FAILED(rv)) {
- return rv;
- }
- }
-
- mPhotoCallbacks.AppendElement(aCallback);
-
- return NS_OK;
-}
-
-nsresult
-MediaEngineWebRTCVideoSource::UpdatePhotoOrientation()
-{
- MOZ_ASSERT(NS_IsMainThread());
-
- hal::ScreenConfiguration config;
- hal::GetCurrentScreenConfiguration(&config);
-
- // The rotation angle is clockwise.
- int orientation = 0;
- switch (config.orientation()) {
- case eScreenOrientation_PortraitPrimary:
- orientation = 0;
- break;
- case eScreenOrientation_PortraitSecondary:
- orientation = 180;
- break;
- case eScreenOrientation_LandscapePrimary:
- orientation = 270;
- break;
- case eScreenOrientation_LandscapeSecondary:
- orientation = 90;
- break;
- }
-
- // Front camera is inverse angle comparing to back camera.
- orientation = (mBackCamera ? orientation : (-orientation));
-
- ICameraControlParameterSetAutoEnter batch(mCameraControl);
- // It changes the orientation value in EXIF information only.
- mCameraControl->Set(CAMERA_PARAM_PICTURE_ROTATION, orientation);
-
- mOrientationChanged = false;
-
- return NS_OK;
-}
-
-#endif
-
-}
+} // namespace mozilla
--- a/content/media/webrtc/moz.build
+++ b/content/media/webrtc/moz.build
@@ -3,40 +3,48 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
XPIDL_MODULE = 'content_webrtc'
EXPORTS += [
'MediaEngine.h',
+ 'MediaEngineCameraVideoSource.h',
'MediaEngineDefault.h',
'MediaTrackConstraints.h',
]
if CONFIG['MOZ_WEBRTC']:
EXPORTS += ['AudioOutputObserver.h',
'MediaEngineWebRTC.h']
UNIFIED_SOURCES += [
+ 'MediaEngineCameraVideoSource.cpp',
'MediaEngineTabVideoSource.cpp',
'MediaEngineWebRTCAudio.cpp',
'MediaEngineWebRTCVideo.cpp',
]
# MediaEngineWebRTC.cpp needs to be built separately.
SOURCES += [
'MediaEngineWebRTC.cpp',
]
LOCAL_INCLUDES += [
'/dom/base',
'/dom/camera',
'/media/libyuv/include',
'/media/webrtc/signaling/src/common',
'/media/webrtc/signaling/src/common/browser_logging',
'/media/webrtc/trunk',
]
+ # Gonk camera source.
+ if CONFIG['MOZ_B2G_CAMERA']:
+ EXPORTS += ['MediaEngineGonkVideoSource.h']
+ UNIFIED_SOURCES += [
+ 'MediaEngineGonkVideoSource.cpp',
+ ]
XPIDL_SOURCES += [
'nsITabSource.idl'
]
UNIFIED_SOURCES += [
'MediaEngineDefault.cpp',
'PeerIdentity.cpp',
--- a/dom/camera/CameraPreviewMediaStream.cpp
+++ b/dom/camera/CameraPreviewMediaStream.cpp
@@ -14,29 +14,32 @@
*/
#define MAX_INVALIDATE_PENDING 4
using namespace mozilla::layers;
using namespace mozilla::dom;
namespace mozilla {
+static const TrackID TRACK_VIDEO = 2;
+
void
FakeMediaStreamGraph::DispatchToMainThreadAfterStreamStateUpdate(already_AddRefed<nsIRunnable> aRunnable)
{
nsRefPtr<nsIRunnable> task = aRunnable;
NS_DispatchToMainThread(task);
}
CameraPreviewMediaStream::CameraPreviewMediaStream(DOMMediaStream* aWrapper)
: MediaStream(aWrapper)
, mMutex("mozilla::camera::CameraPreviewMediaStream")
, mInvalidatePending(0)
, mDiscardedFrames(0)
, mRateLimit(false)
+ , mTrackCreated(false)
{
SetGraphImpl(MediaStreamGraph::GetInstance());
mFakeMediaStreamGraph = new FakeMediaStreamGraph();
mIsConsumed = false;
}
void
CameraPreviewMediaStream::AddAudioOutput(void* aKey)
@@ -107,16 +110,32 @@ CameraPreviewMediaStream::RemoveListener
MutexAutoLock lock(mMutex);
nsRefPtr<MediaStreamListener> listener(aListener);
mListeners.RemoveElement(aListener);
listener->NotifyEvent(mFakeMediaStreamGraph, MediaStreamListener::EVENT_REMOVED);
}
void
+CameraPreviewMediaStream::OnPreviewStateChange(bool aActive)
+{
+ MutexAutoLock lock(mMutex);
+ if (!mTrackCreated && aActive) {
+ mTrackCreated = true;
+ VideoSegment tmpSegment;
+ uint32_t trackEvent = aActive ? MediaStreamListener::TRACK_EVENT_CREATED
+ : MediaStreamListener::TRACK_EVENT_ENDED;
+ for (uint32_t j = 0; j < mListeners.Length(); ++j) {
+ MediaStreamListener* l = mListeners[j];
+ l->NotifyQueuedTrackChanges(mFakeMediaStreamGraph, TRACK_VIDEO, 0, 0, trackEvent, tmpSegment);
+ }
+ }
+}
+
+void
CameraPreviewMediaStream::Destroy()
{
MutexAutoLock lock(mMutex);
DestroyImpl();
}
void
CameraPreviewMediaStream::Invalidate()
--- a/dom/camera/CameraPreviewMediaStream.h
+++ b/dom/camera/CameraPreviewMediaStream.h
@@ -46,30 +46,32 @@ public:
virtual void SetAudioOutputVolume(void* aKey, float aVolume) MOZ_OVERRIDE;
virtual void RemoveAudioOutput(void* aKey) MOZ_OVERRIDE;
virtual void AddVideoOutput(VideoFrameContainer* aContainer) MOZ_OVERRIDE;
virtual void RemoveVideoOutput(VideoFrameContainer* aContainer) MOZ_OVERRIDE;
virtual void ChangeExplicitBlockerCount(int32_t aDelta) MOZ_OVERRIDE;
virtual void AddListener(MediaStreamListener* aListener) MOZ_OVERRIDE;
virtual void RemoveListener(MediaStreamListener* aListener) MOZ_OVERRIDE;
virtual void Destroy();
+ void OnPreviewStateChange(bool aActive);
void Invalidate();
// Call these on any thread.
void SetCurrentFrame(const gfxIntSize& aIntrinsicSize, Image* aImage);
void ClearCurrentFrame();
void RateLimit(bool aLimit);
protected:
// mMutex protects all the class' fields.
// This class is not registered to MediaStreamGraph.
// It needs to protect all the fields.
Mutex mMutex;
int32_t mInvalidatePending;
uint32_t mDiscardedFrames;
bool mRateLimit;
+ bool mTrackCreated;
nsRefPtr<FakeMediaStreamGraph> mFakeMediaStreamGraph;
};
}
#endif // DOM_CAMERA_CAMERAPREVIEWMEDIASTREAM_H
--- a/dom/camera/DOMCameraControl.cpp
+++ b/dom/camera/DOMCameraControl.cpp
@@ -213,17 +213,18 @@ nsDOMCameraControl::nsDOMCameraControl(u
config.mPreviewSize.height = aInitialConfig.mPreviewSize.mHeight;
config.mRecorderProfile = aInitialConfig.mRecorderProfile;
}
mCameraControl = ICameraControl::Create(aCameraId);
mCurrentConfiguration = initialConfig.forget();
// Attach our DOM-facing media stream to our viewfinder stream.
- mStream = mInput;
+ SetHintContents(HINT_CONTENTS_VIDEO);
+ InitStreamCommon(mInput);
MOZ_ASSERT(mWindow, "Shouldn't be created with a null window!");
if (mWindow->GetExtantDoc()) {
CombineWithPrincipal(mWindow->GetExtantDoc()->NodePrincipal());
}
// Register a listener for camera events.
mListener = new DOMCameraControlListener(this, mInput);
mCameraControl->AddListener(mListener);
--- a/dom/camera/DOMCameraControlListener.cpp
+++ b/dom/camera/DOMCameraControlListener.cpp
@@ -132,16 +132,17 @@ DOMCameraControlListener::OnPreviewState
DOM_CAMERA_LOGI("Preview started\n");
break;
default:
DOM_CAMERA_LOGE("Unknown preview state %d\n", aState);
MOZ_ASSERT_UNREACHABLE("Invalid preview state");
return;
}
+ mStream->OnPreviewStateChange(aState == kPreviewStarted);
NS_DispatchToMainThread(new Callback(mDOMCameraControl, aState));
}
void
DOMCameraControlListener::OnRecorderStateChange(RecorderState aState,
int32_t aStatus, int32_t aTrackNum)
{
class Callback : public DOMCallback
--- a/dom/camera/test/test_camera.html
+++ b/dom/camera/test/test_camera.html
@@ -104,28 +104,36 @@ var Camera = {
takePictureSuccess: function taken_foto(blob) {
ok(blob.size > 100 , "Blob Size Gathered = " + blob.size);
ok("image/" + test.fileFormat == blob.type, "Blob Type = " + blob.type);
},
takePictureEvent: function taken_foto_evt(e) {
var blob = e.data;
var img = new Image();
var test = this._currentTest;
+ var onPreviewStateChange = function(e) {
+ if (e.newState === 'started') {
+ ok(true, "viewfinder is ready and playing after resume");
+ Camera.cameraObj.removeEventListener('previewstatechange', onPreviewStateChange);
+ Camera._testsCompleted++;
+ if(Camera._testsCompleted == Camera._tests.length) {
+ ok(true, "test finishing");
+ SimpleTest.finish();
+ } else {
+ Camera.runTests();
+ }
+ }
+ }
+ Camera.cameraObj.addEventListener('previewstatechange', onPreviewStateChange);
img.onload = function Imgsize() {
ok(this.width == test.pictureSize.width, "The image taken has the width " +
this.width + " pictureSize width = " + test.pictureSize.width);
ok(this.height == test.pictureSize.height, "The image taken has the height " +
this.height + " picturesize height = " + test.pictureSize.height);
- Camera._testsCompleted++;
- if(Camera._testsCompleted == Camera._tests.length) {
- ok(true, "test finishing");
- SimpleTest.finish();
- } else {
- Camera.runTests();
- }
+ Camera.cameraObj.resumePreview();
}
ok(blob.size > 100 , "Blob Size Gathered = " + blob.size);
ok("image/" + test.fileFormat == blob.type, "Blob Type = " + blob.type);
img.src = window.URL.createObjectURL(blob);
},
shutter: function onShutter () {
Camera._shutter++;
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -1036,18 +1036,17 @@ static SourceSet *
result->MoveElementsFrom(candidateSet);
result->MoveElementsFrom(tailSet);
return result.forget();
}
/**
* Runs on a seperate thread and is responsible for enumerating devices.
* Depending on whether a picture or stream was asked for, either
- * ProcessGetUserMedia or ProcessGetUserMediaSnapshot is called, and the results
- * are sent back to the DOM.
+ * ProcessGetUserMedia is called, and the results are sent back to the DOM.
*
* Do not run this on the main thread. The success and error callbacks *MUST*
* be dispatched on the main thread!
*/
class GetUserMediaTask : public Task
{
public:
GetUserMediaTask(
@@ -1119,28 +1118,16 @@ public:
// Was a device provided?
if (!mDeviceChosen) {
nsresult rv = SelectDevice(backend);
if (rv != NS_OK) {
return;
}
}
- // It is an error if audio or video are requested along with picture.
- if (mConstraints.mPicture &&
- (IsOn(mConstraints.mAudio) || IsOn(mConstraints.mVideo))) {
- Fail(NS_LITERAL_STRING("NOT_SUPPORTED_ERR"));
- return;
- }
-
- if (mConstraints.mPicture) {
- ProcessGetUserMediaSnapshot(mVideoDevice->GetSource(), 0);
- return;
- }
-
// There's a bug in the permission code that can leave us with mAudio but no audio device
ProcessGetUserMedia(((IsOn(mConstraints.mAudio) && mAudioDevice) ?
mAudioDevice->GetSource() : nullptr),
((IsOn(mConstraints.mVideo) && mVideoDevice) ?
mVideoDevice->GetSource() : nullptr));
}
nsresult
@@ -1199,17 +1186,17 @@ public:
return NS_OK;
}
nsresult
SelectDevice(MediaEngine* backend)
{
MOZ_ASSERT(mSuccess);
MOZ_ASSERT(mError);
- if (mConstraints.mPicture || IsOn(mConstraints.mVideo)) {
+ if (IsOn(mConstraints.mVideo)) {
VideoTrackConstraintsN constraints(GetInvariant(mConstraints.mVideo));
ScopedDeletePtr<SourceSet> sources(GetSources(backend, constraints,
&MediaEngine::EnumerateVideoDevices));
if (!sources->Length()) {
Fail(NS_LITERAL_STRING("NO_DEVICES_FOUND"));
return NS_ERROR_FAILURE;
}
@@ -1276,48 +1263,16 @@ public:
));
MOZ_ASSERT(!mSuccess);
MOZ_ASSERT(!mError);
return;
}
- /**
- * Allocates a video device, takes a snapshot and returns a File via
- * a SuccessRunnable or an error via the ErrorRunnable. Off the main thread.
- */
- void
- ProcessGetUserMediaSnapshot(MediaEngineVideoSource* aSource, int aDuration)
- {
- MOZ_ASSERT(mSuccess);
- MOZ_ASSERT(mError);
- nsresult rv = aSource->Allocate(GetInvariant(mConstraints.mVideo), mPrefs);
- if (NS_FAILED(rv)) {
- Fail(NS_LITERAL_STRING("HARDWARE_UNAVAILABLE"));
- return;
- }
-
- /**
- * Display picture capture UI here before calling Snapshot() - Bug 748835.
- */
- nsCOMPtr<nsIDOMFile> file;
- aSource->Snapshot(aDuration, getter_AddRefs(file));
- aSource->Deallocate();
-
- NS_DispatchToMainThread(new SuccessCallbackRunnable(
- mSuccess, mError, file, mWindowID
- ));
-
- MOZ_ASSERT(!mSuccess);
- MOZ_ASSERT(!mError);
-
- return;
- }
-
private:
MediaStreamConstraints mConstraints;
nsCOMPtr<nsIDOMGetUserMediaSuccessCallback> mSuccess;
nsCOMPtr<nsIDOMGetUserMediaErrorCallback> mError;
uint64_t mWindowID;
nsRefPtr<GetUserMediaCallbackMediaStreamListener> mListener;
nsRefPtr<AudioDevice> mAudioDevice;
@@ -1589,45 +1544,16 @@ MediaManager::GetUserMedia(
bool privileged = nsContentUtils::IsChromeDoc(aWindow->GetExtantDoc());
nsCOMPtr<nsIDOMGetUserMediaSuccessCallback> onSuccess(aOnSuccess);
nsCOMPtr<nsIDOMGetUserMediaErrorCallback> onError(aOnError);
MediaStreamConstraints c(aConstraints); // copy
- /**
- * If we were asked to get a picture, before getting a snapshot, we check if
- * the calling page is allowed to open a popup. We do this because
- * {picture:true} will open a new "window" to let the user preview or select
- * an image, on Android. The desktop UI for {picture:true} is TBD, at which
- * may point we can decide whether to extend this test there as well.
- */
-#if !defined(MOZ_WEBRTC)
- if (c.mPicture && !privileged) {
- if (aWindow->GetPopupControlState() > openControlled) {
- nsCOMPtr<nsIPopupWindowManager> pm =
- do_GetService(NS_POPUPWINDOWMANAGER_CONTRACTID);
- if (!pm) {
- return NS_OK;
- }
- uint32_t permission;
- nsCOMPtr<nsIDocument> doc = aWindow->GetExtantDoc();
- if (doc) {
- pm->TestPermission(doc->NodePrincipal(), &permission);
- if (permission == nsIPopupWindowManager::DENY_POPUP) {
- aWindow->FirePopupBlockedEvent(doc, nullptr, EmptyString(),
- EmptyString());
- return NS_OK;
- }
- }
- }
- }
-#endif
-
static bool created = false;
if (!created) {
// Force MediaManager to startup before we try to access it from other threads
// Hack: should init singleton earlier unless it's expensive (mem or CPU)
(void) MediaManager::Get();
#ifdef MOZ_B2G
// Initialize MediaPermissionManager before send out any permission request.
(void) MediaPermissionManager::GetInstance();
@@ -1746,25 +1672,16 @@ MediaManager::GetUserMedia(
}
#ifdef MOZ_B2G_CAMERA
if (mCameraManager == nullptr) {
mCameraManager = nsDOMCameraManager::CreateInstance(aWindow);
}
#endif
-#if defined(ANDROID) && !defined(MOZ_WIDGET_GONK)
- if (c.mPicture) {
- // ShowFilePickerForMimeType() must run on the Main Thread! (on Android)
- // Note, GetUserMediaRunnableWrapper takes ownership of task.
- NS_DispatchToMainThread(new GetUserMediaRunnableWrapper(task.forget()));
- return NS_OK;
- }
-#endif
-
bool isLoop = false;
nsCOMPtr<nsIURI> loopURI;
nsresult rv = NS_NewURI(getter_AddRefs(loopURI), "about:loopconversation");
NS_ENSURE_SUCCESS(rv, rv);
rv = docURI->EqualsExceptRef(loopURI, &isLoop);
NS_ENSURE_SUCCESS(rv, rv);
if (isLoop) {
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/TypedObject/aggregate-set-neutered.js
@@ -0,0 +1,27 @@
+// Bug 991981. Check for various quirks when setting a field of a typed object
+// during which set operation the underlying buffer is neutered.
+
+if (typeof TypedObject === "undefined")
+ quit();
+
+load(libdir + "asserts.js")
+
+var StructType = TypedObject.StructType;
+var uint32 = TypedObject.uint32;
+
+function main(variant)
+{
+ var Point = new StructType({ x: uint32, y: uint32 });
+ var Line = new StructType({ from: Point, to: Point });
+
+ var buf = new ArrayBuffer(16);
+ var line = new Line(buf);
+
+ assertThrowsInstanceOf(function()
+ {
+ line.to = { x: 22, get y() { neuter(buf, variant); return 44; } };
+ }, TypeError, "setting into a neutered buffer is bad mojo");
+}
+
+main("same-data");
+main("change-data");
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/asm.js/neuter-during-arguments-coercion.js
@@ -0,0 +1,27 @@
+load(libdir + "asm.js");
+
+function f(stdlib, foreign, buffer)
+{
+ "use asm";
+ var i32 = new stdlib.Int32Array(buffer);
+ function set(v)
+ {
+ v=v|0;
+ i32[5] = v;
+ }
+ return set;
+}
+if (isAsmJSCompilationAvailable())
+ assertEq(isAsmJSModule(f), true);
+
+var i32 = new Int32Array(4096);
+var buffer = i32.buffer;
+var set = f(this, null, buffer);
+if (isAsmJSCompilationAvailable())
+ assertEq(isAsmJSFunction(set), true);
+
+try
+{
+ set({ valueOf: function() { neuter(buffer, "same-data"); return 17; } });
+}
+catch (e) { /* if an exception thrown, swallow */ }
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/ion/inlining/typedarray-data-inlining-neuter-samedata.js
@@ -0,0 +1,52 @@
+var NONINLINABLE_AMOUNT = 40;
+var SIZEOF_INT32 = 4;
+
+var INLINABLE_INT8_AMOUNT = 4;
+
+// Large arrays with non-inline data
+
+// Neutering and replacing data.
+var ab1 = new ArrayBuffer(NONINLINABLE_AMOUNT * SIZEOF_INT32);
+var ta1 = new Int32Array(ab1);
+for (var i = 0; i < ta1.length; i++)
+ ta1[i] = i + 43;
+function q1() { return ta1[NONINLINABLE_AMOUNT - 1]; }
+assertEq(q1(), NONINLINABLE_AMOUNT - 1 + 43);
+assertEq(q1(), NONINLINABLE_AMOUNT - 1 + 43);
+neuter(ab1, "change-data");
+assertEq(q1(), undefined);
+
+// Neutering preserving data pointer.
+var ab2 = new ArrayBuffer(NONINLINABLE_AMOUNT * SIZEOF_INT32);
+var ta2 = new Int32Array(ab2);
+for (var i = 0; i < ta2.length; i++)
+ ta2[i] = i + 77;
+function q2() { return ta2[NONINLINABLE_AMOUNT - 1]; }
+assertEq(q2(), NONINLINABLE_AMOUNT - 1 + 77);
+assertEq(q2(), NONINLINABLE_AMOUNT - 1 + 77);
+neuter(ab2, "same-data");
+assertEq(q2(), undefined);
+
+// Small arrays with inline data
+
+// Neutering and replacing data.
+var ab3 = new ArrayBuffer(INLINABLE_INT8_AMOUNT);
+var ta3 = new Int8Array(ab3);
+for (var i = 0; i < ta3.length; i++)
+ ta3[i] = i + 13;
+function q3() { return ta3[INLINABLE_INT8_AMOUNT - 1]; }
+assertEq(q3(), INLINABLE_INT8_AMOUNT - 1 + 13);
+assertEq(q3(), INLINABLE_INT8_AMOUNT - 1 + 13);
+neuter(ab3, "change-data");
+assertEq(q3(), undefined);
+
+// Neutering preserving data pointer.
+var ab4 = new ArrayBuffer(4);
+var ta4 = new Int8Array(ab4);
+for (var i = 0; i < ta4.length; i++)
+ ta4[i] = i + 17;
+function q4() { return ta4[INLINABLE_INT8_AMOUNT - 1]; }
+assertEq(q4(), INLINABLE_INT8_AMOUNT - 1 + 17);
+assertEq(q4(), INLINABLE_INT8_AMOUNT - 1 + 17);
+neuter(ab4, "same-data");
+assertEq(q4(), undefined);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/ion/inlining/typedarray-length-inlining-neuter.js
@@ -0,0 +1,42 @@
+var INLINE_INT8_AMOUNT = 4;
+var OUT_OF_LINE_INT8_AMOUNT = 237;
+
+// Small and inline
+
+// Neutering and replacing data.
+var ab1 = new ArrayBuffer(INLINE_INT8_AMOUNT);
+var ta1 = new Int8Array(ab1);
+function q1() { return ta1.length; }
+q1();
+q1();
+neuter(ab1, "change-data");
+assertEq(q1(), 0);
+
+// Neutering preserving data pointer.
+var ab2 = new ArrayBuffer(INLINE_INT8_AMOUNT);
+var ta2 = new Int8Array(ab2);
+function q2() { return ta2.length; }
+q2();
+q2();
+neuter(ab2, "same-data");
+assertEq(q2(), 0);
+
+// Large and out-of-line
+
+// Neutering and replacing data.
+var ab3 = new ArrayBuffer(OUT_OF_LINE_INT8_AMOUNT);
+var ta3 = new Int8Array(ab3);
+function q3() { return ta3.length; }
+q3();
+q3();
+neuter(ab3, "change-data");
+assertEq(q3(), 0);
+
+// Neutering preserving data pointer.
+var ab4 = new ArrayBuffer(OUT_OF_LINE_INT8_AMOUNT);
+var ta4 = new Int8Array(ab4);
+function q4() { return ta4.length; }
+q4();
+q4();
+neuter(ab4, "same-data");
+assertEq(q4(), 0);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/ion/typedarray-length.js
@@ -0,0 +1,25 @@
+function neuterEventually(arr, i, variant)
+{
+ with (arr)
+ {
+ // prevent inlining
+ }
+
+ if (i === 2000)
+ neuter(arr.buffer, variant);
+}
+
+function test(variant)
+{
+ var buf = new ArrayBuffer(1000);
+ var ta = new Int8Array(buf);
+
+ for (var i = 0; i < 2500; i++)
+ {
+ neuterEventually(ta, i, variant);
+ assertEq(ta.length, i >= 2000 ? 0 : 1000);
+ }
+}
+
+test("change-data");
+test("same-data");
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/ion/typedarray-static-load.js
@@ -0,0 +1,12 @@
+var buffer = new ArrayBuffer(512 * 1024);
+var ta = new Uint8Array(buffer);
+
+function load() { return ta[0x1234]; }
+
+load();
+load();
+load();
+
+neuter(buffer, "change-data");
+
+load();
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/ion/typedarray-static-store.js
@@ -0,0 +1,12 @@
+var buffer = new ArrayBuffer(512 * 1024);
+var ta = new Uint8Array(buffer);
+
+function store() { ta[0x1234] = 42; }
+
+store();
+store();
+store();
+
+neuter(buffer, "change-data");
+
+store();
--- a/js/src/jit/RangeAnalysis.h
+++ b/js/src/jit/RangeAnalysis.h
@@ -193,17 +193,17 @@ class Range : public TempObject {
bool canHaveFractionalPart_;
uint16_t max_exponent_;
// Any symbolic lower or upper bound computed for this term.
const SymbolicBound *symbolicLower_;
const SymbolicBound *symbolicUpper_;
- // This function simply makes several JS_ASSERTs to verify the internal
+ // This function simply makes several MOZ_ASSERTs to verify the internal
// consistency of this range.
void assertInvariants() const {
// Basic sanity :).
MOZ_ASSERT(lower_ <= upper_);
// When hasInt32LowerBound_ or hasInt32UpperBound_ are false, we set
// lower_ and upper_ to these specific values as it simplifies the
// implementation in some places.
new file mode 100644
--- /dev/null
+++ b/js/src/tests/ecma_6/TypedObject/map-neutered-midway.js
@@ -0,0 +1,43 @@
+// |reftest| skip-if(!this.hasOwnProperty("TypedObject")||!xulRuntime.shell) -- needs TypedObject, neuter()
+/*
+ * Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/licenses/publicdomain/
+ */
+
+var BUGNUMBER = 991981;
+var summary =
+ "Behavior of mapping from an array neutered midway through mapping";
+
+function mapOneDimArrayOfUint8(dataHandling)
+{
+ var FourByteArray = TypedObject.uint8.array(4);
+ var FourByteArrayArray = FourByteArray.array(4);
+
+ var buf = new ArrayBuffer(16);
+ var arr = new FourByteArrayArray(buf);
+
+ var count = 0;
+ assertThrowsInstanceOf(function()
+ {
+ arr.map(function(v)
+ {
+ if (count++ > 0)
+ neuter(buf, dataHandling);
+ return new FourByteArray();
+ });
+ }, TypeError, "mapping of a neutered object worked?");
+}
+
+function runTests()
+{
+ print(BUGNUMBER + ": " + summary);
+
+ mapOneDimArrayOfUint8("change-data");
+ mapOneDimArrayOfUint8("same-data");
+
+ if (typeof reportCompare === "function")
+ reportCompare(true, true);
+ print("Tests complete");
+}
+
+runTests();
new file mode 100644
--- /dev/null
+++ b/js/src/tests/ecma_6/extensions/ArrayBuffer-slice-arguments-neutering.js
@@ -0,0 +1,83 @@
+// |reftest| skip-if(!xulRuntime.shell) -- needs neuter()
+/*
+ * Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/licenses/publicdomain/
+ */
+
+var gTestfile = "ArrayBuffer-slice-arguments-neutering.js";
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 991981;
+var summary =
+ "ArrayBuffer.prototype.slice shouldn't misbehave horribly if " +
+ "index-argument conversion neuters the ArrayBuffer being sliced";
+
+print(BUGNUMBER + ": " + summary);
+
+/**************
+ * BEGIN TEST *
+ **************/
+
+function testStart(dataType)
+{
+ var ab = new ArrayBuffer(0x1000);
+
+ var start =
+ {
+ valueOf: function()
+ {
+ neuter(ab, dataType);
+ gc();
+ return 0x800;
+ }
+ };
+
+ var ok = false;
+ try
+ {
+ ab.slice(start);
+ }
+ catch (e)
+ {
+ ok = true;
+ }
+ assertEq(ok, true, "start weirdness should have thrown");
+ assertEq(ab.byteLength, 0, "neutering should work for start weirdness");
+}
+testStart("change-data");
+testStart("same-data");
+
+function testEnd(dataType)
+{
+ var ab = new ArrayBuffer(0x1000);
+
+ var end =
+ {
+ valueOf: function()
+ {
+ neuter(ab, dataType);
+ gc();
+ return 0x1000;
+ }
+ };
+
+ var ok = false;
+ try
+ {
+ ab.slice(0x800, end);
+ }
+ catch (e)
+ {
+ ok = true;
+ }
+ assertEq(ok, true, "byteLength weirdness should have thrown");
+ assertEq(ab.byteLength, 0, "neutering should work for byteLength weirdness");
+}
+testEnd("change-data");
+testEnd("same-data");
+
+/******************************************************************************/
+
+if (typeof reportCompare === "function")
+ reportCompare(true, true);
+
+print("Tests complete");
new file mode 100644
--- /dev/null
+++ b/js/src/tests/ecma_6/extensions/DataView-construct-arguments-neutering.js
@@ -0,0 +1,83 @@
+// |reftest| skip-if(!xulRuntime.shell) -- needs neuter()
+/*
+ * Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/licenses/publicdomain/
+ */
+
+var gTestfile = "DataView-construct-arguments-neutering.js";
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 991981;
+var summary =
+ "new DataView(...) shouldn't misbehave horribly if index-argument " +
+ "conversion neuters the ArrayBuffer to be viewed";
+
+print(BUGNUMBER + ": " + summary);
+
+/**************
+ * BEGIN TEST *
+ **************/
+
+function testByteOffset(dataType)
+{
+ var ab = new ArrayBuffer(0x1000);
+
+ var start =
+ {
+ valueOf: function()
+ {
+ neuter(ab, dataType);
+ gc();
+ return 0x800;
+ }
+ };
+
+ var ok = false;
+ try
+ {
+ new DataView(ab, start);
+ }
+ catch (e)
+ {
+ ok = true;
+ }
+ assertEq(ok, true, "byteOffset weirdness should have thrown");
+ assertEq(ab.byteLength, 0, "neutering should work for byteOffset weirdness");
+}
+testByteOffset("change-data");
+testByteOffset("same-data");
+
+function testByteLength(dataType)
+{
+ var ab = new ArrayBuffer(0x1000);
+
+ var len =
+ {
+ valueOf: function()
+ {
+ neuter(ab, dataType);
+ gc();
+ return 0x800;
+ }
+ };
+
+ var ok = false;
+ try
+ {
+ new DataView(ab, 0x800, len);
+ }
+ catch (e)
+ {
+ ok = true;
+ }
+ assertEq(ok, true, "byteLength weirdness should have thrown");
+ assertEq(ab.byteLength, 0, "neutering should work for byteLength weirdness");
+}
+testByteLength("change-data");
+testByteLength("same-data");
+
+/******************************************************************************/
+
+if (typeof reportCompare === "function")
+ reportCompare(true, true);
+
+print("Tests complete");
new file mode 100644
--- /dev/null
+++ b/js/src/tests/ecma_6/extensions/DataView-set-arguments-neutering.js
@@ -0,0 +1,87 @@
+// |reftest| skip-if(!xulRuntime.shell) -- needs neuter()
+/*
+ * Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/licenses/publicdomain/
+ */
+
+var gTestfile = "DataView-set-arguments-neutering.js";
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 991981;
+var summary =
+ "DataView.prototype.set* methods shouldn't misbehave horribly if " +
+ "index-argument conversion neuters the ArrayBuffer being modified";
+
+print(BUGNUMBER + ": " + summary);
+
+/**************
+ * BEGIN TEST *
+ **************/
+
+function testIndex(dataType)
+{
+ var ab = new ArrayBuffer(0x1000);
+
+ var dv = new DataView(ab);
+
+ var start =
+ {
+ valueOf: function()
+ {
+ neuter(ab, dataType);
+ gc();
+ return 0xFFF;
+ }
+ };
+
+ var ok = false;
+ try
+ {
+ dv.setUint8(start, 0x42);
+ }
+ catch (e)
+ {
+ ok = true;
+ }
+ assertEq(ok, true, "should have thrown");
+ assertEq(ab.byteLength, 0, "should have been neutered correctly");
+}
+testIndex("change-data");
+testIndex("same-data");
+
+function testValue(dataType)
+{
+ var ab = new ArrayBuffer(0x100000);
+
+ var dv = new DataView(ab);
+
+ var value =
+ {
+ valueOf: function()
+ {
+ neuter(ab, dataType);
+ gc();
+ return 0x42;
+ }
+ };
+
+ var ok = false;
+ try
+ {
+ dv.setUint8(0xFFFFF, value);
+ }
+ catch (e)
+ {
+ ok = true;
+ }
+ assertEq(ok, true, "should have thrown");
+ assertEq(ab.byteLength, 0, "should have been neutered correctly");
+}
+testValue("change-data");
+testValue("same-data");
+
+/******************************************************************************/
+
+if (typeof reportCompare === "function")
+ reportCompare(true, true);
+
+print("Tests complete");
new file mode 100644
--- /dev/null
+++ b/js/src/tests/ecma_6/extensions/TypedArray-set-object-funky-length-neuters.js
@@ -0,0 +1,67 @@
+// |reftest| skip-if(!xulRuntime.shell) -- needs neuter()
+/*
+ * Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/licenses/publicdomain/
+ */
+
+var gTestfile = "set-object-funky-length-neuters.js";
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 991981;
+var summary =
+  "%TypedArray.prototype.set(object with funky length property, numeric offset) " +
+ "shouldn't misbehave if the funky length property neuters this typed array";
+
+print(BUGNUMBER + ": " + summary);
+
+/**************
+ * BEGIN TEST *
+ **************/
+
+var ctors = [Int8Array, Uint8Array, Uint8ClampedArray,
+ Int16Array, Uint16Array,
+ Int32Array, Uint32Array,
+ Float32Array, Float64Array];
+ctors.forEach(function(TypedArray) {
+ ["change-data", "same-data"].forEach(function(dataHandling) {
+ var buf = new ArrayBuffer(512 * 1024);
+ var ta = new TypedArray(buf);
+
+ var arraylike =
+ {
+ 0: 17,
+ 1: 42,
+ 2: 3,
+ 3: 99,
+ 4: 37,
+ 5: 9,
+ 6: 72,
+ 7: 31,
+ 8: 22,
+ 9: 0,
+ get length()
+ {
+ neuter(buf, dataHandling);
+ return 10;
+ }
+ };
+
+ var passed = false;
+ try
+ {
+ ta.set(arraylike, 0x1234);
+ }
+ catch (e)
+ {
+ passed = true;
+ }
+
+ assertEq(passed, true);
+ });
+});
+
+/******************************************************************************/
+
+if (typeof reportCompare === "function")
+ reportCompare(true, true);
+
+print("Tests complete");
new file mode 100644
--- /dev/null
+++ b/js/src/tests/ecma_6/extensions/TypedArray-subarray-arguments-neutering.js
@@ -0,0 +1,115 @@
+// |reftest| skip-if(!xulRuntime.shell) -- needs neuter()
+/*
+ * Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/licenses/publicdomain/
+ */
+
+var gTestfile = "TypedArray-subarray-arguments-neutering.js";
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 991981;
+var summary =
+ "%TypedArray.prototype.subarray shouldn't misbehave horribly if " +
+ "index-argument conversion neuters the underlying ArrayBuffer";
+
+print(BUGNUMBER + ": " + summary);
+
+/**************
+ * BEGIN TEST *
+ **************/
+
+function testBegin(dataType)
+{
+ var ab = new ArrayBuffer(0x1000);
+
+ var begin =
+ {
+ valueOf: function()
+ {
+ neuter(ab, dataType);
+ return 0x800;
+ }
+ };
+
+ var ta = new Uint8Array(ab);
+
+ var ok = false;
+ try
+ {
+ ta.subarray(begin);
+ }
+ catch (e)
+ {
+ ok = true;
+ }
+ assertEq(ok, true, "start weirdness should have thrown");
+ assertEq(ab.byteLength, 0, "neutering should work for start weirdness");
+}
+testBegin("change-data");
+testBegin("same-data");
+
+function testBeginWithEnd(dataType)
+{
+ var ab = new ArrayBuffer(0x1000);
+
+ var begin =
+ {
+ valueOf: function()
+ {
+ neuter(ab, dataType);
+ return 0x800;
+ }
+ };
+
+ var ta = new Uint8Array(ab);
+
+ var ok = false;
+ try
+ {
+ ta.subarray(begin, 0x1000);
+ }
+ catch (e)
+ {
+ ok = true;
+ }
+ assertEq(ok, true, "start weirdness should have thrown");
+ assertEq(ab.byteLength, 0, "neutering should work for start weirdness");
+}
+testBeginWithEnd("change-data");
+testBeginWithEnd("same-data");
+
+function testEnd(dataType)
+{
+ var ab = new ArrayBuffer(0x1000);
+
+ var end =
+ {
+ valueOf: function()
+ {
+ neuter(ab, dataType);
+ return 0x1000;
+ }
+ };
+
+ var ta = new Uint8Array(ab);
+
+ var ok = false;
+ try
+ {
+ ta.subarray(0x800, end);
+ }
+ catch (e)
+ {
+ ok = true;
+ }
+  assertEq(ok, true, "end weirdness should have thrown");
+  assertEq(ab.byteLength, 0, "neutering should work for end weirdness");
+}
+testEnd("change-data");
+testEnd("same-data");
+
+/******************************************************************************/
+
+if (typeof reportCompare === "function")
+ reportCompare(true, true);
+
+print("Tests complete");
new file mode 100644
--- /dev/null
+++ b/js/src/tests/js1_8_5/extensions/typedarray-copyWithin-arguments-neutering.js
@@ -0,0 +1,115 @@
+// |reftest| skip-if(!xulRuntime.shell) -- needs neuter()
+/*
+ * Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/licenses/publicdomain/
+ */
+
+var gTestfile = "typedarray-copyWithin-arguments-neutering.js";
+//-----------------------------------------------------------------------------
+var BUGNUMBER = 991981;
+var summary =
+ "%TypedArray.prototype.copyWithin shouldn't misbehave horribly if " +
+ "index-argument conversion neuters the underlying ArrayBuffer";
+
+print(BUGNUMBER + ": " + summary);
+
+/**************
+ * BEGIN TEST *
+ **************/
+
+function testBegin(dataType)
+{
+ var ab = new ArrayBuffer(0x1000);
+
+ var begin =
+ {
+ valueOf: function()
+ {
+ neuter(ab, dataType);
+ return 0x800;
+ }
+ };
+
+ var ta = new Uint8Array(ab);
+
+ var ok = false;
+ try
+ {
+ ta.copyWithin(0, begin, 0x1000);
+ }
+ catch (e)
+ {
+ ok = true;
+ }
+ assertEq(ok, true, "start weirdness should have thrown");
+ assertEq(ab.byteLength, 0, "neutering should work for start weirdness");
+}
+testBegin("change-data");
+testBegin("same-data");
+
+function testEnd(dataType)
+{
+ var ab = new ArrayBuffer(0x1000);
+
+ var end =
+ {
+ valueOf: function()
+ {
+ neuter(ab, dataType);
+ return 0x1000;
+ }
+ };
+
+ var ta = new Uint8Array(ab);
+
+ var ok = false;
+ try
+ {
+ ta.copyWithin(0, 0x800, end);
+ }
+ catch (e)
+ {
+ ok = true;
+ }
+  assertEq(ok, true, "end weirdness should have thrown");
+  assertEq(ab.byteLength, 0, "neutering should work for end weirdness");
+}
+testEnd("change-data");
+testEnd("same-data");
+
+function testDest(dataType)
+{
+ var ab = new ArrayBuffer(0x1000);
+
+ var dest =
+ {
+ valueOf: function()
+ {
+ neuter(ab, dataType);
+ return 0;
+ }
+ };
+
+ var ta = new Uint8Array(ab);
+
+ var ok = false;
+ try
+ {
+ ta.copyWithin(dest, 0x800, 0x1000);
+ }
+ catch (e)
+ {
+ ok = true;
+ }
+  assertEq(ok, true, "dest weirdness should have thrown");
+  assertEq(ab.byteLength, 0, "neutering should work for dest weirdness");
+}
+testDest("change-data");
+testDest("same-data");
+
+/******************************************************************************/
+
+if (typeof reportCompare === "function")
+ reportCompare(true, true);
+
+print("Tests complete");
--- a/js/src/vm/Runtime.cpp
+++ b/js/src/vm/Runtime.cpp
@@ -394,17 +394,17 @@ JSRuntime::~JSRuntime()
/*
* Even though all objects in the compartment are dead, we may have keep
* some filenames around because of gcKeepAtoms.
*/
FreeScriptData(this);
#ifdef DEBUG
- /* Don't hurt everyone in leaky ol' Mozilla with a fatal JS_ASSERT! */
+ /* Don't hurt everyone in leaky ol' Mozilla with a fatal MOZ_ASSERT! */
if (hasContexts()) {
unsigned cxcount = 0;
for (ContextIter acx(this); !acx.done(); acx.next()) {
fprintf(stderr,
"JS API usage error: found live context at %p\n",
(void *) acx.get());
cxcount++;
}
deleted file mode 100644
--- a/testing/web-platform/meta/media-source/mediasource-config-change-webm-a-bitrate.html.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[mediasource-config-change-webm-a-bitrate.html]
- type: testharness
- disabled: https://bugzilla.mozilla.org/show_bug.cgi?id=1066467
deleted file mode 100644
--- a/testing/web-platform/meta/media-source/mediasource-config-change-webm-av-audio-bitrate.html.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[mediasource-config-change-webm-av-audio-bitrate.html]
- type: testharness
- disabled: https://bugzilla.mozilla.org/show_bug.cgi?id=1066467
--- a/testing/web-platform/meta/media-source/mediasource-config-change-webm-av-framesize.html.ini
+++ b/testing/web-platform/meta/media-source/mediasource-config-change-webm-av-framesize.html.ini
@@ -1,3 +1,5 @@
[mediasource-config-change-webm-av-framesize.html]
type: testharness
- disabled: https://bugzilla.mozilla.org/show_bug.cgi?id=1066467
+ [Tests webm frame size changes in multiplexed content.]
+ expected: FAIL
+
deleted file mode 100644
--- a/testing/web-platform/meta/media-source/mediasource-config-change-webm-av-video-bitrate.html.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[mediasource-config-change-webm-av-video-bitrate.html]
- type: testharness
- disabled: https://bugzilla.mozilla.org/show_bug.cgi?id=1066467
--- a/testing/web-platform/meta/media-source/mediasource-config-change-webm-v-bitrate.html.ini
+++ b/testing/web-platform/meta/media-source/mediasource-config-change-webm-v-bitrate.html.ini
@@ -1,3 +1,5 @@
[mediasource-config-change-webm-v-bitrate.html]
type: testharness
- disabled: https://bugzilla.mozilla.org/show_bug.cgi?id=1066467
+ [Tests webm video-only bitrate changes.]
+ expected: FAIL
+
--- a/testing/web-platform/meta/media-source/mediasource-config-change-webm-v-framerate.html.ini
+++ b/testing/web-platform/meta/media-source/mediasource-config-change-webm-v-framerate.html.ini
@@ -1,3 +1,5 @@
[mediasource-config-change-webm-v-framerate.html]
type: testharness
- disabled: https://bugzilla.mozilla.org/show_bug.cgi?id=1066467
+ [Tests webm video-only frame rate changes.]
+ expected: FAIL
+
--- a/testing/web-platform/meta/media-source/mediasource-config-change-webm-v-framesize.html.ini
+++ b/testing/web-platform/meta/media-source/mediasource-config-change-webm-v-framesize.html.ini
@@ -1,3 +1,5 @@
[mediasource-config-change-webm-v-framesize.html]
type: testharness
- disabled: https://bugzilla.mozilla.org/show_bug.cgi?id=1066467
+ [Tests webm video-only frame size changes.]
+ expected: FAIL
+