author      Birunthan Mohanathas <birunthan@mohanathas.com>
date        Wed, 23 Oct 2013 16:34:10 -0400
changeset   151899   b8c97df0418de30a4f189a65793898cc0393b967
parent      151898   242bb227928364796b91ede50e3ae8d6d1bf56af
child       151900   3656e6195ed26ec8d0a4697ca2eb90254a3a1000
push id     25512
push user   cbook@mozilla.com
push date   Thu, 24 Oct 2013 05:06:01 +0000
reviewers   ehsan
bugs        784739
milestone   27.0a1
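
The patch below replaces NULL with nullptr across content/canvas/, content/media/, and content/svg/ (bug 784739, r=ehsan). As background, here is a minimal standalone sketch (not part of the patch) of why nullptr is safer: NULL is an integral constant on many toolchains, so it can silently select an integer overload, whereas nullptr converts only to pointer types.

    #include <cstdio>

    // Overload resolution is the classic trap: an integral null constant
    // picks the int overload, nullptr unambiguously picks the pointer one.
    static void Describe(int)   { std::puts("int overload"); }
    static void Describe(char*) { std::puts("pointer overload"); }

    int main() {
      Describe(0);        // the int overload, the same trap NULL can hit
      Describe(nullptr);  // always the pointer overload
      char* p = nullptr;  // nullptr converts to any pointer type...
      // int n = nullptr; // ...but, unlike NULL, never to an integer
      std::printf("p = %p\n", static_cast<void*>(p));
      return 0;
    }
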
--- a/content/canvas/src/ImageEncoder.h
+++ b/content/canvas/src/ImageEncoder.h
@@ -73,18 +73,18 @@ private:
                                 int32_t aFormat,
                                 const nsIntSize aSize,
                                 nsICanvasRenderingContextInternal* aContext,
                                 nsIInputStream** aStream,
                                 imgIEncoder* aEncoder);

   // Creates and returns an encoder instance of the type specified in aType.
   // aType may change to "image/png" if no instance of the original type could
-  // be created and we had to fall back to a PNG encoder. A return value of
-  // NULL should be interpreted as NS_IMAGELIB_ERROR_NO_ENCODER and aType is
+  // be created and we had to fall back to a PNG encoder. A null return value
+  // should be interpreted as NS_IMAGELIB_ERROR_NO_ENCODER and aType is
   // undefined in this case.
   static already_AddRefed<imgIEncoder> GetImageEncoder(nsAString& aType);

   friend class EncodingRunnable;
 };

 } // namespace dom
 } // namespace mozilla
--- a/content/media/DecoderTraits.cpp
+++ b/content/media/DecoderTraits.cpp
@@ -515,17 +515,18 @@ DecoderTraits::CreateDecoder(const nsACS
   }
 #endif
 #ifdef MOZ_RTSP
   if (IsRtspSupportedType(aType)) {
     decoder = new RtspOmxDecoder();
   }
 #endif
 #ifdef MOZ_MEDIA_PLUGINS
-  if (MediaDecoder::IsMediaPluginsEnabled() && GetMediaPluginHost()->FindDecoder(aType, NULL)) {
+  if (MediaDecoder::IsMediaPluginsEnabled() &&
+      GetMediaPluginHost()->FindDecoder(aType, nullptr)) {
     decoder = new MediaPluginDecoder(aType);
   }
 #endif
 #ifdef MOZ_WEBM
   if (IsWebMType(aType)) {
     decoder = new WebMDecoder();
   }
 #endif
--- a/content/media/MediaDecoderReader.h
+++ b/content/media/MediaDecoderReader.h
@@ -161,24 +161,24 @@ public:
       uint32_t mStride;
       uint32_t mOffset;
       uint32_t mSkip;
     };

     Plane mPlanes[3];
   };

-  // Constructs a VideoData object. If aImage is NULL, creates a new Image
-  // holding a copy of the YCbCr data passed in aBuffer. If aImage is not NULL,
-  // it's stored as the underlying video image and aBuffer is assumed to point
-  // to memory within aImage so no copy is made. aTimecode is a codec specific
-  // number representing the timestamp of the frame of video data. Returns
-  // nsnull if an error occurs. This may indicate that memory couldn't be
-  // allocated to create the VideoData object, or it may indicate some problem
-  // with the input data (e.g. negative stride).
+  // Constructs a VideoData object. If aImage is nullptr, creates a new Image
+  // holding a copy of the YCbCr data passed in aBuffer. If aImage is not
+  // nullptr, it's stored as the underlying video image and aBuffer is assumed
+  // to point to memory within aImage so no copy is made. aTimecode is a codec
+  // specific number representing the timestamp of the frame of video data.
+  // Returns nsnull if an error occurs. This may indicate that memory couldn't
+  // be allocated to create the VideoData object, or it may indicate some
+  // problem with the input data (e.g. negative stride).
   static VideoData* Create(VideoInfo& aInfo,
                            ImageContainer* aContainer,
                            Image* aImage,
                            int64_t aOffset,
                            int64_t aTime,
                            int64_t aEndTime,
                            const YCbCrBuffer &aBuffer,
                            bool aKeyframe,
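
The comment rewrapped above documents a copy-versus-adopt contract: a null aImage means "allocate an Image and copy aBuffer into it", a non-null aImage means "adopt it; aBuffer aliases its memory, so no copy is made". A hedged sketch of that pattern with hypothetical stand-in types (this is not Mozilla's VideoData):

    #include <cstdio>
    #include <memory>
    #include <vector>

    struct Image { std::vector<unsigned char> bytes; };  // stand-in type

    struct Frame {
      std::shared_ptr<Image> image;

      // If aImage is nullptr, copy aBuffer into a fresh Image; otherwise
      // adopt aImage and assume aBuffer points into it (no copy made).
      static Frame Create(std::shared_ptr<Image> aImage,
                          const unsigned char* aBuffer, size_t aLength) {
        Frame f;
        if (!aImage) {
          f.image = std::make_shared<Image>();
          f.image->bytes.assign(aBuffer, aBuffer + aLength);
        } else {
          f.image = std::move(aImage);
        }
        return f;
      }
    };

    int main() {
      const unsigned char raw[4] = {1, 2, 3, 4};
      Frame copied = Frame::Create(nullptr, raw, sizeof(raw));   // copy path
      Frame adopted = Frame::Create(copied.image,
                                    copied.image->bytes.data(),
                                    copied.image->bytes.size()); // adopt path
      std::printf("shared: %s\n", adopted.image == copied.image ? "yes" : "no");
      return 0;
    }
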
--- a/content/media/directshow/AudioSinkFilter.cpp
+++ b/content/media/directshow/AudioSinkFilter.cpp
@@ -9,17 +9,17 @@
 #include "AudioSinkInputPin.h"
 #include "VideoUtils.h"
 #include "prlog.h"

 #include <initguid.h>
 #include <wmsdkidl.h>

-#define DELETE_RESET(p) { delete (p) ; (p) = NULL ;}
+#define DELETE_RESET(p) { delete (p) ; (p) = nullptr ;}

 DEFINE_GUID(CLSID_MozAudioSinkFilter, 0x1872d8c8, 0xea8d, 0x4c34, 0xae, 0x96,
             0x69, 0xde, 0xf1, 0x33, 0x7b, 0x33);

 using namespace mozilla::media;

 namespace mozilla {
--- a/content/media/directshow/DirectShowReader.cpp
+++ b/content/media/directshow/DirectShowReader.cpp
@@ -78,17 +78,17 @@ DirectShowReader::ReadMetadata(MediaInfo
 {
   MOZ_ASSERT(mDecoder->OnDecodeThread(), "Should be on decode thread.");
   HRESULT hr;
   nsresult rv;

   // Create the filter graph, reference it by the GraphBuilder interface,
   // to make graph building more convenient.
   hr = CoCreateInstance(CLSID_FilterGraph,
-                        NULL,
+                        nullptr,
                         CLSCTX_INPROC_SERVER,
                         IID_IGraphBuilder,
                         reinterpret_cast<void**>(static_cast<IGraphBuilder**>(byRef(mGraph))));
   NS_ENSURE_TRUE(SUCCEEDED(hr) && mGraph, NS_ERROR_FAILURE);

 #ifdef DEBUG
   // Add the graph to the Running Object Table so that we can connect
   // to this graph with GraphEdit/GraphStudio. Note: on Vista and up you must
@@ -212,17 +212,17 @@ DirectShowReader::Finish(HRESULT aStatus
 {
   MOZ_ASSERT(mDecoder->OnDecodeThread(), "Should be on decode thread.");
   LOG("DirectShowReader::Finish(0x%x)", aStatus);

   // Notify the filter graph of end of stream.
   RefPtr<IMediaEventSink> eventSink;
   HRESULT hr = mGraph->QueryInterface(static_cast<IMediaEventSink**>(byRef(eventSink)));
   if (SUCCEEDED(hr) && eventSink) {
-    eventSink->Notify(EC_COMPLETE, aStatus, NULL);
+    eventSink->Notify(EC_COMPLETE, aStatus, 0);
   }
   return false;
 }

 bool
 DirectShowReader::DecodeAudioData()
 {
   MOZ_ASSERT(mDecoder->OnDecodeThread(), "Should be on decode thread.");
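
Note that in the Finish() hunk above the NULL argument to IMediaEventSink::Notify becomes 0 rather than nullptr: Notify's event parameters are LONG_PTR integers, so the old NULL was really the integer zero. A short sketch of the distinction, using simplified hypothetical stand-ins for the two kinds of signature (not the real DirectShow declarations):

    #include <cstdint>
    #include <cstdio>

    using LONG_PTR = intptr_t;  // integer wide enough to hold a pointer

    // Integer parameters, in the style of IMediaEventSink::Notify.
    static void Notify(long code, LONG_PTR p1, LONG_PTR p2) {
      std::printf("Notify(%ld, %ld, %ld)\n", code, (long)p1, (long)p2);
    }

    // Pointer parameters, in the style of a buffer Lock with optional outs.
    static void Lock(unsigned char** data, unsigned long* maxLen) {
      if (data)   { *data = nullptr; }
      if (maxLen) { *maxLen = 0; }
    }

    int main() {
      Notify(1, 0, 0);      // integer slots: NULL becomes 0, not nullptr
      unsigned char* buf = nullptr;
      Lock(&buf, nullptr);  // pointer slots: NULL becomes nullptr
      return 0;
    }
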
--- a/content/media/directshow/DirectShowUtils.cpp
+++ b/content/media/directshow/DirectShowUtils.cpp
@@ -158,17 +158,17 @@ GetGraphNotifyString(long evCode)
     CASE(EC_WMT_INDEX_EVENT); // Sent when an application uses the WM ASF Writer to index Windows Media Video files.
     CASE(S_OK); // Success.
     CASE(VFW_S_AUDIO_NOT_RENDERED); // Partial success; the audio was not rendered.
     CASE(VFW_S_DUPLICATE_NAME); // Success; the Filter Graph Manager modified a filter name to avoid duplication.
     CASE(VFW_S_PARTIAL_RENDER); // Partial success; some of the streams in this movie are in an unsupported format.
     CASE(VFW_S_VIDEO_NOT_RENDERED); // Partial success; the video was not rendered.
     CASE(E_ABORT); // Operation aborted.
     CASE(E_OUTOFMEMORY); // Insufficient memory.
-    CASE(E_POINTER); // NULL pointer argument.
+    CASE(E_POINTER); // Null pointer argument.
     CASE(VFW_E_CANNOT_CONNECT); // No combination of intermediate filters could be found to make the connection.
     CASE(VFW_E_CANNOT_RENDER); // No combination of filters could be found to render the stream.
     CASE(VFW_E_NO_ACCEPTABLE_TYPES); // There is no common media type between these pins.
     CASE(VFW_E_NOT_IN_GRAPH);
     default:
       return "Unknown Code";
   };
@@ -182,17 +182,17 @@ CreateAndAddFilter(IGraphBuilder* aGraph
                    IBaseFilter **aOutFilter)
 {
   NS_ENSURE_TRUE(aGraph, E_POINTER);
   NS_ENSURE_TRUE(aOutFilter, E_POINTER);
   HRESULT hr;

   nsRefPtr<IBaseFilter> filter;
   hr = CoCreateInstance(aFilterClsId,
-                        NULL,
+                        nullptr,
                         CLSCTX_INPROC_SERVER,
                         IID_IBaseFilter,
                         getter_AddRefs(filter));
   if (FAILED(hr)) {
     // Object probably not available on this system.
     return hr;
   }
@@ -210,17 +210,17 @@ AddMP3DMOWrapperFilter(IGraphBuilder* aG
 {
   NS_ENSURE_TRUE(aGraph, E_POINTER);
   NS_ENSURE_TRUE(aOutFilter, E_POINTER);
   HRESULT hr;

   // Create the wrapper filter.
   nsRefPtr<IBaseFilter> filter;
   hr = CoCreateInstance(CLSID_DMOWrapperFilter,
-                        NULL,
+                        nullptr,
                         CLSCTX_INPROC_SERVER,
                         IID_IBaseFilter,
                         getter_AddRefs(filter));
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

   // Query for IDMOWrapperFilter.
   nsRefPtr<IDMOWrapperFilter> dmoWrapper;
   hr = filter->QueryInterface(IID_IDMOWrapperFilter,
@@ -276,17 +276,17 @@ GetUnconnectedPin(IBaseFilter* aFilter,
 {
   RefPtr<IEnumPins> enumPins;
   HRESULT hr = aFilter->EnumPins(byRef(enumPins));
   NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);

   // Test each pin to see if it matches the direction we're looking for.
   RefPtr<IPin> pin;
-  while (S_OK == enumPins->Next(1, byRef(pin), NULL)) {
+  while (S_OK == enumPins->Next(1, byRef(pin), nullptr)) {
     bool matches = FALSE;
     if (SUCCEEDED(MatchUnconnectedPin(pin, aPinDir, &matches)) &&
         matches) {
       return pin;
     }
   }

   return nullptr;
--- a/content/media/directshow/DirectShowUtils.h
+++ b/content/media/directshow/DirectShowUtils.h
@@ -17,17 +17,17 @@ namespace mozilla {

 // Java-style "monitor".
 class Signal {
 public:

   Signal(CriticalSection* aLock)
     : mLock(aLock)
   {
     CriticalSectionAutoEnter lock(*mLock);
-    mEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+    mEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
   }

   ~Signal() {
     CriticalSectionAutoEnter lock(*mLock);
     CloseHandle(mEvent);
   }

   // Lock must be held.
--- a/content/media/directshow/SourceFilter.cpp
+++ b/content/media/directshow/SourceFilter.cpp
@@ -380,17 +380,17 @@ OutputPin::RequestAllocator(IMemAllocato
   // Just create a default allocator. It's highly unlikely that we'll use
   // this anyway, as most parsers insist on using their own allocators.
   nsRefPtr<IMemAllocator> allocator;
   hr = CoCreateInstance(CLSID_MemoryAllocator,
                         0,
                         CLSCTX_INPROC_SERVER,
                         IID_IMemAllocator,
                         getter_AddRefs(allocator));
-  if(FAILED(hr) || (allocator == NULL)) {
+  if(FAILED(hr) || (allocator == nullptr)) {
     NS_WARNING("Can't create our own DirectShow allocator.");
     return hr;
   }

   // See if we can make it suitable
   hr = allocator->SetProperties(&props, &actualProps);
   if (SUCCEEDED(hr)) {
     // We need to release our refcount on pAlloc, and addref
@@ -446,17 +446,17 @@ OutputPin::Request(IMediaSample* aSample
 STDMETHODIMP
 OutputPin::WaitForNext(DWORD aTimeout,
                        IMediaSample** aOutSample,
                        DWORD_PTR* aOutDwUser)
 {
   NS_ASSERTION(aTimeout == 0 || aTimeout == INFINITE,
                "Oops, we don't handle this!");

-  *aOutSample = NULL;
+  *aOutSample = nullptr;
   *aOutDwUser = 0;

   LONGLONG offset = 0;
   LONG count = 0;
   BYTE* buf = nullptr;

   {
     CriticalSectionAutoEnter lock(*mLock);
@@ -652,17 +652,17 @@ SourceFilter::~SourceFilter()
 BasePin*
 SourceFilter::GetPin(int n)
 {
   if (n == 0) {
     NS_ASSERTION(mOutputPin != 0, "GetPin with no pin!");
     return static_cast<BasePin*>(mOutputPin);
   } else {
-    return NULL;
+    return nullptr;
   }
 }

 // Get's the media type we're supplying.
 const MediaType*
 SourceFilter::GetMediaType() const
 {
   return &mMediaType;
--- a/content/media/gstreamer/GStreamerFormatHelper.cpp
+++ b/content/media/gstreamer/GStreamerFormatHelper.cpp
@@ -138,17 +138,17 @@ GetDefaultCapsFromMIMEType(const char *a
 bool GStreamerFormatHelper::CanHandleMediaType(const nsACString& aMIMEType,
                                                const nsAString* aCodecs)
 {
   if (!sLoadOK) {
     return false;
   }

   const char *type;
-  NS_CStringGetData(aMIMEType, &type, NULL);
+  NS_CStringGetData(aMIMEType, &type, nullptr);

   GstCaps *caps;
   if (aCodecs && !aCodecs->IsEmpty()) {
     caps = ConvertFormatsToCaps(type, aCodecs);
   } else {
     // Get a minimal set of codec caps for this MIME type we should support so
     // that we don't overreport MIME types we are able to play.
     caps = GetDefaultCapsFromMIMEType(type);
--- a/content/media/gstreamer/GStreamerLoader.cpp
+++ b/content/media/gstreamer/GStreamerLoader.cpp
@@ -39,17 +39,17 @@ load_gstreamer()
   return true;
 #endif
   static bool loaded = false;

   if (loaded) {
     return true;
   }

-  void *gstreamerLib = NULL;
+  void *gstreamerLib = nullptr;
   guint major = 0;
   guint minor = 0;
   guint micro, nano;

   typedef typeof(::gst_version) VersionFuncType;
   if (VersionFuncType *versionFunc = (VersionFuncType*)dlsym(RTLD_DEFAULT, "gst_version")) {
     versionFunc(&major, &minor, &micro, &nano);
   }
--- a/content/media/gstreamer/GStreamerMozVideoBuffer.cpp
+++ b/content/media/gstreamer/GStreamerMozVideoBuffer.cpp
@@ -44,17 +44,17 @@ gst_moz_video_buffer_finalize(GstMozVide
   GST_MINI_OBJECT_CLASS(gst_moz_video_buffer_parent_class)->finalize(GST_MINI_OBJECT(self));
 }

 static GstMozVideoBuffer*
 gst_moz_video_buffer_copy(GstMozVideoBuffer* self)
 {
   GstMozVideoBuffer* copy;

-  g_return_val_if_fail(GST_IS_MOZ_VIDEO_BUFFER(self), NULL);
+  g_return_val_if_fail(GST_IS_MOZ_VIDEO_BUFFER(self), nullptr);

   copy = gst_moz_video_buffer_new();

   /* we simply copy everything from our parent */
   GST_BUFFER_DATA(GST_BUFFER_CAST(copy)) =
       (guint8*)g_memdup(GST_BUFFER_DATA(GST_BUFFER_CAST(self)),
                         GST_BUFFER_SIZE(GST_BUFFER_CAST(self)));

   /* make sure it gets freed(even if the parent is subclassed, we return a
@@ -90,17 +90,17 @@ gst_moz_video_buffer_set_data(GstMozVide
   g_return_if_fail(GST_IS_MOZ_VIDEO_BUFFER(self));

   self->data = data;
 }

 GstMozVideoBufferData*
 gst_moz_video_buffer_get_data(const GstMozVideoBuffer* self)
 {
-  g_return_val_if_fail(GST_IS_MOZ_VIDEO_BUFFER(self), NULL);
+  g_return_val_if_fail(GST_IS_MOZ_VIDEO_BUFFER(self), nullptr);

   return self->data;
 }

 GType
 gst_moz_video_buffer_data_get_type(void)
 {
   static volatile gsize g_define_type_id__volatile = 0;
--- a/content/media/gstreamer/GStreamerReader.cpp
+++ b/content/media/gstreamer/GStreamerReader.cpp
@@ -194,17 +194,17 @@ GStreamerReader::Error(GstBus *aBus, Gst
 void GStreamerReader::PlayBinSourceSetupCb(GstElement* aPlayBin,
                                            GParamSpec* pspec,
                                            gpointer aUserData)
 {
   GstElement *source;
   GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);

-  g_object_get(aPlayBin, "source", &source, NULL);
+  g_object_get(aPlayBin, "source", &source, nullptr);
   reader->PlayBinSourceSetup(GST_APP_SRC(source));
 }

 void GStreamerReader::PlayBinSourceSetup(GstAppSrc* aSource)
 {
   mSource = GST_APP_SRC(aSource);
   gst_app_src_set_callbacks(mSource, &mSrcCallbacks, (gpointer) this, nullptr);
   MediaResource* resource = mDecoder->GetResource();
--- a/content/media/ogg/OggCodecState.cpp
+++ b/content/media/ogg/OggCodecState.cpp
@@ -304,17 +304,17 @@ bool TheoraState::Init() {
   // maximum, or zero sized.
   nsIntSize frame(mInfo.frame_width, mInfo.frame_height);
   nsIntRect picture(mInfo.pic_x, mInfo.pic_y, mInfo.pic_width, mInfo.pic_height);
   if (!VideoInfo::ValidateVideoRegion(frame, picture, frame)) {
     return mActive = false;
   }

   mCtx = th_decode_alloc(&mInfo, mSetup);
-  if (mCtx == NULL) {
+  if (mCtx == nullptr) {
     return mActive = false;
   }
   return true;
 }

 bool
 TheoraState::DecodeHeader(ogg_packet* aPacket)
@@ -833,31 +833,31 @@ OpusState::OpusState(ogg_page* aBosPage)
 #ifdef MOZ_SAMPLE_TYPE_FLOAT32
     mGain(1.0f),
 #else
     mGain_Q16(65536),
 #endif
     mChannelMapping(0),
     mStreams(0),
     mCoupledStreams(0),
-    mDecoder(NULL),
+    mDecoder(nullptr),
     mSkip(0),
     mPrevPacketGranulepos(0),
     mPrevPageGranulepos(0)
 {
   MOZ_COUNT_CTOR(OpusState);
 }

 OpusState::~OpusState() {
   MOZ_COUNT_DTOR(OpusState);
   Reset();

   if (mDecoder) {
     opus_multistream_decoder_destroy(mDecoder);
-    mDecoder = NULL;
+    mDecoder = nullptr;
   }
 }

 nsresult OpusState::Reset()
 {
   return Reset(false);
 }
@@ -888,17 +888,17 @@ nsresult OpusState::Reset(bool aStart)
 bool OpusState::Init(void)
 {
   if (!mActive)
     return false;

   int error;

-  NS_ASSERTION(mDecoder == NULL, "leaking OpusDecoder");
+  NS_ASSERTION(mDecoder == nullptr, "leaking OpusDecoder");

   mDecoder = opus_multistream_decoder_create(mRate,
                                              mChannels,
                                              mStreams,
                                              mCoupledStreams,
                                              mMappingTable,
                                              &error);
--- a/content/media/ogg/OggReader.cpp
+++ b/content/media/ogg/OggReader.cpp
@@ -308,17 +308,17 @@ nsresult OggReader::ReadMetadata(MediaIn
   }

   if (mVorbisState && ReadHeaders(mVorbisState)) {
     mInfo.mAudio.mHasAudio = true;
     mInfo.mAudio.mRate = mVorbisState->mInfo.rate;
     mInfo.mAudio.mChannels = mVorbisState->mInfo.channels > 2 ? 2 : mVorbisState->mInfo.channels;
     // Copy Vorbis info data for time computations on other threads.
     memcpy(&mVorbisInfo, &mVorbisState->mInfo, sizeof(mVorbisInfo));
-    mVorbisInfo.codec_setup = NULL;
+    mVorbisInfo.codec_setup = nullptr;
     mVorbisSerial = mVorbisState->mSerial;
     *aTags = mVorbisState->GetTags();
   } else {
     memset(&mVorbisInfo, 0, sizeof(mVorbisInfo));
   }
 #ifdef MOZ_OPUS
   if (mOpusState && ReadHeaders(mOpusState)) {
     mInfo.mAudio.mHasAudio = true;
--- a/content/media/omx/OMXCodecProxy.cpp
+++ b/content/media/omx/OMXCodecProxy.cpp
@@ -30,17 +30,17 @@ sp<OMXCodecProxy> OMXCodecProxy::Create(
     const char *matchComponentName,
     uint32_t flags,
     const sp<ANativeWindow> &nativeWindow)
 {
   sp<OMXCodecProxy> proxy;

   const char *mime;
   if (!meta->findCString(kKeyMIMEType, &mime)) {
-    return NULL;
+    return nullptr;
   }

   if (!strncasecmp(mime, "video/", 6)) {
     proxy = new OMXCodecProxy(omx, meta, createEncoder, source, matchComponentName, flags, nativeWindow);
   }
   return proxy;
 }
@@ -52,64 +52,64 @@ OMXCodecProxy::OMXCodecProxy(
     const sp<MediaSource> &source,
     const char *matchComponentName,
     uint32_t flags,
     const sp<ANativeWindow> &nativeWindow)
   : mOMX(omx),
     mSrcMeta(meta),
     mIsEncoder(createEncoder),
     mSource(source),
-    mComponentName(NULL),
+    mComponentName(nullptr),
     mFlags(flags),
     mNativeWindow(nativeWindow),
     mState(MediaResourceManagerClient::CLIENT_STATE_WAIT_FOR_RESOURCE)
 {
 }

 OMXCodecProxy::~OMXCodecProxy()
 {
   mState = MediaResourceManagerClient::CLIENT_STATE_SHUTDOWN;

   if (mOMXCodec.get()) {
     wp<MediaSource> tmp = mOMXCodec;
     mOMXCodec.clear();
-    while (tmp.promote() != NULL) {
+    while (tmp.promote() != nullptr) {
       // this value come from stagefrigh's AwesomePlayer.
       usleep(1000);
     }
   }

   // Complete all pending Binder ipc transactions
   IPCThreadState::self()->flushCommands();

   if (mManagerService.get() && mClient.get()) {
     mManagerService->cancelClient(mClient);
   }

   mSource.clear();
   free(mComponentName);
-  mComponentName = NULL;
+  mComponentName = nullptr;
 }

 MediaResourceManagerClient::State OMXCodecProxy::getState()
 {
   Mutex::Autolock autoLock(mLock);
   return mState;
 }

 void OMXCodecProxy::setEventListener(const wp<OMXCodecProxy::EventListener>& listener)
 {
   Mutex::Autolock autoLock(mLock);
   mEventListener = listener;
 }

 void OMXCodecProxy::notifyStatusChangedLocked()
 {
-  if (mEventListener != NULL) {
+  if (mEventListener != nullptr) {
     sp<EventListener> listener = mEventListener.promote();
-    if (listener != NULL) {
+    if (listener != nullptr) {
       listener->statusChanged();
     }
   }
 }

 void OMXCodecProxy::requestResource()
 {
   Mutex::Autolock autoLock(mLock);
@@ -117,17 +117,17 @@ void OMXCodecProxy::requestResource()
   if (mClient.get()) {
     return;
   }
   sp<MediaResourceManagerClient::EventListener> listener = this;
   mClient = new MediaResourceManagerClient(listener);
   mManagerService = mClient->getMediaResourceManagerService();
   if (!mManagerService.get()) {
-    mClient = NULL;
+    mClient = nullptr;
     return;
   }
   mManagerService->requestMediaResource(mClient, MediaResourceManagerClient::HW_VIDEO_DECODER);
 }

 bool OMXCodecProxy::IsWaitingResources()
 {
@@ -154,17 +154,17 @@ void OMXCodecProxy::statusChanged(int ev
     mState = MediaResourceManagerClient::CLIENT_STATE_SHUTDOWN;
     notifyStatusChangedLocked();
     return;
   }

   if (!strncasecmp(mime, "video/", 6)) {
     sp<MediaSource> codec;
     mOMXCodec = OMXCodec::Create(mOMX, mSrcMeta, mIsEncoder, mSource, mComponentName, mFlags, mNativeWindow);
-    if (mOMXCodec == NULL) {
+    if (mOMXCodec == nullptr) {
       mState = MediaResourceManagerClient::CLIENT_STATE_SHUTDOWN;
       notifyStatusChangedLocked();
       return;
     }
     // Check if this video is sized such that we're comfortable
     // possibly using an OMX decoder.
     int32_t maxWidth, maxHeight;
     char propValue[PROPERTY_VALUE_MAX];
@@ -199,58 +199,58 @@ void OMXCodecProxy::statusChanged(int ev
 status_t OMXCodecProxy::start(MetaData *params)
 {
   Mutex::Autolock autoLock(mLock);

   if (mState != MediaResourceManagerClient::CLIENT_STATE_RESOURCE_ASSIGNED) {
     return NO_INIT;
   }
-  CHECK(mOMXCodec.get() != NULL);
+  CHECK(mOMXCodec.get() != nullptr);
   return mOMXCodec->start();
 }

 status_t OMXCodecProxy::stop()
 {
   Mutex::Autolock autoLock(mLock);

   if (mState != MediaResourceManagerClient::CLIENT_STATE_RESOURCE_ASSIGNED) {
     return NO_INIT;
   }
-  CHECK(mOMXCodec.get() != NULL);
+  CHECK(mOMXCodec.get() != nullptr);
   return mOMXCodec->stop();
 }

 sp<MetaData> OMXCodecProxy::getFormat()
 {
   Mutex::Autolock autoLock(mLock);

   if (mState != MediaResourceManagerClient::CLIENT_STATE_RESOURCE_ASSIGNED) {
     sp<MetaData> meta = new MetaData;
     return meta;
   }
-  CHECK(mOMXCodec.get() != NULL);
+  CHECK(mOMXCodec.get() != nullptr);
   return mOMXCodec->getFormat();
 }

 status_t OMXCodecProxy::read(MediaBuffer **buffer, const ReadOptions *options)
 {
   Mutex::Autolock autoLock(mLock);

   if (mState != MediaResourceManagerClient::CLIENT_STATE_RESOURCE_ASSIGNED) {
     return NO_INIT;
   }
-  CHECK(mOMXCodec.get() != NULL);
+  CHECK(mOMXCodec.get() != nullptr);
   return mOMXCodec->read(buffer, options);
 }

 status_t OMXCodecProxy::pause()
 {
   Mutex::Autolock autoLock(mLock);

   if (mState != MediaResourceManagerClient::CLIENT_STATE_RESOURCE_ASSIGNED) {
     return NO_INIT;
   }
-  CHECK(mOMXCodec.get() != NULL);
+  CHECK(mOMXCodec.get() != nullptr);
   return mOMXCodec->pause();
 }

 } // namespace android
--- a/content/media/omx/OMXCodecProxy.h
+++ b/content/media/omx/OMXCodecProxy.h
@@ -27,38 +27,38 @@ public:
   struct EventListener : public virtual RefBase {
     virtual void statusChanged() = 0;
   };

   static sp<OMXCodecProxy> Create(
           const sp<IOMX> &omx,
           const sp<MetaData> &meta, bool createEncoder,
           const sp<MediaSource> &source,
-          const char *matchComponentName = NULL,
+          const char *matchComponentName = nullptr,
           uint32_t flags = 0,
-          const sp<ANativeWindow> &nativeWindow = NULL);
+          const sp<ANativeWindow> &nativeWindow = nullptr);

   MediaResourceManagerClient::State getState();

   void setEventListener(const wp<EventListener>& listener);

   void requestResource();
   bool IsWaitingResources();

   // MediaResourceManagerClient::EventListener
   virtual void statusChanged(int event);

   // MediaSource
-  virtual status_t start(MetaData *params = NULL);
+  virtual status_t start(MetaData *params = nullptr);
   virtual status_t stop();

   virtual sp<MetaData> getFormat();

   virtual status_t read(
-          MediaBuffer **buffer, const ReadOptions *options = NULL);
+          MediaBuffer **buffer, const ReadOptions *options = nullptr);

   virtual status_t pause();

 protected:
   OMXCodecProxy(
       const sp<IOMX> &omx,
       const sp<MetaData> &meta,
       bool createEncoder,
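
The header hunk above also switches defaulted arguments to nullptr. That works both for plain pointers and for Android's sp<> smart pointer, since sp<T> is implicitly constructible from a raw pointer. A minimal sketch, using a simplified stand-in for sp<> (not the real Android implementation):

    #include <cstdio>

    // Simplified smart pointer constructible from a raw pointer, so that
    // `= nullptr` is usable as a default argument value.
    template <typename T>
    class sp {
     public:
      sp(T* ptr = nullptr) : mPtr(ptr) {}
      T* get() const { return mPtr; }
     private:
      T* mPtr;
    };

    struct Window {};  // hypothetical stand-in for ANativeWindow

    static void Create(const char* name = nullptr,
                       const sp<Window>& window = nullptr) {
      std::printf("name=%s window=%p\n", name ? name : "(none)",
                  static_cast<const void*>(window.get()));
    }

    int main() {
      Create();             // both defaults are null
      Create("video/avc");  // window default still kicks in
      return 0;
    }
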
--- a/content/media/plugins/MediaPluginHost.cpp
+++ b/content/media/plugins/MediaPluginHost.cpp
@@ -29,17 +29,17 @@
 #define ALOG(args...) __android_log_print(ANDROID_LOG_INFO, "MediaPluginHost" , ## args)
 #else
 #define ALOG(args...) /* do nothing */
 #endif

 using namespace MPAPI;

 Decoder::Decoder() :
-  mResource(NULL), mPrivate(NULL)
+  mResource(nullptr), mPrivate(nullptr)
 {
 }

 namespace mozilla {

 static char* GetResource(Decoder *aDecoder)
 {
   return static_cast<char*>(aDecoder->mResource);
@@ -194,17 +194,17 @@ MediaPluginHost::MediaPluginHost() {
   MOZ_COUNT_CTOR(MediaPluginHost);

   mResourceServer = MediaResourceServer::Start();

   const char* name = GetOmxLibraryName();
   ALOG("Loading OMX Plugin: %s", name ? name : "nullptr");
   if (name) {
     char *path = PR_GetLibraryFilePathname("libxul.so", (PRFuncPtr) GetOmxLibraryName);
-    PRLibrary *lib = NULL;
+    PRLibrary *lib = nullptr;
     if (path) {
       nsAutoCString libpath(path);
       PR_Free(path);
       int32_t slash = libpath.RFindChar('/');
       if (slash != kNotFound) {
         libpath.Truncate(slash + 1);
         libpath.Append(name);
         lib = PR_LoadLibrary(libpath.get());
--- a/content/media/plugins/MediaPluginReader.cpp
+++ b/content/media/plugins/MediaPluginReader.cpp
@@ -17,22 +17,22 @@ namespace mozilla {
 typedef mozilla::layers::Image Image;

 MediaPluginReader::MediaPluginReader(AbstractMediaDecoder *aDecoder,
                                      const nsACString& aContentType) :
   MediaDecoderReader(aDecoder),
   mType(aContentType),
-  mPlugin(NULL),
+  mPlugin(nullptr),
   mHasAudio(false),
   mHasVideo(false),
   mVideoSeekTimeUs(-1),
   mAudioSeekTimeUs(-1),
-  mLastVideoFrame(NULL)
+  mLastVideoFrame(nullptr)
 {
 }

 MediaPluginReader::~MediaPluginReader()
 {
   ResetDecode();
 }
@@ -100,38 +100,38 @@ nsresult MediaPluginReader::ReadMetadata
   return NS_OK;
 }

 // Resets all state related to decoding, emptying all buffers etc.
 nsresult MediaPluginReader::ResetDecode()
 {
   if (mLastVideoFrame) {
     delete mLastVideoFrame;
-    mLastVideoFrame = NULL;
+    mLastVideoFrame = nullptr;
   }
   if (mPlugin) {
     GetMediaPluginHost()->DestroyDecoder(mPlugin);
-    mPlugin = NULL;
+    mPlugin = nullptr;
   }

   return NS_OK;
 }

 bool MediaPluginReader::DecodeVideoFrame(bool &aKeyframeSkip,
                                          int64_t aTimeThreshold)
 {
   // Record number of frames decoded and parsed. Automatically update the
   // stats counters using the AutoNotifyDecoded stack-based class.
   uint32_t parsed = 0, decoded = 0;
   AbstractMediaDecoder::AutoNotifyDecoded autoNotify(mDecoder, parsed, decoded);

   // Throw away the currently buffered frame if we are seeking.
   if (mLastVideoFrame && mVideoSeekTimeUs != -1) {
     delete mLastVideoFrame;
-    mLastVideoFrame = NULL;
+    mLastVideoFrame = nullptr;
   }

   ImageBufferCallback bufferCallback(mDecoder->GetImageContainer());
   nsRefPtr<Image> currentImage;

   // Read next frame
   while (true) {
     MPAPI::VideoFrame frame;
@@ -141,17 +141,17 @@ bool MediaPluginReader::DecodeVideoFrame
       // of the video as the end time.
       if (mLastVideoFrame) {
         int64_t durationUs;
         mPlugin->GetDuration(mPlugin, &durationUs);
         mLastVideoFrame->mEndTime = (durationUs > mLastVideoFrame->mTime)
                                   ? durationUs
                                   : mLastVideoFrame->mTime;
         mVideoQueue.Push(mLastVideoFrame);
-        mLastVideoFrame = NULL;
+        mLastVideoFrame = nullptr;
       }
       return false;
     }
     mVideoSeekTimeUs = -1;

     if (aKeyframeSkip) {
       // Disable keyframe skipping for now as
       // stagefright doesn't seem to be telling us
@@ -261,17 +261,17 @@ bool MediaPluginReader::DecodeVideoFrame
       mLastVideoFrame->mEndTime = v->mTime;

       // We have the start time of the next frame, so we can push the previous
       // frame into the queue, except if the end time is below the threshold,
       // in which case it wouldn't be displayed anyway.
       if (mLastVideoFrame->mEndTime < aTimeThreshold) {
         delete mLastVideoFrame;
-        mLastVideoFrame = NULL;
+        mLastVideoFrame = nullptr;
         continue;
       }

       mVideoQueue.Push(mLastVideoFrame);

       // Buffer the current frame we just decoded.
       mLastVideoFrame = v;
--- a/content/media/plugins/MediaResourceServer.cpp
+++ b/content/media/plugins/MediaResourceServer.cpp
@@ -220,17 +220,17 @@ ServeResourceEvent::Run() {
     // Range: bytes=nnnn-
     // Were 'nnnn' is an integer number.
     // The end of the range is not checked, instead we return up to
     // the end of the resource and the client is informed of this via
     // the content-range header.
     NS_NAMED_LITERAL_CSTRING(byteRange, "Range: bytes=");
     const char* s = strstr(line.get(), byteRange.get());
     if (s) {
-      start = strtoll(s+byteRange.Length(), NULL, 10);
+      start = strtoll(s+byteRange.Length(), nullptr, 10);

       // Clamp 'start' to be between 0 and the resource length.
       start = std::max(0ll, std::min(resource->GetLength(), start));
     }
   }

   // HTTP response to use if this is a non byte range request
   const char* response_normal = "HTTP/1.1 200 OK\r\n";
--- a/content/media/webaudio/blink/HRTFDatabaseLoader.cpp
+++ b/content/media/webaudio/blink/HRTFDatabaseLoader.cpp
@@ -193,17 +193,17 @@ HRTFDatabaseLoader::shutdownEnumFunc(Loa
     entry->mLoader->waitForLoaderThreadCompletion();
     return PLDHashOperator::PL_DHASH_NEXT;
 }

 void HRTFDatabaseLoader::shutdown()
 {
     MOZ_ASSERT(NS_IsMainThread());
     if (s_loaderMap) {
-        // Set s_loaderMap to NULL so that the hashtable is not modified on
+        // Set s_loaderMap to nullptr so that the hashtable is not modified on
         // reference release during enumeration.
         nsTHashtable<LoaderByRateEntry>* loaderMap = s_loaderMap;
         s_loaderMap = nullptr;
         loaderMap->EnumerateEntries(shutdownEnumFunc, nullptr);
         delete loaderMap;
     }
 }

 } // namespace WebCore
--- a/content/media/webm/WebMReader.cpp
+++ b/content/media/webm/WebMReader.cpp
@@ -194,17 +194,17 @@ WebMReader::~WebMReader()
   vorbis_info_clear(&mVorbisInfo);
   vorbis_comment_clear(&mVorbisComment);
   MOZ_COUNT_DTOR(WebMReader);
 }

 nsresult WebMReader::Init(MediaDecoderReader* aCloneDonor)
 {
-  if (vpx_codec_dec_init(&mVP8, vpx_codec_vp8_dx(), NULL, 0)) {
+  if (vpx_codec_dec_init(&mVP8, vpx_codec_vp8_dx(), nullptr, 0)) {
     return NS_ERROR_FAILURE;
   }

   vorbis_info_init(&mVorbisInfo);
   vorbis_comment_init(&mVorbisComment);
   memset(&mVorbisDsp, 0, sizeof(vorbis_dsp_state));
   memset(&mVorbisBlock, 0, sizeof(vorbis_block));
@@ -847,29 +847,29 @@ bool WebMReader::DecodeVideoFrame(bool &
       parsed++; // Assume 1 frame per chunk.
       continue;
     }

     if (aKeyframeSkip && si.is_kf) {
       aKeyframeSkip = false;
     }

-    if (vpx_codec_decode(&mVP8, data, length, NULL, 0)) {
+    if (vpx_codec_decode(&mVP8, data, length, nullptr, 0)) {
       return false;
     }

     // If the timestamp of the video frame is less than
     // the time threshold required then it is not added
     // to the video queue and won't be displayed.
     if (tstamp_usecs < aTimeThreshold) {
       parsed++; // Assume 1 frame per chunk.
       continue;
     }

-    vpx_codec_iter_t iter = NULL;
+    vpx_codec_iter_t iter = nullptr;
     vpx_image_t *img;

     while ((img = vpx_codec_get_frame(&mVP8, &iter))) {
       NS_ASSERTION(img->fmt == IMG_FMT_I420, "WebM image format is not I420");

       // Chroma shifts are rounded down as per the decoding examples in the VP8 SDK
       VideoData::YCbCrBuffer b;
       b.mPlanes[0].mData = img->planes[0];
--- a/content/media/webm/WebMReader.h
+++ b/content/media/webm/WebMReader.h
@@ -216,23 +216,23 @@ public:
 protected:
   // Value passed to NextPacket to determine if we are reading a video or an
   // audio packet.
   enum TrackType {
     VIDEO = 0,
     AUDIO = 1
   };

-  // Read a packet from the nestegg file. Returns NULL if all packets for
+  // Read a packet from the nestegg file. Returns nullptr if all packets for
   // the particular track have been read. Pass VIDEO or AUDIO to indicate the
   // type of the packet we want to read.
 #ifdef MOZ_DASH
   nsReturnRef<NesteggPacketHolder> NextPacketInternal(TrackType aTrackType);

-  // Read a packet from the nestegg file. Returns NULL if all packets for
+  // Read a packet from the nestegg file. Returns nullptr if all packets for
   // the particular track have been read. Pass VIDEO or AUDIO to indicate the
   // type of the packet we want to read. If the reader reaches a switch access
   // point, this function will get a packet from |mNextReader|.
 #endif
   nsReturnRef<NesteggPacketHolder> NextPacket(TrackType aTrackType);

   // Pushes a packet to the front of the video packet queue.
   virtual void PushVideoPacket(NesteggPacketHolder* aItem);
--- a/content/media/webrtc/MediaEngineDefault.cpp
+++ b/content/media/webrtc/MediaEngineDefault.cpp
@@ -140,17 +140,17 @@ MediaEngineDefaultVideoSource::Stop(Sour
   if (mState != kStarted) {
     return NS_ERROR_FAILURE;
   }
   if (!mTimer) {
     return NS_ERROR_FAILURE;
   }

   mTimer->Cancel();
-  mTimer = NULL;
+  mTimer = nullptr;

   aSource->EndTrack(aID);
   aSource->Finish();

   mState = kStopped;
   return NS_OK;
 }
@@ -238,17 +238,17 @@ MediaEngineDefaultVideoSource::NotifyPul
   }

   // Note: we're not giving up mImage here
   nsRefPtr<layers::Image> image = mImage;
   TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
   TrackTicks delta = target - aLastEndTime;

   if (delta > 0) {
-    // NULL images are allowed
+    // nullptr images are allowed
     if (image) {
       segment.AppendFrame(image.forget(), delta, gfxIntSize(mOpts.mWidth, mOpts.mHeight));
     } else {
       segment.AppendFrame(nullptr, delta, gfxIntSize(0,0));
     }
     // This can fail if either a) we haven't added the track yet, or b)
     // we've removed or finished the track.
@@ -393,17 +393,17 @@ MediaEngineDefaultAudioSource::Stop(Sour
   if (mState != kStarted) {
     return NS_ERROR_FAILURE;
   }
   if (!mTimer) {
     return NS_ERROR_FAILURE;
   }

   mTimer->Cancel();
-  mTimer = NULL;
+  mTimer = nullptr;

   aSource->EndTrack(aID);
   aSource->Finish();

   mState = kStopped;
   return NS_OK;
 }
--- a/content/media/webrtc/MediaEngineTabVideoSource.cpp
+++ b/content/media/webrtc/MediaEngineTabVideoSource.cpp
@@ -44,17 +44,17 @@ MediaEngineTabVideoSource::StopRunnable:
 {
   nsCOMPtr<nsPIDOMWindow> privateDOMWindow = do_QueryInterface(mVideoSource->mWindow);
   if (privateDOMWindow && mVideoSource && privateDOMWindow->GetChromeEventHandler()) {
     privateDOMWindow->GetChromeEventHandler()->RemoveEventListener(NS_LITERAL_STRING("MozAfterPaint"), mVideoSource, false);
   }

   if (mVideoSource->mTimer) {
     mVideoSource->mTimer->Cancel();
-    mVideoSource->mTimer = NULL;
+    mVideoSource->mTimer = nullptr;
   }
   return NS_OK;
 }

 NS_IMETHODIMP
 MediaEngineTabVideoSource::HandleEvent(nsIDOMEvent *event) {
   Draw();
   return NS_OK;
@@ -147,17 +147,17 @@ NotifyPull(MediaStreamGraph*, SourceMedi
   VideoSegment segment;
   MonitorAutoLock mon(mMonitor);

   // Note: we're not giving up mImage here
   nsRefPtr<layers::CairoImage> image = mImage;
   TrackTicks target = TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
   TrackTicks delta = target - aLastEndTime;
   if (delta > 0) {
-    // NULL images are allowed
+    // nullptr images are allowed
     if (image) {
       gfxIntSize size = image->GetSize();
       segment.AppendFrame(image.forget(), delta, size);
     } else {
       segment.AppendFrame(nullptr, delta, gfxIntSize(0,0));
     }
     // This can fail if either a) we haven't added the track yet, or b)
     // we've removed or finished the track.
--- a/content/media/webrtc/MediaEngineWebRTC.cpp
+++ b/content/media/webrtc/MediaEngineWebRTC.cpp
@@ -233,29 +233,29 @@ MediaEngineWebRTC::EnumerateVideoDevices
   return;
 #endif
 }

 void
 MediaEngineWebRTC::EnumerateAudioDevices(nsTArray<nsRefPtr<MediaEngineAudioSource> >* aASources)
 {
-  webrtc::VoEBase* ptrVoEBase = NULL;
-  webrtc::VoEHardware* ptrVoEHw = NULL;
+  webrtc::VoEBase* ptrVoEBase = nullptr;
+  webrtc::VoEHardware* ptrVoEHw = nullptr;
   // We spawn threads to handle gUM runnables, so we must protect the member vars
   MutexAutoLock lock(mMutex);

 #ifdef MOZ_WIDGET_ANDROID
   jobject context = mozilla::AndroidBridge::Bridge()->GetGlobalContextRef();

   // get the JVM
   JavaVM *jvm = mozilla::AndroidBridge::Bridge()->GetVM();
   JNIEnv *env;
-  jvm->AttachCurrentThread(&env, NULL);
+  jvm->AttachCurrentThread(&env, nullptr);

   if (webrtc::VoiceEngine::SetAndroidObjects(jvm, (void*)context) != 0) {
     LOG(("VoiceEngine:SetAndroidObjects Failed"));
     return;
   }
   env->DeleteGlobalRef(context);
 #endif
@@ -352,13 +352,13 @@ MediaEngineWebRTC::Shutdown()
     webrtc::VideoEngine::Delete(mVideoEngine);
   }

   if (mVoiceEngine) {
     mAudioSources.Clear();
     webrtc::VoiceEngine::Delete(mVoiceEngine);
   }

-  mVideoEngine = NULL;
-  mVoiceEngine = NULL;
+  mVideoEngine = nullptr;
+  mVoiceEngine = nullptr;
 }

 }
--- a/content/media/webrtc/MediaEngineWebRTC.h
+++ b/content/media/webrtc/MediaEngineWebRTC.h
@@ -124,17 +124,17 @@ public:
     , mCaptureIndex(aIndex)
     , mFps(-1)
     , mMinFps(-1)
     , mMonitor("WebRTCCamera.Monitor")
     , mWidth(0)
     , mHeight(0)
     , mInitDone(false)
     , mInSnapshotMode(false)
-    , mSnapshotPath(NULL) {
+    , mSnapshotPath(nullptr) {
     MOZ_ASSERT(aVideoEnginePtr);
     mState = kReleased;
     Init();
   }
 #endif

   ~MediaEngineWebRTCVideoSource() { Shutdown(); }
--- a/content/media/webrtc/MediaEngineWebRTCVideo.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCVideo.cpp
@@ -135,17 +135,17 @@ MediaEngineWebRTCVideoSource::NotifyPull
   // We may want to signal if the actual frame rate is below mMinFPS -
   // cameras often don't return the requested frame rate especially in low
   // light; we should consider surfacing this so that we can switch to a
   // lower resolution (which may up the frame rate)

   // Don't append if we've already provided a frame that supposedly goes past the current aDesiredTime
   // Doing so means a negative delta and thus messes up handling of the graph
   if (delta > 0) {
-    // NULL images are allowed
+    // nullptr images are allowed
     if (image) {
       segment.AppendFrame(image.forget(), delta, gfxIntSize(mWidth, mHeight));
     } else {
       segment.AppendFrame(nullptr, delta, gfxIntSize(0,0));
     }
     // This can fail if either a) we haven't added the track yet, or b)
     // we've removed or finished the track.
     if (aSource->AppendToTrack(aID, &(segment))) {
@@ -495,29 +495,29 @@ MediaEngineWebRTCVideoSource::Snapshot(u
   if (!mSnapshotPath) {
     return NS_ERROR_FAILURE;
   }

   NS_ConvertUTF16toUTF8 path(*mSnapshotPath);
   if (vieFile->GetCaptureDeviceSnapshot(mCaptureIndex, path.get()) < 0) {
     delete mSnapshotPath;
-    mSnapshotPath = NULL;
+    mSnapshotPath = nullptr;
     return NS_ERROR_FAILURE;
   }

   // Stop the camera.
   mViERender->StopRender(mCaptureIndex);
   mViERender->RemoveRenderer(mCaptureIndex);

   nsCOMPtr<nsIFile> file;
   nsresult rv = NS_NewLocalFile(*mSnapshotPath, false, getter_AddRefs(file));

   delete mSnapshotPath;
-  mSnapshotPath = NULL;
+  mSnapshotPath = nullptr;

   NS_ENSURE_SUCCESS(rv, rv);

   NS_ADDREF(*aFile = new nsDOMFileFile(file));
 #endif
   return NS_OK;
 }
@@ -535,30 +535,30 @@ MediaEngineWebRTCVideoSource::Init()
   CopyUTF8toUTF16(deviceName, mDeviceName);
   CopyUTF8toUTF16(deviceName, mUniqueId);
 #else
   // fix compile warning for these being unused. (remove once used)
   (void) mFps;
   (void) mMinFps;

   LOG((__FUNCTION__));
-  if (mVideoEngine == NULL) {
+  if (mVideoEngine == nullptr) {
     return;
   }

   mViEBase = webrtc::ViEBase::GetInterface(mVideoEngine);
-  if (mViEBase == NULL) {
+  if (mViEBase == nullptr) {
     return;
   }

   // Get interfaces for capture, render for now
   mViECapture = webrtc::ViECapture::GetInterface(mVideoEngine);
   mViERender = webrtc::ViERender::GetInterface(mVideoEngine);

-  if (mViECapture == NULL || mViERender == NULL) {
+  if (mViECapture == nullptr || mViERender == nullptr) {
     return;
   }

   const uint32_t KMaxDeviceNameLength = 128;
   const uint32_t KMaxUniqueIdLength = 256;
   char deviceName[KMaxDeviceNameLength];
   char uniqueId[KMaxUniqueIdLength];
   if (mViECapture->GetCaptureDevice(mCaptureIndex,
--- a/content/media/webspeech/recognition/SpeechRecognition.cpp
+++ b/content/media/webspeech/recognition/SpeechRecognition.cpp
@@ -472,17 +472,17 @@ SpeechRecognition::NotifyFinalResult(Spe
   NS_NewDOMSpeechRecognitionEvent(getter_AddRefs(domEvent), nullptr, nullptr, nullptr);

   nsCOMPtr<nsIDOMSpeechRecognitionEvent> srEvent = do_QueryInterface(domEvent);
   nsRefPtr<SpeechRecognitionResultList> rlist = aEvent->mRecognitionResultList;
   nsCOMPtr<nsISupports> ilist = do_QueryInterface(rlist);
   srEvent->InitSpeechRecognitionEvent(NS_LITERAL_STRING("result"),
                                       true, false, 0, ilist,
                                       NS_LITERAL_STRING("NOT_IMPLEMENTED"),
-                                      NULL);
+                                      nullptr);
   domEvent->SetTrusted(true);

   bool defaultActionEnabled;
   this->DispatchEvent(domEvent, &defaultActionEnabled);
 }

 void
 SpeechRecognition::DoNothing(SpeechEvent* aEvent)
--- a/content/media/webspeech/synth/nsSpeechTask.cpp
+++ b/content/media/webspeech/synth/nsSpeechTask.cpp
@@ -162,17 +162,17 @@ nsSpeechTask::SendAudio(const JS::Value&
   if (mIndirectAudio) {
     NS_WARNING("Can't call SendAudio from an indirect audio speech service.");
     return NS_ERROR_FAILURE;
   }

   JS::Rooted<JSObject*> darray(aCx, &aData.toObject());
   JSAutoCompartment ac(aCx, darray);

-  JS::Rooted<JSObject*> tsrc(aCx, NULL);
+  JS::Rooted<JSObject*> tsrc(aCx, nullptr);

   // Allow either Int16Array or plain JS Array
   if (JS_IsInt16Array(darray)) {
     tsrc = darray;
   } else if (JS_IsArrayObject(aCx, darray)) {
     tsrc = JS_NewInt16ArrayFromArray(aCx, darray);
   }
--- a/content/media/webspeech/synth/pico/PicoModule.cpp
+++ b/content/media/webspeech/synth/pico/PicoModule.cpp
@@ -19,40 +19,40 @@ using namespace mozilla::dom;
 // Defines nsPicoServiceConstructor
 NS_GENERIC_FACTORY_SINGLETON_CONSTRUCTOR(nsPicoService,
                                          nsPicoService::GetInstanceForService)

 // Defines kPICOSERVICE_CID
 NS_DEFINE_NAMED_CID(PICOSERVICE_CID);

 static const mozilla::Module::CIDEntry kCIDs[] = {
-  { &kPICOSERVICE_CID, true, NULL, nsPicoServiceConstructor },
-  { NULL }
+  { &kPICOSERVICE_CID, true, nullptr, nsPicoServiceConstructor },
+  { nullptr }
 };

 static const mozilla::Module::ContractIDEntry kContracts[] = {
   { PICOSERVICE_CONTRACTID, &kPICOSERVICE_CID },
-  { NULL }
+  { nullptr }
 };

 static const mozilla::Module::CategoryEntry kCategories[] = {
   { "profile-after-change", "Pico Speech Synth", PICOSERVICE_CONTRACTID },
-  { NULL }
+  { nullptr }
 };

 static void
 UnloadPicoModule()
 {
   nsPicoService::Shutdown();
 }

 static const mozilla::Module kModule = {
   mozilla::Module::kVersion,
   kCIDs,
   kContracts,
   kCategories,
-  NULL,
-  NULL,
+  nullptr,
+  nullptr,
   UnloadPicoModule
 };

 NSMODULE_DEFN(synthpico) = &kModule;
 #endif
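
The module tables above end in an all-null entry; consumers walk the array until they hit that sentinel. A small self-contained sketch of the sentinel-terminated table pattern (hypothetical Entry type, not mozilla::Module):

    #include <cstdio>

    struct Entry {
      const char* contractId;  // nullptr marks the end of the table
      int cid;
    };

    static const Entry kEntries[] = {
      { "@example.org/first;1", 1 },
      { "@example.org/second;1", 2 },
      { nullptr }  // sentinel; remaining members are value-initialized
    };

    int main() {
      for (const Entry* e = kEntries; e->contractId; ++e) {
        std::printf("%s -> %d\n", e->contractId, e->cid);
      }
      return 0;
    }
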
--- a/content/media/wmf/DXVA2Manager.cpp
+++ b/content/media/wmf/DXVA2Manager.cpp
@@ -101,17 +101,17 @@ D3D9DXVA2Manager::Init()
   nsRefPtr<IDirect3DDevice9Ex> device;
   hr = d3d9Ex->CreateDeviceEx(D3DADAPTER_DEFAULT,
                               D3DDEVTYPE_HAL,
                               ::GetShellWindow(),
                               D3DCREATE_FPU_PRESERVE |
                               D3DCREATE_MULTITHREADED |
                               D3DCREATE_MIXED_VERTEXPROCESSING,
                               &params,
-                              NULL,
+                              nullptr,
                               getter_AddRefs(device));
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

   // Ensure we can create queries to synchronize operations between devices.
   // Without this, when we make a copy of the frame in order to share it with
   // another device, we can't be sure that the copy has finished before the
   // other device starts using it.
   nsRefPtr<IDirect3DQuery9> query;
--- a/content/media/wmf/WMFByteStream.cpp
+++ b/content/media/wmf/WMFByteStream.cpp
@@ -207,17 +207,17 @@ WMFByteStream::QueryInterface(REFIID aII
   }
   if (aIId == IID_IUnknown) {
     return DoGetInterface(static_cast<IMFByteStream*>(this), aInterface);
   }
   if (aIId == IID_IMFAttributes) {
     return DoGetInterface(static_cast<IMFAttributes*>(this), aInterface);
   }

-  *aInterface = NULL;
+  *aInterface = nullptr;
   return E_NOINTERFACE;
 }

 NS_IMPL_ADDREF(WMFByteStream)
 NS_IMPL_RELEASE(WMFByteStream)

 // Stores data regarding an async read opreation.
@@ -253,17 +253,17 @@
 STDMETHODIMP
 ReadRequest::QueryInterface(REFIID aIId, void **aInterface)
 {
   LOG("ReadRequest::QueryInterface %s", GetGUIDName(aIId).get());

   if (aIId == IID_IUnknown) {
     return DoGetInterface(static_cast<IUnknown*>(this), aInterface);
   }

-  *aInterface = NULL;
+  *aInterface = nullptr;
   return E_NOINTERFACE;
 }

 class ProcessReadRequestEvent MOZ_FINAL : public nsRunnable {
 public:
   ProcessReadRequestEvent(WMFByteStream* aStream,
                           IMFAsyncResult* aResult,
                           ReadRequest* aRequestState)
--- a/content/media/wmf/WMFReader.cpp
+++ b/content/media/wmf/WMFReader.cpp
@@ -234,17 +234,17 @@ ConfigureSourceReaderStream(IMFSourceRea
   hr = type->SetGUID(MF_MT_MAJOR_TYPE, majorType);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

   hr = type->SetGUID(MF_MT_SUBTYPE, aOutputSubType);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

   // Set the uncompressed format. This can fail if the decoder can't produce
   // that type.
-  return aReader->SetCurrentMediaType(aStreamIndex, NULL, type);
+  return aReader->SetCurrentMediaType(aStreamIndex, nullptr, type);
 }

 // Returns the duration of the resource, in microseconds.
 HRESULT
 GetSourceReaderDuration(IMFSourceReader *aReader,
                         int64_t& aOutDuration)
 {
   AutoPropVar var;
@@ -328,35 +328,35 @@ GetPictureRegion(IMFMediaTyp
   // If pan and scan mode is enabled. Try to get the display region.
   HRESULT hr = E_FAIL;
   MFVideoArea videoArea;
   memset(&videoArea, 0, sizeof(MFVideoArea));
   if (panScan) {
     hr = aMediaType->GetBlob(MF_MT_PAN_SCAN_APERTURE,
                              (UINT8*)&videoArea,
                              sizeof(MFVideoArea),
-                             NULL);
+                             nullptr);
   }

   // If we're not in pan-and-scan mode, or the pan-and-scan region is not set,
   // check for a minimimum display aperture.
   if (!panScan || hr == MF_E_ATTRIBUTENOTFOUND) {
     hr = aMediaType->GetBlob(MF_MT_MINIMUM_DISPLAY_APERTURE,
                              (UINT8*)&videoArea,
                              sizeof(MFVideoArea),
-                             NULL);
+                             nullptr);
   }

   if (hr == MF_E_ATTRIBUTENOTFOUND) {
     // Minimum display aperture is not set, for "backward compatibility with
     // some components", check for a geometric aperture.
     hr = aMediaType->GetBlob(MF_MT_GEOMETRIC_APERTURE,
                              (UINT8*)&videoArea,
                              sizeof(MFVideoArea),
-                             NULL);
+                             nullptr);
   }

   if (SUCCEEDED(hr)) {
     // The media specified a picture region, return it.
     aOutPictureRegion = nsIntRect(MFOffsetToInt32(videoArea.OffsetX),
                                   MFOffsetToInt32(videoArea.OffsetY),
                                   videoArea.Area.cx,
                                   videoArea.Area.cy);
@@ -791,17 +791,17 @@ WMFReader::CreateBasicVideoFrame(IMFSamp
   BYTE* data = nullptr;
   LONG stride = 0;
   RefPtr<IMF2DBuffer> twoDBuffer;
   hr = buffer->QueryInterface(static_cast<IMF2DBuffer**>(byRef(twoDBuffer)));
   if (SUCCEEDED(hr)) {
     hr = twoDBuffer->Lock2D(&data, &stride);
     NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
   } else {
-    hr = buffer->Lock(&data, NULL, NULL);
+    hr = buffer->Lock(&data, nullptr, nullptr);
     NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
     stride = mVideoStride;
   }

   // YV12, planar format: [YYYY....][VVVV....][UUUU....]
   // i.e., Y, then V, then U.
   VideoData::YCbCrBuffer b;
--- a/content/media/wmf/WMFSourceReaderCallback.cpp
+++ b/content/media/wmf/WMFSourceReaderCallback.cpp
@@ -24,17 +24,17 @@ WMFSourceReaderCallback::QueryInterface(
   if (aIId == IID_IMFSourceReaderCallback) {
     return DoGetInterface(static_cast<WMFSourceReaderCallback*>(this), aInterface);
   }
   if (aIId == IID_IUnknown) {
     return DoGetInterface(static_cast<WMFSourceReaderCallback*>(this), aInterface);
   }

-  *aInterface = NULL;
+  *aInterface = nullptr;
   return E_NOINTERFACE;
 }

 NS_IMPL_ADDREF(WMFSourceReaderCallback)
 NS_IMPL_RELEASE(WMFSourceReaderCallback)

 WMFSourceReaderCallback::WMFSourceReaderCallback()
   : mMonitor("WMFSourceReaderCallback")
@@ -53,17 +53,17 @@ WMFSourceReaderCallback::WMFSourceReader
 HRESULT
 WMFSourceReaderCallback::NotifyReadComplete(HRESULT aReadStatus,
                                             DWORD aStreamIndex,
                                             DWORD aStreamFlags,
                                             LONGLONG aTimestamp,
                                             IMFSample *aSample)
 {
-  // Note: aSample can be NULL on success if more data is required!
+  // Note: aSample can be nullptr on success if more data is required!
   ReentrantMonitorAutoEnter mon(mMonitor);

   if (mSample) {
     // The WMFReader should have called Wait() to retrieve the last
     // sample returned by the last ReadSample() call, but if we're
     // aborting the read before Wait() is called the sample ref
     // can be non-null.
     mSample->Release();
--- a/content/media/wmf/WMFUtils.cpp
+++ b/content/media/wmf/WMFUtils.cpp
@@ -231,17 +231,17 @@ IsSupportedDecoder(const GUID& aDecoderG
 static HRESULT
 DisableBlockedDecoders(IMFPluginControl* aPluginControl,
                        const GUID& aCategory)
 {
   HRESULT hr = S_OK;

   UINT32 numMFTs = 0;
-  IMFActivate **ppActivate = NULL;
+  IMFActivate **ppActivate = nullptr;
   hr = wmf::MFTEnumEx(aCategory,
                       MFT_ENUM_FLAG_ALL,
                       nullptr, // Input type, nullptr -> match all.
                       nullptr, // Output type, nullptr -> match all.
                       &ppActivate,
                       &numMFTs);

   if (SUCCEEDED(hr) && numMFTs == 0) {
@@ -290,21 +290,21 @@
 static bool sDLLsLoaded = false;
 static bool sFailedToLoadDlls = false;

 struct WMFModule {
   const wchar_t* name;
   HMODULE handle;
 };

 static WMFModule sDLLs[] = {
-  { L"mfplat.dll", NULL },
-  { L"mfreadwrite.dll", NULL },
-  { L"propsys.dll", NULL },
-  { L"mf.dll", NULL },
-  { L"dxva2.dll", NULL }
+  { L"mfplat.dll", nullptr },
+  { L"mfreadwrite.dll", nullptr },
+  { L"propsys.dll", nullptr },
+  { L"mf.dll", nullptr },
+  { L"dxva2.dll", nullptr }
 };

 HRESULT
 LoadDLLs()
 {
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");

   if (sDLLsLoaded) {
@@ -344,17 +344,17 @@
 HRESULT
 UnloadDLLs()
 {
   NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");

   uint32_t length = NS_ARRAY_LENGTH(sDLLs);
   for (uint32_t i = 0; i < length; i++) {
     if (sDLLs[i].handle) {
       FreeLibrary(sDLLs[i].handle);
-      sDLLs[i].handle = NULL;
+      sDLLs[i].handle = nullptr;
     }
     sDLLsLoaded = false;
   }
   return S_OK;
 }

 #define ENSURE_FUNCTION_PTR(FunctionName, DLL) \
   static FunctionName##Ptr_t FunctionName##Ptr = nullptr; \
--- a/content/svg/content/src/SVGFETurbulenceElement.cpp
+++ b/content/svg/content/src/SVGFETurbulenceElement.cpp
@@ -271,17 +271,17 @@ SVGFETurbulenceElement::Noise2(int aColo
   rx0 = t - (int) t;
   rx1 = rx0 - 1.0f;
   t = aVec[1] + sPerlinN;
   by0 = (int) t;
   by1 = by0 + 1;
   ry0 = t - (int) t;
   ry1 = ry0 - 1.0f;
   // If stitching, adjust lattice points accordingly.
-  if (aStitchInfo != NULL) {
+  if (aStitchInfo != nullptr) {
     if (bx0 >= aStitchInfo->mWrapX)
       bx0 -= aStitchInfo->mWidth;
     if (bx1 >= aStitchInfo->mWrapX)
       bx1 -= aStitchInfo->mWidth;
     if (by0 >= aStitchInfo->mWrapY)
       by0 -= aStitchInfo->mHeight;
     if (by1 >= aStitchInfo->mWrapY)
       by1 -= aStitchInfo->mHeight;
@@ -317,17 +317,17 @@
 double
 SVGFETurbulenceElement::Turbulence(int aColorChannel, double* aPoint,
                                    double aBaseFreqX, double aBaseFreqY,
                                    int aNumOctaves, bool aFractalSum,
                                    bool aDoStitching,
                                    double aTileX, double aTileY,
                                    double aTileWidth, double aTileHeight)
 {
   StitchInfo stitch;
-  StitchInfo *stitchInfo = NULL; // Not stitching when NULL.
+  StitchInfo *stitchInfo = nullptr; // Not stitching when nullptr.
   // Adjust the base frequencies if necessary for stitching.
   if (aDoStitching) {
     // When stitching tiled turbulence, the frequencies must be adjusted
     // so that the tile borders will be continuous.
     if (aBaseFreqX != 0.0) {
       double loFreq = double (floor(aTileWidth * aBaseFreqX)) / aTileWidth;
       double hiFreq = double (ceil(aTileWidth * aBaseFreqX)) / aTileWidth;
       if (aBaseFreqX / loFreq < hiFreq / aBaseFreqX)
@@ -358,17 +358,17 @@ SVGFETurbulenceElement::Turbulence(int a
   for (int octave = 0; octave < aNumOctaves; octave++) {
     if (aFractalSum)
       sum += double (Noise2(aColorChannel, vec, stitchInfo) / ratio);
     else
       sum += double (fabs(Noise2(aColorChannel, vec, stitchInfo)) / ratio);
     vec[0] *= 2;
     vec[1] *= 2;
     ratio *= 2;
-    if (stitchInfo != NULL) {
+    if (stitchInfo != nullptr) {
       // Update stitch values. Subtracting sPerlinN before the multiplication
       // and adding it afterward simplifies to subtracting it once.
       stitch.mWidth *= 2;
       stitch.mWrapX = 2 * stitch.mWrapX - sPerlinN;
       stitch.mHeight *= 2;
       stitch.mWrapY = 2 * stitch.mWrapY - sPerlinN;
     }
   }