author | Nicolas Silva <nsilva@mozilla.com> | Thu, 17 Mar 2016 14:58:58 +0100 |
changeset 289653 | 232fda30a2ad18db9acf4c01455c427f26992627 |
parent 289652 | 73bef86f117222773ad2d06d55964915c41ad180 |
child 289654 | a85b0c8031321f84b5b63a4b4b2d3196a3037b2d |
push id | 30108 |
push user | cbook@mozilla.com |
push date | Tue, 22 Mar 2016 11:14:31 +0000 |
treeherder | mozilla-central@ea6298e1b4f7 |
reviewers | sotaro |
bugs | 1256693 |
milestone | 48.0a1 |
first release with | nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64 |
last release without | nightly linux32, nightly linux64, nightly mac, nightly win32, nightly win64 |
--- a/gfx/layers/BufferTexture.cpp +++ b/gfx/layers/BufferTexture.cpp @@ -111,20 +111,21 @@ BufferTextureData* BufferTextureData::Create(gfx::IntSize aSize, gfx::SurfaceFormat aFormat, gfx::BackendType aMoz2DBackend, TextureFlags aFlags, TextureAllocationFlags aAllocFlags, ISurfaceAllocator* aAllocator) { if (!aAllocator || aAllocator->IsSameProcess()) { return MemoryTextureData::Create(aSize, aFormat, aMoz2DBackend, aFlags, aAllocFlags, aAllocator); - } else { + } else if (aAllocator->AsShmemAllocator()) { return ShmemTextureData::Create(aSize, aFormat, aMoz2DBackend, aFlags, aAllocFlags, aAllocator); } + return nullptr; } BufferTextureData* BufferTextureData::CreateInternal(ISurfaceAllocator* aAllocator, const BufferDescriptor& aDesc, gfx::BackendType aMoz2DBackend, int32_t aBufferSize, TextureFlags aTextureFlags) @@ -133,24 +134,25 @@ BufferTextureData::CreateInternal(ISurfa uint8_t* buffer = new (fallible) uint8_t[aBufferSize]; if (!buffer) { return nullptr; } GfxMemoryImageReporter::DidAlloc(buffer); return new MemoryTextureData(aDesc, aMoz2DBackend, buffer, aBufferSize); - } else { + } else if (aAllocator->AsShmemAllocator()) { ipc::Shmem shm; - if (!aAllocator->AllocUnsafeShmem(aBufferSize, OptimalShmemType(), &shm)) { + if (!aAllocator->AsShmemAllocator()->AllocUnsafeShmem(aBufferSize, OptimalShmemType(), &shm)) { return nullptr; } return new ShmemTextureData(aDesc, aMoz2DBackend, shm); } + return nullptr; } BufferTextureData* BufferTextureData::CreateForYCbCrWithBufferSize(ISurfaceAllocator* aAllocator, gfx::SurfaceFormat aFormat, int32_t aBufferSize, TextureFlags aTextureFlags) { @@ -495,32 +497,32 @@ ShmemTextureData::Create(gfx::IntSize aS gfx::BackendType aMoz2DBackend, TextureFlags aFlags, TextureAllocationFlags aAllocFlags, ISurfaceAllocator* aAllocator) { MOZ_ASSERT(aAllocator); // Should have used CreateForYCbCr. MOZ_ASSERT(aFormat != gfx::SurfaceFormat::YUV); - if (!aAllocator) { + if (!aAllocator || !aAllocator->AsShmemAllocator()) { return nullptr; } if (aSize.width <= 0 || aSize.height <= 0) { gfxDebug() << "Asking for buffer of invalid size " << aSize.width << "x" << aSize.height; return nullptr; } uint32_t bufSize = ImageDataSerializer::ComputeRGBBufferSize(aSize, aFormat); if (!bufSize) { return nullptr; } mozilla::ipc::Shmem shm; - if (!aAllocator->AllocUnsafeShmem(bufSize, OptimalShmemType(), &shm)) { + if (!aAllocator->AsShmemAllocator()->AllocUnsafeShmem(bufSize, OptimalShmemType(), &shm)) { return nullptr; } uint8_t* buf = shm.get<uint8_t>(); if (!InitBuffer(buf, bufSize, aFormat, aAllocFlags)) { return nullptr; } @@ -543,13 +545,13 @@ ShmemTextureData::CreateSimilar(ISurface { return ShmemTextureData::Create(GetSize(), GetFormat(), mMoz2DBackend, aFlags, aAllocFlags, aAllocator); } void ShmemTextureData::Deallocate(ISurfaceAllocator* aAllocator) { - aAllocator->DeallocShmem(mShmem); + aAllocator->AsShmemAllocator()->DeallocShmem(mShmem); } } // namespace } // namespace
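Note: the BufferTexture.cpp hunks above all follow one pattern: instead of assuming every ISurfaceAllocator can hand out shared memory, the code now asks for the ShmemAllocator sub-interface and returns nullptr when it is absent. The fragment below is a minimal, self-contained sketch of that check-then-use shape; the types and the PickBackingStore helper are stand-ins for illustration, not the Gecko declarations.

#include <cstddef>

// Hypothetical stand-ins for the Gecko interfaces involved; illustrative only.
struct ShmemAllocator {
  virtual ~ShmemAllocator() = default;
  virtual bool AllocUnsafeShmem(std::size_t aSize) = 0;
};

struct ISurfaceAllocator {
  virtual ~ISurfaceAllocator() = default;
  virtual bool IsSameProcess() const = 0;
  // Down-cast hook: only allocators that really provide shmem override this.
  virtual ShmemAllocator* AsShmemAllocator() { return nullptr; }
};

// Mirrors the new control flow in BufferTextureData::Create above.
const char* PickBackingStore(ISurfaceAllocator* aAllocator)
{
  if (!aAllocator || aAllocator->IsSameProcess()) {
    return "memory";   // same process: a plain heap buffer is enough
  }
  if (aAllocator->AsShmemAllocator()) {
    return "shmem";    // cross-process allocator that can provide shared memory
  }
  return nullptr;      // neither capability: fail instead of crashing later
}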
--- a/gfx/layers/TextureDIB.cpp +++ b/gfx/layers/TextureDIB.cpp @@ -265,17 +265,17 @@ ShmemDIBTextureData::Serialize(SurfaceDe aOutDescriptor = SurfaceDescriptorFileMapping((WindowsHandle)mHostHandle, mFormat, mSize); return true; } DIBTextureData* ShmemDIBTextureData::Create(gfx::IntSize aSize, gfx::SurfaceFormat aFormat, ISurfaceAllocator* aAllocator) { - MOZ_ASSERT(aAllocator->ParentPid() != base::ProcessId()); + MOZ_ASSERT(aAllocator->AsLayerForwarder()->GetParentPid() != base::ProcessId()); DWORD mapSize = aSize.width * aSize.height * BytesPerPixel(aFormat); HANDLE fileMapping = ::CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, 0, mapSize, NULL); if (!fileMapping) { gfxCriticalError() << "Failed to create memory file mapping for " << mapSize << " bytes."; return nullptr; } @@ -327,17 +327,17 @@ ShmemDIBTextureData::Create(gfx::IntSize ::CloseHandle(fileMapping); gfxCriticalError() << "Could not create surface, status: " << surface->CairoStatus(); return nullptr; } HANDLE hostHandle = NULL; - if (!ipc::DuplicateHandle(fileMapping, aAllocator->ParentPid(), + if (!ipc::DuplicateHandle(fileMapping, aAllocator->AsLayerForwarder()->GetParentPid(), &hostHandle, 0, DUPLICATE_SAME_ACCESS)) { gfxCriticalError() << "Failed to duplicate handle to parent process for surface."; ::DeleteObject(bitmap); ::DeleteDC(dc); ::CloseHandle(fileMapping); return nullptr; }
--- a/gfx/layers/client/ClientLayerManager.cpp +++ b/gfx/layers/client/ClientLayerManager.cpp @@ -533,17 +533,17 @@ ClientLayerManager::MakeSnapshotIfRequir gfx::Matrix oldMatrix = dt->GetTransform(); dt->SetTransform(rotate * oldMatrix); dt->DrawSurface(surf, dstRect, srcRect, DrawSurfaceOptions(), DrawOptions(1.0f, CompositionOp::OP_OVER)); dt->SetTransform(oldMatrix); } - mForwarder->DestroySharedSurface(&inSnapshot); + mForwarder->DestroySurfaceDescriptor(&inSnapshot); } } } mShadowTarget = nullptr; } void ClientLayerManager::FlushRendering()
--- a/gfx/layers/client/CompositableClient.cpp +++ b/gfx/layers/client/CompositableClient.cpp @@ -57,22 +57,22 @@ public: uint64_t mAsyncID; }; void RemoveTextureFromCompositableTracker::ReleaseTextureClient() { if (mTextureClient && mTextureClient->GetAllocator() && - !mTextureClient->GetAllocator()->IsImageBridgeChild()) + !mTextureClient->GetAllocator()->UsesImageBridge()) { TextureClientReleaseTask* task = new TextureClientReleaseTask(mTextureClient); RefPtr<ISurfaceAllocator> allocator = mTextureClient->GetAllocator(); mTextureClient = nullptr; - allocator->GetMessageLoop()->PostTask(FROM_HERE, task); + allocator->AsClientAllocator()->GetMessageLoop()->PostTask(FROM_HERE, task); } else { mTextureClient = nullptr; } } /* static */ void CompositableClient::TransactionCompleteted(PCompositableChild* aActor, uint64_t aTransactionId) {
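Note: in the hunk above, GetMessageLoop() is no longer available on the base ISurfaceAllocator; client-side code has to go through AsClientAllocator() first. The sketch below is a small stand-alone model of that call site; Task, Loop, DeferRelease and the class bodies are placeholders, not the Gecko types.

struct Task { virtual ~Task() = default; virtual void Run() = 0; };
struct Loop { virtual ~Loop() = default; virtual void PostTask(Task* aTask) = 0; };

struct ClientIPCAllocator;
struct ISurfaceAllocator {
  virtual ~ISurfaceAllocator() = default;
  virtual ClientIPCAllocator* AsClientAllocator() { return nullptr; }
};

// Only client/child-side allocators carry a message loop.
struct ClientIPCAllocator : ISurfaceAllocator {
  ClientIPCAllocator* AsClientAllocator() override { return this; }
  virtual Loop* GetMessageLoop() const = 0;
};

// Mirrors ReleaseTextureClient: defer the release to the allocator's thread,
// but only when the allocator is a client-side one with a live loop.
bool DeferRelease(ISurfaceAllocator* aAllocator, Task* aTask)
{
  ClientIPCAllocator* client = aAllocator ? aAllocator->AsClientAllocator() : nullptr;
  if (!client || !client->GetMessageLoop()) {
    return false;                       // caller falls back to a synchronous path
  }
  client->GetMessageLoop()->PostTask(aTask);
  return true;
}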
--- a/gfx/layers/client/ImageClient.cpp +++ b/gfx/layers/client/ImageClient.cpp @@ -73,18 +73,17 @@ ImageClient::RemoveTexture(TextureClient { RemoveTextureWithWaiter(aTexture); } void ImageClient::RemoveTextureWithWaiter(TextureClient* aTexture, AsyncTransactionWaiter* aAsyncTransactionWaiter) { - if ((aAsyncTransactionWaiter || - GetForwarder()->IsImageBridgeChild()) + if ((aAsyncTransactionWaiter || GetForwarder()->UsesImageBridge()) #ifndef MOZ_WIDGET_GONK // If the texture client is taking part in recycling then we should make sure // the host has finished with it before dropping the ref and triggering // the recycle callback. && aTexture->GetRecycleAllocator() #endif ) { RefPtr<AsyncTransactionTracker> request =
--- a/gfx/layers/client/TextureClient.cpp +++ b/gfx/layers/client/TextureClient.cpp @@ -244,17 +244,17 @@ DeallocateTextureClient(TextureDeallocPa // Nothing to do return; } TextureChild* actor = params.actor; MessageLoop* ipdlMsgLoop = nullptr; if (params.allocator) { - ipdlMsgLoop = params.allocator->GetMessageLoop(); + ipdlMsgLoop = params.allocator->AsClientAllocator()->GetMessageLoop(); if (!ipdlMsgLoop) { // An allocator with no message loop means we are too late in the shutdown // sequence. gfxCriticalError() << "Texture deallocated too late during shutdown"; return; } } @@ -692,17 +692,17 @@ TextureClient::SetRecycleAllocator(IText } else { ClearRecycleCallback(); } } bool TextureClient::InitIPDLActor(CompositableForwarder* aForwarder) { - MOZ_ASSERT(aForwarder && aForwarder->GetMessageLoop() == mAllocator->GetMessageLoop()); + MOZ_ASSERT(aForwarder && aForwarder->GetMessageLoop() == mAllocator->AsClientAllocator()->GetMessageLoop()); if (mActor && !mActor->mDestroyed && mActor->GetForwarder() == aForwarder) { return true; } MOZ_ASSERT(!mActor || mActor->mDestroyed, "Cannot use a texture on several IPC channels."); SurfaceDescriptor desc; if (!ToSurfaceDescriptor(desc)) { return false;
--- a/gfx/layers/client/TiledContentClient.cpp +++ b/gfx/layers/client/TiledContentClient.cpp @@ -390,17 +390,18 @@ gfxMemorySharedReadLock::GetReadCount() gfxShmSharedReadLock::gfxShmSharedReadLock(ISurfaceAllocator* aAllocator) : mAllocator(aAllocator) , mAllocSuccess(false) { MOZ_COUNT_CTOR(gfxShmSharedReadLock); MOZ_ASSERT(mAllocator); if (mAllocator) { #define MOZ_ALIGN_WORD(x) (((x) + 3) & ~3) - if (mAllocator->AllocShmemSection(MOZ_ALIGN_WORD(sizeof(ShmReadLockInfo)), &mShmemSection)) { + if (mAllocator->AsLayerForwarder()->GetTileLockAllocator()->AllocShmemSection( + MOZ_ALIGN_WORD(sizeof(ShmReadLockInfo)), &mShmemSection)) { ShmReadLockInfo* info = GetShmReadLockInfoPtr(); info->readCount = 1; mAllocSuccess = true; } } } gfxShmSharedReadLock::~gfxShmSharedReadLock() @@ -422,17 +423,23 @@ int32_t gfxShmSharedReadLock::ReadUnlock() { if (!mAllocSuccess) { return 0; } ShmReadLockInfo* info = GetShmReadLockInfoPtr(); int32_t readCount = PR_ATOMIC_DECREMENT(&info->readCount); MOZ_ASSERT(readCount >= 0); if (readCount <= 0) { - mAllocator->FreeShmemSection(mShmemSection); + auto fwd = mAllocator->AsLayerForwarder(); + if (fwd) { + fwd->GetTileLockAllocator()->DeallocShmemSection(mShmemSection); + } else { + // we are on the compositor process + FixedSizeSmallShmemSectionAllocator::FreeShmemSection(mShmemSection); + } } return readCount; } int32_t gfxShmSharedReadLock::GetReadCount() { NS_ASSERT_OWNINGTHREAD(gfxShmSharedReadLock); if (!mAllocSuccess) {
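Note: gfxShmSharedReadLock keeps its reference count in a small shmem section so both the content and the compositor process can see it; after this patch the section comes from ShadowLayerForwarder::GetTileLockAllocator(), and ReadUnlock() picks the deallocation path that matches the side it runs on. The self-contained toy below models only the counting part, with std::atomic standing in for the cross-process atomic stored in the section; names are illustrative.

#include <atomic>
#include <cassert>
#include <cstdint>

// Toy model of the ShmReadLockInfo block that lives inside a shmem section.
struct ReadLockInfo {
  std::atomic<int32_t> readCount{1};   // starts locked by the producer
};

int32_t ReadLock(ReadLockInfo* aInfo)
{
  return ++aInfo->readCount;
}

// Mirrors gfxShmSharedReadLock::ReadUnlock: the last reader releases the
// section the lock lives in (via the tile-lock allocator on the content side,
// or the static FreeShmemSection path on the compositor side).
int32_t ReadUnlock(ReadLockInfo* aInfo, void (*aReleaseSection)(ReadLockInfo*))
{
  int32_t readCount = --aInfo->readCount;
  assert(readCount >= 0);
  if (readCount <= 0) {
    aReleaseSection(aInfo);
  }
  return readCount;
}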
--- a/gfx/layers/composite/TextureHost.cpp +++ b/gfx/layers/composite/TextureHost.cpp @@ -796,17 +796,17 @@ ShmemTextureHost::~ShmemTextureHost() } void ShmemTextureHost::DeallocateSharedData() { if (mShmem) { MOZ_ASSERT(mDeallocator, "Shared memory would leak without a ISurfaceAllocator"); - mDeallocator->DeallocShmem(*mShmem); + mDeallocator->AsShmemAllocator()->DeallocShmem(*mShmem); mShmem = nullptr; } } void ShmemTextureHost::ForgetSharedData() { if (mShmem) {
--- a/gfx/layers/ipc/CompositableForwarder.h +++ b/gfx/layers/ipc/CompositableForwarder.h @@ -35,17 +35,17 @@ class PTextureChild; * should be sent to the compositor side. * CompositableForwarder is an interface to manage a transaction of * compositable objetcs. * * ShadowLayerForwarder is an example of a CompositableForwarder (that can * additionally forward modifications of the Layer tree). * ImageBridgeChild is another CompositableForwarder. */ -class CompositableForwarder : public ISurfaceAllocator +class CompositableForwarder : public ClientIPCAllocator { public: CompositableForwarder() : mSerial(++sSerialCounter) {} /** @@ -136,18 +136,16 @@ public: void IdentifyTextureHost(const TextureFactoryIdentifier& aIdentifier); virtual int32_t GetMaxTextureSize() const override { return mTextureFactoryIdentifier.mMaxTextureSize; } - bool IsOnCompositorSide() const override { return false; } - /** * Returns the type of backend that is used off the main thread. * We only don't allow changing the backend type at runtime so this value can * be queried once and will not change until Gecko is restarted. */ LayersBackend GetCompositorBackendType() const { return mTextureFactoryIdentifier.mParentBackend;
--- a/gfx/layers/ipc/CompositableTransactionParent.cpp +++ b/gfx/layers/ipc/CompositableTransactionParent.cpp @@ -139,17 +139,17 @@ CompositableParentManager::ReceiveCompos case CompositableOperation::TOpRemoveTextureAsync: { const OpRemoveTextureAsync& op = aEdit.get_OpRemoveTextureAsync(); CompositableHost* compositable = AsCompositable(op); RefPtr<TextureHost> tex = TextureHost::AsTextureHost(op.textureParent()); MOZ_ASSERT(tex.get()); compositable->RemoveTextureHost(tex); - if (!IsAsync() && ImageBridgeParent::GetInstance(GetChildProcessId())) { + if (!UsesImageBridge() && ImageBridgeParent::GetInstance(GetChildProcessId())) { // send FenceHandle if present via ImageBridge. ImageBridgeParent::AppendDeliverFenceMessage( GetChildProcessId(), op.holderId(), op.transactionId(), op.textureParent()); // If the message is recievied via PLayerTransaction, @@ -189,31 +189,31 @@ CompositableParentManager::ReceiveCompos FenceHandle fence = maybeFence.get_FenceHandle(); if (fence.IsValid()) { t->mTexture->SetAcquireFenceHandle(fence); } } } compositable->UseTextureHost(textures); - if (IsAsync() && compositable->GetLayer()) { + if (UsesImageBridge() && compositable->GetLayer()) { ScheduleComposition(op); } break; } case CompositableOperation::TOpUseComponentAlphaTextures: { const OpUseComponentAlphaTextures& op = aEdit.get_OpUseComponentAlphaTextures(); CompositableHost* compositable = AsCompositable(op); RefPtr<TextureHost> texOnBlack = TextureHost::AsTextureHost(op.textureOnBlackParent()); RefPtr<TextureHost> texOnWhite = TextureHost::AsTextureHost(op.textureOnWhiteParent()); MOZ_ASSERT(texOnBlack && texOnWhite); compositable->UseComponentAlphaTextures(texOnBlack, texOnWhite); - if (IsAsync()) { + if (UsesImageBridge()) { ScheduleComposition(op); } break; } #ifdef MOZ_WIDGET_GONK case CompositableOperation::TOpUseOverlaySource: { const OpUseOverlaySource& op = aEdit.get_OpUseOverlaySource(); CompositableHost* compositable = AsCompositable(op);
--- a/gfx/layers/ipc/CompositableTransactionParent.h +++ b/gfx/layers/ipc/CompositableTransactionParent.h @@ -43,24 +43,16 @@ public: protected: /** * Handle the IPDL messages that affect PCompositable actors. */ bool ReceiveCompositableUpdate(const CompositableOperation& aEdit, EditReplyVector& replyv); - bool IsOnCompositorSide() const override { return true; } - - /** - * Return true if this protocol is asynchronous with respect to the content - * thread (ImageBridge for instance). - */ - virtual bool IsAsync() const { return false; } - virtual void ReplyRemoveTexture(const OpReplyRemoveTexture& aReply) {} std::vector<AsyncParentMessageData> mPendingAsyncMessage; }; } // namespace layers } // namespace mozilla
--- a/gfx/layers/ipc/ISurfaceAllocator.cpp +++ b/gfx/layers/ipc/ISurfaceAllocator.cpp @@ -1,392 +1,23 @@ /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- * vim: sw=2 ts=8 et : */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "ISurfaceAllocator.h" -#include <sys/types.h> // for int32_t -#include "gfx2DGlue.h" // for IntSize -#include "gfxPlatform.h" // for gfxPlatform, gfxImageFormat -#include "gfxSharedImageSurface.h" // for gfxSharedImageSurface -#include "mozilla/Assertions.h" // for MOZ_ASSERT, etc -#include "mozilla/Atomics.h" // for PrimitiveIntrinsics -#include "mozilla/ipc/SharedMemory.h" // for SharedMemory, etc -#include "mozilla/layers/LayersSurfaces.h" // for SurfaceDescriptor, etc -#include "mozilla/layers/SharedBufferManagerChild.h" -#include "ShadowLayerUtils.h" -#include "mozilla/mozalloc.h" // for operator delete[], etc -#include "nsAutoPtr.h" // for nsRefPtr, getter_AddRefs, etc -#include "nsDebug.h" // for NS_RUNTIMEABORT -#include "nsXULAppAPI.h" // for XRE_GetProcessType, etc -#include "mozilla/ipc/Shmem.h" -#include "mozilla/layers/ImageDataSerializer.h" -#ifdef DEBUG -#include "prenv.h" -#endif - -using namespace mozilla::ipc; namespace mozilla { namespace layers { NS_IMPL_ISUPPORTS(GfxMemoryImageReporter, nsIMemoryReporter) mozilla::Atomic<ptrdiff_t> GfxMemoryImageReporter::sAmount(0); mozilla::ipc::SharedMemory::SharedMemoryType OptimalShmemType() { - return mozilla::ipc::SharedMemory::TYPE_BASIC; -} - -bool -IsSurfaceDescriptorValid(const SurfaceDescriptor& aSurface) -{ - return aSurface.type() != SurfaceDescriptor::T__None && - aSurface.type() != SurfaceDescriptor::Tnull_t; -} - -ISurfaceAllocator::~ISurfaceAllocator() -{ - // Check if we're not leaking.. 
- MOZ_ASSERT(mUsedShmems.empty()); -} - -void -ISurfaceAllocator::Finalize() -{ - ShrinkShmemSectionHeap(); -} - -static inline uint8_t* -GetAddressFromDescriptor(const SurfaceDescriptor& aDescriptor) -{ - MOZ_ASSERT(IsSurfaceDescriptorValid(aDescriptor)); - MOZ_RELEASE_ASSERT(aDescriptor.type() == SurfaceDescriptor::TSurfaceDescriptorBuffer); - - auto memOrShmem = aDescriptor.get_SurfaceDescriptorBuffer().data(); - if (memOrShmem.type() == MemoryOrShmem::TShmem) { - return memOrShmem.get_Shmem().get<uint8_t>(); - } else { - return reinterpret_cast<uint8_t*>(memOrShmem.get_uintptr_t()); - } -} - -already_AddRefed<gfx::DrawTarget> -GetDrawTargetForDescriptor(const SurfaceDescriptor& aDescriptor, gfx::BackendType aBackend) -{ - uint8_t* data = GetAddressFromDescriptor(aDescriptor); - auto rgb = aDescriptor.get_SurfaceDescriptorBuffer().desc().get_RGBDescriptor(); - uint32_t stride = ImageDataSerializer::GetRGBStride(rgb); - return gfx::Factory::CreateDrawTargetForData(gfx::BackendType::CAIRO, - data, rgb.size(), - stride, rgb.format()); -} - -already_AddRefed<gfx::DataSourceSurface> -GetSurfaceForDescriptor(const SurfaceDescriptor& aDescriptor) -{ - uint8_t* data = GetAddressFromDescriptor(aDescriptor); - auto rgb = aDescriptor.get_SurfaceDescriptorBuffer().desc().get_RGBDescriptor(); - uint32_t stride = ImageDataSerializer::GetRGBStride(rgb); - return gfx::Factory::CreateWrappingDataSourceSurface(data, stride, rgb.size(), - rgb.format()); -} - -bool -ISurfaceAllocator::AllocSurfaceDescriptor(const gfx::IntSize& aSize, - gfxContentType aContent, - SurfaceDescriptor* aBuffer) -{ - if (!IPCOpen()) { - return false; - } - return AllocSurfaceDescriptorWithCaps(aSize, aContent, DEFAULT_BUFFER_CAPS, aBuffer); -} - -bool -ISurfaceAllocator::AllocSurfaceDescriptorWithCaps(const gfx::IntSize& aSize, - gfxContentType aContent, - uint32_t aCaps, - SurfaceDescriptor* aBuffer) -{ - if (!IPCOpen()) { - return false; - } - gfx::SurfaceFormat format = - gfxPlatform::GetPlatform()->Optimal2DFormatForContent(aContent); - size_t size = ImageDataSerializer::ComputeRGBBufferSize(aSize, format); - if (!size) { - return false; - } - - MemoryOrShmem bufferDesc; - if (IsSameProcess()) { - uint8_t* data = new (std::nothrow) uint8_t[size]; - if (!data) { - return false; - } - GfxMemoryImageReporter::DidAlloc(data); -#ifdef XP_MACOSX - // Workaround a bug in Quartz where drawing an a8 surface to another a8 - // surface with OP_SOURCE still requires the destination to be clear. - if (format == gfx::SurfaceFormat::A8) { - memset(data, 0, size); - } -#endif - bufferDesc = reinterpret_cast<uintptr_t>(data); - } else { - - mozilla::ipc::SharedMemory::SharedMemoryType shmemType = OptimalShmemType(); - mozilla::ipc::Shmem shmem; - if (!AllocUnsafeShmem(size, shmemType, &shmem)) { - return false; - } - - bufferDesc = shmem; - } - - // Use an intermediate buffer by default. Skipping the intermediate buffer is - // only possible in certain configurations so let's keep it simple here for now. 
- const bool hasIntermediateBuffer = true; - *aBuffer = SurfaceDescriptorBuffer(RGBDescriptor(aSize, format, hasIntermediateBuffer), - bufferDesc); - - return true; -} - -/* static */ bool -ISurfaceAllocator::IsShmem(SurfaceDescriptor* aSurface) -{ - return aSurface && (aSurface->type() == SurfaceDescriptor::TSurfaceDescriptorBuffer) - && (aSurface->get_SurfaceDescriptorBuffer().data().type() == MemoryOrShmem::TShmem); -} - -void -ISurfaceAllocator::DestroySharedSurface(SurfaceDescriptor* aSurface) -{ - MOZ_ASSERT(IPCOpen()); - if (!IPCOpen()) { - return; - } - - MOZ_ASSERT(aSurface); - if (!aSurface) { - return; - } - if (!IPCOpen()) { - return; - } - SurfaceDescriptorBuffer& desc = aSurface->get_SurfaceDescriptorBuffer(); - switch (desc.data().type()) { - case MemoryOrShmem::TShmem: { - DeallocShmem(desc.data().get_Shmem()); - break; - } - case MemoryOrShmem::Tuintptr_t: { - uint8_t* ptr = (uint8_t*)desc.data().get_uintptr_t(); - GfxMemoryImageReporter::WillFree(ptr); - delete [] ptr; - break; - } - default: - NS_RUNTIMEABORT("surface type not implemented!"); - } - *aSurface = SurfaceDescriptor(); -} - -// XXX - We should actually figure out the minimum shmem allocation size on -// a certain platform and use that. -const uint32_t sShmemPageSize = 4096; - -#ifdef DEBUG -const uint32_t sSupportedBlockSize = 4; -#endif - -enum AllocationStatus -{ - STATUS_ALLOCATED, - STATUS_FREED -}; - -struct ShmemSectionHeapHeader -{ - Atomic<uint32_t> mTotalBlocks; - Atomic<uint32_t> mAllocatedBlocks; -}; - -struct ShmemSectionHeapAllocation -{ - Atomic<uint32_t> mStatus; - uint32_t mSize; -}; - -bool -ISurfaceAllocator::AllocShmemSection(size_t aSize, mozilla::layers::ShmemSection* aShmemSection) -{ - MOZ_ASSERT(IPCOpen()); - if (!IPCOpen()) { - return false; - } - // For now we only support sizes of 4. If we want to support different sizes - // some more complicated bookkeeping should be added. - MOZ_ASSERT(aSize == sSupportedBlockSize); - MOZ_ASSERT(aShmemSection); - - uint32_t allocationSize = (aSize + sizeof(ShmemSectionHeapAllocation)); - - for (size_t i = 0; i < mUsedShmems.size(); i++) { - ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>(); - if ((header->mAllocatedBlocks + 1) * allocationSize + sizeof(ShmemSectionHeapHeader) < sShmemPageSize) { - aShmemSection->shmem() = mUsedShmems[i]; - MOZ_ASSERT(mUsedShmems[i].IsWritable()); - break; - } - } - - if (!aShmemSection->shmem().IsWritable()) { - ipc::Shmem tmp; - if (!AllocUnsafeShmem(sShmemPageSize, ipc::SharedMemory::TYPE_BASIC, &tmp)) { - return false; - } - - ShmemSectionHeapHeader* header = tmp.get<ShmemSectionHeapHeader>(); - header->mTotalBlocks = 0; - header->mAllocatedBlocks = 0; - - mUsedShmems.push_back(tmp); - aShmemSection->shmem() = tmp; - } - - MOZ_ASSERT(aShmemSection->shmem().IsWritable()); - - ShmemSectionHeapHeader* header = aShmemSection->shmem().get<ShmemSectionHeapHeader>(); - uint8_t* heap = aShmemSection->shmem().get<uint8_t>() + sizeof(ShmemSectionHeapHeader); - - ShmemSectionHeapAllocation* allocHeader = nullptr; - - if (header->mTotalBlocks > header->mAllocatedBlocks) { - // Search for the first available block. 
- for (size_t i = 0; i < header->mTotalBlocks; i++) { - allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap); - - if (allocHeader->mStatus == STATUS_FREED) { - break; - } - heap += allocationSize; - } - MOZ_ASSERT(allocHeader && allocHeader->mStatus == STATUS_FREED); - MOZ_ASSERT(allocHeader->mSize == sSupportedBlockSize); - } else { - heap += header->mTotalBlocks * allocationSize; - - header->mTotalBlocks++; - allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap); - allocHeader->mSize = aSize; - } - - MOZ_ASSERT(allocHeader); - header->mAllocatedBlocks++; - allocHeader->mStatus = STATUS_ALLOCATED; - - aShmemSection->size() = aSize; - aShmemSection->offset() = (heap + sizeof(ShmemSectionHeapAllocation)) - aShmemSection->shmem().get<uint8_t>(); - ShrinkShmemSectionHeap(); - return true; -} - -void -ISurfaceAllocator::FreeShmemSection(mozilla::layers::ShmemSection& aShmemSection) -{ - MOZ_ASSERT(IPCOpen()); - if (!IPCOpen()) { - return; - } - - MOZ_ASSERT(aShmemSection.size() == sSupportedBlockSize); - MOZ_ASSERT(aShmemSection.offset() < sShmemPageSize - sSupportedBlockSize); - - ShmemSectionHeapAllocation* allocHeader = - reinterpret_cast<ShmemSectionHeapAllocation*>(aShmemSection.shmem().get<char>() + - aShmemSection.offset() - - sizeof(ShmemSectionHeapAllocation)); - - MOZ_ASSERT(allocHeader->mSize == aShmemSection.size()); - - DebugOnly<bool> success = allocHeader->mStatus.compareExchange(STATUS_ALLOCATED, STATUS_FREED); - // If this fails something really weird is going on. - MOZ_ASSERT(success); - - ShmemSectionHeapHeader* header = aShmemSection.shmem().get<ShmemSectionHeapHeader>(); - header->mAllocatedBlocks--; - - ShrinkShmemSectionHeap(); -} - - -void -ISurfaceAllocator::ShrinkShmemSectionHeap() -{ - if (!IPCOpen()) { - return; - } - - // The loop will terminate as we either increase i, or decrease size - // every time through. - size_t i = 0; - while (i < mUsedShmems.size()) { - ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>(); - if (header->mAllocatedBlocks == 0) { - DeallocShmem(mUsedShmems[i]); - - // We don't particularly care about order, move the last one in the array - // to this position. - if (i < mUsedShmems.size() - 1) { - mUsedShmems[i] = mUsedShmems[mUsedShmems.size() - 1]; - } - mUsedShmems.pop_back(); - } else { - i++; - } - } -} - -bool -ISurfaceAllocator::AllocGrallocBuffer(const gfx::IntSize& aSize, - uint32_t aFormat, - uint32_t aUsage, - MaybeMagicGrallocBufferHandle* aHandle) -{ - MOZ_ASSERT(IPCOpen()); - if (!IPCOpen()) { - return false; - } - - return SharedBufferManagerChild::GetSingleton()->AllocGrallocBuffer(aSize, aFormat, aUsage, aHandle); -} - -void -ISurfaceAllocator::DeallocGrallocBuffer(MaybeMagicGrallocBufferHandle* aHandle) -{ - MOZ_ASSERT(IPCOpen()); - if (!IPCOpen()) { - return; - } - - SharedBufferManagerChild::GetSingleton()->DeallocGrallocBuffer(*aHandle); -} - -void -ISurfaceAllocator::DropGrallocBuffer(MaybeMagicGrallocBufferHandle* aHandle) -{ - MOZ_ASSERT(IPCOpen()); - if (!IPCOpen()) { - return; - } - - SharedBufferManagerChild::GetSingleton()->DropGrallocBuffer(*aHandle); + return ipc::SharedMemory::SharedMemoryType::TYPE_BASIC; } } // namespace layers } // namespace mozilla
--- a/gfx/layers/ipc/ISurfaceAllocator.h +++ b/gfx/layers/ipc/ISurfaceAllocator.h @@ -11,21 +11,22 @@ #include "gfxTypes.h" #include "mozilla/gfx/Point.h" // for IntSize #include "mozilla/ipc/SharedMemory.h" // for SharedMemory, etc #include "mozilla/RefPtr.h" #include "nsIMemoryReporter.h" // for nsIMemoryReporter #include "mozilla/Atomics.h" // for Atomic #include "mozilla/layers/LayersMessages.h" // for ShmemSection #include "LayersTypes.h" -#include <vector> +#include "gfxPrefs.h" #include "mozilla/layers/AtomicRefCountedWithFinalize.h" /* * FIXME [bjacob] *** PURE CRAZYNESS WARNING *** + * (I think that this doesn't apply anymore.) * * This #define is actually needed here, because subclasses of ISurfaceAllocator, * namely ShadowLayerForwarder, will or will not override AllocGrallocBuffer * depending on whether MOZ_HAVE_SURFACEDESCRIPTORGRALLOC is defined. */ #ifdef MOZ_WIDGET_GONK #define MOZ_HAVE_SURFACEDESCRIPTORGRALLOC #endif @@ -35,18 +36,23 @@ namespace ipc { class Shmem; } // namespace ipc namespace gfx { class DataSourceSurface; } // namespace gfx namespace layers { -class MaybeMagicGrallocBufferHandle; class CompositableForwarder; +class ShadowLayerForwarder; + +class ShmemAllocator; +class ShmemSectionAllocator; +class LegacySurfaceDescriptorAllocator; +class ClientIPCAllocator; enum BufferCapabilities { DEFAULT_BUFFER_CAPS = 0, /** * The allocated buffer must be efficiently mappable as a DataSourceSurface. */ MAP_AS_IMAGE_SURFACE = 1 << 0, /** @@ -54,134 +60,129 @@ enum BufferCapabilities { */ USING_GL_RENDERING_ONLY = 1 << 1 }; class SurfaceDescriptor; mozilla::ipc::SharedMemory::SharedMemoryType OptimalShmemType(); -bool IsSurfaceDescriptorValid(const SurfaceDescriptor& aSurface); -bool IsSurfaceDescriptorOwned(const SurfaceDescriptor& aDescriptor); -bool ReleaseOwnedSurfaceDescriptor(const SurfaceDescriptor& aDescriptor); -already_AddRefed<gfx::DrawTarget> GetDrawTargetForDescriptor(const SurfaceDescriptor& aDescriptor, gfx::BackendType aBackend); -already_AddRefed<gfx::DataSourceSurface> GetSurfaceForDescriptor(const SurfaceDescriptor& aDescriptor); /** * An interface used to create and destroy surfaces that are shared with the * Compositor process (using shmem, or gralloc, or other platform specific memory) * * Most of the methods here correspond to methods that are implemented by IPDL * actors without a common polymorphic interface. * These methods should be only called in the ipdl implementor's thread, unless * specified otherwise in the implementing class. */ class ISurfaceAllocator : public AtomicRefCountedWithFinalize<ISurfaceAllocator> { public: MOZ_DECLARE_REFCOUNTED_TYPENAME(ISurfaceAllocator) - ISurfaceAllocator() - : mDefaultMessageLoop(MessageLoop::current()) - {} + + // down-casting + + virtual ShmemAllocator* AsShmemAllocator() { return nullptr; } + + virtual ShmemSectionAllocator* AsShmemSectionAllocator() { return nullptr; } + + virtual CompositableForwarder* AsCompositableForwarder() { return nullptr; } + + virtual ShadowLayerForwarder* AsLayerForwarder() { return nullptr; } + + virtual ClientIPCAllocator* AsClientAllocator() { return nullptr; } - void Finalize(); + virtual LegacySurfaceDescriptorAllocator* + AsLegacySurfaceDescriptorAllocator() { return nullptr; } + + // ipc info + + virtual bool IPCOpen() const { return true; } + + virtual bool IsSameProcess() const = 0; + + virtual bool UsesImageBridge() const { return false; } + +protected: + void Finalize() {} - /** - * Allocate shared memory that can be accessed by only one process at a time. 
- * Ownership of this memory is passed when the memory is sent in an IPDL - * message. - */ + virtual ~ISurfaceAllocator() {} + + friend class AtomicRefCountedWithFinalize<ISurfaceAllocator>; +}; + +/// Methods that are specific to the client/child side. +class ClientIPCAllocator : public ISurfaceAllocator +{ +public: + virtual ClientIPCAllocator* AsClientAllocator() override { return this; } + + virtual MessageLoop * GetMessageLoop() const = 0; + + virtual int32_t GetMaxTextureSize() const { return gfxPrefs::MaxTextureSize(); } +}; + +/// An allocator can provide shared memory. +/// +/// The allocated shmems can be deallocated on either process, as long as they +/// belong to the same channel. +class ShmemAllocator +{ +public: virtual bool AllocShmem(size_t aSize, - mozilla::ipc::SharedMemory::SharedMemoryType aType, + mozilla::ipc::SharedMemory::SharedMemoryType aShmType, mozilla::ipc::Shmem* aShmem) = 0; - - /** - * Allocate shared memory that can be accessed by both processes at the - * same time. Safety is left for the user of the memory to care about. - */ virtual bool AllocUnsafeShmem(size_t aSize, - mozilla::ipc::SharedMemory::SharedMemoryType aType, + mozilla::ipc::SharedMemory::SharedMemoryType aShmType, mozilla::ipc::Shmem* aShmem) = 0; - - /** - * Allocate memory in shared memory that can always be accessed by both - * processes at a time. Safety is left for the user of the memory to care - * about. - */ - bool AllocShmemSection(size_t aSize, - mozilla::layers::ShmemSection* aShmemSection); + virtual void DeallocShmem(mozilla::ipc::Shmem& aShmem) = 0; +}; - /** - * Deallocates a shmem section. - */ - void FreeShmemSection(mozilla::layers::ShmemSection& aShmemSection); +/// An allocator that can group allocations in bigger chunks of shared memory. +/// +/// The allocated shmem sections can only be deallocated by the same allocator +/// instance (and only in the child process). +class ShmemSectionAllocator +{ +public: + virtual bool AllocShmemSection(uint32_t aSize, ShmemSection* aShmemSection) = 0; - /** - * Deallocate memory allocated by either AllocShmem or AllocUnsafeShmem. - */ - virtual void DeallocShmem(mozilla::ipc::Shmem& aShmem) = 0; + virtual void DeallocShmemSection(ShmemSection& aShmemSection) = 0; + + virtual void MemoryPressure() {} +}; - // was AllocBuffer +/// Some old stuff that's still around and used for screenshots. +/// +/// New code should not need this (see TextureClient). +class LegacySurfaceDescriptorAllocator +{ +public: virtual bool AllocSurfaceDescriptor(const gfx::IntSize& aSize, gfxContentType aContent, - SurfaceDescriptor* aBuffer); + SurfaceDescriptor* aBuffer) = 0; - // was AllocBufferWithCaps virtual bool AllocSurfaceDescriptorWithCaps(const gfx::IntSize& aSize, gfxContentType aContent, uint32_t aCaps, - SurfaceDescriptor* aBuffer); - - /** - * Returns the maximum texture size supported by the compositor. 
- */ - virtual int32_t GetMaxTextureSize() const { return INT32_MAX; } - - virtual void DestroySharedSurface(SurfaceDescriptor* aSurface); + SurfaceDescriptor* aBuffer) = 0; - // method that does the actual allocation work - bool AllocGrallocBuffer(const gfx::IntSize& aSize, - uint32_t aFormat, - uint32_t aUsage, - MaybeMagicGrallocBufferHandle* aHandle); - - void DeallocGrallocBuffer(MaybeMagicGrallocBufferHandle* aHandle); - - void DropGrallocBuffer(MaybeMagicGrallocBufferHandle* aHandle); - - virtual bool IPCOpen() const { return true; } - virtual bool IsSameProcess() const = 0; - virtual base::ProcessId ParentPid() const { return base::ProcessId(); } - - virtual bool IsImageBridgeChild() const { return false; } + virtual void DestroySurfaceDescriptor(SurfaceDescriptor* aSurface) = 0; +}; - virtual MessageLoop * GetMessageLoop() const - { - return mDefaultMessageLoop; - } - - // Returns true if aSurface wraps a Shmem. - static bool IsShmem(SurfaceDescriptor* aSurface); - - virtual CompositableForwarder* AsCompositableForwarder() { return nullptr; } -protected: +already_AddRefed<gfx::DrawTarget> +GetDrawTargetForDescriptor(const SurfaceDescriptor& aDescriptor, gfx::BackendType aBackend); - virtual bool IsOnCompositorSide() const = 0; - - virtual ~ISurfaceAllocator(); - - void ShrinkShmemSectionHeap(); +already_AddRefed<gfx::DataSourceSurface> +GetSurfaceForDescriptor(const SurfaceDescriptor& aDescriptor); - // This is used to implement an extremely simple & naive heap allocator. - std::vector<mozilla::ipc::Shmem> mUsedShmems; - - MessageLoop* mDefaultMessageLoop; - - friend class AtomicRefCountedWithFinalize<ISurfaceAllocator>; -}; +uint8_t* +GetAddressFromDescriptor(const SurfaceDescriptor& aDescriptor); class GfxMemoryImageReporter final : public nsIMemoryReporter { ~GfxMemoryImageReporter() {} public: NS_DECL_ISUPPORTS
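Note: taken together, the ISurfaceAllocator.h rewrite above replaces one monolithic allocator interface with a small base class plus capability interfaces (ShmemAllocator, ShmemSectionAllocator, ClientIPCAllocator, LegacySurfaceDescriptorAllocator), reached through virtual As*() down-casts that return nullptr by default. The stand-alone toy below shows how that shape composes; everything in it, including the ToyForwarder class, is illustrative and deliberately leaves out refcounting and IPDL details.

#include <cstddef>
#include <cstdio>

struct ShmemAllocator;
struct ClientIPCAllocator;

// Base interface: only knows how to advertise optional capabilities.
struct ISurfaceAllocator {
  virtual ~ISurfaceAllocator() = default;
  virtual bool IsSameProcess() const = 0;
  virtual ShmemAllocator* AsShmemAllocator() { return nullptr; }
  virtual ClientIPCAllocator* AsClientAllocator() { return nullptr; }
};

// Capability: can hand out (and take back) shared memory.
struct ShmemAllocator {
  virtual ~ShmemAllocator() = default;
  virtual bool AllocShmem(std::size_t aSize) = 0;
  virtual void DeallocShmem() = 0;
};

// Capability: client-side allocator (message loop omitted in this toy).
struct ClientIPCAllocator : ISurfaceAllocator {
  ClientIPCAllocator* AsClientAllocator() override { return this; }
};

// A forwarder-like class opting into both capabilities, as ShadowLayerForwarder
// and ImageBridgeChild do in this patch.
struct ToyForwarder final : ClientIPCAllocator, ShmemAllocator {
  bool IsSameProcess() const override { return false; }
  ShmemAllocator* AsShmemAllocator() override { return this; }
  bool AllocShmem(std::size_t) override { return true; }
  void DeallocShmem() override {}
};

int main()
{
  ToyForwarder forwarder;
  ISurfaceAllocator* alloc = &forwarder;

  // Callers test for the capability instead of assuming it exists.
  if (ShmemAllocator* shm = alloc->AsShmemAllocator()) {
    shm->AllocShmem(4096);
    std::puts("allocator supports shmem");
  } else {
    std::puts("allocator has no shmem support");
  }
  return 0;
}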
--- a/gfx/layers/ipc/ImageBridgeChild.cpp +++ b/gfx/layers/ipc/ImageBridgeChild.cpp @@ -936,17 +936,18 @@ ImageBridgeChild::AllocUnsafeShmem(size_ bool ImageBridgeChild::AllocShmem(size_t aSize, ipc::SharedMemory::SharedMemoryType aType, ipc::Shmem* aShmem) { MOZ_ASSERT(!mShuttingDown); if (InImageBridgeChildThread()) { - return PImageBridgeChild::AllocShmem(aSize, aType, aShmem); + return PImageBridgeChild::AllocShmem(aSize, aType, + aShmem); } else { return DispatchAllocShmemInternal(aSize, aType, aShmem, false); // false: unsafe } } // NewRunnableFunction accepts a limited number of parameters so we need a // struct here struct AllocShmemParams { @@ -961,24 +962,25 @@ struct AllocShmemParams { static void ProxyAllocShmemNow(AllocShmemParams* aParams, ReentrantMonitor* aBarrier, bool* aDone) { MOZ_ASSERT(aParams); MOZ_ASSERT(aDone); MOZ_ASSERT(aBarrier); + auto shmAllocator = aParams->mAllocator->AsShmemAllocator(); if (aParams->mUnsafe) { - aParams->mSuccess = aParams->mAllocator->AllocUnsafeShmem(aParams->mSize, - aParams->mType, - aParams->mShmem); + aParams->mSuccess = shmAllocator->AllocUnsafeShmem(aParams->mSize, + aParams->mType, + aParams->mShmem); } else { - aParams->mSuccess = aParams->mAllocator->AllocShmem(aParams->mSize, - aParams->mType, - aParams->mShmem); + aParams->mSuccess = shmAllocator->AllocShmem(aParams->mSize, + aParams->mType, + aParams->mShmem); } ReentrantMonitorAutoEnter autoMon(*aBarrier); *aDone = true; aBarrier->NotifyAll(); } bool @@ -1010,17 +1012,17 @@ static void ProxyDeallocShmemNow(ISurfac ipc::Shmem* aShmem, ReentrantMonitor* aBarrier, bool* aDone) { MOZ_ASSERT(aShmem); MOZ_ASSERT(aDone); MOZ_ASSERT(aBarrier); - aAllocator->DeallocShmem(*aShmem); + aAllocator->AsShmemAllocator()->DeallocShmem(*aShmem); ReentrantMonitorAutoEnter autoMon(*aBarrier); *aDone = true; aBarrier->NotifyAll(); } void ImageBridgeChild::DeallocShmem(ipc::Shmem& aShmem)
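Note: ImageBridgeChild keeps its existing "proxy the call to the ImageBridge thread and block until it finishes" approach; the hunks above only change the worker (ProxyAllocShmemNow / ProxyDeallocShmemNow) to reach the allocator through AsShmemAllocator(). The toy below models that dispatch-and-wait pattern with the standard library instead of Gecko's MessageLoop and ReentrantMonitor; names and structure are illustrative only.

#include <condition_variable>
#include <cstddef>
#include <functional>
#include <mutex>
#include <thread>

// A crude single-task "message loop": runs one posted task on its own thread.
class ToyLoop {
public:
  void PostTask(std::function<void()> aTask) {
    std::thread([task = std::move(aTask)] { task(); }).detach();
  }
};

// Mirrors DispatchAllocShmemInternal: post the allocation to the loop's thread
// and block the caller until the worker signals completion.
bool ProxyAlloc(ToyLoop& aLoop, bool (*aAllocOnLoopThread)(std::size_t), std::size_t aSize)
{
  std::mutex barrier;
  std::condition_variable cv;
  bool done = false;
  bool success = false;

  aLoop.PostTask([&] {
    bool ok = aAllocOnLoopThread(aSize);   // runs on the loop thread
    std::lock_guard<std::mutex> lock(barrier);
    success = ok;
    done = true;
    cv.notify_all();
  });

  std::unique_lock<std::mutex> lock(barrier);
  cv.wait(lock, [&] { return done; });
  return success;
}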
--- a/gfx/layers/ipc/ImageBridgeChild.h +++ b/gfx/layers/ipc/ImageBridgeChild.h @@ -100,21 +100,24 @@ bool InImageBridgeChildThread(); * * Since sending an image through imageBridge triggers compositing, the main thread is * not used at all (except for the very first transaction that provides the * CompositableHost with an AsyncID). */ class ImageBridgeChild final : public PImageBridgeChild , public CompositableForwarder , public AsyncTransactionTrackersHolder + , public ShmemAllocator { friend class ImageContainer; typedef InfallibleTArray<AsyncParentMessageData> AsyncParentMessageArray; public: + virtual ShmemAllocator* AsShmemAllocator() override { return this; } + /** * Creates the image bridge with a dedicated thread for ImageBridgeChild. * * We may want to use a specifi thread in the future. In this case, use * CreateWithThread instead. */ static void StartUp(); @@ -241,17 +244,17 @@ public: */ static void FlushAllImages(ImageClient* aClient, ImageContainer* aContainer); // CompositableForwarder virtual void Connect(CompositableClient* aCompositable, ImageContainer* aImageContainer) override; - virtual bool IsImageBridgeChild() const override { return true; } + virtual bool UsesImageBridge() const override { return true; } /** * See CompositableForwarder::UseTextures */ virtual void UseTextures(CompositableClient* aCompositable, const nsTArray<TimedTextureClient>& aTextures) override; virtual void UseComponentAlphaTextures(CompositableClient* aCompositable, TextureClient* aClientOnBlack, @@ -288,27 +291,22 @@ public: /** * See ISurfaceAllocator.h * Can be used from any thread. * If used outside the ImageBridgeChild thread, it will proxy a synchronous * call on the ImageBridgeChild thread. */ virtual bool AllocUnsafeShmem(size_t aSize, - mozilla::ipc::SharedMemory::SharedMemoryType aType, + mozilla::ipc::SharedMemory::SharedMemoryType aShmType, mozilla::ipc::Shmem* aShmem) override; - /** - * See ISurfaceAllocator.h - * Can be used from any thread. - * If used outside the ImageBridgeChild thread, it will proxy a synchronous - * call on the ImageBridgeChild thread. - */ virtual bool AllocShmem(size_t aSize, - mozilla::ipc::SharedMemory::SharedMemoryType aType, + mozilla::ipc::SharedMemory::SharedMemoryType aShmType, mozilla::ipc::Shmem* aShmem) override; + /** * See ISurfaceAllocator.h * Can be used from any thread. * If used outside the ImageBridgeChild thread, it will proxy a synchronous * call on the ImageBridgeChild thread. */ virtual void DeallocShmem(mozilla::ipc::Shmem& aShmem) override;
--- a/gfx/layers/ipc/ImageBridgeParent.cpp +++ b/gfx/layers/ipc/ImageBridgeParent.cpp @@ -345,20 +345,16 @@ ImageBridgeParent::NotifyImageComposites if (!GetInstance(pid)->SendDidComposite(notifications)) { ok = false; } i = end; } return ok; } -MessageLoop * ImageBridgeParent::GetMessageLoop() const { - return mMessageLoop; -} - void ImageBridgeParent::DeferredDestroy() { MOZ_ASSERT(mCompositorThreadHolder); mCompositorThreadHolder = nullptr; mSelfRef = nullptr; } @@ -394,18 +390,18 @@ void ImageBridgeParent::OnChannelConnected(int32_t aPid) { mCompositorThreadHolder = GetCompositorThreadHolder(); } bool ImageBridgeParent::AllocShmem(size_t aSize, - ipc::SharedMemory::SharedMemoryType aType, - ipc::Shmem* aShmem) + ipc::SharedMemory::SharedMemoryType aType, + ipc::Shmem* aShmem) { if (mStopped) { return false; } return PImageBridgeParent::AllocShmem(aSize, aType, aShmem); } bool
--- a/gfx/layers/ipc/ImageBridgeParent.h +++ b/gfx/layers/ipc/ImageBridgeParent.h @@ -34,27 +34,30 @@ class Shmem; namespace layers { /** * ImageBridgeParent is the manager Protocol of ImageContainerParent. * It's purpose is mainly to setup the IPDL connection. Most of the * interesting stuff is in ImageContainerParent. */ class ImageBridgeParent final : public PImageBridgeParent, - public CompositableParentManager + public CompositableParentManager, + public ShmemAllocator { public: typedef InfallibleTArray<CompositableOperation> EditArray; typedef InfallibleTArray<OpDestroy> OpDestroyArray; typedef InfallibleTArray<EditReply> EditReplyArray; typedef InfallibleTArray<AsyncChildMessageData> AsyncChildMessageArray; ImageBridgeParent(MessageLoop* aLoop, Transport* aTransport, ProcessId aChildProcessId); ~ImageBridgeParent(); + virtual ShmemAllocator* AsShmemAllocator() override { return this; } + virtual void ActorDestroy(ActorDestroyReason aWhy) override; static PImageBridgeParent* Create(Transport* aTransport, ProcessId aChildProcessId); // CompositableParentManager virtual void SendFenceHandleIfPresent(PTextureParent* aTexture) override; @@ -66,18 +69,16 @@ public: } // PImageBridge virtual bool RecvImageBridgeThreadId(const PlatformThreadId& aThreadId) override; virtual bool RecvUpdate(EditArray&& aEdits, OpDestroyArray&& aToDestroy, EditReplyArray* aReply) override; virtual bool RecvUpdateNoSwap(EditArray&& aEdits, OpDestroyArray&& aToDestroy) override; - virtual bool IsAsync() const override { return true; } - PCompositableParent* AllocPCompositableParent(const TextureInfo& aInfo, PImageContainerParent* aImageContainer, uint64_t*) override; bool DeallocPCompositableParent(PCompositableParent* aActor) override; virtual PTextureParent* AllocPTextureParent(const SurfaceDescriptor& aSharedData, const LayersBackend& aLayersBackend, const TextureFlags& aFlags) override; @@ -91,20 +92,19 @@ public: virtual bool RecvChildAsyncMessages(InfallibleTArray<AsyncChildMessageData>&& aMessages) override; // Shutdown step 1 virtual bool RecvWillStop() override; // Shutdown step 2 virtual bool RecvStop() override; - virtual MessageLoop* GetMessageLoop() const override; + MessageLoop* GetMessageLoop() const { return mMessageLoop; } - - // ISurfaceAllocator + // ShmemAllocator virtual bool AllocShmem(size_t aSize, ipc::SharedMemory::SharedMemoryType aType, ipc::Shmem* aShmem) override; virtual bool AllocUnsafeShmem(size_t aSize, ipc::SharedMemory::SharedMemoryType aType, ipc::Shmem* aShmem) override; @@ -135,16 +135,20 @@ public: static bool NotifyImageComposites(nsTArray<ImageCompositeNotification>& aNotifications); // Overriden from IToplevelProtocol IToplevelProtocol* CloneToplevel(const InfallibleTArray<ProtocolFdMapping>& aFds, base::ProcessHandle aPeerProcess, mozilla::ipc::ProtocolCloneContext* aCtx) override; + virtual bool UsesImageBridge() const override { return true; } + + virtual bool IPCOpen() const override { return !mStopped; } + protected: void OnChannelConnected(int32_t pid) override; private: void DeferredDestroy(); MessageLoop* mMessageLoop; Transport* mTransport; // This keeps us alive until ActorDestroy(), at which point we do a
--- a/gfx/layers/ipc/LayerTransactionParent.h +++ b/gfx/layers/ipc/LayerTransactionParent.h @@ -31,17 +31,18 @@ namespace layers { class Layer; class LayerManagerComposite; class ShadowLayerParent; class CompositableParent; class ShadowLayersManager; class LayerTransactionParent final : public PLayerTransactionParent, - public CompositableParentManager + public CompositableParentManager, + public ShmemAllocator { typedef mozilla::layout::RenderFrameParent RenderFrameParent; typedef InfallibleTArray<Edit> EditArray; typedef InfallibleTArray<OpDestroy> OpDestroyArray; typedef InfallibleTArray<EditReply> EditReplyArray; typedef InfallibleTArray<AsyncChildMessageData> AsyncChildMessageArray; typedef InfallibleTArray<PluginWindowData> PluginsArray; @@ -56,17 +57,18 @@ protected: public: void Destroy(); LayerManagerComposite* layer_manager() const { return mLayerManager; } uint64_t GetId() const { return mId; } Layer* GetRoot() const { return mRoot; } - // ISurfaceAllocator + virtual ShmemAllocator* AsShmemAllocator() override { return this; } + virtual bool AllocShmem(size_t aSize, ipc::SharedMemory::SharedMemoryType aType, ipc::Shmem* aShmem) override; virtual bool AllocUnsafeShmem(size_t aSize, ipc::SharedMemory::SharedMemoryType aType, ipc::Shmem* aShmem) override;
--- a/gfx/layers/ipc/ShadowLayers.cpp +++ b/gfx/layers/ipc/ShadowLayers.cpp @@ -10,26 +10,28 @@ #include <vector> // for vector #include "GeckoProfiler.h" // for PROFILER_LABEL #include "ISurfaceAllocator.h" // for IsSurfaceDescriptorValid #include "Layers.h" // for Layer #include "RenderTrace.h" // for RenderTraceScope #include "ShadowLayerChild.h" // for ShadowLayerChild #include "gfx2DGlue.h" // for Moz2D transition helpers #include "gfxPlatform.h" // for gfxImageFormat, gfxPlatform -#include "gfxSharedImageSurface.h" // for gfxSharedImageSurface +//#include "gfxSharedImageSurface.h" // for gfxSharedImageSurface #include "ipc/IPCMessageUtils.h" // for gfxContentType, null_t #include "IPDLActor.h" #include "mozilla/Assertions.h" // for MOZ_ASSERT, etc #include "mozilla/gfx/Point.h" // for IntSize #include "mozilla/layers/CompositableClient.h" // for CompositableClient, etc +#include "mozilla/layers/ImageDataSerializer.h" #include "mozilla/layers/LayersMessages.h" // for Edit, etc #include "mozilla/layers/LayersSurfaces.h" // for SurfaceDescriptor, etc #include "mozilla/layers/LayersTypes.h" // for MOZ_LAYERS_LOG #include "mozilla/layers/LayerTransactionChild.h" +#include "mozilla/layers/SharedBufferManagerChild.h" #include "ShadowLayerUtils.h" #include "mozilla/layers/TextureClient.h" // for TextureClient #include "mozilla/mozalloc.h" // for operator new, etc #include "nsAutoPtr.h" // for nsRefPtr, getter_AddRefs, etc #include "nsTArray.h" // for AutoTArray, nsTArray, etc #include "nsXULAppAPI.h" // for XRE_GetProcessType, etc #include "mozilla/ReentrantMonitor.h" @@ -192,44 +194,208 @@ private: Transaction& operator=(const Transaction&); }; struct AutoTxnEnd { explicit AutoTxnEnd(Transaction* aTxn) : mTxn(aTxn) {} ~AutoTxnEnd() { mTxn->End(); } Transaction* mTxn; }; + +// XXX - We should actually figure out the minimum shmem allocation size on +// a certain platform and use that. +const uint32_t sShmemPageSize = 4096; + +#ifdef DEBUG +const uint32_t sSupportedBlockSize = 4; +#endif + +FixedSizeSmallShmemSectionAllocator::FixedSizeSmallShmemSectionAllocator(ShmemAllocator* aShmProvider) +: mShmProvider(aShmProvider) +{ + MOZ_ASSERT(mShmProvider); +} + +FixedSizeSmallShmemSectionAllocator::~FixedSizeSmallShmemSectionAllocator() +{ + ShrinkShmemSectionHeap(); + // Check if we're not leaking.. + MOZ_ASSERT(mUsedShmems.empty()); +} + +bool +FixedSizeSmallShmemSectionAllocator::AllocShmemSection(uint32_t aSize, ShmemSection* aShmemSection) +{ + // For now we only support sizes of 4. If we want to support different sizes + // some more complicated bookkeeping should be added. 
+ MOZ_ASSERT(aSize == sSupportedBlockSize); + MOZ_ASSERT(aShmemSection); + + uint32_t allocationSize = (aSize + sizeof(ShmemSectionHeapAllocation)); + + for (size_t i = 0; i < mUsedShmems.size(); i++) { + ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>(); + if ((header->mAllocatedBlocks + 1) * allocationSize + sizeof(ShmemSectionHeapHeader) < sShmemPageSize) { + aShmemSection->shmem() = mUsedShmems[i]; + MOZ_ASSERT(mUsedShmems[i].IsWritable()); + break; + } + } + + if (!aShmemSection->shmem().IsWritable()) { + ipc::Shmem tmp; + if (!mShmProvider->AllocUnsafeShmem(sShmemPageSize, OptimalShmemType(), &tmp)) { + return false; + } + + ShmemSectionHeapHeader* header = tmp.get<ShmemSectionHeapHeader>(); + header->mTotalBlocks = 0; + header->mAllocatedBlocks = 0; + + mUsedShmems.push_back(tmp); + aShmemSection->shmem() = tmp; + } + + MOZ_ASSERT(aShmemSection->shmem().IsWritable()); + + ShmemSectionHeapHeader* header = aShmemSection->shmem().get<ShmemSectionHeapHeader>(); + uint8_t* heap = aShmemSection->shmem().get<uint8_t>() + sizeof(ShmemSectionHeapHeader); + + ShmemSectionHeapAllocation* allocHeader = nullptr; + + if (header->mTotalBlocks > header->mAllocatedBlocks) { + // Search for the first available block. + for (size_t i = 0; i < header->mTotalBlocks; i++) { + allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap); + + if (allocHeader->mStatus == STATUS_FREED) { + break; + } + heap += allocationSize; + } + MOZ_ASSERT(allocHeader && allocHeader->mStatus == STATUS_FREED); + MOZ_ASSERT(allocHeader->mSize == sSupportedBlockSize); + } else { + heap += header->mTotalBlocks * allocationSize; + + header->mTotalBlocks++; + allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap); + allocHeader->mSize = aSize; + } + + MOZ_ASSERT(allocHeader); + header->mAllocatedBlocks++; + allocHeader->mStatus = STATUS_ALLOCATED; + + aShmemSection->size() = aSize; + aShmemSection->offset() = (heap + sizeof(ShmemSectionHeapAllocation)) - aShmemSection->shmem().get<uint8_t>(); + ShrinkShmemSectionHeap(); + return true; +} + +void +FixedSizeSmallShmemSectionAllocator::FreeShmemSection(mozilla::layers::ShmemSection& aShmemSection) +{ + MOZ_ASSERT(aShmemSection.size() == sSupportedBlockSize); + MOZ_ASSERT(aShmemSection.offset() < sShmemPageSize - sSupportedBlockSize); + + ShmemSectionHeapAllocation* allocHeader = + reinterpret_cast<ShmemSectionHeapAllocation*>(aShmemSection.shmem().get<char>() + + aShmemSection.offset() - + sizeof(ShmemSectionHeapAllocation)); + + MOZ_ASSERT(allocHeader->mSize == aShmemSection.size()); + + DebugOnly<bool> success = allocHeader->mStatus.compareExchange(STATUS_ALLOCATED, STATUS_FREED); + // If this fails something really weird is going on. + MOZ_ASSERT(success); + + ShmemSectionHeapHeader* header = aShmemSection.shmem().get<ShmemSectionHeapHeader>(); + header->mAllocatedBlocks--; +} + +void +FixedSizeSmallShmemSectionAllocator::DeallocShmemSection(mozilla::layers::ShmemSection& aShmemSection) +{ + FreeShmemSection(aShmemSection); + ShrinkShmemSectionHeap(); +} + + +void +FixedSizeSmallShmemSectionAllocator::ShrinkShmemSectionHeap() +{ + // The loop will terminate as we either increase i, or decrease size + // every time through. + size_t i = 0; + while (i < mUsedShmems.size()) { + ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>(); + if (header->mAllocatedBlocks == 0) { + mShmProvider->DeallocShmem(mUsedShmems[i]); + + // We don't particularly care about order, move the last one in the array + // to this position. 
+ if (i < mUsedShmems.size() - 1) { + mUsedShmems[i] = mUsedShmems[mUsedShmems.size() - 1]; + } + mUsedShmems.pop_back(); + } else { + i++; + } + } +} + +FixedSizeSmallShmemSectionAllocator* +ShadowLayerForwarder::GetTileLockAllocator() +{ + MOZ_ASSERT(IPCOpen()); + if (!IPCOpen()) { + return nullptr; + } + + if (!mSectionAllocator) { + mSectionAllocator = new FixedSizeSmallShmemSectionAllocator(this); + } + return mSectionAllocator; +} + void CompositableForwarder::IdentifyTextureHost(const TextureFactoryIdentifier& aIdentifier) { mTextureFactoryIdentifier = aIdentifier; mSyncObject = SyncObject::CreateSyncObject(aIdentifier.mSyncHandle); } ShadowLayerForwarder::ShadowLayerForwarder() - : mDiagnosticTypes(DiagnosticTypes::NO_DIAGNOSTIC) + : mMessageLoop(MessageLoop::current()) + , mDiagnosticTypes(DiagnosticTypes::NO_DIAGNOSTIC) , mIsFirstPaint(false) , mWindowOverlayChanged(false) , mPaintSyncId(0) + , mSectionAllocator(nullptr) { mTxn = new Transaction(); } ShadowLayerForwarder::~ShadowLayerForwarder() { MOZ_ASSERT(mTxn->Finished(), "unfinished transaction?"); if (!mTxn->mDestroyedActors.IsEmpty()) { mTxn->FallbackDestroyActors(); } delete mTxn; if (mShadowManager) { mShadowManager->SetForwarder(nullptr); mShadowManager->Destroy(); } + + if (mSectionAllocator) { + delete mSectionAllocator; + } } void ShadowLayerForwarder::BeginTransaction(const gfx::IntRect& aTargetBounds, ScreenRotation aRotation, dom::ScreenOrientationInternal aOrientation) { MOZ_ASSERT(HasShadowManager(), "no manager to forward to"); @@ -749,51 +915,50 @@ ShadowLayerForwarder::EndTransaction(Inf *aSent = true; mIsFirstPaint = false; mPaintSyncId = 0; MOZ_LAYERS_LOG(("[LayersForwarder] ... done")); return true; } bool -ShadowLayerForwarder::AllocShmem(size_t aSize, - ipc::SharedMemory::SharedMemoryType aType, - ipc::Shmem* aShmem) +ShadowLayerForwarder::AllocUnsafeShmem(size_t aSize, + ipc::SharedMemory::SharedMemoryType aShmType, + ipc::Shmem* aShmem) { MOZ_ASSERT(HasShadowManager(), "no shadow manager"); - if (!HasShadowManager() || - !mShadowManager->IPCOpen()) { + if (!IPCOpen()) { return false; } ShmemAllocated(mShadowManager); - return mShadowManager->AllocShmem(aSize, aType, aShmem); + return mShadowManager->AllocUnsafeShmem(aSize, aShmType, aShmem); } + bool -ShadowLayerForwarder::AllocUnsafeShmem(size_t aSize, - ipc::SharedMemory::SharedMemoryType aType, - ipc::Shmem* aShmem) +ShadowLayerForwarder::AllocShmem(size_t aSize, + ipc::SharedMemory::SharedMemoryType aShmType, + ipc::Shmem* aShmem) { MOZ_ASSERT(HasShadowManager(), "no shadow manager"); - if (!HasShadowManager() || - !mShadowManager->IPCOpen()) { + if (!IPCOpen()) { return false; } + ShmemAllocated(mShadowManager); - return mShadowManager->AllocUnsafeShmem(aSize, aType, aShmem); + return mShadowManager->AllocShmem(aSize, aShmType, aShmem); } + void ShadowLayerForwarder::DeallocShmem(ipc::Shmem& aShmem) { MOZ_ASSERT(HasShadowManager(), "no shadow manager"); - if (!HasShadowManager() || - !mShadowManager->IPCOpen()) { - return; + if (HasShadowManager() && mShadowManager->IPCOpen()) { + mShadowManager->DeallocShmem(aShmem); } - mShadowManager->DeallocShmem(aShmem); } bool ShadowLayerForwarder::IPCOpen() const { return HasShadowManager() && mShadowManager->IPCOpen(); } @@ -802,17 +967,17 @@ ShadowLayerForwarder::IsSameProcess() co { if (!HasShadowManager() || !mShadowManager->IPCOpen()) { return false; } return mShadowManager->OtherPid() == base::GetCurrentProcId(); } base::ProcessId -ShadowLayerForwarder::ParentPid() const 
+ShadowLayerForwarder::GetParentPid() const { if (!HasShadowManager() || !mShadowManager->IPCOpen()) { return base::ProcessId(); } return mShadowManager->OtherPid(); } @@ -942,10 +1107,147 @@ void ShadowLayerForwarder::SendPendingAs // Prepare pending messages. for (size_t i = 0; i < mPendingAsyncMessages.size(); i++) { replies.AppendElement(mPendingAsyncMessages[i]); } mPendingAsyncMessages.clear(); mShadowManager->SendChildAsyncMessages(replies); } +bool +IsSurfaceDescriptorValid(const SurfaceDescriptor& aSurface) +{ + return aSurface.type() != SurfaceDescriptor::T__None && + aSurface.type() != SurfaceDescriptor::Tnull_t; +} + +uint8_t* +GetAddressFromDescriptor(const SurfaceDescriptor& aDescriptor) +{ + MOZ_ASSERT(IsSurfaceDescriptorValid(aDescriptor)); + MOZ_RELEASE_ASSERT(aDescriptor.type() == SurfaceDescriptor::TSurfaceDescriptorBuffer); + + auto memOrShmem = aDescriptor.get_SurfaceDescriptorBuffer().data(); + if (memOrShmem.type() == MemoryOrShmem::TShmem) { + return memOrShmem.get_Shmem().get<uint8_t>(); + } else { + return reinterpret_cast<uint8_t*>(memOrShmem.get_uintptr_t()); + } +} + +already_AddRefed<gfx::DataSourceSurface> +GetSurfaceForDescriptor(const SurfaceDescriptor& aDescriptor) +{ + uint8_t* data = GetAddressFromDescriptor(aDescriptor); + auto rgb = aDescriptor.get_SurfaceDescriptorBuffer().desc().get_RGBDescriptor(); + uint32_t stride = ImageDataSerializer::GetRGBStride(rgb); + return gfx::Factory::CreateWrappingDataSourceSurface(data, stride, rgb.size(), + rgb.format()); +} + +already_AddRefed<gfx::DrawTarget> +GetDrawTargetForDescriptor(const SurfaceDescriptor& aDescriptor, gfx::BackendType aBackend) +{ + uint8_t* data = GetAddressFromDescriptor(aDescriptor); + auto rgb = aDescriptor.get_SurfaceDescriptorBuffer().desc().get_RGBDescriptor(); + uint32_t stride = ImageDataSerializer::GetRGBStride(rgb); + return gfx::Factory::CreateDrawTargetForData(gfx::BackendType::CAIRO, + data, rgb.size(), + stride, rgb.format()); +} + +bool +ShadowLayerForwarder::AllocSurfaceDescriptor(const gfx::IntSize& aSize, + gfxContentType aContent, + SurfaceDescriptor* aBuffer) +{ + if (!IPCOpen()) { + return false; + } + return AllocSurfaceDescriptorWithCaps(aSize, aContent, DEFAULT_BUFFER_CAPS, aBuffer); +} + +bool +ShadowLayerForwarder::AllocSurfaceDescriptorWithCaps(const gfx::IntSize& aSize, + gfxContentType aContent, + uint32_t aCaps, + SurfaceDescriptor* aBuffer) +{ + if (!IPCOpen()) { + return false; + } + gfx::SurfaceFormat format = + gfxPlatform::GetPlatform()->Optimal2DFormatForContent(aContent); + size_t size = ImageDataSerializer::ComputeRGBBufferSize(aSize, format); + if (!size) { + return false; + } + + MemoryOrShmem bufferDesc; + if (IsSameProcess()) { + uint8_t* data = new (std::nothrow) uint8_t[size]; + if (!data) { + return false; + } + GfxMemoryImageReporter::DidAlloc(data); +#ifdef XP_MACOSX + // Workaround a bug in Quartz where drawing an a8 surface to another a8 + // surface with OP_SOURCE still requires the destination to be clear. + if (format == gfx::SurfaceFormat::A8) { + memset(data, 0, size); + } +#endif + bufferDesc = reinterpret_cast<uintptr_t>(data); + } else { + + mozilla::ipc::Shmem shmem; + if (!AllocUnsafeShmem(size, OptimalShmemType(), &shmem)) { + return false; + } + + bufferDesc = shmem; + } + + // Use an intermediate buffer by default. Skipping the intermediate buffer is + // only possible in certain configurations so let's keep it simple here for now. 
+ const bool hasIntermediateBuffer = true; + *aBuffer = SurfaceDescriptorBuffer(RGBDescriptor(aSize, format, hasIntermediateBuffer), + bufferDesc); + + return true; +} + +/* static */ bool +ShadowLayerForwarder::IsShmem(SurfaceDescriptor* aSurface) +{ + return aSurface && (aSurface->type() == SurfaceDescriptor::TSurfaceDescriptorBuffer) + && (aSurface->get_SurfaceDescriptorBuffer().data().type() == MemoryOrShmem::TShmem); +} + +void +ShadowLayerForwarder::DestroySurfaceDescriptor(SurfaceDescriptor* aSurface) +{ + MOZ_ASSERT(aSurface); + MOZ_ASSERT(IPCOpen()); + if (!IPCOpen() || !aSurface) { + return; + } + + SurfaceDescriptorBuffer& desc = aSurface->get_SurfaceDescriptorBuffer(); + switch (desc.data().type()) { + case MemoryOrShmem::TShmem: { + DeallocShmem(desc.data().get_Shmem()); + break; + } + case MemoryOrShmem::Tuintptr_t: { + uint8_t* ptr = (uint8_t*)desc.data().get_uintptr_t(); + GfxMemoryImageReporter::WillFree(ptr); + delete [] ptr; + break; + } + default: + NS_RUNTIMEABORT("surface type not implemented!"); + } + *aSurface = SurfaceDescriptor(); +} + } // namespace layers } // namespace mozilla
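The new LegacySurfaceDescriptorAllocator path above chooses heap memory when the forwarder is same-process and unsafe shmem otherwise, and hides that choice behind a SurfaceDescriptor. Below is a minimal sketch (not part of the patch) of the intended round trip, assuming a connected ShadowLayerForwarder inside the mozilla::layers namespace; the helper name and the fill color are illustrative.

static bool
PaintIntoDescriptor(ShadowLayerForwarder* aForwarder, const gfx::IntSize& aSize)
{
  SurfaceDescriptor desc;
  if (!aForwarder->AllocSurfaceDescriptor(aSize, gfxContentType::COLOR_ALPHA, &desc)) {
    return false;
  }

  // Wrap the allocated buffer (heap or shmem) in a DrawTarget without copying.
  RefPtr<gfx::DrawTarget> dt = GetDrawTargetForDescriptor(desc, gfx::BackendType::CAIRO);
  if (!dt) {
    aForwarder->DestroySurfaceDescriptor(&desc);
    return false;
  }

  dt->FillRect(gfx::Rect(0, 0, aSize.width, aSize.height),
               gfx::ColorPattern(gfx::Color(0.f, 0.f, 0.f, 1.f)));

  // Real callers would forward the descriptor to the compositor at this point;
  // either way the descriptor must eventually come back to
  // DestroySurfaceDescriptor() so the heap buffer or the Shmem is released.
  aForwarder->DestroySurfaceDescriptor(&desc);
  return true;
}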
--- a/gfx/layers/ipc/ShadowLayers.h +++ b/gfx/layers/ipc/ShadowLayers.h @@ -17,34 +17,35 @@ #include "mozilla/dom/ScreenOrientation.h" // for ScreenOrientation #include "mozilla/ipc/SharedMemory.h" // for SharedMemory, etc #include "mozilla/layers/CompositableForwarder.h" #include "mozilla/layers/CompositorTypes.h" // for OpenMode, etc #include "nsCOMPtr.h" // for already_AddRefed #include "nsRegion.h" // for nsIntRegion #include "nsTArrayForwardDeclare.h" // for InfallibleTArray #include "nsIWidget.h" +#include <vector> namespace mozilla { namespace layers { class EditReply; +class FixedSizeSmallShmemSectionAllocator; class ImageContainer; class Layer; class PLayerChild; class PLayerTransactionChild; class LayerTransactionChild; class ShadowableLayer; class SurfaceDescriptor; class TextureClient; class ThebesBuffer; class ThebesBufferData; class Transaction; - /** * We want to share layer trees across thread contexts and address * spaces for several reasons; chief among them * * - a parent process can paint a child process's layer tree while * the child process is blocked, say on content script. This is * important on mobile devices where UI responsiveness is key. * @@ -109,22 +110,33 @@ class Transaction; * synchronize the texture data held by compositables. Layer transactions * are always between the content thread and the compositor thread. * Compositable transactions are subset of a layer transaction with which only * compositables and textures can be manipulated, and does not always originate * from the content thread. (See CompositableForwarder.h and ImageBridgeChild.h) */ class ShadowLayerForwarder final : public CompositableForwarder + , public ShmemAllocator + , public LegacySurfaceDescriptorAllocator { friend class ClientLayerManager; public: virtual ~ShadowLayerForwarder(); + virtual ShmemAllocator* AsShmemAllocator() override { return this; } + + virtual ShadowLayerForwarder* AsLayerForwarder() override { return this; } + + virtual LegacySurfaceDescriptorAllocator* + AsLegacySurfaceDescriptorAllocator() override { return this; } + + FixedSizeSmallShmemSectionAllocator* GetTileLockAllocator(); + /** * Setup the IPDL actor for aCompositable to be part of layers * transactions. */ virtual void Connect(CompositableClient* aCompositable, ImageContainer* aImageContainer) override; virtual PTextureChild* CreateTexture(const SurfaceDescriptor& aSharedData, @@ -317,66 +329,86 @@ public: * calls Destroyed*Buffer(), which gives up control of the back * buffer descriptor. The actual back buffer surface is then * destroyed using DestroySharedSurface() just before notifying * the parent process. When the parent process is notified, the * LayerComposite also calls DestroySharedSurface() on its front * buffer, and the double-buffer pair is gone. */ - // ISurfaceAllocator + virtual bool AllocUnsafeShmem(size_t aSize, mozilla::ipc::SharedMemory::SharedMemoryType aType, mozilla::ipc::Shmem* aShmem) override; virtual bool AllocShmem(size_t aSize, mozilla::ipc::SharedMemory::SharedMemoryType aType, mozilla::ipc::Shmem* aShmem) override; virtual void DeallocShmem(mozilla::ipc::Shmem& aShmem) override; virtual bool IPCOpen() const override; + virtual bool IsSameProcess() const override; - virtual base::ProcessId ParentPid() const override; + + virtual MessageLoop* GetMessageLoop() const override { return mMessageLoop; } + + base::ProcessId GetParentPid() const; /** * Construct a shadow of |aLayer| on the "other side", at the * LayerManagerComposite. 
*/ PLayerChild* ConstructShadowFor(ShadowableLayer* aLayer); /** * Flag the next paint as the first for a document. */ void SetIsFirstPaint() { mIsFirstPaint = true; } void SetPaintSyncId(int32_t aSyncId) { mPaintSyncId = aSyncId; } static void PlatformSyncBeforeUpdate(); + virtual bool AllocSurfaceDescriptor(const gfx::IntSize& aSize, + gfxContentType aContent, + SurfaceDescriptor* aBuffer) override; + + virtual bool AllocSurfaceDescriptorWithCaps(const gfx::IntSize& aSize, + gfxContentType aContent, + uint32_t aCaps, + SurfaceDescriptor* aBuffer) override; + + virtual void DestroySurfaceDescriptor(SurfaceDescriptor* aSurface) override; + + // Returns true if aSurface wraps a Shmem. + static bool IsShmem(SurfaceDescriptor* aSurface); + protected: ShadowLayerForwarder(); #ifdef DEBUG void CheckSurfaceDescriptor(const SurfaceDescriptor* aDescriptor) const; #else void CheckSurfaceDescriptor(const SurfaceDescriptor* aDescriptor) const {} #endif bool InWorkerThread(); RefPtr<LayerTransactionChild> mShadowManager; private: Transaction* mTxn; + MessageLoop* mMessageLoop; std::vector<AsyncChildMessageData> mPendingAsyncMessages; DiagnosticTypes mDiagnosticTypes; bool mIsFirstPaint; bool mWindowOverlayChanged; int32_t mPaintSyncId; InfallibleTArray<PluginWindowData> mPluginWindowData; + FixedSizeSmallShmemSectionAllocator* mSectionAllocator; }; class CompositableClient; /** * A ShadowableLayer is a Layer can be shared with a parent context * through a ShadowLayerForwarder. A ShadowableLayer maps to a * Shadow*Layer in a parent context. @@ -403,12 +435,56 @@ public: virtual CompositableClient* GetCompositableClient() { return nullptr; } protected: ShadowableLayer() : mShadow(nullptr) {} PLayerChild* mShadow; }; +/// A simple shmem section allocator that can only allocate small +/// fixed size elements (only intended to be used to store tile +/// copy-on-write locks for now). +class FixedSizeSmallShmemSectionAllocator final : public ShmemSectionAllocator +{ +public: + enum AllocationStatus + { + STATUS_ALLOCATED, + STATUS_FREED + }; + + struct ShmemSectionHeapHeader + { + Atomic<uint32_t> mTotalBlocks; + Atomic<uint32_t> mAllocatedBlocks; + }; + + struct ShmemSectionHeapAllocation + { + Atomic<uint32_t> mStatus; + uint32_t mSize; + }; + + explicit FixedSizeSmallShmemSectionAllocator(ShmemAllocator* aShmProvider); + + ~FixedSizeSmallShmemSectionAllocator(); + + virtual bool AllocShmemSection(uint32_t aSize, ShmemSection* aShmemSection) override; + + virtual void DeallocShmemSection(ShmemSection& aShmemSection) override; + + virtual void MemoryPressure() override { ShrinkShmemSectionHeap(); } + + // can be called on the compositor process. + static void FreeShmemSection(ShmemSection& aShmemSection); + + void ShrinkShmemSectionHeap(); + +protected: + std::vector<mozilla::ipc::Shmem> mUsedShmems; + ShmemAllocator* mShmProvider; +}; + } // namespace layers } // namespace mozilla #endif // ifndef mozilla_layers_ShadowLayers_h
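GetTileLockAllocator() above hands out a FixedSizeSmallShmemSectionAllocator that carves small, fixed-size sections out of shared Shmem pages, primarily for tile copy-on-write locks. The sketch below (not part of the patch) shows one plausible way a lock could be placed in such a section; the ShmemSection accessors (shmem()/offset()), the HypotheticalTileLock struct, and the helper name are assumptions for illustration only.

struct HypotheticalTileLock
{
  Atomic<int32_t> mReadCount;
};

static bool
AllocateTileLock(ShadowLayerForwarder* aForwarder, ShmemSection* aSection)
{
  FixedSizeSmallShmemSectionAllocator* allocator = aForwarder->GetTileLockAllocator();
  if (!allocator ||
      !allocator->AllocShmemSection(sizeof(HypotheticalTileLock), aSection)) {
    return false;
  }

  // The section is a small slice of a larger shared Shmem page; several locks
  // share the same Shmem, which is why the allocator tracks per-block status.
  auto* lock = reinterpret_cast<HypotheticalTileLock*>(
    aSection->shmem().get<uint8_t>() + aSection->offset());
  lock->mReadCount = 1;
  return true;
}

On the content side the section is returned through DeallocShmemSection(); the compositor process, which has no allocator instance, can release it with the static FixedSizeSmallShmemSectionAllocator::FreeShmemSection().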
--- a/gfx/layers/opengl/GrallocTextureClient.cpp +++ b/gfx/layers/opengl/GrallocTextureClient.cpp @@ -6,16 +6,17 @@ #ifdef MOZ_WIDGET_GONK #include "mozilla/gfx/2D.h" #include "mozilla/layers/AsyncTransactionTracker.h" // for AsyncTransactionTracker #include "mozilla/layers/GrallocTextureClient.h" #include "mozilla/layers/CompositableForwarder.h" #include "mozilla/layers/ISurfaceAllocator.h" #include "mozilla/layers/ShadowLayerUtilsGralloc.h" +#include "mozilla/layers/SharedBufferManagerChild.h" #include "gfx2DGlue.h" #include "gfxPrefs.h" // for gfxPrefs #include "SharedSurfaceGralloc.h" #if defined(MOZ_WIDGET_GONK) && ANDROID_VERSION >= 17 #include <ui/Fence.h> #endif @@ -107,30 +108,30 @@ GrallocTextureData::~GrallocTextureData( { MOZ_COUNT_DTOR(GrallocTextureData); } void GrallocTextureData::Deallocate(ISurfaceAllocator* aAllocator) { MOZ_ASSERT(aAllocator); - if (aAllocator) { - aAllocator->DeallocGrallocBuffer(&mGrallocHandle); + if (aAllocator && aAllocator->IPCOpen()) { + SharedBufferManagerChild::GetSingleton()->DeallocGrallocBuffer(mGrallocHandle); } mGrallocHandle = null_t(); mGraphicBuffer = nullptr; } void GrallocTextureData::Forget(ISurfaceAllocator* aAllocator) { MOZ_ASSERT(aAllocator); - if (aAllocator) { - aAllocator->DropGrallocBuffer(&mGrallocHandle); + if (aAllocator && aAllocator->IPCOpen()) { + SharedBufferManagerChild::GetSingleton()->DropGrallocBuffer(mGrallocHandle); } mGrallocHandle = null_t(); mGraphicBuffer = nullptr; } bool GrallocTextureData::Serialize(SurfaceDescriptor& aOutDescriptor) @@ -275,20 +276,20 @@ GrallocTextureData::UpdateFromSurface(gf } // static GrallocTextureData* GrallocTextureData::Create(gfx::IntSize aSize, AndroidFormat aAndroidFormat, gfx::BackendType aMoz2dBackend, uint32_t aUsage, ISurfaceAllocator* aAllocator) { - if (!aAllocator) { + if (!aAllocator || !aAllocator->IPCOpen()) { return nullptr; } - int32_t maxSize = aAllocator->GetMaxTextureSize(); + int32_t maxSize = aAllocator->AsClientAllocator()->GetMaxTextureSize(); if (aSize.width > maxSize || aSize.height > maxSize) { return nullptr; } gfx::SurfaceFormat format; switch (aAndroidFormat) { case android::PIXEL_FORMAT_RGBA_8888: format = gfx::SurfaceFormat::B8G8R8A8; break; @@ -308,17 +309,17 @@ GrallocTextureData::Create(gfx::IntSize format = gfx::SurfaceFormat::UNKNOWN; } if (DisableGralloc(format, aSize)) { return nullptr; } MaybeMagicGrallocBufferHandle handle; - if (!aAllocator->AllocGrallocBuffer(aSize, aAndroidFormat, aUsage, &handle)) { + if (!SharedBufferManagerChild::GetSingleton()->AllocGrallocBuffer(aSize, aAndroidFormat, aUsage, &handle)) { return nullptr; } sp<GraphicBuffer> graphicBuffer = GetGraphicBufferFrom(handle); if (!graphicBuffer.get()) { return nullptr; }
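With this change the gralloc IPC traffic no longer goes through the ISurfaceAllocator itself: allocation and deallocation are routed through the SharedBufferManagerChild singleton, and the allocator is only consulted for its max texture size and IPC state. A rough sketch of the resulting lifecycle on MOZ_WIDGET_GONK follows (not part of the patch); the usage flags, the plain delete, and the helper name are illustrative assumptions, since real callers wrap the data in a TextureClient that owns it.

static bool
GrallocRoundTrip(ISurfaceAllocator* aAllocator, const gfx::IntSize& aSize)
{
  // Create() now bails out when the allocator is null or its IPC channel is
  // closed, and asks SharedBufferManagerChild for the actual buffer.
  GrallocTextureData* data =
    GrallocTextureData::Create(aSize, android::PIXEL_FORMAT_RGBA_8888,
                               gfx::BackendType::NONE,
                               android::GraphicBuffer::USAGE_SW_READ_OFTEN |
                               android::GraphicBuffer::USAGE_SW_WRITE_OFTEN,
                               aAllocator);
  if (!data) {
    return false;
  }

  // ... lock, draw into and share the buffer in real code ...

  // Deallocate() (or Forget(), when the compositor side keeps the buffer)
  // sends the matching release through SharedBufferManagerChild::GetSingleton().
  data->Deallocate(aAllocator);
  delete data;
  return true;
}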
--- a/gfx/thebes/gfxReusableSharedImageSurfaceWrapper.cpp +++ b/gfx/thebes/gfxReusableSharedImageSurfaceWrapper.cpp @@ -33,17 +33,17 @@ gfxReusableSharedImageSurfaceWrapper::Re void gfxReusableSharedImageSurfaceWrapper::ReadUnlock() { int32_t readCount = mSurface->ReadUnlock(); MOZ_ASSERT(readCount >= 0, "Read count should not be negative"); if (readCount == 0) { - mAllocator->DeallocShmem(mSurface->GetShmem()); + mAllocator->AsShmemAllocator()->DeallocShmem(mSurface->GetShmem()); } } gfxReusableSurfaceWrapper* gfxReusableSharedImageSurfaceWrapper::GetWritable(gfxImageSurface** aSurface) { NS_ASSERT_OWNINGTHREAD(gfxReusableSharedImageSurfaceWrapper); @@ -51,17 +51,17 @@ gfxReusableSharedImageSurfaceWrapper::Ge MOZ_ASSERT(readCount > 0, "A ReadLock must be held when calling GetWritable"); if (readCount == 1) { *aSurface = mSurface; return this; } // Something else is reading the surface, copy it RefPtr<gfxSharedImageSurface> copySurface = - gfxSharedImageSurface::CreateUnsafe(mAllocator.get(), mSurface->GetSize(), mSurface->Format()); + gfxSharedImageSurface::CreateUnsafe(mAllocator->AsShmemAllocator(), mSurface->GetSize(), mSurface->Format()); copySurface->CopyFrom(mSurface); *aSurface = copySurface; // We need to create a new wrapper since this wrapper has an external ReadLock gfxReusableSurfaceWrapper* wrapper = new gfxReusableSharedImageSurfaceWrapper(mAllocator, copySurface); // No need to release the ReadLock on the surface, this will happen when // the wrapper is destroyed.
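The wrapper above implements a read-locked, copy-on-write shmem surface: every consumer that needs the pixels takes a read lock, and when the last ReadUnlock() drops the count to zero the backing Shmem is returned through the allocator's ShmemAllocator. A small sketch of that protocol follows (not part of the patch); it assumes the paired ReadLock() method on gfxReusableSurfaceWrapper and uses an illustrative helper name.

static void
ShareWithCompositor(gfxReusableSurfaceWrapper* aWrapper)
{
  // One lock per consumer; this one stands in for the compositor's reference.
  aWrapper->ReadLock();

  // ... serialize the underlying Shmem and send it over IPC in real code ...

  // If this was the last outstanding lock, ReadUnlock() hands the shared
  // memory back via AsShmemAllocator()->DeallocShmem().
  aWrapper->ReadUnlock();
}

GetWritable() relies on the same count: with a single reader it returns the wrapper itself, otherwise it copies the pixels into a fresh shmem surface and returns a new wrapper, so the remaining readers keep using the original.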
--- a/widget/nsBaseWidget.cpp +++ b/widget/nsBaseWidget.cpp @@ -2115,26 +2115,26 @@ nsIWidget::SnapshotWidgetOnScreen() if (!cc->SendMakeWidgetSnapshot(surface)) { return nullptr; } RefPtr<gfx::DataSourceSurface> snapshot = GetSurfaceForDescriptor(surface); RefPtr<gfx::DrawTarget> dt = gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(size, gfx::SurfaceFormat::B8G8R8A8); if (!snapshot || !dt) { - forwarder->DestroySharedSurface(&surface); + forwarder->DestroySurfaceDescriptor(&surface); return nullptr; } dt->DrawSurface(snapshot, gfx::Rect(gfx::Point(), gfx::Size(size)), gfx::Rect(gfx::Point(), gfx::Size(size)), gfx::DrawSurfaceOptions(gfx::Filter::POINT)); - forwarder->DestroySharedSurface(&surface); + forwarder->DestroySurfaceDescriptor(&surface); return dt->Snapshot(); } NS_IMETHODIMP_(nsIWidget::NativeIMEContext) nsIWidget::GetNativeIMEContext() { return NativeIMEContext(this); }
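For completeness, a sketch of the receiving end of this snapshot path (not part of the patch): before mapping the descriptor, a consumer can check it with IsSurfaceDescriptorValid() and, if it matters, distinguish shmem from a same-process pointer with ShadowLayerForwarder::IsShmem(). The helper name is illustrative.

static already_AddRefed<gfx::DataSourceSurface>
MapSnapshotDescriptor(const SurfaceDescriptor& aDescriptor)
{
  if (!IsSurfaceDescriptorValid(aDescriptor)) {
    return nullptr;
  }
  // GetSurfaceForDescriptor() wraps the buffer without copying, whether it is
  // a cross-process Shmem or a same-process uintptr_t pointer.
  return GetSurfaceForDescriptor(aDescriptor);
}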