--- a/gfx/gl/GLContext.cpp
+++ b/gfx/gl/GLContext.cpp
@@ -76,16 +76,17 @@ static const char* const sExtensionNames
"GL_ANGLE_depth_texture",
"GL_ANGLE_framebuffer_blit",
"GL_ANGLE_framebuffer_multisample",
"GL_ANGLE_instanced_arrays",
"GL_ANGLE_texture_compression_dxt3",
"GL_ANGLE_texture_compression_dxt5",
"GL_ANGLE_timer_query",
"GL_APPLE_client_storage",
+ "GL_APPLE_fence",
"GL_APPLE_framebuffer_multisample",
"GL_APPLE_sync",
"GL_APPLE_texture_range",
"GL_APPLE_vertex_array_object",
"GL_ARB_ES2_compatibility",
"GL_ARB_ES3_compatibility",
"GL_ARB_color_buffer_float",
"GL_ARB_compatibility",
@@ -1075,16 +1076,24 @@ GLContext::LoadMoreSymbols(const char* p
if (IsExtensionSupported(APPLE_texture_range)) {
const SymLoadStruct symbols[] = {
{ (PRFuncPtr*) &mSymbols.fTextureRangeAPPLE, { "TextureRangeAPPLE", nullptr } },
END_SYMBOLS
};
fnLoadForExt(symbols, APPLE_texture_range);
}
+ if (IsExtensionSupported(APPLE_fence)) {
+ const SymLoadStruct symbols[] = {
+ { (PRFuncPtr*) &mSymbols.fFinishObjectAPPLE, { "FinishObjectAPPLE", nullptr } },
+ END_SYMBOLS
+ };
+ fnLoadForExt(symbols, APPLE_fence);
+ }
+
if (IsSupported(GLFeature::vertex_array_object)) {
const SymLoadStruct coreSymbols[] = {
{ (PRFuncPtr*) &mSymbols.fIsVertexArray, { "IsVertexArray", nullptr } },
{ (PRFuncPtr*) &mSymbols.fGenVertexArrays, { "GenVertexArrays", nullptr } },
{ (PRFuncPtr*) &mSymbols.fBindVertexArray, { "BindVertexArray", nullptr } },
{ (PRFuncPtr*) &mSymbols.fDeleteVertexArrays, { "DeleteVertexArrays", nullptr } },
END_SYMBOLS
};
--- a/gfx/gl/GLContext.h
+++ b/gfx/gl/GLContext.h
@@ -375,16 +375,17 @@ public:
ANGLE_depth_texture,
ANGLE_framebuffer_blit,
ANGLE_framebuffer_multisample,
ANGLE_instanced_arrays,
ANGLE_texture_compression_dxt3,
ANGLE_texture_compression_dxt5,
ANGLE_timer_query,
APPLE_client_storage,
+ APPLE_fence,
APPLE_framebuffer_multisample,
APPLE_sync,
APPLE_texture_range,
APPLE_vertex_array_object,
ARB_ES2_compatibility,
ARB_ES3_compatibility,
ARB_color_buffer_float,
ARB_compatibility,
@@ -3298,16 +3299,26 @@ public:
void fResolveMultisampleFramebufferAPPLE() {
BEFORE_GL_CALL;
ASSERT_SYMBOL_PRESENT(fResolveMultisampleFramebufferAPPLE);
mSymbols.fResolveMultisampleFramebufferAPPLE();
AFTER_GL_CALL;
}
// -----------------------------------------------------------------------------
+// APPLE_fence
+
+ void fFinishObjectAPPLE(GLenum object, GLint name) {
+ BEFORE_GL_CALL;
+ ASSERT_SYMBOL_PRESENT(fFinishObjectAPPLE);
+ mSymbols.fFinishObjectAPPLE(object, name);
+ AFTER_GL_CALL;
+ }
+
+// -----------------------------------------------------------------------------
// prim_restart
void fPrimitiveRestartIndex(GLuint index) {
BEFORE_GL_CALL;
ASSERT_SYMBOL_PRESENT(fPrimitiveRestartIndex);
mSymbols.fPrimitiveRestartIndex(index);
AFTER_GL_CALL;
}
--- a/gfx/gl/GLContextSymbols.h
+++ b/gfx/gl/GLContextSymbols.h
@@ -132,16 +132,17 @@ struct GLContextSymbols final
void (GLAPIENTRY * fStencilMaskSeparate)(GLenum, GLuint);
void (GLAPIENTRY * fStencilOp)(GLenum, GLenum, GLenum);
void (GLAPIENTRY * fStencilOpSeparate)(GLenum, GLenum, GLenum, GLenum);
void (GLAPIENTRY * fTexImage2D)(GLenum, GLint, GLint, GLsizei, GLsizei, GLint,
GLenum, GLenum, const GLvoid*);
void (GLAPIENTRY * fTexSubImage2D)(GLenum, GLint, GLint, GLint, GLsizei,
GLsizei, GLenum, GLenum, const void*);
void (GLAPIENTRY * fTextureRangeAPPLE)(GLenum, GLsizei, GLvoid*);
+ void (GLAPIENTRY * fFinishObjectAPPLE)(GLenum, GLint);
void (GLAPIENTRY * fUniform1f)(GLint, GLfloat);
void (GLAPIENTRY * fUniform1fv)(GLint, GLsizei, const GLfloat*);
void (GLAPIENTRY * fUniform1i)(GLint, GLint);
void (GLAPIENTRY * fUniform1iv)(GLint, GLsizei, const GLint*);
void (GLAPIENTRY * fUniform2f)(GLint, GLfloat, GLfloat);
void (GLAPIENTRY * fUniform2fv)(GLint, GLsizei, const GLfloat*);
void (GLAPIENTRY * fUniform2i)(GLint, GLint, GLint);
void (GLAPIENTRY * fUniform2iv)(GLint, GLsizei, const GLint*);
--- a/gfx/layers/TextureSourceProvider.cpp
+++ b/gfx/layers/TextureSourceProvider.cpp
@@ -1,32 +1,43 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/layers/TextureSourceProvider.h"
#include "mozilla/layers/TextureHost.h"
+#include "mozilla/layers/PTextureParent.h"
namespace mozilla {
namespace layers {
TextureSourceProvider::~TextureSourceProvider()
{
ReadUnlockTextures();
}
void
TextureSourceProvider::ReadUnlockTextures()
{
+ nsClassHashtable<nsUint32HashKey, nsTArray<uint64_t>> texturesIdsToUnlockByPid;
for (auto& texture : mUnlockAfterComposition) {
texture->ReadUnlock();
+ auto actor = texture->GetIPDLActor();
+ if (actor) {
+ pid_t pid = actor->OtherPid();
+ nsTArray<uint64_t>* textureIds = texturesIdsToUnlockByPid.LookupOrAdd(pid);
+ textureIds->AppendElement(TextureHost::GetTextureSerial(actor));
+ }
}
mUnlockAfterComposition.Clear();
+ for (auto it = texturesIdsToUnlockByPid.ConstIter(); !it.Done(); it.Next()) {
+ ipc::SharedMemoryBasic::SetTexturesUnlocked(it.Key(), *it.UserData());
+ }
}
void
TextureSourceProvider::UnlockAfterComposition(TextureHost* aTexture)
{
mUnlockAfterComposition.AppendElement(aTexture);
}
--- a/gfx/layers/client/ClientLayerManager.cpp
+++ b/gfx/layers/client/ClientLayerManager.cpp
@@ -396,16 +396,20 @@ ClientLayerManager::EndTransactionIntern
void
ClientLayerManager::StorePluginWidgetConfigurations(const nsTArray<nsIWidget::Configuration>& aConfigurations)
{
if (mForwarder) {
mForwarder->StorePluginWidgetConfigurations(aConfigurations);
}
}
+void ClientLayerManager::SyncTextures()
+{
+}
+
void
ClientLayerManager::EndTransaction(DrawPaintedLayerCallback aCallback,
void* aCallbackData,
EndTransactionFlags aFlags)
{
if (!mForwarder->IPCOpen()) {
mInTransaction = false;
return;
--- a/gfx/layers/client/ClientLayerManager.h
+++ b/gfx/layers/client/ClientLayerManager.h
@@ -147,16 +147,18 @@ public:
aConfigurations) override;
// Drop cached resources and ask our shadow manager to do the same,
// if we have one.
virtual void ClearCachedResources(Layer* aSubtree = nullptr) override;
void HandleMemoryPressure();
+ void SyncTextures();
+
void SetRepeatTransaction() { mRepeatTransaction = true; }
bool GetRepeatTransaction() { return mRepeatTransaction; }
bool IsRepeatTransaction() { return mIsRepeatTransaction; }
void SetTransactionIncomplete() { mTransactionIncomplete = true; }
void SetQueuedAsyncPaints() { mQueuedAsyncPaints = true; }
--- a/gfx/layers/client/MultiTiledContentClient.cpp
+++ b/gfx/layers/client/MultiTiledContentClient.cpp
@@ -255,16 +255,46 @@ void ClientMultiTiledLayerBuffer::Update
oldRetainedTiles[oldIndex].DiscardBuffers();
}
}
oldRetainedTiles.Clear();
nsIntRegion paintRegion = aPaintRegion;
nsIntRegion dirtyRegion = aDirtyRegion;
+
+ AutoTArray<uint64_t, 10> syncTextureSerials;
+ SurfaceMode mode;
+ Unused << GetContentType(&mode);
+
+ // Pre-pass through the tiles (mirroring the filter logic below) to gather
+ // texture IDs that we need to ensure are unused by the GPU before we
+ // continue.
+ if (!paintRegion.IsEmpty()) {
+ MOZ_ASSERT(mPaintStates.size() == 0);
+ for (size_t i = 0; i < newTileCount; ++i) {
+ const TileCoordIntPoint tileCoord = newTiles.TileCoord(i);
+
+ IntPoint tileOffset = GetTileOffset(tileCoord);
+ nsIntRegion tileDrawRegion = IntRect(tileOffset, scaledTileSize);
+ tileDrawRegion.AndWith(paintRegion);
+
+ if (tileDrawRegion.IsEmpty()) {
+ continue;
+ }
+
+ TileClient& tile = mRetainedTiles[i];
+ tile.GetSyncTextureSerials(mode, syncTextureSerials);
+ }
+ }
+
+ if (syncTextureSerials.Length() > 0) {
+ mManager->AsShadowForwarder()->SyncTextures(syncTextureSerials);
+ }
+
if (!paintRegion.IsEmpty()) {
MOZ_ASSERT(mPaintStates.size() == 0);
for (size_t i = 0; i < newTileCount; ++i) {
const TileCoordIntPoint tileCoord = newTiles.TileCoord(i);
IntPoint tileOffset = GetTileOffset(tileCoord);
nsIntRegion tileDrawRegion = IntRect(tileOffset, scaledTileSize);
tileDrawRegion.AndWith(paintRegion);
@@ -498,16 +528,22 @@ ClientMultiTiledLayerBuffer::ValidateTil
mPaintTiles.push_back(paintTile);
mTilingOrigin.x = std::min(mTilingOrigin.x, paintTile.mTileOrigin.x);
mTilingOrigin.y = std::min(mTilingOrigin.y, paintTile.mTileOrigin.y);
// The new buffer is now validated, remove the dirty region from it.
aTile.mInvalidBack.SubOut(tileDirtyRegion);
+ uint64_t fwdTransactionId = mCompositableClient.GetForwarder()->GetFwdTransactionId();
+ backBuffer->SetLastFwdTransactionId(fwdTransactionId);
+ if (backBufferOnWhite) {
+ backBufferOnWhite->SetLastFwdTransactionId(fwdTransactionId);
+ }
+
aTile.Flip();
return true;
}
/**
* This function takes the transform stored in aTransformToCompBounds
* (which was generated in GetTransformToAncestorsParentLayer), and
--- a/gfx/layers/client/SingleTiledContentClient.cpp
+++ b/gfx/layers/client/SingleTiledContentClient.cpp
@@ -135,16 +135,22 @@ ClientSingleTiledLayerBuffer::PaintThebe
SurfaceMode mode;
gfxContentType content = GetContentType(&mode);
mFormat = gfxPlatform::GetPlatform()->OptimalFormatForContent(content);
if (mTile.IsPlaceholderTile()) {
mTile.SetTextureAllocator(this);
}
+ AutoTArray<uint64_t, 2> syncTextureSerials;
+ mTile.GetSyncTextureSerials(mode, syncTextureSerials);
+ if (syncTextureSerials.Length() > 0) {
+ mManager->AsShadowForwarder()->SyncTextures(syncTextureSerials);
+ }
+
// The dirty region relative to the top-left of the tile.
nsIntRegion tileVisibleRegion = aNewValidRegion.MovedBy(-mTilingOrigin);
nsIntRegion tileDirtyRegion = paintRegion.MovedBy(-mTilingOrigin);
std::vector<RefPtr<TextureClient>> paintClients;
std::vector<CapturedTiledPaintState::Copy> paintCopies;
std::vector<CapturedTiledPaintState::Clear> paintClears;
--- a/gfx/layers/client/TiledContentClient.cpp
+++ b/gfx/layers/client/TiledContentClient.cpp
@@ -602,16 +602,39 @@ CreateBackBufferTexture(TextureClient* a
if (!aCompositable.AddTextureClient(texture)) {
gfxCriticalError() << "[Tiling:Client] Failed to connect a TextureClient";
return nullptr;
}
return texture.forget();
}
+void
+TileClient::GetSyncTextureSerials(SurfaceMode aMode, nsTArray<uint64_t>& aSerials)
+{
+ if (mFrontBuffer &&
+ mFrontBuffer->HasIntermediateBuffer() &&
+ !mFrontBuffer->IsReadLocked() &&
+ (aMode != SurfaceMode::SURFACE_COMPONENT_ALPHA || (
+ mFrontBufferOnWhite && !mFrontBufferOnWhite->IsReadLocked())))
+ {
+ return;
+ }
+
+ if (mBackBuffer && mBackBuffer->IsReadLocked()) {
+ aSerials.AppendElement(mBackBuffer->GetSerial());
+ }
+
+ if (aMode == SurfaceMode::SURFACE_COMPONENT_ALPHA &&
+ mBackBufferOnWhite &&
+ mBackBufferOnWhite->IsReadLocked()) {
+ aSerials.AppendElement(mBackBufferOnWhite->GetSerial());
+ }
+}
+
TextureClient*
TileClient::GetBackBuffer(CompositableClient& aCompositable,
const nsIntRegion& aDirtyRegion,
const nsIntRegion& aVisibleRegion,
gfxContentType aContent,
SurfaceMode aMode,
nsIntRegion& aAddPaintedRegion,
TilePaintFlags aFlags,
@@ -648,39 +671,45 @@ TileClient::GetBackBuffer(CompositableCl
// later (copying pixels and texture upload). But this could increase
// our memory usage and lead to OOM more frequently from spikes in usage,
// so we have this behavior behind a pref.
if (!gfxPrefs::LayersTileRetainBackBuffer()) {
DiscardBackBuffer();
}
Flip();
} else {
- if (!mBackBuffer || mBackBuffer->IsReadLocked()) {
+ if (!mBackBuffer) {
mBackBuffer.Set(this,
CreateBackBufferTexture(mBackBuffer, aCompositable, mAllocator)
);
+
if (!mBackBuffer) {
DiscardBackBuffer();
DiscardFrontBuffer();
return nullptr;
}
mInvalidBack = IntRect(IntPoint(), mBackBuffer->GetSize());
}
- if (aMode == SurfaceMode::SURFACE_COMPONENT_ALPHA
- && (!mBackBufferOnWhite || mBackBufferOnWhite->IsReadLocked())) {
- mBackBufferOnWhite = CreateBackBufferTexture(
- mBackBufferOnWhite, aCompositable, mAllocator
- );
+ MOZ_ASSERT(!mBackBuffer->IsReadLocked());
+
+ if (aMode == SurfaceMode::SURFACE_COMPONENT_ALPHA) {
if (!mBackBufferOnWhite) {
- DiscardBackBuffer();
- DiscardFrontBuffer();
- return nullptr;
+ mBackBufferOnWhite = CreateBackBufferTexture(
+ mBackBufferOnWhite, aCompositable, mAllocator
+ );
+ if (!mBackBufferOnWhite) {
+ DiscardBackBuffer();
+ DiscardFrontBuffer();
+ return nullptr;
+ }
+ mInvalidBack = IntRect(IntPoint(), mBackBufferOnWhite->GetSize());
}
- mInvalidBack = IntRect(IntPoint(), mBackBufferOnWhite->GetSize());
+
+ MOZ_ASSERT(!mBackBufferOnWhite->IsReadLocked());
}
ValidateBackBufferFromFront(aDirtyRegion, aVisibleRegion, aAddPaintedRegion, aFlags, aCopies, aClients);
}
OpenMode lockMode = aFlags & TilePaintFlags::Async ? OpenMode::OPEN_READ_WRITE_ASYNC
: OpenMode::OPEN_READ_WRITE;
--- a/gfx/layers/client/TiledContentClient.h
+++ b/gfx/layers/client/TiledContentClient.h
@@ -109,16 +109,18 @@ struct TileClient
*/
void Flip();
void DumpTexture(std::stringstream& aStream, TextureDumpMode aCompress) {
// TODO We should combine the OnWhite/OnBlack here an just output a single image.
CompositableClient::DumpTextureClient(aStream, mFrontBuffer, aCompress);
}
+ void GetSyncTextureSerials(SurfaceMode aMode, nsTArray<uint64_t>& aSerials);
+
/**
* Returns an unlocked TextureClient that can be used for writing new
* data to the tile. This may flip the front-buffer to the back-buffer if
* the front-buffer is still locked by the host, or does not have an
* internal buffer (and so will always be locked).
*
* If getting the back buffer required copying pixels from the front buffer
* then the copied region is stored in aAddPaintedRegion so the host side
@@ -306,16 +308,19 @@ public:
{}
virtual void PaintThebes(const nsIntRegion& aNewValidRegion,
const nsIntRegion& aPaintRegion,
const nsIntRegion& aDirtyRegion,
LayerManager::DrawPaintedLayerCallback aCallback,
void* aCallbackData,
TilePaintFlags aFlags) = 0;
+ virtual void GetSyncTextureSerials(const nsIntRegion& aPaintRegion,
+ const nsIntRegion& aDirtyRegion,
+ nsTArray<uint64_t>& aSerials) { return; }
virtual bool SupportsProgressiveUpdate() = 0;
virtual bool ProgressiveUpdate(const nsIntRegion& aValidRegion,
const nsIntRegion& aInvalidRegion,
const nsIntRegion& aOldValidRegion,
nsIntRegion& aOutDrawnRegion,
BasicTiledLayerPaintData* aPaintData,
LayerManager::DrawPaintedLayerCallback aCallback,
--- a/gfx/layers/composite/TextureHost.cpp
+++ b/gfx/layers/composite/TextureHost.cpp
@@ -163,16 +163,22 @@ TextureHost::GetTextureSerial(PTexturePa
}
PTextureParent*
TextureHost::GetIPDLActor()
{
return mActor;
}
+uint64_t
+TextureHost::GetLastFwdTransactionId()
+{
+ return mFwdTransactionId;
+}
+
void
TextureHost::SetLastFwdTransactionId(uint64_t aTransactionId)
{
MOZ_ASSERT(mFwdTransactionId <= aTransactionId);
mFwdTransactionId = aTransactionId;
}
// implemented in TextureHostOGL.cpp
@@ -363,21 +369,29 @@ TextureHost::TextureHost(TextureFlags aF
, mCompositableCount(0)
, mFwdTransactionId(0)
, mReadLocked(false)
{
}
TextureHost::~TextureHost()
{
- // If we still have a ReadLock, unlock it. At this point we don't care about
- // the texture client being written into on the other side since it should be
- // destroyed by now. But we will hit assertions if we don't ReadUnlock before
- // destroying the lock itself.
- ReadUnlock();
+ if (mReadLocked) {
+ auto actor = GetIPDLActor();
+ if (actor) {
+ AutoTArray<uint64_t, 1> serials;
+ serials.AppendElement(TextureHost::GetTextureSerial(actor));
+ ipc::SharedMemoryBasic::SetTexturesUnlocked(actor->OtherPid(), serials);
+ }
+ // If we still have a ReadLock, unlock it. At this point we don't care about
+ // the texture client being written into on the other side since it should be
+ // destroyed by now. But we will hit assertions if we don't ReadUnlock before
+ // destroying the lock itself.
+ ReadUnlock();
+ }
}
void TextureHost::Finalize()
{
if (!(GetFlags() & TextureFlags::DEALLOCATE_CLIENT)) {
DeallocateSharedData();
DeallocateDeviceData();
}
@@ -905,16 +919,23 @@ BufferTextureHost::UnbindTextureSource()
// the ReadUnlock() to the next end of composition.
if (mFirstSource && mFirstSource->IsDirectMap()) {
if (mProvider) {
mProvider->UnlockAfterComposition(this);
return;
}
}
+ auto actor = GetIPDLActor();
+ if (actor) {
+ AutoTArray<uint64_t, 1> serials;
+ serials.AppendElement(TextureHost::GetTextureSerial(actor));
+ ipc::SharedMemoryBasic::SetTexturesUnlocked(actor->OtherPid(), serials);
+ }
+
// This texture is not used by any layer anymore.
// If the texture doesn't have an intermediate buffer, it means we are
// compositing synchronously on the CPU, so we don't need to wait until
// the end of the next composition to ReadUnlock (which other textures do
// by default).
// If the texture has an intermediate buffer we don't care either because
// texture uploads are also performed synchronously for BufferTextureHost.
ReadUnlock();
@@ -1299,19 +1320,25 @@ TextureParent::Init(const SurfaceDescrip
void
TextureParent::Destroy()
{
if (!mTextureHost) {
return;
}
- // ReadUnlock here to make sure the ReadLock's shmem does not outlive the
- // protocol that created it.
- mTextureHost->ReadUnlock();
+ if (mTextureHost->mReadLocked) {
+ AutoTArray<uint64_t, 1> serials;
+ serials.AppendElement(GetSerial());
+ ipc::SharedMemoryBasic::SetTexturesUnlocked(OtherPid(), serials);
+
+ // ReadUnlock here to make sure the ReadLock's shmem does not outlive the
+ // protocol that created it.
+ mTextureHost->ReadUnlock();
+ }
if (mTextureHost->GetFlags() & TextureFlags::DEALLOCATE_CLIENT) {
mTextureHost->ForgetSharedData();
}
mTextureHost->mActor = nullptr;
mTextureHost = nullptr;
}
--- a/gfx/layers/composite/TextureHost.h
+++ b/gfx/layers/composite/TextureHost.h
@@ -613,16 +613,18 @@ public:
UnbindTextureSource();
// Send mFwdTransactionId to client side if necessary.
NotifyNotUsed();
}
}
int NumCompositableRefs() const { return mCompositableCount; }
+ uint64_t GetLastFwdTransactionId();
+
void SetLastFwdTransactionId(uint64_t aTransactionId);
void DeserializeReadLock(const ReadLockDescriptor& aDesc,
ISurfaceAllocator* aAllocator);
void SetReadLocked();
TextureReadLock* GetReadLock() { return mReadLock; }
--- a/gfx/layers/composite/TiledContentHost.cpp
+++ b/gfx/layers/composite/TiledContentHost.cpp
@@ -293,16 +293,19 @@ TiledLayerBufferComposite::UseTiles(cons
TilesPlacement newTiles(aTiles.firstTileX(), aTiles.firstTileY(),
aTiles.retainedWidth(), aTiles.retainedHeight());
const InfallibleTArray<TileDescriptor>& tileDescriptors = aTiles.tiles();
TextureSourceRecycler oldRetainedTiles(Move(mRetainedTiles));
mRetainedTiles.SetLength(tileDescriptors.Length());
+ AutoTArray<uint64_t, 10> lockedTextureSerials;
+ pid_t lockedTexturePid = 0;
+
// Step 1, deserialize the incoming set of tiles into mRetainedTiles, and attempt
// to recycle the TextureSource for any repeated tiles.
//
// Since we don't have any retained 'tile' object, we have to search for instances
// of the same TextureHost in the old tile set. The cost of binding a TextureHost
// to a TextureSource for gralloc (binding EGLImage to GL texture) can be really
// high, so we avoid this whenever possible.
for (size_t i = 0; i < tileDescriptors.Length(); i++) {
@@ -317,24 +320,37 @@ TiledLayerBufferComposite::UseTiles(cons
continue;
}
const TexturedTileDescriptor& texturedDesc = tileDesc.get_TexturedTileDescriptor();
tile.mTextureHost = TextureHost::AsTextureHost(texturedDesc.textureParent());
if (texturedDesc.readLocked()) {
tile.mTextureHost->SetReadLocked();
+ auto actor = tile.mTextureHost->GetIPDLActor();
+ if (actor) {
+ lockedTextureSerials.AppendElement(TextureHost::GetTextureSerial(actor));
+
+ if (lockedTexturePid) {
+ MOZ_ASSERT(lockedTexturePid == actor->OtherPid());
+ }
+ lockedTexturePid = actor->OtherPid();
+ }
}
if (texturedDesc.textureOnWhite().type() == MaybeTexture::TPTextureParent) {
tile.mTextureHostOnWhite = TextureHost::AsTextureHost(
texturedDesc.textureOnWhite().get_PTextureParent()
);
if (texturedDesc.readLockedOnWhite()) {
tile.mTextureHostOnWhite->SetReadLocked();
+ auto actor = tile.mTextureHostOnWhite->GetIPDLActor();
+ if (actor) {
+ lockedTextureSerials.AppendElement(TextureHost::GetTextureSerial(actor));
+ }
}
}
tile.mTileCoord = newTiles.TileCoord(i);
// If this same tile texture existed in the old tile set then this will move the texture
// source into our new tile.
oldRetainedTiles.RecycleTextureSourceForTile(tile);
@@ -349,16 +365,18 @@ TiledLayerBufferComposite::UseTiles(cons
// We need to begin fading it in (if enabled via layers.tiles.fade-in.enabled)
tile.mFadeStart = TimeStamp::Now();
aLayerManager->CompositeUntil(
tile.mFadeStart + TimeDuration::FromMilliseconds(gfxPrefs::LayerTileFadeInDuration()));
}
}
+  if (lockedTexturePid) ipc::SharedMemoryBasic::SetTexturesLocked(lockedTexturePid, lockedTextureSerials);
+
// Step 2, attempt to recycle unused texture sources from the old tile set into new tiles.
//
// For gralloc, binding a new TextureHost to the existing TextureSource is the fastest way
// to ensure that any implicit locking on the old gralloc image is released.
for (TileHost& tile : mRetainedTiles) {
if (!tile.mTextureHost || tile.mTextureSource) {
continue;
}
--- a/gfx/layers/ipc/CompositorBridgeParent.cpp
+++ b/gfx/layers/ipc/CompositorBridgeParent.cpp
@@ -1054,16 +1054,17 @@ CompositorBridgeParent::CompositeToTarge
#ifdef MOZ_DUMP_PAINTING
if (gfxPrefs::DumpHostLayers()) {
printf_stderr("Painting --- compositing layer tree:\n");
mLayerManager->Dump(/* aSorted = */ true);
}
#endif
mLayerManager->SetDebugOverlayWantsNextFrame(false);
+
mLayerManager->EndTransaction(time);
if (!aTarget) {
TimeStamp end = TimeStamp::Now();
DidComposite(start, end);
}
// We're not really taking advantage of the stored composite-again-time here.
--- a/gfx/layers/ipc/LayerTransactionParent.cpp
+++ b/gfx/layers/ipc/LayerTransactionParent.cpp
@@ -161,16 +161,17 @@ LayerTransactionParent::RecvUpdate(const
{
AUTO_PROFILER_TRACING("Paint", "LayerTransaction");
AUTO_PROFILER_LABEL("LayerTransactionParent::RecvUpdate", GRAPHICS);
TimeStamp updateStart = TimeStamp::Now();
MOZ_LAYERS_LOG(("[ParentSide] received txn with %zu edits", aInfo.cset().Length()));
+
UpdateFwdTransactionId(aInfo.fwdTransactionId());
if (mDestroyed || !mLayerManager || mLayerManager->IsDestroyed()) {
for (const auto& op : aInfo.toDestroy()) {
DestroyActor(op);
}
return IPC_OK();
}
--- a/gfx/layers/ipc/ShadowLayers.cpp
+++ b/gfx/layers/ipc/ShadowLayers.cpp
@@ -804,16 +804,30 @@ ShadowLayerForwarder::SetLayerObserverEp
{
if (!IPCOpen()) {
return;
}
Unused << mShadowManager->SendSetLayerObserverEpoch(aLayerObserverEpoch);
}
void
+ShadowLayerForwarder::SyncTextures(const nsTArray<uint64_t>& aSerials)
+{
+ if (!IPCOpen()) {
+ return;
+ }
+
+ auto compositorBridge = GetCompositorBridgeChild();
+ if (compositorBridge) {
+ auto pid = compositorBridge->OtherPid();
+ ipc::SharedMemoryBasic::WaitForTextures(pid, aSerials);
+ }
+}
+
+void
ShadowLayerForwarder::ReleaseLayer(const LayerHandle& aHandle)
{
if (!IPCOpen()) {
return;
}
Unused << mShadowManager->SendReleaseLayer(aHandle);
}
--- a/gfx/layers/ipc/ShadowLayers.h
+++ b/gfx/layers/ipc/ShadowLayers.h
@@ -116,16 +116,17 @@ class Transaction;
* compositables and textures can be manipulated, and does not always originate
* from the content thread. (See CompositableForwarder.h and ImageBridgeChild.h)
*/
class ShadowLayerForwarder final : public LayersIPCActor
, public CompositableForwarder
, public LegacySurfaceDescriptorAllocator
{
+ typedef mozilla::ipc::SharedMemoryBasic SharedMemoryBasic;
friend class ClientLayerManager;
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ShadowLayerForwarder, override);
/**
* Setup the IPDL actor for aCompositable to be part of layers
* transactions.
@@ -356,16 +357,18 @@ public:
uint32_t aCaps,
SurfaceDescriptor* aBuffer) override;
virtual void DestroySurfaceDescriptor(SurfaceDescriptor* aSurface) override;
virtual void UpdateFwdTransactionId() override;
virtual uint64_t GetFwdTransactionId() override;
+ void SyncTextures(const nsTArray<uint64_t>& aSerials);
+
void ReleaseLayer(const LayerHandle& aHandle);
bool InForwarderThread() override {
return NS_IsMainThread();
}
PaintTiming& GetPaintTiming() {
return mPaintTiming;
--- a/gfx/layers/opengl/TextureHostOGL.cpp
+++ b/gfx/layers/opengl/TextureHostOGL.cpp
@@ -311,17 +311,17 @@ GLTextureSource::IsValid() const
////////////////////////////////////////////////////////////////////////
// DirectMapTextureSource
DirectMapTextureSource::DirectMapTextureSource(TextureSourceProvider* aProvider,
gfx::DataSourceSurface* aSurface)
: GLTextureSource(aProvider,
0,
- LOCAL_GL_TEXTURE_2D,
+ LOCAL_GL_TEXTURE_RECTANGLE_ARB,
aSurface->GetSize(),
aSurface->GetFormat())
, mSync(0)
{
MOZ_ASSERT(aSurface);
UpdateInternal(aSurface, nullptr, nullptr, true);
}
@@ -345,48 +345,48 @@ DirectMapTextureSource::Update(gfx::Data
}
return UpdateInternal(aSurface, aDestRegion, aSrcOffset, false);
}
void
DirectMapTextureSource::Sync()
{
- if (mSync) {
- gl()->MakeCurrent();
- gl()->fClientWaitSync(mSync, LOCAL_GL_SYNC_FLUSH_COMMANDS_BIT, LOCAL_GL_TIMEOUT_IGNORED);
+ gl()->MakeCurrent();
+ if (!gl()->IsDestroyed()) {
+ gl()->fFinishObjectAPPLE(LOCAL_GL_TEXTURE, mTextureHandle);
}
}
bool
DirectMapTextureSource::UpdateInternal(gfx::DataSourceSurface* aSurface,
nsIntRegion* aDestRegion,
gfx::IntPoint* aSrcOffset,
bool aInit)
{
gl()->MakeCurrent();
if (aInit) {
+ gl()->fEnable(LOCAL_GL_TEXTURE_RECTANGLE_ARB);
+
gl()->fGenTextures(1, &mTextureHandle);
- gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, mTextureHandle);
+ gl()->fBindTexture(LOCAL_GL_TEXTURE_RECTANGLE_ARB, mTextureHandle);
- // APPLE_texture_range
- // TODO: test with LOCAL_GL_STORAGE_SHARED_APPLE
- gl()->fTextureRangeAPPLE(LOCAL_GL_TEXTURE_2D,
+ gl()->fTextureRangeAPPLE(LOCAL_GL_TEXTURE_RECTANGLE_ARB,
aSurface->Stride() * aSurface->GetSize().height,
aSurface->GetData());
- gl()->fTexParameteri(LOCAL_GL_TEXTURE_2D,
+ gl()->fTexParameteri(LOCAL_GL_TEXTURE_RECTANGLE_ARB,
LOCAL_GL_TEXTURE_STORAGE_HINT_APPLE,
- LOCAL_GL_STORAGE_CACHED_APPLE);
+ LOCAL_GL_STORAGE_SHARED_APPLE);
- gl()->fTexParameteri(LOCAL_GL_TEXTURE_2D, LOCAL_GL_TEXTURE_WRAP_S, LOCAL_GL_CLAMP_TO_EDGE);
- gl()->fTexParameteri(LOCAL_GL_TEXTURE_2D, LOCAL_GL_TEXTURE_WRAP_T, LOCAL_GL_CLAMP_TO_EDGE);
+ gl()->fTexParameteri(LOCAL_GL_TEXTURE_RECTANGLE_ARB, LOCAL_GL_TEXTURE_WRAP_S, LOCAL_GL_CLAMP_TO_EDGE);
+ gl()->fTexParameteri(LOCAL_GL_TEXTURE_RECTANGLE_ARB, LOCAL_GL_TEXTURE_WRAP_T, LOCAL_GL_CLAMP_TO_EDGE);
- gl()->fBindTexture(LOCAL_GL_TEXTURE_2D, 0);
+ gl()->fBindTexture(LOCAL_GL_TEXTURE_RECTANGLE_ARB, 0);
}
MOZ_ASSERT(mTextureHandle);
// APPLE_client_storage
gl()->fPixelStorei(LOCAL_GL_UNPACK_CLIENT_STORAGE_APPLE, LOCAL_GL_TRUE);
nsIntRegion destRegion = aDestRegion ? *aDestRegion
@@ -399,23 +399,17 @@ DirectMapTextureSource::UpdateInternal(g
aSurface,
destRegion,
mTextureHandle,
aSurface->GetSize(),
nullptr,
aInit,
srcPoint,
LOCAL_GL_TEXTURE0,
- LOCAL_GL_TEXTURE_2D);
-
- // Delete the previous sync object.
- if (mSync) {
- gl()->fDeleteSync(mSync);
- }
- mSync = gl()->fFenceSync(LOCAL_GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
+ LOCAL_GL_TEXTURE_RECTANGLE_ARB);
gl()->fPixelStorei(LOCAL_GL_UNPACK_CLIENT_STORAGE_APPLE, LOCAL_GL_FALSE);
return true;
}
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// SurfaceTextureHost
--- a/ipc/glue/SharedMemory.h
+++ b/ipc/glue/SharedMemory.h
@@ -4,16 +4,17 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_ipc_SharedMemory_h
#define mozilla_ipc_SharedMemory_h
#include "nsDebug.h"
#include "nsISupportsImpl.h" // NS_INLINE_DECL_REFCOUNTING
+#include "nsTArray.h"
#include "mozilla/Attributes.h"
#include "base/process.h"
#include "chrome/common/ipc_message_utils.h"
//
// This is a low-level wrapper around platform shared memory. Don't
// use it directly; use Shmem allocated through IPDL interfaces.
--- a/ipc/glue/SharedMemoryBasic_mach.h
+++ b/ipc/glue/SharedMemoryBasic_mach.h
@@ -4,16 +4,17 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_ipc_SharedMemoryBasic_mach_h
#define mozilla_ipc_SharedMemoryBasic_mach_h
#include "base/file_descriptor_posix.h"
#include "base/process.h"
+#include "nsTArray.h"
#include "SharedMemory.h"
#include <mach/port.h>
#ifdef FUZZING
#include "SharedMemoryFuzzer.h"
#endif
@@ -37,16 +38,20 @@ public:
MachPortSender* send_port,
ReceivePort* send_port_ack,
bool pidIsParent);
static void CleanupForPid(pid_t pid);
static void Shutdown();
+ static bool WaitForTextures(base::ProcessId aProcessId, const nsTArray<uint64_t>& aTextureIds);
+ static void SetTexturesLocked(pid_t pid, const nsTArray<uint64_t>& textureIds);
+ static void SetTexturesUnlocked(pid_t pid, const nsTArray<uint64_t>& textureIds);
+
SharedMemoryBasic();
virtual bool SetHandle(const Handle& aHandle, OpenRights aRights) override;
virtual bool Create(size_t aNbytes) override;
virtual bool Map(size_t nBytes) override;
@@ -72,16 +77,17 @@ public:
}
virtual bool IsHandleValid(const Handle &aHandle) const override;
virtual bool ShareToProcess(base::ProcessId aProcessId,
Handle* aNewHandle) override;
+
private:
~SharedMemoryBasic();
void Unmap();
mach_port_t mPort;
// Pointer to mapped region, null if unmapped.
void *mMemory;
// Access rights to map an existing region with.
--- a/ipc/glue/SharedMemoryBasic_mach.mm
+++ b/ipc/glue/SharedMemoryBasic_mach.mm
@@ -1,16 +1,17 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: sw=2 ts=8 et :
*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <map>
+#include <set>
#include <mach/vm_map.h>
#include <mach/mach_port.h>
#if defined(XP_IOS)
#include <mach/vm_map.h>
#define mach_vm_address_t vm_address_t
#define mach_vm_allocate vm_allocate
#define mach_vm_deallocate vm_deallocate
@@ -24,25 +25,27 @@
#include <pthread.h>
#include <unistd.h>
#include "SharedMemoryBasic.h"
#include "chrome/common/mach_ipc_mac.h"
#include "mozilla/IntegerPrintfMacros.h"
#include "mozilla/Printf.h"
#include "mozilla/StaticMutex.h"
+#include "mozilla/Monitor.h"
#ifdef DEBUG
#define LOG_ERROR(str, args...) \
PR_BEGIN_MACRO \
mozilla::SmprintfPointer msg = mozilla::Smprintf(str, ## args); \
NS_WARNING(msg.get()); \
PR_END_MACRO
#else
-#define LOG_ERROR(str, args...) do { /* nothing */ } while(0)
+#define LOG_ERROR(str, args...) \
+  do { /* nothing */ } while(0)
#endif
#define CHECK_MACH_ERROR(kr, msg) \
PR_BEGIN_MACRO \
if (kr != KERN_SUCCESS) { \
LOG_ERROR("%s %s (%x)\n", msg, mach_error_string(kr), kr); \
return false; \
} \
@@ -83,29 +86,35 @@
namespace mozilla {
namespace ipc {
struct MemoryPorts {
MachPortSender* mSender;
ReceivePort* mReceiver;
+ uint64_t mGenerationId;
+
MemoryPorts() = default;
MemoryPorts(MachPortSender* sender, ReceivePort* receiver)
- : mSender(sender), mReceiver(receiver) {}
+ : mSender(sender), mReceiver(receiver), mGenerationId(0) {}
};
+static StaticMutex gGenerationMonitorMutex;
+static std::map<pid_t, std::set<uint64_t>> gProcessTextureSerials;
+static Monitor* gGenerationMonitor;
+
// Protects gMemoryCommPorts and gThreads.
static StaticMutex gMutex;
-
static std::map<pid_t, MemoryPorts> gMemoryCommPorts;
enum {
kGetPortsMsg = 1,
kSharePortsMsg,
+ kWaitForTexturesMsg,
kReturnIdMsg,
kReturnPortsMsg,
kShutdownMsg,
kCleanupMsg,
};
const int kTimeout = 1000;
const int kLongTimeout = 60 * kTimeout;
@@ -129,18 +138,37 @@ struct ListeningThread {
: mThread(thread), mPorts(ports) {}
};
struct SharePortsReply {
uint64_t serial;
mach_port_t port;
};
+struct WaitForTexturesReply {
+ bool success;
+};
+
+struct WaitForTexturesRequest {
+ pid_t pid;
+};
+
std::map<pid_t, ListeningThread> gThreads;
+std::set<uint64_t>* GetGenerationForProcess(pid_t pid)
+{
+ gGenerationMonitorMutex.AssertCurrentThreadOwns();
+
+ if (gProcessTextureSerials.find(pid) == gProcessTextureSerials.end()) {
+ gProcessTextureSerials[pid] = std::set<uint64_t>();
+ }
+
+ return &gProcessTextureSerials.at(pid);
+}
+
static void *
PortServerThread(void *argument);
static void
SetupMachMemory(pid_t pid,
ReceivePort* listen_port,
MachPortSender* listen_port_ack,
@@ -151,16 +179,20 @@ SetupMachMemory(pid_t pid,
if (pidIsParent) {
gParentPid = pid;
}
auto* listen_ports = new MemoryPorts(listen_port_ack, listen_port);
pthread_t thread;
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+ if (!gGenerationMonitor) {
+ gGenerationMonitor = new Monitor("gGenerationMonitor");
+ }
+
int err = pthread_create(&thread, &attr, PortServerThread, listen_ports);
if (err) {
LOG_ERROR("pthread_create failed with %x\n", err);
return;
}
gMutex.AssertCurrentThreadOwns();
gThreads[pid] = ListeningThread(thread, listen_ports);
@@ -237,17 +269,17 @@ GetMemoryPortsForPid(pid_t pid)
{
gMutex.AssertCurrentThreadOwns();
if (gMemoryCommPorts.find(pid) == gMemoryCommPorts.end()) {
// We don't have the ports open to communicate with that pid, so we're going to
// ask our parent process over IPC to set them up for us.
if (gParentPid == 0) {
// If we're the top level parent process, we have no parent to ask.
- LOG_ERROR("request for ports for pid %d, but we're the chrome process\n", pid);
+ // Expected in the chrome process, which has no parent to ask for ports
+ // (e.g. on the WaitForTextures path) — stay quiet instead of logging an
+ // error here.
return nullptr;
}
const MemoryPorts& parent = gMemoryCommPorts[gParentPid];
// Create two receiving ports in this process to send to the parent. One will be used for
// for listening for incoming memory to be shared, the other for getting the Handle of
// memory we share to the other process.
auto* ports_in_receiver = new ReceivePort();
@@ -272,16 +304,47 @@ GetMemoryPortsForPid(pid_t pid)
ports_out_sender,
ports_out_receiver,
false);
MOZ_ASSERT(gMemoryCommPorts.find(pid) != gMemoryCommPorts.end());
}
return &gMemoryCommPorts.at(pid);
}
+// Blocks until none of |textureIds| (length |textureIdsLength|) remain in
+// |pid|'s locked-texture set, re-checking on every monitor notification.
+// Returns true once all ids are unlocked, false if a single wait times out.
+bool
+WaitForTextureIdsToUnlock(pid_t pid, uint64_t* textureIds, uint32_t textureIdsLength)
+{
+  if (!gGenerationMonitor) {
+    // SetTexturesLocked can only run after SetupMachMemory created the
+    // monitor, so no monitor means no id has ever been locked.
+    return true;
+  }
+
+  // The set holds the ids that are still *locked*; we wait for ours to
+  // disappear from it.
+  std::set<uint64_t>* lockedTextureIds;
+  {
+    StaticMutexAutoLock smal(gGenerationMonitorMutex);
+    lockedTextureIds = GetGenerationForProcess(pid);
+  }
+
+  MonitorAutoLock lock(*gGenerationMonitor);
+
+  while (true) {
+    bool allCleared = true;
+    for (uint32_t i = 0; i < textureIdsLength; ++i) {
+      if (lockedTextureIds->count(textureIds[i])) {
+        allCleared = false;
+        break;
+      }
+    }
+
+    if (allCleared) {
+      return true;
+    }
+
+    // NOTE(review): the timeout restarts after every wakeup, so repeated
+    // notifications can stretch the total wait beyond kTimeout.
+    if (lock.Wait(TimeDuration::FromMilliseconds(kTimeout)) == CVStatus::Timeout) {
+      return false;
+    }
+  }
+}
+
// We just received a port representing a region of shared memory, reply to
// the process that set it with the mach_port_t that represents it in this process.
// That will be the Handle to be shared over normal IPC
void
HandleSharePortsMessage(MachReceiveMessage* rmsg, MemoryPorts* ports)
{
mach_port_t port = rmsg->GetTranslatedPort(0);
uint64_t* serial = reinterpret_cast<uint64_t*>(rmsg->GetData());
@@ -292,16 +355,40 @@ HandleSharePortsMessage(MachReceiveMessa
replydata.serial = *serial;
msg.SetData(&replydata, sizeof(SharePortsReply));
kern_return_t err = ports->mSender->SendMessage(msg, kTimeout);
if (KERN_SUCCESS != err) {
LOG_ERROR("SendMessage failed 0x%x %s\n", err, mach_error_string(err));
}
}
+// Handles kWaitForTexturesMsg: waits until the texture ids listed in the
+// message are unlocked for the requesting pid, then replies with a
+// WaitForTexturesReply carrying the outcome.
+void
+HandleWaitForTexturesMessage(MachReceiveMessage* rmsg, MemoryPorts* ports)
+{
+  bool success = false;
+  uint32_t dataLength = rmsg->GetDataLength();
+  // Validate the payload before trusting it: it must hold at least the
+  // fixed request header, and the tail must be whole uint64_t ids —
+  // otherwise the length subtraction below would underflow.
+  if (dataLength < sizeof(WaitForTexturesRequest) ||
+      (dataLength - sizeof(WaitForTexturesRequest)) % sizeof(uint64_t) != 0) {
+    LOG_ERROR("Improperly formatted message\n");
+  } else {
+    WaitForTexturesRequest* req = reinterpret_cast<WaitForTexturesRequest*>(rmsg->GetData());
+    uint64_t* textureIds = (uint64_t*)(req + 1);
+    uint32_t textureIdsLength = (dataLength - sizeof(WaitForTexturesRequest)) / sizeof(uint64_t);
+
+    success = WaitForTextureIdsToUnlock(req->pid, textureIds, textureIdsLength);
+    if (!success) {
+      LOG_ERROR("Waiting for textures to unlock failed.\n");
+    }
+  }
+
+  // Always reply so the sender's WaitForMessage can unblock.
+  MachSendMessage msg(kReturnIdMsg);
+  WaitForTexturesReply replydata;
+  replydata.success = success;
+  msg.SetData(&replydata, sizeof(WaitForTexturesReply));
+  kern_return_t err = ports->mSender->SendMessage(msg, kTimeout);
+  if (KERN_SUCCESS != err) {
+    LOG_ERROR("SendMessage failed 0x%x %s\n", err, mach_error_string(err));
+  }
+}
+
// We were asked by another process to get communications ports to some process. Return
// those ports via an IPC message.
bool
SendReturnPortsMsg(MachPortSender* sender,
mach_port_t raw_ports_in_sender,
mach_port_t raw_ports_out_sender)
{
MachSendMessage getPortsMsg(kReturnPortsMsg);
@@ -402,60 +489,96 @@ PortServerThread(void *argument)
continue;
}
if (rmsg.GetMessageID() == kShutdownMsg) {
delete ports->mSender;
delete ports->mReceiver;
delete ports;
return nullptr;
}
- StaticMutexAutoLock smal(gMutex);
- switch (rmsg.GetMessageID()) {
- case kSharePortsMsg:
- HandleSharePortsMessage(&rmsg, ports);
- break;
- case kGetPortsMsg:
- HandleGetPortsMessage(&rmsg, ports);
- break;
- case kCleanupMsg:
- if (gParentPid == 0) {
- LOG_ERROR("Cleanup message not valid for parent process");
- continue;
- }
+ if (rmsg.GetMessageID() == kWaitForTexturesMsg) {
+ HandleWaitForTexturesMessage(&rmsg, ports);
+ } else {
+ StaticMutexAutoLock smal(gMutex);
+ switch (rmsg.GetMessageID()) {
+ case kSharePortsMsg:
+ HandleSharePortsMessage(&rmsg, ports);
+ break;
+ case kGetPortsMsg:
+ HandleGetPortsMessage(&rmsg, ports);
+ break;
+ case kCleanupMsg:
+ if (gParentPid == 0) {
+ LOG_ERROR("Cleanup message not valid for parent process");
+ continue;
+ }
- pid_t* pid;
- if (rmsg.GetDataLength() != sizeof(pid_t)) {
- LOG_ERROR("Improperly formatted message\n");
- continue;
+ pid_t* pid;
+ if (rmsg.GetDataLength() != sizeof(pid_t)) {
+ LOG_ERROR("Improperly formatted message\n");
+ continue;
+ }
+ pid = reinterpret_cast<pid_t*>(rmsg.GetData());
+ SharedMemoryBasic::CleanupForPid(*pid);
+ break;
+ default:
+ LOG_ERROR("Unknown message\n");
}
- pid = reinterpret_cast<pid_t*>(rmsg.GetData());
- SharedMemoryBasic::CleanupForPid(*pid);
- break;
- default:
- LOG_ERROR("Unknown message\n");
}
}
}
+// Marks every id in |textureIds| as locked for process |pid|. Takes the map
+// mutex (for gProcessTextureSerials) and the monitor (so waiters see a
+// consistent set). NOTE(review): dereferences gGenerationMonitor without a
+// null check — confirm callers cannot run before SetupMachMemory creates it.
+void SharedMemoryBasic::SetTexturesLocked(pid_t pid, const nsTArray<uint64_t>& textureIds)
+{
+ StaticMutexAutoLock smal(gGenerationMonitorMutex);
+ MonitorAutoLock mal(*gGenerationMonitor);
+ std::set<uint64_t>* textureGenerations = GetGenerationForProcess(pid);
+ for (uint64_t textureId : textureIds) {
+ textureGenerations->insert(textureId);
+ }
+}
+
+// Removes |textureIds| from |pid|'s locked set and wakes threads blocked in
+// WaitForTextureIdsToUnlock — but only when at least one id was actually
+// erased, to avoid spurious NotifyAll wakeups.
+// NOTE(review): like SetTexturesLocked, assumes gGenerationMonitor is
+// non-null — confirm ordering against SetupMachMemory/Shutdown.
+void SharedMemoryBasic::SetTexturesUnlocked(pid_t pid, const nsTArray<uint64_t>& textureIds)
+{
+ StaticMutexAutoLock smal(gGenerationMonitorMutex);
+ MonitorAutoLock mal(*gGenerationMonitor);
+ std::set<uint64_t>* textureGenerations = GetGenerationForProcess(pid);
+ bool oneErased = false;
+ for (uint64_t textureId : textureIds) {
+ if (textureGenerations->erase(textureId)) {
+ oneErased = true;
+ }
+ }
+ if (oneErased) {
+ gGenerationMonitor->NotifyAll();
+ }
+}
+
void
SharedMemoryBasic::SetupMachMemory(pid_t pid,
ReceivePort* listen_port,
MachPortSender* listen_port_ack,
MachPortSender* send_port,
ReceivePort* send_port_ack,
bool pidIsParent)
{
StaticMutexAutoLock smal(gMutex);
mozilla::ipc::SetupMachMemory(pid, listen_port, listen_port_ack, send_port, send_port_ack, pidIsParent);
}
void
SharedMemoryBasic::Shutdown()
{
StaticMutexAutoLock smal(gMutex);
+ StaticMutexAutoLock generationSmal(gGenerationMonitorMutex);
+
+ if (gGenerationMonitor) {
+ gGenerationMonitor->NotifyAll();
+ delete gGenerationMonitor;
+ // Null the pointer so later SetupMachMemory / SetTextures* calls don't
+ // touch a dangling Monitor.
+ // NOTE(review): deleting while another thread is still blocked in
+ // Wait() would remain unsafe; confirm Shutdown runs after all waiters.
+ gGenerationMonitor = nullptr;
+ }
for (auto& thread : gThreads) {
MachSendMessage shutdownMsg(kShutdownMsg);
thread.second.mPorts->mReceiver->SendMessageToSelf(shutdownMsg, kTimeout);
}
gThreads.clear();
for (auto& memoryCommPort : gMemoryCommPorts) {
@@ -466,16 +589,29 @@ SharedMemoryBasic::Shutdown()
}
void
SharedMemoryBasic::CleanupForPid(pid_t pid)
{
if (gThreads.find(pid) == gThreads.end()) {
return;
}
+
+ std::set<uint64_t>* freedTextureIds;
+ {
+ StaticMutexAutoLock smal(gGenerationMonitorMutex);
+ freedTextureIds = GetGenerationForProcess(pid);
+ }
+
+ {
+ MonitorAutoLock lock(*gGenerationMonitor);
+ freedTextureIds->clear();
+ gGenerationMonitor->NotifyAll();
+ }
+
const ListeningThread& listeningThread = gThreads[pid];
MachSendMessage shutdownMsg(kShutdownMsg);
kern_return_t ret = listeningThread.mPorts->mReceiver->SendMessageToSelf(shutdownMsg, kTimeout);
if (ret != KERN_SUCCESS) {
LOG_ERROR("sending shutdown msg failed %s %x\n", mach_error_string(ret), ret);
}
gThreads.erase(pid);
@@ -647,16 +783,84 @@ SharedMemoryBasic::ShareToProcess(base::
if (serial_check != my_serial) {
LOG_ERROR("Serials do not match up: %" PRIu64 " vs %" PRIu64 "", serial_check, my_serial);
return false;
}
*aNewHandle = id;
return true;
}
+// Blocks until |generationIds| are unlocked in process |pid|. For our own
+// pid this waits directly on the local monitor; otherwise it sends a
+// kWaitForTexturesMsg to |pid| and blocks on the reply. Returns false on
+// timeout, allocation failure, or any messaging error.
+bool
+SharedMemoryBasic::WaitForTextures(base::ProcessId pid, const nsTArray<uint64_t>& generationIds)
+{
+  if (pid == getpid()) {
+    // Local wait: no need to heap-copy the ids, the waiter only reads
+    // them, so casting away const on the array storage is safe.
+    bool success = WaitForTextureIdsToUnlock(pid,
+                                             const_cast<uint64_t*>(generationIds.Elements()),
+                                             generationIds.Length());
+    if (!success) {
+      LOG_ERROR("Failed waiting for textures to unlock.\n");
+    }
+    return success;
+  }
+
+  // NOTE(review): gMutex stays held across the blocking send/receive below,
+  // stalling all other shared-memory traffic for up to kTimeout.
+  StaticMutexAutoLock smal(gMutex);
+
+  MemoryPorts* ports = GetMemoryPortsForPid(pid);
+  if (!ports) {
+    LOG_ERROR("Unable to get ports for process.\n");
+    return false;
+  }
+  MachSendMessage smsg(kWaitForTexturesMsg);
+  // Wire format: fixed WaitForTexturesRequest header, ids inline after it.
+  size_t messageSize = sizeof(WaitForTexturesRequest) + generationIds.Length() * sizeof(uint64_t);
+  char* messageData = (char*)malloc(messageSize);
+  if (!messageData) {
+    LOG_ERROR("Failed to allocate %zu bytes for message\n", messageSize);
+    return false;
+  }
+  WaitForTexturesRequest* req = (WaitForTexturesRequest*)messageData;
+  uint64_t* reqGenerationIds = (uint64_t*)(req + 1);
+
+  for (uint32_t i = 0; i < generationIds.Length(); ++i) {
+    reqGenerationIds[i] = generationIds[i];
+  }
+
+  req->pid = getpid();
+  bool dataWasSet = smsg.SetData(req, messageSize);
+  free(messageData);
+
+  if (!dataWasSet) {
+    LOG_ERROR("Data was too large: %zu\n", messageSize);
+    return false;
+  }
+
+  kern_return_t err = ports->mSender->SendMessage(smsg, kTimeout);
+  if (err != KERN_SUCCESS) {
+    LOG_ERROR("sending port failed %s %x\n", mach_error_string(err), err);
+    return false;
+  }
+  MachReceiveMessage msg;
+  err = ports->mReceiver->WaitForMessage(&msg, kTimeout);
+  if (err != KERN_SUCCESS) {
+    LOG_ERROR("short timeout didn't get an id %s %x\n", mach_error_string(err), err);
+    return false;
+  }
+  if (msg.GetDataLength() != sizeof(WaitForTexturesReply)) {
+    LOG_ERROR("Improperly formatted reply\n");
+    return false;
+  }
+
+  WaitForTexturesReply* msg_data = reinterpret_cast<WaitForTexturesReply*>(msg.GetData());
+  if (!msg_data->success) {
+    LOG_ERROR("Failed waiting for textures to unlock.\n");
+    return false;
+  }
+
+  return true;
+}
+
void
SharedMemoryBasic::Unmap()
{
if (!mMemory) {
return;
}
vm_address_t address = toVMAddress(mMemory);
kern_return_t kr = vm_deallocate(mach_task_self(), address, round_page(mMappedSize));