Merge m-c to b2g-inbound.
author Ryan VanderMeulen <ryanvm@gmail.com>
Wed, 19 Feb 2014 20:27:43 -0500
changeset 169996 7c01cbcd31ebdeeb74233349d9b76191ee0d07ad
parent 169995 bfd52575b6d39a61fbab1387c3f6d846ea2949fe (current diff)
parent 169965 660b626089513a6bcbc018880a34cb54615eadb6 (diff)
child 169997 4b54d3adf4c2f0afd4e6ae44b60fb7702c619ec5
push id 270
push user pvanderbeken@mozilla.com
push date Thu, 06 Mar 2014 09:24:21 +0000
milestone 30.0a1
--- a/gfx/layers/ipc/ShadowLayerUtilsGralloc.cpp
+++ b/gfx/layers/ipc/ShadowLayerUtilsGralloc.cpp
@@ -286,26 +286,16 @@ GrallocBufferActor::Create(const gfx::In
   uint32_t format = aFormat;
   uint32_t usage = aUsage;
 
   if (format == 0 || usage == 0) {
     printf_stderr("GrallocBufferActor::Create -- format and usage must be non-zero");
     return actor;
   }
 
-  // If the requested size is too big (i.e. exceeds the commonly used max GL texture size)
-  // then we risk OOMing the parent process. It's better to just deny the allocation and
-  // kill the child process, which is what the following code does.
-  // TODO: actually use GL_MAX_TEXTURE_SIZE instead of hardcoding 4096
-  if (aSize.width > 4096 || aSize.height > 4096) {
-    printf_stderr("GrallocBufferActor::Create -- requested gralloc buffer is too big. Killing child instead.");
-    delete actor;
-    return nullptr;
-  }
-
   sp<GraphicBuffer> buffer(new GraphicBuffer(aSize.width, aSize.height, format, usage));
   if (buffer->initCheck() != OK)
     return actor;
 
   size_t bpp = BytesPerPixelForPixelFormat(format);
   actor->mAllocBytes = aSize.width * aSize.height * bpp;
   GrallocReporter::sAmount += actor->mAllocBytes;
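
For reference, the TODO in the removed block suggested querying GL_MAX_TEXTURE_SIZE at runtime instead of hardcoding 4096. A minimal sketch of that idea is below; it assumes a current GL context is bound at this point (which the actor code may not guarantee, and Gecko would normally route the query through its own GL context wrapper). The variable names are illustrative, not part of the patch:

  // Ask the driver for its real texture-size limit instead of assuming 4096.
  GLint maxTextureSize = 0;
  glGetIntegerv(GL_MAX_TEXTURE_SIZE, &maxTextureSize);
  if (maxTextureSize > 0 &&
      (aSize.width > maxTextureSize || aSize.height > maxTextureSize)) {
    printf_stderr("GrallocBufferActor::Create -- requested gralloc buffer exceeds GL_MAX_TEXTURE_SIZE");
    delete actor;
    return nullptr;
  }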