Bug 1501319 - Update webrender to commit e7d340b0f39bbd0046e983a75245bdde54013cdb. r=kats
author: WR Updater Bot <graphics-team@mozilla.staktrace.com>
date: Wed, 24 Oct 2018 00:48:58 +0000
changeset: 491035 61b89032c551c59ab82f64689c1b271c5c0d2263
parent: 491034 9f15c79e58d135c7e710210a9d0242eb42d9f5bc
child: 491036 faaec607a74f01e81bc5a6b54d0c4141ad5cf8f5
push id: 247
push user: fmarier@mozilla.com
push date: Sat, 27 Oct 2018 01:06:44 +0000
reviewers: kats
bugs: 1501319
milestone: 65.0a1
Bug 1501319 - Update webrender to commit e7d340b0f39bbd0046e983a75245bdde54013cdb. r=kats

Differential Revision: https://phabricator.services.mozilla.com/D9610
gfx/webrender/src/device/gl.rs
gfx/webrender/src/gpu_glyph_renderer.rs
gfx/webrender/src/renderer.rs
gfx/webrender/src/tiling.rs
gfx/webrender_bindings/revision.txt
--- a/gfx/webrender/src/device/gl.rs
+++ b/gfx/webrender/src/device/gl.rs
@@ -740,18 +740,18 @@ pub struct Device {
     gl: Rc<gl::Gl>,
     // device state
     bound_textures: [gl::GLuint; 16],
     bound_program: gl::GLuint,
     bound_vao: gl::GLuint,
     bound_read_fbo: FBOId,
     bound_draw_fbo: FBOId,
     program_mode_id: UniformLocation,
-    default_read_fbo: gl::GLuint,
-    default_draw_fbo: gl::GLuint,
+    default_read_fbo: FBOId,
+    default_draw_fbo: FBOId,
 
     device_pixel_ratio: f32,
     upload_method: UploadMethod,
 
     // HW or API capabilities
     #[cfg(feature = "debug_renderer")]
     capabilities: Capabilities,
 
@@ -783,41 +783,71 @@ pub struct Device {
     /// otherwise are on some drivers, particularly ANGLE). If it's not
     /// supported, we fall back to glTexImage*.
     supports_texture_storage: bool,
 
     // GL extensions
     extensions: Vec<String>,
 }
 
-/// Contains the parameters necessary to bind a texture-backed draw target.
+/// Contains the parameters necessary to bind a draw target.
 #[derive(Clone, Copy)]
-pub struct TextureDrawTarget<'a> {
-    /// The target texture.
-    pub texture: &'a Texture,
-    /// The slice within the texture array to draw to.
-    pub layer: LayerIndex,
-    /// Whether to draw with the texture's associated depth target.
-    pub with_depth: bool,
+pub enum DrawTarget<'a> {
+    /// Use the device's default draw target, with the provided dimensions,
+    /// which are used to set the viewport.
+    Default(DeviceUintSize),
+    /// Use the provided texture.
+    Texture {
+        /// The target texture.
+        texture: &'a Texture,
+        /// The slice within the texture array to draw to.
+        layer: LayerIndex,
+        /// Whether to draw with the texture's associated depth target.
+        with_depth: bool,
+    },
+}
+
+impl<'a> DrawTarget<'a> {
+    /// Returns true if this draw target corresponds to the default framebuffer.
+    pub fn is_default(&self) -> bool {
+        match *self {
+            DrawTarget::Default(..) => true,
+            _ => false,
+        }
+    }
+
+    /// Returns the dimensions of this draw-target.
+    pub fn dimensions(&self) -> DeviceUintSize {
+        match *self {
+            DrawTarget::Default(d) => d,
+            DrawTarget::Texture { texture, .. } => texture.get_dimensions(),
+        }
+    }
 }
 
 /// Contains the parameters necessary to bind a read target.
 #[derive(Clone, Copy)]
-pub struct TextureReadTarget<'a> {
-    /// The source texture.
-    pub texture: &'a Texture,
-    /// The slice within the texture array to read from.
-    pub layer: LayerIndex,
+pub enum ReadTarget<'a> {
+    /// Use the device's default read target.
+    Default,
+    /// Use the provided texture.
+    Texture {
+        /// The source texture.
+        texture: &'a Texture,
+        /// The slice within the texture array to read from.
+        layer: LayerIndex,
+    }
 }
 
-impl<'a> From<TextureDrawTarget<'a>> for TextureReadTarget<'a> {
-    fn from(t: TextureDrawTarget<'a>) -> Self {
-        TextureReadTarget {
-            texture: t.texture,
-            layer: t.layer,
+impl<'a> From<DrawTarget<'a>> for ReadTarget<'a> {
+    fn from(t: DrawTarget<'a>) -> Self {
+        match t {
+            DrawTarget::Default(..) => ReadTarget::Default,
+            DrawTarget::Texture { texture, layer, .. } =>
+                ReadTarget::Texture { texture, layer },
         }
     }
 }
 
 impl Device {
     pub fn new(
         gl: Rc<gl::Gl>,
         resource_override_path: Option<PathBuf>,
@@ -890,18 +920,18 @@ impl Device {
             depth_targets: FastHashMap::default(),
 
             bound_textures: [0; 16],
             bound_program: 0,
             bound_vao: 0,
             bound_read_fbo: FBOId(0),
             bound_draw_fbo: FBOId(0),
             program_mode_id: UniformLocation::INVALID,
-            default_read_fbo: 0,
-            default_draw_fbo: 0,
+            default_read_fbo: FBOId(0),
+            default_draw_fbo: FBOId(0),
 
             max_texture_size,
             renderer_name,
             cached_programs,
             frame_id: FrameId(0),
             extensions,
             supports_texture_storage,
         }
@@ -990,22 +1020,22 @@ impl Device {
         debug_assert!(!self.inside_frame);
         self.inside_frame = true;
 
         // Retrieve the currently set FBO.
         let mut default_read_fbo = [0];
         unsafe {
             self.gl.get_integer_v(gl::READ_FRAMEBUFFER_BINDING, &mut default_read_fbo);
         }
-        self.default_read_fbo = default_read_fbo[0] as gl::GLuint;
+        self.default_read_fbo = FBOId(default_read_fbo[0] as gl::GLuint);
         let mut default_draw_fbo = [0];
         unsafe {
             self.gl.get_integer_v(gl::DRAW_FRAMEBUFFER_BINDING, &mut default_draw_fbo);
         }
-        self.default_draw_fbo = default_draw_fbo[0] as gl::GLuint;
+        self.default_draw_fbo = FBOId(default_draw_fbo[0] as gl::GLuint);
 
         // Texture state
         for i in 0 .. self.bound_textures.len() {
             self.bound_textures[i] = 0;
             self.gl.active_texture(gl::TEXTURE0 + i as gl::GLuint);
             self.gl.bind_texture(gl::TEXTURE_2D, 0);
         }
 
@@ -1014,18 +1044,18 @@ impl Device {
         self.program_mode_id = UniformLocation::INVALID;
         self.gl.use_program(0);
 
         // Vertex state
         self.bound_vao = 0;
         self.gl.bind_vertex_array(0);
 
         // FBO state
-        self.bound_read_fbo = FBOId(self.default_read_fbo);
-        self.bound_draw_fbo = FBOId(self.default_draw_fbo);
+        self.bound_read_fbo = self.default_read_fbo;
+        self.bound_draw_fbo = self.default_draw_fbo;
 
         // Pixel op state
         self.gl.pixel_store_i(gl::UNPACK_ALIGNMENT, 1);
         self.gl.bind_buffer(gl::PIXEL_UNPACK_BUFFER, 0);
 
         // Default is sampler 0, always
         self.gl.active_texture(gl::TEXTURE0);
 
@@ -1061,56 +1091,68 @@ impl Device {
         debug_assert!(self.inside_frame);
 
         if self.bound_read_fbo != fbo_id {
             self.bound_read_fbo = fbo_id;
             fbo_id.bind(self.gl(), FBOTarget::Read);
         }
     }
 
-    pub fn bind_read_target(&mut self, texture_target: Option<TextureReadTarget>) {
-        let fbo_id = texture_target.map_or(FBOId(self.default_read_fbo), |target| {
-            target.texture.fbos[target.layer]
-        });
+    pub fn bind_read_target(&mut self, target: ReadTarget) {
+        let fbo_id = match target {
+            ReadTarget::Default => self.default_read_fbo,
+            ReadTarget::Texture { texture, layer } => texture.fbos[layer],
+        };
 
         self.bind_read_target_impl(fbo_id)
     }
 
     fn bind_draw_target_impl(&mut self, fbo_id: FBOId) {
         debug_assert!(self.inside_frame);
 
         if self.bound_draw_fbo != fbo_id {
             self.bound_draw_fbo = fbo_id;
             fbo_id.bind(self.gl(), FBOTarget::Draw);
         }
     }
 
+    pub fn reset_read_target(&mut self) {
+        let fbo = self.default_read_fbo;
+        self.bind_read_target_impl(fbo);
+    }
+
+    pub fn reset_draw_target(&mut self) {
+        let fbo = self.default_draw_fbo;
+        self.bind_draw_target_impl(fbo);
+    }
+
     pub fn bind_draw_target(
         &mut self,
-        texture_target: Option<TextureDrawTarget>,
-        dimensions: Option<DeviceUintSize>,
+        target: DrawTarget,
     ) {
-        let fbo_id = texture_target.map_or(FBOId(self.default_draw_fbo), |target| {
-            if target.with_depth {
-                target.texture.fbos_with_depth[target.layer]
-            } else {
-                target.texture.fbos[target.layer]
+        let (fbo_id, dimensions) = match target {
+            DrawTarget::Default(d) => (self.default_draw_fbo, d),
+            DrawTarget::Texture { texture, layer, with_depth } => {
+                let dim = texture.get_dimensions();
+                if with_depth {
+                    (texture.fbos_with_depth[layer], dim)
+                } else {
+                    (texture.fbos[layer], dim)
+                }
             }
-        });
+        };
 
         self.bind_draw_target_impl(fbo_id);
-
-        if let Some(dimensions) = dimensions {
-            self.gl.viewport(
-                0,
-                0,
-                dimensions.width as _,
-                dimensions.height as _,
-            );
-        }
+        self.gl.viewport(
+            0,
+            0,
+            dimensions.width as _,
+            dimensions.height as _,
+        );
     }
 
     pub fn create_fbo_for_external_texture(&mut self, texture_id: u32) -> FBOId {
         let fbo = FBOId(self.gl.gen_framebuffers(1)[0]);
         fbo.bind(self.gl(), FBOTarget::Draw);
         self.gl.framebuffer_texture_2d(
             gl::DRAW_FRAMEBUFFER,
             gl::COLOR_ATTACHMENT0,
@@ -1415,17 +1457,17 @@ impl Device {
         debug_assert!(dst.height >= src.height);
 
         let rect = DeviceIntRect::new(DeviceIntPoint::zero(), src.get_dimensions().to_i32());
         for (read_fbo, draw_fbo) in src.fbos.iter().zip(&dst.fbos) {
             self.bind_read_target_impl(*read_fbo);
             self.bind_draw_target_impl(*draw_fbo);
             self.blit_render_target(rect, rect);
         }
-        self.bind_read_target(None);
+        self.reset_read_target();
     }
 
     /// Notifies the device that the contents of a render target are no longer
     /// needed.
     ///
     /// FIXME(bholley): We could/should invalidate the depth targets earlier
     /// than the color targets, i.e. immediately after each pass.
     pub fn invalidate_render_target(&mut self, texture: &Texture) {
@@ -2156,18 +2198,18 @@ impl Device {
             index_count,
             gl::UNSIGNED_SHORT,
             0,
             instance_count,
         );
     }
 
     pub fn end_frame(&mut self) {
-        self.bind_draw_target(None, None);
-        self.bind_read_target(None);
+        self.reset_draw_target();
+        self.reset_read_target();
 
         debug_assert!(self.inside_frame);
         self.inside_frame = false;
 
         self.gl.bind_texture(gl::TEXTURE_2D, 0);
         self.gl.use_program(0);
 
         for i in 0 .. self.bound_textures.len() {
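
For reference, a minimal sketch of how callers drive the reworked draw/read target API above. The function, its arguments, and the imports are illustrative only (not part of this patch); it mirrors the call sites updated in gpu_glyph_renderer.rs and renderer.rs below.

    use api::DeviceUintSize;
    use device::{Device, DrawTarget, ReadTarget, Texture};

    // Illustrative only: render into one layer of a texture array, set up a
    // readback from it, then finish on the default framebuffer.
    fn example_pass(device: &mut Device, texture: &Texture, framebuffer_size: DeviceUintSize) {
        // Bind layer 0 of the texture, using its depth target; the viewport
        // is taken from the texture's dimensions.
        device.bind_draw_target(DrawTarget::Texture {
            texture,
            layer: 0,
            with_depth: true,
        });

        // Read back from the same layer of the texture.
        device.bind_read_target(ReadTarget::Texture { texture, layer: 0 });

        // Switch to the default framebuffer; the provided size sets the viewport.
        device.bind_draw_target(DrawTarget::Default(framebuffer_size));

        // Drop back to the default read FBO when done.
        device.reset_read_target();
    }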
--- a/gfx/webrender/src/gpu_glyph_renderer.rs
+++ b/gfx/webrender/src/gpu_glyph_renderer.rs
@@ -2,17 +2,17 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 //! GPU glyph rasterization using Pathfinder.
 
 use api::{DeviceIntPoint, DeviceIntRect, DeviceUintSize, FontRenderMode};
 use api::{ImageFormat, TextureTarget};
 use debug_colors;
-use device::{Device, Texture, TextureDrawTarget, TextureFilter, VAO};
+use device::{DrawTarget, Device, Texture, TextureFilter, VAO};
 use euclid::{Point2D, Size2D, Transform3D, TypedVector2D, Vector2D};
 use internal_types::RenderTargetInfo;
 use pathfinder_gfx_utils::ShelfBinPacker;
 use profiler::GpuProfileTag;
 use renderer::{self, ImageBufferKind, Renderer, RendererError, RendererStats};
 use renderer::{TextureSampler, VertexArrayKind, ShaderPrecacheFlags};
 use shade::{LazilyCompiledShader, ShaderKind};
 use tiling::GlyphJob;
@@ -189,21 +189,21 @@ impl Renderer {
             1,
         );
         self.device.upload_texture_immediate(&path_info_texture, &path_info_texels);
 
         self.gpu_glyph_renderer.vector_stencil.bind(&mut self.device,
                                                     projection,
                                                     &mut self.renderer_errors);
 
-        self.device.bind_draw_target(Some(TextureDrawTarget {
+        self.device.bind_draw_target(DrawTarget::Texture {
             texture: &current_page.texture,
             layer: 0,
             with_depth: false,
-        }), Some(*target_size));
+        });
         self.device.clear_target(Some([0.0, 0.0, 0.0, 0.0]), None, None);
 
         self.device.set_blend(true);
         self.device.set_blend_mode_subpixel_pass1();
 
         let mut instance_data = vec![];
         for (path_id, &glyph_id) in glyph_indices.iter().enumerate() {
             let glyph = &glyphs[glyph_id];
--- a/gfx/webrender/src/renderer.rs
+++ b/gfx/webrender/src/renderer.rs
@@ -31,17 +31,17 @@ use api::{RenderApiSender, RenderNotifie
 use api::{channel};
 use api::DebugCommand;
 use api::channel::PayloadReceiverHelperMethods;
 use batch::{BatchKind, BatchTextures, BrushBatchKind};
 #[cfg(any(feature = "capture", feature = "replay"))]
 use capture::{CaptureConfig, ExternalCaptureImage, PlainExternalImage};
 use debug_colors;
 use device::{DepthFunction, Device, FrameId, Program, UploadMethod, Texture, PBO};
-use device::{ExternalTexture, FBOId, TextureDrawTarget, TextureReadTarget, TextureSlot};
+use device::{DrawTarget, ExternalTexture, FBOId, ReadTarget, TextureSlot};
 use device::{ShaderError, TextureFilter,
              VertexUsageHint, VAO, VBO, CustomVAO};
 use device::{ProgramCache, ReadPixelsFormat};
 #[cfg(feature = "debug_renderer")]
 use euclid::rect;
 use euclid::Transform3D;
 use frame_builder::{ChasePrimitive, FrameBuilderConfig};
 use gleam::gl;
@@ -1332,22 +1332,21 @@ impl GpuCacheTexture {
                 rows_dirty
             }
             GpuCacheBus::Scatter { ref program, ref vao, count, .. } => {
                 device.disable_depth();
                 device.set_blend(false);
                 device.bind_program(program);
                 device.bind_custom_vao(vao);
                 device.bind_draw_target(
-                    Some(TextureDrawTarget {
+                    DrawTarget::Texture {
                         texture,
                         layer: 0,
                         with_depth: false,
-                    }),
-                    Some(texture.get_dimensions()),
+                    },
                 );
                 device.draw_nonindexed_points(0, count as _);
                 0
             }
         }
     }
 }
 
@@ -2565,17 +2564,17 @@ impl Renderer {
                     });
 
                 if needs_color_clear || clear_depth_value.is_some() {
                     let clear_color = if needs_color_clear {
                         self.clear_color.map(|color| color.to_array())
                     } else {
                         None
                     };
-                    self.device.bind_draw_target(None, None);
+                    self.device.reset_draw_target();
                     self.device.enable_depth_write();
                     self.device.clear_target(clear_color, clear_depth_value, None);
                     self.device.disable_depth_write();
                 }
             }
 
             #[cfg(feature = "replay")]
             self.texture_resolver.external_images.extend(
@@ -2917,18 +2916,17 @@ impl Renderer {
             }
         }
 
         self.profile_counters.vertices.add(6 * data.len());
     }
 
     fn handle_readback_composite(
         &mut self,
-        render_target: Option<TextureDrawTarget>,
-        framebuffer_size: DeviceUintSize,
+        draw_target: DrawTarget,
         scissor_rect: Option<DeviceIntRect>,
         source: &RenderTask,
         backdrop: &RenderTask,
         readback: &RenderTask,
     ) {
         if scissor_rect.is_some() {
             self.device.disable_scissor();
         }
@@ -2950,43 +2948,44 @@ impl Renderer {
             RenderTaskKind::Picture(ref task_info) => task_info.content_origin,
             _ => panic!("bug: composite on non-picture?"),
         };
 
         // Bind the FBO to blit the backdrop to.
         // Called per-instance in case the layer (and therefore FBO)
         // changes. The device will skip the GL call if the requested
         // target is already bound.
-        let cache_draw_target = TextureDrawTarget {
+        let cache_draw_target = DrawTarget::Texture {
             texture: cache_texture,
             layer: readback_layer.0 as usize,
             with_depth: false,
         };
-        self.device.bind_draw_target(Some(cache_draw_target), None);
+        self.device.bind_draw_target(cache_draw_target);
 
         let mut src = DeviceIntRect::new(
             source_screen_origin + (backdrop_rect.origin - backdrop_screen_origin),
             readback_rect.size,
         );
         let mut dest = readback_rect.to_i32();
 
         // Need to invert the y coordinates and flip the image vertically when
         // reading back from the framebuffer.
-        if render_target.is_none() {
-            src.origin.y = framebuffer_size.height as i32 - src.size.height - src.origin.y;
+        if draw_target.is_default() {
+            src.origin.y = draw_target.dimensions().height as i32 - src.size.height - src.origin.y;
             dest.origin.y += dest.size.height;
             dest.size.height = -dest.size.height;
         }
 
-        self.device.bind_read_target(render_target.map(|r| r.into()));
+        self.device.bind_read_target(draw_target.into());
         self.device.blit_render_target(src, dest);
 
-        // Restore draw target to current pass render target + layer.
-        // Note: leaving the viewport unchanged, it's not a part of FBO state
-        self.device.bind_draw_target(render_target, None);
+        // Restore draw target to current pass render target + layer, and reset
+        // the read target.
+        self.device.bind_draw_target(draw_target);
+        self.device.reset_read_target();
 
         if scissor_rect.is_some() {
             self.device.enable_scissor();
         }
     }
 
     fn handle_blits(
         &mut self,
@@ -3003,29 +3002,29 @@ impl Renderer {
         //           If it ever shows up as an issue, we can easily batch them.
         for blit in blits {
             let source_rect = match blit.source {
                 BlitJobSource::Texture(texture_id, layer, source_rect) => {
                     // A blit from a texture into this target.
                     let texture = self.texture_resolver
                         .resolve(&texture_id)
                         .expect("BUG: invalid source texture");
-                    self.device.bind_read_target(Some(TextureReadTarget { texture, layer: layer as usize }));
+                    self.device.bind_read_target(ReadTarget::Texture { texture, layer: layer as usize });
                     source_rect
                 }
                 BlitJobSource::RenderTask(task_id) => {
                     // A blit from the child render task into this target.
                     // TODO(gw): Support R8 format here once we start
                     //           creating mips for alpha masks.
                     let texture = self.texture_resolver
                         .resolve(&TextureSource::PrevPassColor)
                         .expect("BUG: invalid source texture");
                     let source = &render_tasks[task_id];
                     let (source_rect, layer) = source.get_target_rect();
-                    self.device.bind_read_target(Some(TextureReadTarget { texture, layer: layer.0 }));
+                    self.device.bind_read_target(ReadTarget::Texture { texture, layer: layer.0 });
                     source_rect
                 }
             };
             debug_assert_eq!(source_rect.size, blit.target_rect.size);
             self.device.blit_render_target(
                 source_rect,
                 blit.target_rect,
             );
@@ -3062,75 +3061,78 @@ impl Renderer {
             VertexArrayKind::Scale,
             &BatchTextures::no_texture(),
             stats,
         );
     }
 
     fn draw_color_target(
         &mut self,
-        render_target: Option<TextureDrawTarget>,
+        draw_target: DrawTarget,
         target: &ColorRenderTarget,
         framebuffer_target_rect: DeviceUintRect,
-        target_size: DeviceUintSize,
         depth_is_ready: bool,
         clear_color: Option<[f32; 4]>,
         render_tasks: &RenderTaskTree,
         projection: &Transform3D<f32>,
         frame_id: FrameId,
         stats: &mut RendererStats,
     ) {
         self.profile_counters.color_targets.inc();
         let _gm = self.gpu_profile.start_marker("color target");
 
         // sanity check for the depth buffer
-        if let Some(t) = render_target {
-            assert!(t.texture.supports_depth() >= target.needs_depth());
+        if let DrawTarget::Texture { texture, .. } = draw_target {
+            assert!(texture.supports_depth() >= target.needs_depth());
         }
 
-        let framebuffer_kind = if render_target.is_none() {
+        let framebuffer_kind = if draw_target.is_default() {
             FramebufferKind::Main
         } else {
             FramebufferKind::Other
         };
 
         {
             let _timer = self.gpu_profile.start_timer(GPU_TAG_SETUP_TARGET);
-            self.device
-                .bind_draw_target(render_target, Some(target_size));
+            self.device.bind_draw_target(draw_target);
             self.device.disable_depth();
             self.set_blend(false, framebuffer_kind);
 
             let depth_clear = if !depth_is_ready && target.needs_depth() {
                 self.device.enable_depth_write();
                 Some(1.0)
             } else {
                 None
             };
 
-            let clear_rect = if render_target.is_some() {
+            let clear_rect = if !draw_target.is_default() {
                 if self.enable_clear_scissor {
                     // TODO(gw): Applying a scissor rect and minimal clear here
                     // is a very large performance win on the Intel and nVidia
                     // GPUs that I have tested with. It's possible it may be a
                     // performance penalty on other GPU types - we should test this
                     // and consider different code paths.
+                    //
+                    // Note: The above measurements were taken when render
+                    // target slices were minimum 2048x2048. Now that we size
+                    // them adaptively, this may be less of a win (except perhaps
+                    // on a mostly-unused last slice of a large texture array).
                     Some(target.used_rect())
                 } else {
                     None
                 }
-            } else if framebuffer_target_rect == DeviceUintRect::new(DeviceUintPoint::zero(), target_size) {
+            } else if framebuffer_target_rect == DeviceUintRect::new(DeviceUintPoint::zero(), draw_target.dimensions()) {
                 // whole screen is covered, no need for scissor
                 None
             } else {
                 let mut rect = framebuffer_target_rect.to_i32();
                 // Note: `framebuffer_target_rect` needs a Y-flip before going to GL
                 // Note: at this point, the target rectangle is not guaranteed to be within the main framebuffer bounds
                 // but `clear_target_rect` is totally fine with negative origin, as long as width & height are positive
-                rect.origin.y = target_size.height as i32 - rect.origin.y - rect.size.height;
+                rect.origin.y = draw_target.dimensions().height as i32 - rect.origin.y - rect.size.height;
                 Some(rect)
             };
 
             self.device.clear_target(clear_color, depth_clear, clear_rect);
 
             if depth_clear.is_some() {
                 self.device.disable_depth_write();
             }
@@ -3182,21 +3184,21 @@ impl Renderer {
             //Note: depth equality is needed for split planes
             self.device.set_depth_func(DepthFunction::LessEqual);
             self.device.enable_depth();
             self.device.enable_depth_write();
 
             for alpha_batch_container in &target.alpha_batch_containers {
                 if let Some(target_rect) = alpha_batch_container.target_rect {
                     // Note: `framebuffer_target_rect` needs a Y-flip before going to GL
-                    let rect = if render_target.is_none() {
+                    let rect = if draw_target.is_default() {
                         let mut rect = target_rect
                             .intersection(&framebuffer_target_rect.to_i32())
                             .unwrap_or(DeviceIntRect::zero());
-                        rect.origin.y = target_size.height as i32 - rect.origin.y - rect.size.height;
+                        rect.origin.y = draw_target.dimensions().height as i32 - rect.origin.y - rect.size.height;
                         rect
                     } else {
                         target_rect
                     };
                     self.device.enable_scissor();
                     self.device.set_scissor_rect(rect);
                 }
 
@@ -3235,21 +3237,21 @@ impl Renderer {
         let _gl = self.gpu_profile.start_marker("alpha batches");
         let transparent_sampler = self.gpu_profile.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
         self.set_blend(true, framebuffer_kind);
         let mut prev_blend_mode = BlendMode::None;
 
         for alpha_batch_container in &target.alpha_batch_containers {
             if let Some(target_rect) = alpha_batch_container.target_rect {
                 // Note: `framebuffer_target_rect` needs a Y-flip before going to GL
-                let rect = if render_target.is_none() {
+                let rect = if draw_target.is_default() {
                     let mut rect = target_rect
                         .intersection(&framebuffer_target_rect.to_i32())
                         .unwrap_or(DeviceIntRect::zero());
-                    rect.origin.y = target_size.height as i32 - rect.origin.y - rect.size.height;
+                    rect.origin.y = draw_target.dimensions().height as i32 - rect.origin.y - rect.size.height;
                     rect
                 } else {
                     target_rect
                 };
                 self.device.enable_scissor();
                 self.device.set_scissor_rect(rect);
             }
 
@@ -3299,18 +3301,17 @@ impl Renderer {
                 }
 
                 // Handle special case readback for composites.
                 if let BatchKind::Brush(BrushBatchKind::MixBlend { task_id, source_id, backdrop_id }) = batch.key.kind {
                     // composites can't be grouped together because
                     // they may overlap and affect each other.
                     debug_assert_eq!(batch.instances.len(), 1);
                     self.handle_readback_composite(
-                        render_target,
-                        target_size,
+                        draw_target,
                         alpha_batch_container.target_rect,
                         &render_tasks[source_id],
                         &render_tasks[task_id],
                         &render_tasks[backdrop_id],
                     );
                 }
 
                 let _timer = self.gpu_profile.start_timer(batch.key.kind.sampler_tag());
@@ -3375,41 +3376,39 @@ impl Renderer {
                 };
                 let (src_rect, _) = render_tasks[output.task_id].get_target_rect();
                 let mut dest_rect = DeviceIntRect::new(DeviceIntPoint::zero(), output_size);
 
                 // Invert Y coordinates, to correctly convert between coordinate systems.
                 dest_rect.origin.y += dest_rect.size.height;
                 dest_rect.size.height *= -1;
 
-                self.device.bind_read_target(render_target.map(|r| r.into()));
+                self.device.bind_read_target(draw_target.into());
                 self.device.bind_external_draw_target(fbo_id);
                 self.device.blit_render_target(src_rect, dest_rect);
                 handler.unlock(output.pipeline_id);
             }
         }
     }
 
     fn draw_alpha_target(
         &mut self,
-        render_target: TextureDrawTarget,
+        draw_target: DrawTarget,
         target: &AlphaRenderTarget,
-        target_size: DeviceUintSize,
         projection: &Transform3D<f32>,
         render_tasks: &RenderTaskTree,
         stats: &mut RendererStats,
     ) {
         self.profile_counters.alpha_targets.inc();
         let _gm = self.gpu_profile.start_marker("alpha target");
         let alpha_sampler = self.gpu_profile.start_sampler(GPU_SAMPLER_TAG_ALPHA);
 
         {
             let _timer = self.gpu_profile.start_timer(GPU_TAG_SETUP_TARGET);
-            self.device
-                .bind_draw_target(Some(render_target), Some(target_size));
+            self.device.bind_draw_target(draw_target);
             self.device.disable_depth();
             self.device.disable_depth_write();
 
             // TODO(gw): Applying a scissor rect and minimal clear here
             // is a very large performance win on the Intel and nVidia
             // GPUs that I have tested with. It's possible it may be a
             // performance penalty on other GPU types - we should test this
             // and consider different code paths.
@@ -3564,21 +3563,21 @@ impl Renderer {
 
         // Handle any Pathfinder glyphs.
         let stencil_page = self.stencil_glyphs(&target.glyphs, &projection, &target_size, stats);
 
         {
             let texture = self.texture_resolver
                 .resolve(&texture_source)
                 .expect("BUG: invalid target texture");
-            self.device.bind_draw_target(Some(TextureDrawTarget {
+            self.device.bind_draw_target(DrawTarget::Texture {
                 texture,
                 layer,
                 with_depth: false,
-            }), Some(target_size));
+            });
         }
 
         self.device.disable_depth();
         self.device.disable_depth_write();
         self.set_blend(false, FramebufferKind::Other);
 
         for rect in &target.clears {
             self.device.clear_target(Some([0.0, 0.0, 0.0, 0.0]), None, Some(*rect));
@@ -3796,27 +3795,40 @@ impl Renderer {
     /// been used in the last 30 frames, since we could otherwise end up
     /// keeping an enormous target alive indefinitely by constantly using it
     /// in situations where a much smaller target would suffice.
     fn allocate_target_texture<T: RenderTarget>(
         &mut self,
         list: &mut RenderTargetList<T>,
         counters: &mut FrameProfileCounters,
     ) -> Option<ActiveTexture> {
-        debug_assert_ne!(list.max_size, DeviceUintSize::zero());
         if list.targets.is_empty() {
             return None
         }
 
+        // Get a bounding rect of all the layers, and round it up to a multiple
+        // of 256. This improves render target reuse when resizing the window,
+        // since we don't need to create a new render target for each slightly-
+        // larger frame.
+        let mut bounding_rect = DeviceIntRect::zero();
+        for t in list.targets.iter() {
+            bounding_rect = t.used_rect().union(&bounding_rect);
+        }
+        debug_assert_eq!(bounding_rect.origin, DeviceIntPoint::zero());
+        let dimensions = DeviceUintSize::new(
+            (bounding_rect.size.width as u32 + 255) & !255,
+            (bounding_rect.size.height as u32 + 255) & !255,
+        );
+
         counters.targets_used.inc();
 
         // Try finding a match in the existing pool. If there's no match, we'll
         // create a new texture.
         let selector = TargetSelector {
-            size: list.max_size,
+            size: dimensions,
             num_layers: list.targets.len(),
             format: list.format,
         };
         let index = self.texture_resolver.render_target_pool
             .iter()
             .position(|texture| {
                 selector == TargetSelector {
                     size: texture.get_dimensions(),
@@ -3830,18 +3842,18 @@ impl Renderer {
             let mut t = self.texture_resolver.render_target_pool.swap_remove(idx);
             self.device.reuse_render_target::<u8>(&mut t, rt_info);
             t
         } else {
             counters.targets_created.inc();
             self.device.create_texture(
                 TextureTarget::Array,
                 list.format,
-                list.max_size.width,
-                list.max_size.height,
+                dimensions.width,
+                dimensions.height,
                 TextureFilter::Linear,
                 Some(rt_info),
                 list.targets.len() as _,
             )
         };
 
         list.check_ready(&texture);
         Some(ActiveTexture {
@@ -3939,20 +3951,19 @@ impl Renderer {
                             framebuffer_size.width as f32,
                             framebuffer_size.height as f32,
                             0.0,
                             ORTHO_NEAR_PLANE,
                             ORTHO_FAR_PLANE,
                         );
 
                         self.draw_color_target(
-                            None,
+                            DrawTarget::Default(framebuffer_size),
                             target,
                             frame.inner_rect,
-                            framebuffer_size,
                             framebuffer_depth_is_ready,
                             clear_color,
                             &frame.render_tasks,
                             &projection,
                             frame_id,
                             stats,
                         );
                     }
@@ -3975,61 +3986,61 @@ impl Renderer {
                                 &frame.render_tasks,
                                 stats,
                             );
                         }
                     }
 
                     for (target_index, target) in alpha.targets.iter().enumerate() {
                         stats.alpha_target_count += 1;
+                        let draw_target = DrawTarget::Texture {
+                            texture: &alpha_tex.as_ref().unwrap().texture,
+                            layer: target_index,
+                            with_depth: false,
+                        };
 
                         let projection = Transform3D::ortho(
                             0.0,
-                            alpha.max_size.width as f32,
+                            draw_target.dimensions().width as f32,
                             0.0,
-                            alpha.max_size.height as f32,
+                            draw_target.dimensions().height as f32,
                             ORTHO_NEAR_PLANE,
                             ORTHO_FAR_PLANE,
                         );
 
                         self.draw_alpha_target(
-                            TextureDrawTarget {
-                                texture: &alpha_tex.as_ref().unwrap().texture,
-                                layer: target_index,
-                                with_depth: false,
-                            },
+                            draw_target,
                             target,
-                            alpha.max_size,
                             &projection,
                             &frame.render_tasks,
                             stats,
                         );
                     }
 
                     for (target_index, target) in color.targets.iter().enumerate() {
                         stats.color_target_count += 1;
+                        let draw_target = DrawTarget::Texture {
+                            texture: &color_tex.as_ref().unwrap().texture,
+                            layer: target_index,
+                            with_depth: target.needs_depth(),
+                        };
 
                         let projection = Transform3D::ortho(
                             0.0,
-                            color.max_size.width as f32,
+                            draw_target.dimensions().width as f32,
                             0.0,
-                            color.max_size.height as f32,
+                            draw_target.dimensions().height as f32,
                             ORTHO_NEAR_PLANE,
                             ORTHO_FAR_PLANE,
                         );
 
                         self.draw_color_target(
-                            Some(TextureDrawTarget {
-                                texture: &color_tex.as_ref().unwrap().texture,
-                                layer: target_index,
-                                with_depth: target.needs_depth(),
-                            }),
+                            draw_target,
                             target,
                             frame.inner_rect,
-                            color.max_size,
                             false,
                             Some([0.0, 0.0, 0.0, 0.0]),
                             &frame.render_tasks,
                             &projection,
                             frame_id,
                             stats,
                         );
                     }
@@ -4136,17 +4147,17 @@ impl Renderer {
 
         let mut target_index = 0;
         for texture in &self.texture_resolver.render_target_pool {
             let dimensions = texture.get_dimensions();
             let src_rect = DeviceIntRect::new(DeviceIntPoint::zero(), dimensions.to_i32());
 
             let layer_count = texture.get_layer_count() as usize;
             for layer in 0 .. layer_count {
-                self.device.bind_read_target(Some(TextureReadTarget { texture, layer }));
+                self.device.bind_read_target(ReadTarget::Texture { texture, layer });
                 let x = fb_width - (spacing + size) * (target_index + 1);
                 let y = spacing;
 
                 let dest_rect = rect(x, y, size, size);
                 self.device.blit_render_target(src_rect, dest_rect);
                 target_index += 1;
             }
         }
@@ -4183,17 +4194,17 @@ impl Renderer {
             let dimensions = texture.get_dimensions();
             let src_rect = DeviceIntRect::new(
                 DeviceIntPoint::zero(),
                 DeviceIntSize::new(dimensions.width as i32, dimensions.height as i32),
             );
 
             let layer_count = texture.get_layer_count() as usize;
             for layer in 0 .. layer_count {
-                self.device.bind_read_target(Some(TextureReadTarget { texture, layer}));
+                self.device.bind_read_target(ReadTarget::Texture { texture, layer});
 
                 let x = fb_width - (spacing + size) * (i as i32 + 1);
 
                 // If we have more targets than fit on one row in screen, just early exit.
                 if x > fb_width {
                     return;
                 }
 
@@ -4290,23 +4301,23 @@ impl Renderer {
         pixels
     }
 
     pub fn read_gpu_cache(&mut self) -> (DeviceUintSize, Vec<u8>) {
         let texture = self.gpu_cache_texture.texture.as_ref().unwrap();
         let size = texture.get_dimensions();
         let mut texels = vec![0; (size.width * size.height * 16) as usize];
         self.device.begin_frame();
-        self.device.bind_read_target(Some(TextureReadTarget { texture, layer: 0 }));
+        self.device.bind_read_target(ReadTarget::Texture { texture, layer: 0 });
         self.device.read_pixels_into(
             DeviceUintRect::new(DeviceUintPoint::zero(), size),
             ReadPixelsFormat::Standard(ImageFormat::RGBAF32),
             &mut texels,
         );
-        self.device.bind_read_target(None);
+        self.device.reset_read_target();
         self.device.end_frame();
         (size, texels)
     }
 
     // De-initialize the Renderer safely, assuming the GL is still alive and active.
     pub fn deinit(mut self) {
         //Note: this is a fake frame, only needed because texture deletion is required to happen inside a frame
         self.device.begin_frame();
@@ -4903,17 +4914,17 @@ impl Renderer {
                 info!("\t{}", file_name);
                 let plain = Self::save_texture(texture, &file_name, &config.root, &mut self.device);
                 plain_self.textures.insert(*id, plain);
             }
 
             config.serialize(&plain_self, "renderer");
         }
 
-        self.device.bind_read_target(None);
+        self.device.reset_read_target();
         self.device.end_frame();
         info!("done.");
     }
 
     #[cfg(feature = "replay")]
     fn load_capture(
         &mut self, root: PathBuf, plain_externals: Vec<PlainExternalImage>
     ) {
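
As a worked note on the allocate_target_texture change above (the union of the targets' used rects is rounded up to a multiple of 256 before looking for a reusable render target), here is a small stand-alone sketch of that rounding; the helper name is made up for illustration.

    // Hypothetical helper mirroring the rounding in allocate_target_texture:
    // round a dimension up to the next multiple of 256 (a power of two, so
    // the add-and-mask trick is exact).
    fn round_up_to_256(x: u32) -> u32 {
        (x + 255) & !255
    }

    fn main() {
        assert_eq!(round_up_to_256(0), 0);
        assert_eq!(round_up_to_256(1), 256);
        assert_eq!(round_up_to_256(256), 256);
        assert_eq!(round_up_to_256(1000), 1024);
        assert_eq!(round_up_to_256(2047), 2048);
    }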
--- a/gfx/webrender/src/tiling.rs
+++ b/gfx/webrender/src/tiling.rs
@@ -23,20 +23,25 @@ use render_backend::FrameResources;
 use render_task::{BlitSource, RenderTaskAddress, RenderTaskId, RenderTaskKind};
 use render_task::{BlurTask, ClearMode, GlyphTask, RenderTaskLocation, RenderTaskTree, ScalingTask};
 use resource_cache::ResourceCache;
 use std::{cmp, usize, f32, i32, mem};
 use texture_allocator::GuillotineAllocator;
 #[cfg(feature = "pathfinder")]
 use webrender_api::{DevicePixel, FontRenderMode};
 
-const MIN_TARGET_SIZE: u32 = 2048;
 const STYLE_SOLID: i32 = ((BorderStyle::Solid as i32) << 8) | ((BorderStyle::Solid as i32) << 16);
 const STYLE_MASK: i32 = 0x00FF_FF00;
 
+/// According to apitrace, textures larger than 2048 break fast clear
+/// optimizations on some intel drivers. We sometimes need to go larger, but
+/// we try to avoid it. This can go away when proper tiling support lands,
+/// since we can then split large primitives across multiple textures.
+const IDEAL_MAX_TEXTURE_DIMENSION: u32 = 2048;
+
 /// Identifies a given `RenderTarget` in a `RenderTargetList`.
 #[derive(Debug, Copy, Clone)]
 #[cfg_attr(feature = "capture", derive(Serialize))]
 #[cfg_attr(feature = "replay", derive(Deserialize))]
 pub struct RenderTargetIndex(pub usize);
 
 pub struct RenderTargetContext<'a, 'rc> {
     pub device_pixel_scale: DevicePixelScale,
@@ -185,30 +190,36 @@ pub enum RenderTargetKind {
 ///
 /// Note that in some cases (like drop-shadows), we can depend on the output of
 /// a pass earlier than the immediately-preceding pass. See `SavedTargetIndex`.
 #[cfg_attr(feature = "capture", derive(Serialize))]
 #[cfg_attr(feature = "replay", derive(Deserialize))]
 pub struct RenderTargetList<T> {
     screen_size: DeviceIntSize,
     pub format: ImageFormat,
+    /// The maximum width and height of any single primitive we've encountered.
+    ///
+    /// We initially create our per-slice allocators with a width and height of
+    /// IDEAL_MAX_TEXTURE_DIMENSION. If we encounter a larger primitive, the
+    /// allocation will fail, but we'll bump max_size, which will cause the
+    /// allocator for the next slice to be just large enough to accommodate it.
     pub max_size: DeviceUintSize,
     pub targets: Vec<T>,
     pub saved_index: Option<SavedTargetIndex>,
 }
 
 impl<T: RenderTarget> RenderTargetList<T> {
     fn new(
         screen_size: DeviceIntSize,
         format: ImageFormat,
     ) -> Self {
         RenderTargetList {
             screen_size,
             format,
-            max_size: DeviceUintSize::new(MIN_TARGET_SIZE, MIN_TARGET_SIZE),
+            max_size: DeviceUintSize::new(0, 0),
             targets: Vec::new(),
             saved_index: None,
         }
     }
 
     fn build(
         &mut self,
         ctx: &mut RenderTargetContext,
@@ -263,17 +274,24 @@ impl<T: RenderTarget> RenderTargetList<T
     ) -> (DeviceUintPoint, RenderTargetIndex) {
         let existing_origin = self.targets
             .last_mut()
             .and_then(|target| target.allocate(alloc_size));
 
         let origin = match existing_origin {
             Some(origin) => origin,
             None => {
-                let mut new_target = T::new(Some(self.max_size), self.screen_size);
+                // Have the allocator restrict slice sizes to our max ideal
+                // dimensions, unless we've already gone bigger on a previous
+                // slice.
+                let allocator_dimensions = DeviceUintSize::new(
+                    cmp::max(IDEAL_MAX_TEXTURE_DIMENSION, self.max_size.width),
+                    cmp::max(IDEAL_MAX_TEXTURE_DIMENSION, self.max_size.height),
+                );
+                let mut new_target = T::new(Some(allocator_dimensions), self.screen_size);
                 let origin = new_target.allocate(alloc_size).expect(&format!(
                     "Each render task must allocate <= size of one target! ({})",
                     alloc_size
                 ));
                 self.targets.push(new_target);
                 origin
             }
         };
@@ -281,17 +299,19 @@ impl<T: RenderTarget> RenderTargetList<T
         (origin, RenderTargetIndex(self.targets.len() - 1))
     }
 
     pub fn needs_depth(&self) -> bool {
         self.targets.iter().any(|target| target.needs_depth())
     }
 
     pub fn check_ready(&self, t: &Texture) {
-        assert_eq!(t.get_dimensions(), self.max_size);
+        let dimensions = t.get_dimensions();
+        assert!(dimensions.width >= self.max_size.width);
+        assert!(dimensions.height >= self.max_size.height);
         assert_eq!(t.get_format(), self.format);
         assert_eq!(t.get_layer_count() as usize, self.targets.len());
         assert!(t.supports_depth() >= self.needs_depth());
     }
 }
 
 /// Frame output information for a given pipeline ID.
 /// Storing the task ID allows the renderer to find
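
To make the slice-sizing rule above concrete, here is a hypothetical walk-through of RenderTargetList::allocate with made-up numbers. The standalone snippet is illustrative only and assumes max_size has already been bumped elsewhere to fit an oversized primitive, as the doc comment on max_size describes.

    use std::cmp;

    // Mirrors the per-slice allocator sizing: at least
    // IDEAL_MAX_TEXTURE_DIMENSION in each dimension, grown to fit the largest
    // primitive recorded so far in max_size (here modeled as a (w, h) tuple).
    const IDEAL_MAX_TEXTURE_DIMENSION: u32 = 2048;

    fn allocator_dimensions(max_size: (u32, u32)) -> (u32, u32) {
        (
            cmp::max(IDEAL_MAX_TEXTURE_DIMENSION, max_size.0),
            cmp::max(IDEAL_MAX_TEXTURE_DIMENSION, max_size.1),
        )
    }

    fn main() {
        // Nothing oversized seen yet: new slices stay at the ideal 2048x2048.
        assert_eq!(allocator_dimensions((0, 0)), (2048, 2048));
        // A 3000x500 primitive was recorded: the next slice grows in width only.
        assert_eq!(allocator_dimensions((3000, 500)), (3000, 2048));
    }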
--- a/gfx/webrender_bindings/revision.txt
+++ b/gfx/webrender_bindings/revision.txt
@@ -1,1 +1,1 @@
-a7052abfe8e41bcc8904cb5b3add99735fedcd1f
+e7d340b0f39bbd0046e983a75245bdde54013cdb