Bug 1441308 - Make WR caches document-aware r=bholley
author Doug Thayer <dothayer@mozilla.com>
Thu, 10 Jan 2019 16:59:06 +0000
changeset 453300 95537e83071a
parent 453299 5caf4fb0d4f6
child 453301 da3629e101b9
push id 35352
push user dvarga@mozilla.com
push date Fri, 11 Jan 2019 04:12:48 +0000
treeherder mozilla-central@65326bd78f83
reviewers bholley
bugs 1441308
milestone 66.0a1
Bug 1441308 - Make WR caches document-aware r=bholley

This change makes the various WR caches segment their cached data by document, so that one document's data is not evicted out from under it by another document's frames.

Differential Revision: https://phabricator.services.mozilla.com/D13343
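At a high level, the patch keys each cache's eviction state on a DocumentId. A minimal sketch of the idea, with illustrative names only (the real patch threads the DocumentId through FrameStamp and keeps per-document strong handle lists, as the diffs below show):

```rust
use std::collections::HashMap;

// Hypothetical stand-ins for the real WR types, for illustration only.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct DocumentId(u32);

struct Entry {
    last_access_frame: u64,
}

#[derive(Default)]
struct PerDocumentData {
    // Entries owned by one document. Evicting from this list can only
    // affect that document, never another one sharing the cache.
    entries: Vec<Entry>,
}

#[derive(Default)]
struct Cache {
    per_doc_data: HashMap<DocumentId, PerDocumentData>,
}

impl Cache {
    // Eviction is scoped to the document whose frame is being built, so
    // building frames for one document cannot evict another document's
    // data out from underneath it.
    fn expire_old_entries(&mut self, doc: DocumentId, now: u64, max_age: u64) {
        if let Some(data) = self.per_doc_data.get_mut(&doc) {
            data.entries
                .retain(|e| now.saturating_sub(e.last_access_frame) <= max_age);
        }
    }
}
```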
gfx/wr/webrender/src/frame_builder.rs
gfx/wr/webrender/src/render_backend.rs
gfx/wr/webrender/src/resource_cache.rs
gfx/wr/webrender/src/texture_cache.rs
gfx/wr/webrender_api/src/api.rs
--- a/gfx/wr/webrender/src/frame_builder.rs
+++ b/gfx/wr/webrender/src/frame_builder.rs
@@ -497,17 +497,17 @@ impl FrameBuilder {
                 }
             }
         }
 
         let gpu_cache_frame_id = gpu_cache.end_frame(gpu_cache_profile);
 
         render_tasks.write_task_data(device_pixel_scale);
 
-        resource_cache.end_frame();
+        resource_cache.end_frame(texture_cache_profile);
 
         Frame {
             window_size: self.window_size,
             inner_rect: self.screen_rect,
             device_pixel_ratio: device_pixel_scale.0,
             background_color: self.background_color,
             layer,
             profile_counters,
--- a/gfx/wr/webrender/src/render_backend.rs
+++ b/gfx/wr/webrender/src/render_backend.rs
@@ -143,22 +143,25 @@ impl ::std::ops::Sub<usize> for FrameId 
 /// we should never have two `FrameStamps` with the same id but different
 /// timestamps.
 #[derive(Copy, Clone, Debug)]
 #[cfg_attr(feature = "capture", derive(Serialize))]
 #[cfg_attr(feature = "replay", derive(Deserialize))]
 pub struct FrameStamp {
     id: FrameId,
     time: SystemTime,
+    document_id: DocumentId,
 }
 
 impl Eq for FrameStamp {}
 
 impl PartialEq for FrameStamp {
     fn eq(&self, other: &Self) -> bool {
+        // We should not be checking equality unless the documents are the same
+        debug_assert!(self.document_id == other.document_id);
         self.id == other.id
     }
 }
 
 impl PartialOrd for FrameStamp {
     fn partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> {
         self.id.partial_cmp(&other.id)
     }
@@ -170,34 +173,48 @@ impl FrameStamp {
         self.id
     }
 
     /// Gets the time associated with this FrameStamp.
     pub fn time(&self) -> SystemTime {
         self.time
     }
 
+    /// Gets the DocumentId in this stamp.
+    pub fn document_id(&self) -> DocumentId {
+        self.document_id
+    }
+
+    pub fn is_valid(&self) -> bool {
+        // If any fields are their default values, the whole struct should equal INVALID
+        debug_assert!((self.time != UNIX_EPOCH && self.id != FrameId(0) && self.document_id != DocumentId::INVALID) ||
+                      *self == Self::INVALID);
+        self.document_id != DocumentId::INVALID
+    }
+
     /// Returns a FrameStamp corresponding to the first frame.
-    pub fn first() -> Self {
+    pub fn first(document_id: DocumentId) -> Self {
         FrameStamp {
             id: FrameId::first(),
             time: SystemTime::now(),
+            document_id,
         }
     }
 
     /// Advances to a new frame.
     pub fn advance(&mut self) {
         self.id.advance();
         self.time = SystemTime::now();
     }
 
     /// An invalid sentinel FrameStamp.
     pub const INVALID: FrameStamp = FrameStamp {
         id: FrameId(0),
         time: UNIX_EPOCH,
+        document_id: DocumentId::INVALID,
     };
 }
 
 // A collection of resources that are shared by clips and primitives
 // between display lists.
 #[cfg_attr(feature = "capture", derive(Serialize))]
 #[cfg_attr(feature = "replay", derive(Deserialize))]
 #[derive(Default)]
@@ -327,16 +344,17 @@ struct Document {
     /// Contains various vecs of data that is used only during frame building,
     /// where we want to recycle the memory each new display list, to avoid constantly
     /// re-allocating and moving memory around.
     scratch: PrimitiveScratchBuffer,
 }
 
 impl Document {
     pub fn new(
+        id: DocumentId,
         window_size: DeviceIntSize,
         layer: DocumentLayer,
         default_device_pixel_ratio: f32,
     ) -> Self {
         Document {
             scene: Scene::new(),
             removed_pipelines: Vec::new(),
             view: DocumentView {
@@ -344,17 +362,17 @@ impl Document {
                 inner_rect: DeviceIntRect::new(DeviceIntPoint::zero(), window_size),
                 layer,
                 pan: DeviceIntPoint::zero(),
                 page_zoom_factor: 1.0,
                 pinch_zoom_factor: 1.0,
                 device_pixel_ratio: default_device_pixel_ratio,
             },
             clip_scroll_tree: ClipScrollTree::new(),
-            stamp: FrameStamp::first(),
+            stamp: FrameStamp::first(id),
             frame_builder: None,
             output_pipelines: FastHashSet::default(),
             hit_tester: None,
             dynamic_properties: SceneProperties::new(),
             frame_is_valid: false,
             hit_tester_is_valid: false,
             rendered_frame_is_valid: false,
             has_built_scene: false,
@@ -977,16 +995,17 @@ impl RenderBackend {
                 sender.send(self.next_namespace_id()).unwrap();
             }
             ApiMsg::CloneApiByClient(namespace_id) => {
                 assert!(self.namespace_alloc_by_client);
                 debug_assert!(!self.documents.iter().any(|(did, _doc)| did.0 == namespace_id));
             }
             ApiMsg::AddDocument(document_id, initial_size, layer) => {
                 let document = Document::new(
+                    document_id,
                     initial_size,
                     layer,
                     self.default_device_pixel_ratio,
                 );
                 self.documents.insert(document_id, document);
             }
             ApiMsg::DeleteDocument(document_id) => {
                 self.documents.remove(&document_id);
@@ -1742,17 +1761,17 @@ impl RenderBackend {
             let frame_resources = CaptureConfig::deserialize::<FrameResources, _>(root, &frame_resources_name)
                 .expect(&format!("Unable to open {}.ron", frame_resources_name));
 
             let mut doc = Document {
                 scene: scene.clone(),
                 removed_pipelines: Vec::new(),
                 view: view.clone(),
                 clip_scroll_tree: ClipScrollTree::new(),
-                stamp: FrameStamp::first(),
+                stamp: FrameStamp::first(id),
                 frame_builder: Some(FrameBuilder::empty()),
                 output_pipelines: FastHashSet::default(),
                 dynamic_properties: SceneProperties::new(),
                 hit_tester: None,
                 frame_is_valid: false,
                 hit_tester_is_valid: false,
                 rendered_frame_is_valid: false,
                 has_built_scene: false,
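The net effect of the render_backend.rs changes is that a FrameStamp is only meaningful within a single document: it carries its DocumentId, and FrameStamp::INVALID (with DocumentId::INVALID) is intended to be the sole invalid value, per the debug_assert in is_valid. A self-contained mock of the shape added above, with FrameId internals simplified for illustration:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// Simplified stand-ins; the real types live in webrender_api / render_backend.
#[derive(Clone, Copy, PartialEq, Eq)]
struct DocumentId(u32, u32);
impl DocumentId {
    const INVALID: DocumentId = DocumentId(0, 0);
}

#[derive(Clone, Copy, PartialEq, Eq)]
struct FrameId(usize);

#[derive(Clone, Copy)]
struct FrameStamp {
    id: FrameId,
    time: SystemTime,
    document_id: DocumentId,
}

impl FrameStamp {
    // The only stamp carrying DocumentId::INVALID; is_valid() keys off that.
    const INVALID: FrameStamp = FrameStamp {
        id: FrameId(0),
        time: UNIX_EPOCH,
        document_id: DocumentId::INVALID,
    };

    fn first(document_id: DocumentId) -> Self {
        FrameStamp { id: FrameId(1), time: SystemTime::now(), document_id }
    }

    fn advance(&mut self) {
        self.id.0 += 1;
        self.time = SystemTime::now();
    }

    fn is_valid(&self) -> bool {
        self.document_id != DocumentId::INVALID
    }
}

fn main() {
    let mut stamp = FrameStamp::first(DocumentId(1, 1));
    assert!(stamp.is_valid());
    stamp.advance(); // same document, next frame
    assert!(!FrameStamp::INVALID.is_valid());
}
```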
--- a/gfx/wr/webrender/src/resource_cache.rs
+++ b/gfx/wr/webrender/src/resource_cache.rs
@@ -1605,17 +1605,16 @@ impl ResourceCache {
         // Apply any updates of new / updated images (incl. blobs) to the texture cache.
         self.update_texture_cache(gpu_cache);
         render_tasks.prepare_for_render();
         self.cached_render_tasks.update(
             gpu_cache,
             &mut self.texture_cache,
             render_tasks,
         );
-        self.texture_cache.end_frame(texture_cache_profile);
     }
 
     fn rasterize_missing_blob_images(&mut self) {
         if self.missing_blob_images.is_empty() {
             return;
         }
 
         self.blob_image_handler
@@ -1762,19 +1761,20 @@ impl ResourceCache {
                     None,
                     UvRectKind::Rect,
                     eviction,
                 );
             }
         }
     }
 
-    pub fn end_frame(&mut self) {
+    pub fn end_frame(&mut self, texture_cache_profile: &mut TextureCacheProfileCounters) {
         debug_assert_eq!(self.state, State::QueryResources);
         self.state = State::Idle;
+        self.texture_cache.end_frame(texture_cache_profile);
     }
 
     pub fn set_debug_flags(&mut self, flags: DebugFlags) {
         self.texture_cache.set_debug_flags(flags);
     }
 
     pub fn clear(&mut self, what: ClearCache) {
         if what.contains(ClearCache::IMAGES) {
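These two hunks move the texture cache's end-of-frame hook out of the resource-update path and into ResourceCache::end_frame, which is why FrameBuilder (first file above) now threads texture_cache_profile through. A minimal sketch of the resulting ownership, using assumed stand-in types:

```rust
struct TextureCacheProfileCounters; // stand-in for the profiler type

struct TextureCache;
impl TextureCache {
    fn end_frame(&mut self, _profile: &mut TextureCacheProfileCounters) {
        // Expires the current document's old entries, updates profile
        // counters, and stashes the per-document data back in the map.
    }
}

struct ResourceCache {
    texture_cache: TextureCache,
}
impl ResourceCache {
    // After this patch, the texture cache is closed out here, once per
    // document frame, so callers must pass the profile counters through.
    fn end_frame(&mut self, profile: &mut TextureCacheProfileCounters) {
        self.texture_cache.end_frame(profile);
    }
}

fn main() {
    let mut rc = ResourceCache { texture_cache: TextureCache };
    let mut profile = TextureCacheProfileCounters;
    rc.end_frame(&mut profile); // as FrameBuilder::build now does
}
```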
--- a/gfx/wr/webrender/src/texture_cache.rs
+++ b/gfx/wr/webrender/src/texture_cache.rs
@@ -1,20 +1,20 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-use api::{DebugFlags, DeviceIntPoint, DeviceIntRect, DeviceIntSize, DirtyRect, ImageDirtyRect};
-use api::{ExternalImageType, ImageFormat};
-use api::ImageDescriptor;
+use api::{DebugFlags, DeviceIntPoint, DeviceIntRect, DeviceIntSize};
+use api::{DirtyRect, ImageDirtyRect, DocumentId, ExternalImageType, ImageFormat};
+use api::{IdNamespace, ImageDescriptor};
 use device::{TextureFilter, total_gpu_bytes_allocated};
 use freelist::{FreeList, FreeListHandle, UpsertResult, WeakFreeListHandle};
 use gpu_cache::{GpuCache, GpuCacheHandle};
 use gpu_types::{ImageSource, UvRectKind};
-use internal_types::{CacheTextureId, LayerIndex, TextureUpdateList, TextureUpdateSource};
+use internal_types::{CacheTextureId, FastHashMap, LayerIndex, TextureUpdateList, TextureUpdateSource};
 use internal_types::{TextureSource, TextureCacheAllocInfo, TextureCacheUpdate};
 use profiler::{ResourceProfileCounter, TextureCacheProfileCounters};
 use render_backend::{FrameId, FrameStamp};
 use resource_cache::{CacheItem, CachedImageData};
 use std::cell::Cell;
 use std::cmp;
 use std::mem;
 use std::time::{Duration, SystemTime};
@@ -277,17 +277,17 @@ impl SharedTextures {
             (_, _) => unreachable!(),
         }
     }
 }
 
 /// Lists of strong handles owned by the texture cache. There is only one strong
 /// handle for each entry, but unlimited weak handles. Consumers receive the weak
 /// handles, and `TextureCache` owns the strong handles internally.
-#[derive(Default)]
+#[derive(Default, Debug)]
 #[cfg_attr(feature = "capture", derive(Serialize))]
 #[cfg_attr(feature = "replay", derive(Deserialize))]
 struct EntryHandles {
     /// Handles for each standalone texture cache entry.
     standalone: Vec<FreeListHandle<CacheEntryMarker>>,
     /// Handles for each shared texture cache entry.
     shared: Vec<FreeListHandle<CacheEntryMarker>>,
 }
@@ -405,16 +405,37 @@ impl EvictionThresholdBuilder {
 
         EvictionThreshold {
             id: self.now.frame_id() - max_frames,
             time: self.now.time() - Duration::from_millis(max_time_ms),
         }
     }
 }
 
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+pub struct PerDocumentData {
+    /// The last `FrameStamp` in which we expired the shared cache for
+    /// this document.
+    last_shared_cache_expiration: FrameStamp,
+
+    /// Strong handles for all entries that this document has allocated
+    /// from the shared FreeList.
+    handles: EntryHandles,
+}
+
+impl PerDocumentData {
+    pub fn new() -> Self {
+        PerDocumentData {
+            last_shared_cache_expiration: FrameStamp::INVALID,
+            handles: EntryHandles::default(),
+        }
+    }
+}
+
 /// General-purpose manager for images in GPU memory. This includes images,
 /// rasterized glyphs, rasterized blobs, cached render tasks, etc.
 ///
 /// The texture cache is owned and managed by the RenderBackend thread, and
 /// produces a series of commands to manipulate the textures on the Renderer
 /// thread. These commands are executed before any rendering is performed for
 /// a given frame.
 ///
@@ -448,28 +469,33 @@ pub struct TextureCache {
     /// A list of allocations and updates that need to be applied to the texture
     /// cache in the rendering thread this frame.
     #[cfg_attr(all(feature = "serde", any(feature = "capture", feature = "replay")), serde(skip))]
     pending_updates: TextureUpdateList,
 
     /// The current `FrameStamp`. Used for cache eviction policies.
     now: FrameStamp,
 
-    /// The last `FrameStamp` in which we expired the shared cache.
-    last_shared_cache_expiration: FrameStamp,
-
     /// The time at which we first reached the byte threshold for reclaiming
     /// cache memory. `None` if we haven't reached the threshold.
     reached_reclaim_threshold: Option<SystemTime>,
 
     /// Maintains the list of all current items in the texture cache.
     entries: FreeList<CacheEntry, CacheEntryMarker>,
 
-    /// Strong handles for all entries allocated from the above `FreeList`.
-    handles: EntryHandles,
+    /// Holds items that need to be maintained on a per-document basis. If we
+    /// modify this data for a document without also building a frame for that
+    /// document, then we might end up erroneously evicting items out from
+    /// under that document.
+    per_doc_data: FastHashMap<DocumentId, PerDocumentData>,
+
+    /// The current document's data. This is moved out of per_doc_data in
+    /// begin_frame and moved back in end_frame to solve borrow checker issues.
+    /// We should try removing this when we require a rustc with NLL.
+    doc_data: PerDocumentData,
 }
 
 impl TextureCache {
     pub fn new(max_texture_size: i32, mut max_texture_layers: usize) -> Self {
         if cfg!(target_os = "macos") {
             // On MBP integrated Intel GPUs, texture arrays appear to be
             // implemented as a single texture of stacked layers, and that
             // texture appears to be subject to the texture size limit. As such,
@@ -492,133 +518,163 @@ impl TextureCache {
             //     with the same bug but a lower max texture size, we might need
             //     to rethink our strategy anyway, since a limit below 32MB might
             //     start to introduce performance issues.
             max_texture_layers = max_texture_layers.min(32);
         }
 
         TextureCache {
             shared_textures: SharedTextures::new(),
+            reached_reclaim_threshold: None,
+            entries: FreeList::new(),
             max_texture_size,
             max_texture_layers,
             debug_flags: DebugFlags::empty(),
             next_id: CacheTextureId(1),
             pending_updates: TextureUpdateList::new(),
             now: FrameStamp::INVALID,
-            last_shared_cache_expiration: FrameStamp::INVALID,
-            reached_reclaim_threshold: None,
-            entries: FreeList::new(),
-            handles: EntryHandles::default(),
+            per_doc_data: FastHashMap::default(),
+            doc_data: PerDocumentData::new(),
         }
     }
 
     /// Creates a TextureCache and sets it up with a valid `FrameStamp`, which
     /// is useful for avoiding panics when instantiating the `TextureCache`
     /// directly from unit test code.
     #[allow(dead_code)]
     pub fn new_for_testing(max_texture_size: i32, max_texture_layers: usize) -> Self {
         let mut cache = Self::new(max_texture_size, max_texture_layers);
-        let mut now = FrameStamp::first();
+        let mut now = FrameStamp::first(DocumentId(IdNamespace(1), 1));
         now.advance();
         cache.begin_frame(now);
         cache
     }
 
     pub fn set_debug_flags(&mut self, flags: DebugFlags) {
         self.debug_flags = flags;
     }
 
     /// Clear all standalone textures in the cache.
     pub fn clear_standalone(&mut self) {
-        let standalone_entry_handles = mem::replace(
-            &mut self.handles.standalone,
-            Vec::new(),
-        );
+        debug_assert!(!self.now.is_valid());
+        let mut per_doc_data = mem::replace(&mut self.per_doc_data, FastHashMap::default());
+        for (&_, doc_data) in per_doc_data.iter_mut() {
+            let standalone_entry_handles = mem::replace(
+                &mut doc_data.handles.standalone,
+                Vec::new(),
+            );
 
-        for handle in standalone_entry_handles {
-            let entry = self.entries.free(handle);
-            entry.evict();
-            self.free(entry);
+            for handle in standalone_entry_handles {
+                let entry = self.entries.free(handle);
+                entry.evict();
+                self.free(entry);
+            }
         }
+        self.per_doc_data = per_doc_data;
     }
 
     /// Clear all shared textures in the cache.
     pub fn clear_shared(&mut self) {
-        let shared_entry_handles = mem::replace(
-            &mut self.handles.shared,
-            Vec::new(),
-        );
+        self.unset_doc_data();
+        let mut per_doc_data = mem::replace(&mut self.per_doc_data, FastHashMap::default());
+        for (&_, doc_data) in per_doc_data.iter_mut() {
+            let shared_entry_handles = mem::replace(
+                &mut doc_data.handles.shared,
+                Vec::new(),
+            );
 
-        for handle in shared_entry_handles {
-            let entry = self.entries.free(handle);
-            entry.evict();
-            self.free(entry);
+            for handle in shared_entry_handles {
+                let entry = self.entries.free(handle);
+                entry.evict();
+                self.free(entry);
+            }
         }
 
         self.shared_textures.clear(&mut self.pending_updates);
+        self.per_doc_data = per_doc_data;
+        self.set_doc_data();
     }
 
     /// Clear all entries in the texture cache. This is a fairly drastic
     /// step that should only be called very rarely.
     pub fn clear(&mut self) {
         self.clear_standalone();
         self.clear_shared();
     }
 
+    fn set_doc_data(&mut self) {
+        let document_id = self.now.document_id();
+        self.doc_data = self.per_doc_data
+                            .remove(&document_id)
+                            .unwrap_or_else(|| PerDocumentData::new());
+    }
+
+    fn unset_doc_data(&mut self) {
+        self.per_doc_data.insert(self.now.document_id(),
+                                 mem::replace(&mut self.doc_data, PerDocumentData::new()));
+    }
+
     /// Called at the beginning of each frame.
     pub fn begin_frame(&mut self, stamp: FrameStamp) {
+        debug_assert!(!self.now.is_valid());
         self.now = stamp;
+        self.set_doc_data();
         self.maybe_reclaim_shared_cache_memory();
     }
 
     /// Called at the beginning of each frame to periodically GC and reclaim
     /// storage if the cache has grown too large.
     fn maybe_reclaim_shared_cache_memory(&mut self) {
+        debug_assert!(self.now.is_valid());
         // The minimum number of bytes that we must be able to reclaim in order
         // to justify clearing the entire shared cache in order to shrink it.
         const RECLAIM_THRESHOLD_BYTES: usize = 5 * 1024 * 1024;
 
         // Normally the shared cache only gets GCed when we fail to allocate.
         // However, we also perform a periodic, conservative GC to ensure that
         // we recover unused memory in bounded time, rather than having it
         // depend on allocation patterns of subsequent content.
         let time_since_last_gc = self.now.time()
-            .duration_since(self.last_shared_cache_expiration.time())
+            .duration_since(self.doc_data.last_shared_cache_expiration.time())
             .unwrap_or(Duration::default());
         let do_periodic_gc = time_since_last_gc >= Duration::from_secs(5) &&
             self.shared_textures.size_in_bytes() >= RECLAIM_THRESHOLD_BYTES * 2;
         if do_periodic_gc {
             let threshold = EvictionThresholdBuilder::new(self.now)
                 .max_frames(1)
                 .max_time_s(10)
                 .build();
             self.maybe_expire_old_shared_entries(threshold);
         }
 
         // If we've had a sufficient number of unused layers for a sufficiently
         // long time, just blow the whole cache away to shrink it.
         //
         // We could do this more intelligently with a resize+blit, but that would
         // add complexity for a rare case.
+        //
+        // This block of code is broken with multiple documents, and should be
+        // moved out into a section that runs before building any frames in a
+        // group of documents.
         if self.shared_textures.empty_region_bytes() >= RECLAIM_THRESHOLD_BYTES {
             self.reached_reclaim_threshold.get_or_insert(self.now.time());
         } else {
             self.reached_reclaim_threshold = None;
         }
         if let Some(t) = self.reached_reclaim_threshold {
             let dur = self.now.time().duration_since(t).unwrap_or(Duration::default());
             if dur >= Duration::from_secs(5) {
                 self.clear_shared();
                 self.reached_reclaim_threshold = None;
             }
         }
-
     }
 
     pub fn end_frame(&mut self, texture_cache_profile: &mut TextureCacheProfileCounters) {
+        debug_assert!(self.now.is_valid());
         // Expire standalone entries.
         //
         // Most of the time, standalone cache entries correspond to images whose
         // width or height is greater than the region size in the shared cache, i.e.
         // 512 pixels. Cached render tasks also frequently get standalone entries,
         // but those use the Eviction::Eager policy (for now). So the tradeoff there
         // is largely around reducing texture upload jank while keeping memory usage
         // at an acceptable level.
@@ -628,16 +684,19 @@ impl TextureCache {
         self.shared_textures.array_a8_linear
             .update_profile(&mut texture_cache_profile.pages_a8_linear);
         self.shared_textures.array_a16_linear
             .update_profile(&mut texture_cache_profile.pages_a16_linear);
         self.shared_textures.array_rgba8_linear
             .update_profile(&mut texture_cache_profile.pages_rgba8_linear);
         self.shared_textures.array_rgba8_nearest
             .update_profile(&mut texture_cache_profile.pages_rgba8_nearest);
+
+        self.unset_doc_data();
+        self.now = FrameStamp::INVALID;
     }
 
     // Request an item in the texture cache. All images that will
     // be used on a frame *must* have request() called on their
     // handle, to update the last used timestamp and ensure
     // that resources are not flushed from the cache too early.
     //
     // Returns true if the image needs to be uploaded to the
@@ -685,16 +744,18 @@ impl TextureCache {
         data: Option<CachedImageData>,
         user_data: [f32; 3],
         mut dirty_rect: ImageDirtyRect,
         gpu_cache: &mut GpuCache,
         eviction_notice: Option<&EvictionNotice>,
         uv_rect_kind: UvRectKind,
         eviction: Eviction,
     ) {
+        debug_assert!(self.now.is_valid());
+
         // Determine if we need to allocate texture cache memory
         // for this item. We need to reallocate if any of the following
         // is true:
         // - Never been in the cache
         // - Has been in the cache but was evicted.
         // - Exists in the cache but dimensions / format have changed.
         let realloc = match self.entries.get_opt(handle) {
             Some(entry) => {
@@ -848,22 +909,23 @@ impl TextureCache {
             .scale_by_pressure()
             .build()
     }
 
     /// Shared eviction code for standalone and shared entries.
     ///
     /// See `EvictionThreshold` for more details on policy.
     fn expire_old_entries(&mut self, kind: EntryKind, threshold: EvictionThreshold) {
+        debug_assert!(self.now.is_valid());
         // Iterate over the entries in reverse order, evicting the ones older than
         // the frame age threshold. Reverse order avoids iterator invalidation when
         // removing entries.
-        for i in (0..self.handles.select(kind).len()).rev() {
+        for i in (0..self.doc_data.handles.select(kind).len()).rev() {
             let evict = {
-                let entry = self.entries.get(&self.handles.select(kind)[i]);
+                let entry = self.entries.get(&self.doc_data.handles.select(kind)[i]);
                 match entry.eviction {
                     Eviction::Manual => false,
                     Eviction::Auto => threshold.should_evict(entry.last_access),
                     Eviction::Eager => {
                         // Texture cache entries can be evicted at the start of
                         // a frame, or at any time during the frame when a cache
                         // allocation is occurring. This means that entries tagged
                         // with eager eviction may get evicted before they have a
@@ -875,34 +937,35 @@ impl TextureCache {
                         let mut entry_frame_id = entry.last_access.frame_id();
                         entry_frame_id.advance();
 
                         entry_frame_id < self.now.frame_id()
                     }
                 }
             };
             if evict {
-                let handle = self.handles.select(kind).swap_remove(i);
+                let handle = self.doc_data.handles.select(kind).swap_remove(i);
                 let entry = self.entries.free(handle);
                 entry.evict();
                 self.free(entry);
             }
         }
     }
 
     /// Expires old shared entries, if we haven't done so this frame.
     ///
     /// Returns true if any entries were expired.
     fn maybe_expire_old_shared_entries(&mut self, threshold: EvictionThreshold) -> bool {
-        let old_len = self.handles.shared.len();
-        if self.last_shared_cache_expiration.frame_id() < self.now.frame_id() {
+        debug_assert!(self.now.is_valid());
+        let old_len = self.doc_data.handles.shared.len();
+        if self.doc_data.last_shared_cache_expiration.frame_id() < self.now.frame_id() {
             self.expire_old_entries(EntryKind::Shared, threshold);
-            self.last_shared_cache_expiration = self.now;
+            self.doc_data.last_shared_cache_expiration = self.now;
         }
-        self.handles.shared.len() != old_len
+        self.doc_data.handles.shared.len() != old_len
     }
 
     // Free a cache entry from the standalone list or shared cache.
     fn free(&mut self, entry: CacheEntry) {
         match entry.details {
             EntryDetails::Standalone { .. } => {
                 // This is a standalone texture allocation. Free it directly.
                 self.pending_updates.push_free(entry.texture_id);
@@ -1101,16 +1164,17 @@ impl TextureCache {
         } else {
             self.allocate_standalone_entry(params)
         }
     }
 
     /// Allocates a cache entry for the given parameters, and updates the
     /// provided handle to point to the new entry.
     fn allocate(&mut self, params: &CacheAllocParams, handle: &mut TextureCacheHandle) {
+        debug_assert!(self.now.is_valid());
         let new_cache_entry = self.allocate_cache_entry(params);
         let new_kind = new_cache_entry.details.kind();
 
         // If the handle points to a valid cache entry, we want to replace the
         // cache entry with our newly updated location. We also need to ensure
         // that the storage (region or standalone) associated with the previous
         // entry here gets freed.
         //
@@ -1120,28 +1184,28 @@ impl TextureCache {
         // This is managed with a database style upsert operation.
         match self.entries.upsert(handle, new_cache_entry) {
             UpsertResult::Updated(old_entry) => {
                 if new_kind != old_entry.details.kind() {
                     // Handle the rare case than an update moves an entry from
                     // shared to standalone or vice versa. This involves a linear
                     // search, but should be rare enough not to matter.
                     let (from, to) = if new_kind == EntryKind::Standalone {
-                        (&mut self.handles.shared, &mut self.handles.standalone)
+                        (&mut self.doc_data.handles.shared, &mut self.doc_data.handles.standalone)
                     } else {
-                        (&mut self.handles.standalone, &mut self.handles.shared)
+                        (&mut self.doc_data.handles.standalone, &mut self.doc_data.handles.shared)
                     };
                     let idx = from.iter().position(|h| h.weak() == *handle).unwrap();
                     to.push(from.remove(idx));
                 }
                 self.free(old_entry);
             }
             UpsertResult::Inserted(new_handle) => {
                 *handle = new_handle.weak();
-                self.handles.select(new_kind).push(new_handle);
+                self.doc_data.handles.select(new_kind).push(new_handle);
             }
         }
     }
 }
 
 #[cfg_attr(feature = "capture", derive(Serialize))]
 #[cfg_attr(feature = "replay", derive(Deserialize))]
 #[derive(Copy, Clone, PartialEq)]
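Worth calling out: set_doc_data/unset_doc_data exist because methods like expire_old_entries need mutable access to the active document's handles while also calling other &mut self methods (free, entries.free). If the handles stayed behind a per_doc_data.get_mut() lookup, that borrow of self would conflict with those calls, hence the move-out/move-in dance noted in the doc comment. A minimal sketch of the pattern under those assumptions (illustrative types):

```rust
use std::collections::HashMap;
use std::mem;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct DocumentId(u32);

#[derive(Default)]
struct PerDocumentData {
    handles: Vec<usize>,
}

#[derive(Default)]
struct Cache {
    per_doc_data: HashMap<DocumentId, PerDocumentData>,
    // The active document's data, moved out of the map for the duration
    // of a frame so it can be borrowed alongside the other fields.
    doc_data: PerDocumentData,
    entries: Vec<String>, // stand-in for the shared FreeList
}

impl Cache {
    fn begin_frame(&mut self, doc: DocumentId) {
        // Equivalent of set_doc_data(): take this document's data out.
        self.doc_data = self.per_doc_data.remove(&doc).unwrap_or_default();
    }

    fn expire(&mut self) {
        // Indexing self.doc_data directly leaves the rest of self free;
        // had we held `self.per_doc_data.get_mut(&doc)` across this loop,
        // the call to self.free() below would be a borrowck error.
        for i in (0..self.doc_data.handles.len()).rev() {
            let handle = self.doc_data.handles[i];
            self.free(handle);
        }
    }

    fn free(&mut self, handle: usize) {
        if let Some(entry) = self.entries.get_mut(handle) {
            entry.clear();
        }
    }

    fn end_frame(&mut self, doc: DocumentId) {
        // Equivalent of unset_doc_data(): put the data back in the map.
        let data = mem::replace(&mut self.doc_data, PerDocumentData::default());
        self.per_doc_data.insert(doc, data);
    }
}

fn main() {
    let mut cache = Cache::default();
    let doc = DocumentId(1);
    cache.begin_frame(doc);
    cache.expire();
    cache.end_frame(doc);
}
```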
--- a/gfx/wr/webrender_api/src/api.rs
+++ b/gfx/wr/webrender_api/src/api.rs
@@ -789,16 +789,20 @@ impl Epoch {
 #[repr(C)]
 #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash, Ord, PartialOrd, Deserialize, Serialize)]
 pub struct IdNamespace(pub u32);
 
 #[repr(C)]
 #[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
 pub struct DocumentId(pub IdNamespace, pub u32);
 
+impl DocumentId {
+    pub const INVALID: DocumentId = DocumentId(IdNamespace(0), 0);
+}
+
 /// This type carries no valuable semantics for WR. However, it reflects the fact that
 /// clients (Servo) may generate pipelines by different semi-independent sources.
 /// These pipelines still belong to the same `IdNamespace` and the same `DocumentId`.
 /// Having this extra Id field enables them to generate `PipelineId` without collision.
 pub type PipelineSourceId = u32;
 
 /// From the point of view of WR, `PipelineId` is completely opaque and generic as long as
 /// it's clonable, serializable, comparable, and hashable.
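Finally, the api.rs hunk adds the sentinel everything above keys on. A plausible reason for a sentinel rather than Option<DocumentId> is that FrameStamp::INVALID is a `const` item: SystemTime::now() cannot appear in a const initializer, but UNIX_EPOCH and DocumentId::INVALID can. This also assumes namespace 0 is never allocated to a real client. A short illustrative check, with stand-ins mirroring the definitions above:

```rust
// Stand-ins mirroring the api.rs definitions above (illustration only).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct IdNamespace(u32);

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct DocumentId(IdNamespace, u32);

impl DocumentId {
    const INVALID: DocumentId = DocumentId(IdNamespace(0), 0);
}

fn main() {
    // Real documents come from nonzero namespaces, e.g. the one
    // new_for_testing() uses in texture_cache.rs above:
    let doc = DocumentId(IdNamespace(1), 1);
    assert_ne!(doc, DocumentId::INVALID);
}
```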