--- a/gfx/webrender_bindings/src/moz2d_renderer.rs
+++ b/gfx/webrender_bindings/src/moz2d_renderer.rs
@@ -1,19 +1,22 @@
use webrender::api::*;
use bindings::{ByteSlice, MutByteSlice, wr_moz2d_render_cb, ArcVecU8};
use rayon::ThreadPool;
use std::collections::hash_map::HashMap;
use std::collections::hash_map;
+use std::collections::btree_map::BTreeMap;
+use std::collections::Bound::Included;
use std::mem;
use std::os::raw::c_void;
use std::ptr;
use std::sync::mpsc::{channel, Sender, Receiver};
use std::sync::Arc;
+use std;
#[cfg(target_os = "windows")]
use dwrote;
#[cfg(target_os = "macos")]
use foreign_types::ForeignType;
#[cfg(not(any(target_os = "macos", target_os = "windows")))]
@@ -191,17 +194,17 @@ impl BlobWriter {
self.data.extend_from_slice(&self.index);
self.data.extend_from_slice(convert_to_bytes(&index_begin));
self.data
}
}
// XXX: Do we want to allow negative values here or clamp to the image bounds?
-#[derive(Debug, Eq, PartialEq, Clone, Copy)]
+#[derive(Debug, Eq, PartialEq, Clone, Copy, Ord, PartialOrd)]
struct Box2d {
x1: u32,
y1: u32,
x2: u32,
y2: u32
}
impl Box2d {
@@ -237,68 +240,104 @@ fn check_result(result: &[u8]) -> () {
let mut index = BlobReader::new(result);
assert!(index.reader.has_more(), "Unexpectedly empty result. This blob should just have been deleted");
while index.reader.has_more() {
let e = index.read_entry();
dlog!("result bounds: {} {} {:?}", e.end, e.extra_end, e.bounds);
}
}
+// We use a BTreeMap as a kind of multi-map by appending an integer counter
+// (cache_index_counter) to each key. This lets us store multiple items with
+// matching bounds in the map and allows us to fetch and remove them while
+// retaining the ordering of the original list.
+
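+/// Reads the old blob's item index while caching any entries that are read
+/// out of order, so that they can later be matched up by bounds with entries
+/// in the new list.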
+struct CachedReader<'a> {
+ reader: BlobReader<'a>,
+ cache: BTreeMap<(Box2d, u32), Entry>,
+ cache_index_counter: u32
+}
+
+impl<'a> CachedReader<'a> {
+    fn new(buf: &'a [u8]) -> CachedReader<'a> {
+        CachedReader { reader: BlobReader::new(buf), cache: BTreeMap::new(), cache_index_counter: 0 }
+ }
+
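+    // Remove and return the oldest cached entry whose bounds match exactly,
+    // if there is one.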
+ fn take_entry_with_bounds_from_cache(&mut self, bounds: &Box2d) -> Option<Entry> {
+ if self.cache.is_empty() {
+ return None;
+ }
+
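+        // The second component of the key spans the full u32 range, so this
+        // finds the earliest-inserted cached entry with exactly these bounds.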
+        let key_to_delete = match self.cache.range((Included((*bounds, 0u32)), Included((*bounds, std::u32::MAX)))).next() {
+ Some((&key, _)) => key,
+ None => return None,
+ };
+
+ Some(self.cache.remove(&key_to_delete).expect("We just got this key from range, it needs to be present"))
+ }
+
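+    // Return the next old entry with the given bounds, taking it from the
+    // cache if possible. Otherwise scan forward through the reader, caching
+    // any skipped entries that are not fully contained in ignore_rect, since
+    // later items in the new list will still need them.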
+ fn next_entry_with_bounds(&mut self, bounds: &Box2d, ignore_rect: &Box2d) -> Entry {
+ if let Some(entry) = self.take_entry_with_bounds_from_cache(bounds) {
+ return entry;
+ }
+
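+        // Not in the cache yet: keep reading old entries until we find it.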
+ loop {
+ let old = self.reader.read_entry();
+ if old.bounds == *bounds {
+ return old;
+            } else if !old.bounds.contained_by(ignore_rect) {
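+                // This old entry corresponds to a later item in the new list;
+                // cache it so we can find it again by its bounds.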
+ self.cache.insert((old.bounds, self.cache_index_counter), old);
+ self.cache_index_counter += 1;
+ }
+ }
+ }
+}
+
/* Merge a new partial blob image into an existing complete blob image.
All of the items not fully contained in the dirty_rect should match
in both new and old lists.
We continue to use the old content for these items.
Old items contained in the dirty_rect are dropped and new items
are retained.
*/
fn merge_blob_images(old_buf: &[u8], new_buf: &[u8], dirty_rect: Box2d) -> Vec<u8> {
let mut result = BlobWriter::new();
dlog!("dirty rect: {:?}", dirty_rect);
dlog!("old:");
dump_blob_index(old_buf, dirty_rect);
dlog!("new:");
dump_blob_index(new_buf, dirty_rect);
- let mut old_reader = BlobReader::new(old_buf);
+ let mut old_reader = CachedReader::new(old_buf);
let mut new_reader = BlobReader::new(new_buf);
// Loop over both new and old entries merging them.
// Both new and old must have the same number of entries that
// overlap but are not contained by the dirty rect, and they
// must be in the same order.
while new_reader.reader.has_more() {
let new = new_reader.read_entry();
dlog!("bounds: {} {} {:?}", new.end, new.extra_end, new.bounds);
if new.bounds.contained_by(&dirty_rect) {
result.new_entry(new.extra_end - new.end, new.bounds, &new_buf[new.begin..new.extra_end]);
} else {
- loop {
- assert!(old_reader.reader.has_more());
- let old = old_reader.read_entry();
- dlog!("new bounds: {} {} {:?}", old.end, old.extra_end, old.bounds);
- if old.bounds.contained_by(&dirty_rect) {
- // fully contained items will be discarded or replaced
- } else {
- assert_eq!(old.bounds, new.bounds);
- // we found a matching item use the old data
- result.new_entry(old.extra_end - old.end, old.bounds, &old_buf[old.begin..old.extra_end]);
- break;
- }
- }
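+            // Reuse the old item's recorded drawing commands for this
+            // unchanged entry.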
+ let old = old_reader.next_entry_with_bounds(&new.bounds, &dirty_rect);
+            result.new_entry(old.extra_end - old.end, new.bounds, &old_buf[old.begin..old.extra_end]);
}
}
- // Include any remaining old items.
- while old_reader.reader.has_more() {
- let old = old_reader.read_entry();
+    // Check that every remaining old item is fully contained in the dirty
+    // rect, i.e. that it is safe to discard.
+ while old_reader.reader.reader.has_more() {
+ let old = old_reader.reader.read_entry();
dlog!("new bounds: {} {} {:?}", old.end, old.extra_end, old.bounds);
assert!(old.bounds.contained_by(&dirty_rect));
}
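+    // Everything cached while scanning the old list should have been claimed
+    // by a matching new item by now.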
+ assert!(old_reader.cache.is_empty());
+
let result = result.finish();
check_result(&result);
result
}
impl BlobImageRenderer for Moz2dImageRenderer {
fn add(&mut self, key: ImageKey, data: BlobImageData, tiling: Option<TileSize>) {
{