Bug 1450162. Use an Entry type instead of tuples. r=mstange
author Jeff Muizelaar <jmuizelaar@mozilla.com>
Wed, 04 Apr 2018 16:05:11 -0400
changeset 777646 6f967d0a02df35f59779aff445383b5f7dec26f4
parent 777645 f7be47711eb6e10c2ce970c6b9eb7a65059d682a
child 777647 fa80a4cb2098c8841321e9ae26cbf870048fca55
child 777648 6b235ed7c20163523e99fb7e930cbbe0937a9011
child 777705 3b6ec4f3984e90bfe97dd1c9bb6aa8e510b4e197
push id 105257
push user bmo:jmuizelaar@mozilla.com
push date Thu, 05 Apr 2018 02:07:04 +0000
reviewers mstange
bugs 1450162
milestone 61.0a1
Bug 1450162. Use an Entry type instead of tuples. r=mstange

This will make things a bit cleaner when interacting with the upcoming cache.
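The change replaces the positional (begin, end, extra_end, bounds) tuple returned by read_entry() with a named Entry struct, and since that local Entry now shadows the previously imported name, the HashMap entry API is referred to through the hash_map module instead. A minimal standalone sketch of the same pattern follows; the Rect type, the placeholder values, and the cache map are simplified stand-ins for illustration, not the actual moz2d_renderer.rs types.

    use std::collections::hash_map::{self, HashMap};

    // Simplified stand-in for the blob index's bounds type.
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Rect { x1: u32, y1: u32, x2: u32, y2: u32 }

    // Named fields instead of a (begin, end, extra_end, bounds) tuple:
    // call sites read `entry.extra_end` rather than `.2`, which stays
    // readable as more consumers (e.g. a cache) are added.
    struct Entry {
        bounds: Rect,
        begin: usize,
        end: usize,
        extra_end: usize,
    }

    fn read_entry() -> Entry {
        // Placeholder values; the real reader pulls these out of the blob index.
        Entry { bounds: Rect { x1: 0, y1: 0, x2: 8, y2: 8 }, begin: 0, end: 16, extra_end: 24 }
    }

    fn main() {
        let e = read_entry();
        println!("record bytes: {}..{}, extra: {}..{}, bounds: {:?}",
                 e.begin, e.end, e.end, e.extra_end, e.bounds);

        // Because the local `Entry` shadows the name, the map entry API is
        // reached through the `hash_map` module rather than a direct import.
        let mut cache: HashMap<u32, Vec<u8>> = HashMap::new();
        match cache.entry(1) {
            hash_map::Entry::Vacant(v) => { v.insert(vec![0u8; 4]); }
            hash_map::Entry::Occupied(mut o) => { o.get_mut().clear(); }
        }
    }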
gfx/webrender_bindings/src/moz2d_renderer.rs
--- a/gfx/webrender_bindings/src/moz2d_renderer.rs
+++ b/gfx/webrender_bindings/src/moz2d_renderer.rs
@@ -1,13 +1,14 @@
 use webrender::api::*;
 use bindings::{ByteSlice, MutByteSlice, wr_moz2d_render_cb, ArcVecU8};
 use rayon::ThreadPool;
 
-use std::collections::hash_map::{HashMap, Entry};
+use std::collections::hash_map::HashMap;
+use std::collections::hash_map;
 use std::mem;
 use std::os::raw::c_void;
 use std::ptr;
 use std::sync::mpsc::{channel, Sender, Receiver};
 use std::sync::Arc;
 
 #[cfg(target_os = "windows")]
 use dwrote;
@@ -126,30 +127,37 @@ impl<'a> BufReader<'a> {
  */
 
 
 struct BlobReader<'a> {
     reader: BufReader<'a>,
     begin: usize,
 }
 
+struct Entry {
+    bounds: Box2d,
+    begin: usize,
+    end: usize,
+    extra_end: usize,
+}
+
 impl<'a> BlobReader<'a> {
     fn new(buf: &'a[u8]) -> BlobReader<'a> {
         // The offset of the index is at the end of the buffer.
         let index_offset_pos = buf.len()-mem::size_of::<usize>();
         let index_offset = to_usize(&buf[index_offset_pos..]);
 
         BlobReader { reader: BufReader::new(&buf[index_offset..index_offset_pos]), begin: 0 }
     }
 
-    fn read_entry(&mut self) -> (usize, usize, usize, Box2d) {
+    fn read_entry(&mut self) -> Entry {
         let end = self.reader.read();
         let extra_end = self.reader.read();
         let bounds = self.reader.read();
-        let ret = (self.begin, end, extra_end, bounds);
+        let ret = Entry { begin: self.begin, end, extra_end, bounds };
         self.begin = extra_end;
         ret
     }
 }
 
 // This is used for writing new blob images.
 // In our case this is the result of merging an old one and a new one
 struct BlobWriter {
@@ -209,86 +217,86 @@ impl From<DeviceUintRect> for Box2d {
     fn from(rect: DeviceUintRect) -> Self {
         Box2d{ x1: rect.min_x(), y1: rect.min_y(), x2: rect.max_x(), y2: rect.max_y() }
     }
 }
 
 fn dump_blob_index(blob: &[u8], dirty_rect: Box2d) {
     let mut index = BlobReader::new(blob);
     while index.reader.has_more() {
-        let (_, _, _, bounds) = index.read_entry();
-        dlog!("  {:?} {}", bounds,
-                 if bounds.contained_by(&dirty_rect) {
+        let e = index.read_entry();
+        dlog!("  {:?} {}", e.bounds,
+                 if e.bounds.contained_by(&dirty_rect) {
                     "*"
                  } else {
                     ""
                  }
         );
     }
 }
 
 fn check_result(result: &[u8]) -> () {
     let mut index = BlobReader::new(result);
     assert!(index.reader.has_more(), "Unexpectedly empty result. This blob should just have been deleted");
     while index.reader.has_more() {
-        let (_, end, extra, bounds) = index.read_entry();
-        dlog!("result bounds: {} {} {:?}", end, extra, bounds);
+        let e = index.read_entry();
+        dlog!("result bounds: {} {} {:?}", e.end, e.extra_end, e.bounds);
     }
 }
 
 /* Merge a new partial blob image into an existing complete blob image.
    All of the items not fully contained in the dirty_rect should match
    in both new and old lists.
    We continue to use the old content for these items.
    Old items contained in the dirty_rect are dropped and new items
    are retained.
 */
-fn merge_blob_images(old: &[u8], new: &[u8], dirty_rect: Box2d) -> Vec<u8> {
+fn merge_blob_images(old_buf: &[u8], new_buf: &[u8], dirty_rect: Box2d) -> Vec<u8> {
 
     let mut result = BlobWriter::new();
     dlog!("dirty rect: {:?}", dirty_rect);
     dlog!("old:");
-    dump_blob_index(old, dirty_rect);
+    dump_blob_index(old_buf, dirty_rect);
     dlog!("new:");
-    dump_blob_index(new, dirty_rect);
+    dump_blob_index(new_buf, dirty_rect);
 
-    let mut old_reader = BlobReader::new(old);
-    let mut new_reader = BlobReader::new(new);
+    let mut old_reader = BlobReader::new(old_buf);
+    let mut new_reader = BlobReader::new(new_buf);
 
     // Loop over both new and old entries merging them.
     // Both new and old must have the same number of entries that
     // overlap but are not contained by the dirty rect, and they
     // must be in the same order.
     while new_reader.reader.has_more() {
-        let (new_begin, new_end, new_extra, new_bounds) = new_reader.read_entry();
-        dlog!("bounds: {} {} {:?}", new_end, new_extra, new_bounds);
-        if new_bounds.contained_by(&dirty_rect) {
-            result.new_entry(new_extra - new_end, new_bounds, &new[new_begin..new_extra]);
+        let new = new_reader.read_entry();
+        dlog!("bounds: {} {} {:?}", new.end, new.extra_end, new.bounds);
+        if new.bounds.contained_by(&dirty_rect) {
+            result.new_entry(new.extra_end - new.end, new.bounds, &new_buf[new.begin..new.extra_end]);
         } else {
             loop {
                 assert!(old_reader.reader.has_more());
-                let (old_begin, old_end, old_extra, old_bounds) = old_reader.read_entry();
-                dlog!("new bounds: {} {} {:?}", old_end, old_extra, old_bounds);
-                if old_bounds.contained_by(&dirty_rect) {
+                let old = old_reader.read_entry();
+                dlog!("new bounds: {} {} {:?}", old.end, old.extra_end, old.bounds);
+                if old.bounds.contained_by(&dirty_rect) {
                     // fully contained items will be discarded or replaced
                 } else {
-                    assert_eq!(old_bounds, new_bounds);
+                    assert_eq!(old.bounds, new.bounds);
                     // we found a matching item use the old data
-                    result.new_entry(old_extra - old_end, old_bounds, &old[old_begin..old_extra]);
+                    result.new_entry(old.extra_end - old.end, old.bounds, &old_buf[old.begin..old.extra_end]);
                     break;
                 }
             }
         }
     }
 
     // Include any remaining old items.
     while old_reader.reader.has_more() {
-        let (_, old_end, old_extra, old_bounds) = old_reader.read_entry();
-        dlog!("new bounds: {} {} {:?}", old_end, old_extra, old_bounds);
-        assert!(old_bounds.contained_by(&dirty_rect));
+        let old = old_reader.read_entry();
+        dlog!("new bounds: {} {} {:?}", old.end, old.extra_end, old.bounds);
+        assert!(old.bounds.contained_by(&dirty_rect));
     }
 
     let result = result.finish();
     check_result(&result);
     result
 }
 
 impl BlobImageRenderer for Moz2dImageRenderer {
@@ -297,17 +305,17 @@ impl BlobImageRenderer for Moz2dImageRen
             let index = BlobReader::new(&data);
             assert!(index.reader.has_more());
         }
         self.blob_commands.insert(key, (Arc::new(data), tiling));
     }
 
     fn update(&mut self, key: ImageKey, data: BlobImageData, dirty_rect: Option<DeviceUintRect>) {
         match self.blob_commands.entry(key) {
-            Entry::Occupied(mut e) => {
+            hash_map::Entry::Occupied(mut e) => {
                 let old_data = &mut e.get_mut().0;
                 *old_data = Arc::new(merge_blob_images(&old_data, &data,
                                                        dirty_rect.unwrap().into()));
             }
             _ => { panic!("missing image key"); }
         }
     }
 
@@ -369,18 +377,18 @@ impl BlobImageRenderer for Moz2dImageRen
                 }
                 resources.get_font_data(key);
             }
         }
         {
             let mut index = BlobReader::new(&commands);
             assert!(index.reader.pos < index.reader.buf.len());
             while index.reader.pos < index.reader.buf.len() {
-                let (_, end, extra_end, _)  = index.read_entry();
-                process_fonts(BufReader::new(&commands[end..extra_end]), resources);
+                let e = index.read_entry();
+                process_fonts(BufReader::new(&commands[e.end..e.extra_end]), resources);
             }
         }
 
         self.workers.spawn(move || {
             let buf_size = (descriptor.width
                 * descriptor.height
                 * descriptor.format.bytes_per_pixel()) as usize;
             let mut output = vec![0u8; buf_size];
@@ -408,20 +416,20 @@ impl BlobImageRenderer for Moz2dImageRen
 
             tx.send((request, result)).unwrap();
         });
     }
 
     fn resolve(&mut self, request: BlobImageRequest) -> BlobImageResult {
 
         match self.rendered_images.entry(request) {
-            Entry::Vacant(_) => {
+            hash_map::Entry::Vacant(_) => {
                 return Err(BlobImageError::InvalidKey);
             }
-            Entry::Occupied(entry) => {
+            hash_map::Entry::Occupied(entry) => {
                 // None means we haven't yet received the result.
                 if entry.get().is_some() {
                     let result = entry.remove();
                     return result.unwrap();
                 }
             }
         }