Bug 540464. pixman: update to 7862f9b96e8e8456cc60852790c7f244a5e3425e
author       Jeff Muizelaar <jmuizelaar@mozilla.com>
date         Wed, 20 Jan 2010 15:08:05 -0500
changeset    37355  0af6ca3135ca8ccc4c785633c774d22dfaa30f3a
parent       37354  02135cbe7432866204f9887786f1e9c1c040079b
child        37356  76a717c52203af80067a95685da46ebd9d49e8f6
push id      11250
push user    jmuizelaar@mozilla.com
push date    Wed, 20 Jan 2010 20:09:04 +0000
treeherder   mozilla-central@0af6ca3135ca
bugs         540464
milestone    1.9.3a1pre
Bug 540464. pixman: update to 7862f9b96e8e8456cc60852790c7f244a5e3425e

This is a substantial cleanup of pixman and could break things.
gfx/cairo/libpixman/src/Makefile.in
gfx/cairo/libpixman/src/pixman-access.c
gfx/cairo/libpixman/src/pixman-accessor.h
gfx/cairo/libpixman/src/pixman-arm-neon-asm.S
gfx/cairo/libpixman/src/pixman-arm-neon-asm.h
gfx/cairo/libpixman/src/pixman-arm-neon.c
gfx/cairo/libpixman/src/pixman-arm-simd-asm.c
gfx/cairo/libpixman/src/pixman-arm-simd-asm.h
gfx/cairo/libpixman/src/pixman-arm-simd.c
gfx/cairo/libpixman/src/pixman-bits-image.c
gfx/cairo/libpixman/src/pixman-combine32.c
gfx/cairo/libpixman/src/pixman-combine32.h
gfx/cairo/libpixman/src/pixman-combine64.c
gfx/cairo/libpixman/src/pixman-combine64.h
gfx/cairo/libpixman/src/pixman-compiler.h
gfx/cairo/libpixman/src/pixman-conical-gradient.c
gfx/cairo/libpixman/src/pixman-cpu.c
gfx/cairo/libpixman/src/pixman-edge-imp.h
gfx/cairo/libpixman/src/pixman-edge.c
gfx/cairo/libpixman/src/pixman-fast-path.c
gfx/cairo/libpixman/src/pixman-general.c
gfx/cairo/libpixman/src/pixman-gradient-walker.c
gfx/cairo/libpixman/src/pixman-image.c
gfx/cairo/libpixman/src/pixman-implementation.c
gfx/cairo/libpixman/src/pixman-linear-gradient.c
gfx/cairo/libpixman/src/pixman-matrix.c
gfx/cairo/libpixman/src/pixman-mmx.c
gfx/cairo/libpixman/src/pixman-private.h
gfx/cairo/libpixman/src/pixman-radial-gradient.c
gfx/cairo/libpixman/src/pixman-region.c
gfx/cairo/libpixman/src/pixman-region16.c
gfx/cairo/libpixman/src/pixman-region32.c
gfx/cairo/libpixman/src/pixman-solid-fill.c
gfx/cairo/libpixman/src/pixman-sse2.c
gfx/cairo/libpixman/src/pixman-timer.c
gfx/cairo/libpixman/src/pixman-trap.c
gfx/cairo/libpixman/src/pixman-utils.c
gfx/cairo/libpixman/src/pixman-version.h
gfx/cairo/libpixman/src/pixman-vmx.c
gfx/cairo/libpixman/src/pixman-wce-arm-simd.asm
gfx/cairo/libpixman/src/pixman-x64-mmx-emulation.h
gfx/cairo/libpixman/src/pixman.c
gfx/cairo/libpixman/src/pixman.h
--- a/gfx/cairo/libpixman/src/Makefile.in
+++ b/gfx/cairo/libpixman/src/Makefile.in
@@ -98,31 +98,35 @@ endif
 endif
 
 endif
 
 
 CSRCS	= \
 	pixman-access.c \
 	pixman-access-accessors.c \
+	pixman-bits-image.c \
+	pixman.c \
 	pixman-combine32.c \
 	pixman-combine64.c \
-	pixman-compose.c \
-	pixman-compose-accessors.c \
-	pixman-compute-region.c \
+	pixman-conical-gradient.c \
+	pixman-cpu.c \
 	pixman-edge.c \
 	pixman-edge-accessors.c \
+	pixman-fast-path.c \
+	pixman-general.c \
+	pixman-gradient-walker.c \
 	pixman-image.c \
+	pixman-implementation.c \
+	pixman-linear-gradient.c \
 	pixman-matrix.c \
-	pixman-pict.c \
+	pixman-radial-gradient.c \
 	pixman-region16.c \
 	pixman-region32.c \
-	pixman-source.c \
-	pixman-transformed.c \
-	pixman-transformed-accessors.c \
+	pixman-solid-fill.c \
 	pixman-trap.c \
 	pixman-utils.c \
 	$(NULL)
 
 ifdef USE_MMX
 CSRCS += pixman-mmx.c
 DEFINES += -DUSE_MMX
 endif
@@ -133,28 +137,29 @@ DEFINES += -DUSE_SSE -DUSE_SSE2
 endif
 
 ifdef USE_VMX
 CSRCS += pixman-vmx.c
 DEFINES += -DUSE_VMX
 endif
 
 ifdef USE_ARM_SIMD_GCC
-CSRCS += pixman-arm-simd.c
+CSRCS += pixman-arm-simd.c pixman-arm-simd-asm.c
 DEFINES += -DUSE_ARM_SIMD
 endif
 
 ifdef USE_ARM_NEON_GCC
 CSRCS += pixman-arm-neon.c
 DEFINES += -DUSE_ARM_NEON
 ARM_NEON_CFLAGS = -mfloat-abi=softfp -mfpu=neon
 endif
 
 ifdef USE_ARM_SIMD_MSVC
 ASFILES += pixman-arm-detect-win32.asm pixman-wce-arm-simd.asm
+CSRCS += pixman-arm-simd.c
 DEFINES += -DUSE_ARM_SIMD
 AS_DASH_C_FLAG = 
 endif
 
 EXPORTS		= pixman.h pixman-version.h
 
 LOCAL_INCLUDES	+= -I$(srcdir) -I$(srcdir)/../../cairo/src
 
--- a/gfx/cairo/libpixman/src/pixman-access.c
+++ b/gfx/cairo/libpixman/src/pixman-access.c
@@ -28,1959 +28,2869 @@
 #include <config.h>
 #endif
 
 #include <stdlib.h>
 #include <string.h>
 #include <assert.h>
 
 #include "pixman-private.h"
+#include "pixman-accessor.h"
 
-#define Red(x) (((x) >> 16) & 0xff)
-#define Green(x) (((x) >> 8) & 0xff)
-#define Blue(x) ((x) & 0xff)
+#define CONVERT_RGB24_TO_Y15(s)						\
+    (((((s) >> 16) & 0xff) * 153 +					\
+      (((s) >>  8) & 0xff) * 301 +					\
+      (((s)      ) & 0xff) * 58) >> 2)
+
+#define CONVERT_RGB24_TO_RGB15(s)                                       \
+    ((((s) >> 3) & 0x001f) |                                            \
+     (((s) >> 6) & 0x03e0) |                                            \
+     (((s) >> 9) & 0x7c00))
+
+#define RGB15_TO_ENTRY(mif,rgb15)					\
+    ((mif)->ent[rgb15])
+
+#define RGB24_TO_ENTRY(mif,rgb24)					\
+    RGB15_TO_ENTRY (mif,CONVERT_RGB24_TO_RGB15 (rgb24))
+
+#define RGB24_TO_ENTRY_Y(mif,rgb24)					\
+    ((mif)->ent[CONVERT_RGB24_TO_Y15 (rgb24)])
 
 /*
  * YV12 setup and access macros
  */
 
-#define YV12_SETUP(pict) \
-	uint32_t *bits = pict->bits; \
-	int stride = pict->rowstride; \
-	int offset0 = stride < 0 ? \
-		((-stride) >> 1) * ((pict->height - 1) >> 1) - stride : \
-		stride * pict->height; \
-	int offset1 = stride < 0 ? \
-		offset0 + ((-stride) >> 1) * ((pict->height) >> 1) : \
-		offset0 + (offset0 >> 2)
+#define YV12_SETUP(image)                                               \
+    bits_image_t *__bits_image = (bits_image_t *)image;                 \
+    uint32_t *bits = __bits_image->bits;                                \
+    int stride = __bits_image->rowstride;                               \
+    int offset0 = stride < 0 ?                                          \
+    ((-stride) >> 1) * ((__bits_image->height - 1) >> 1) - stride :	\
+    stride * __bits_image->height;					\
+    int offset1 = stride < 0 ?                                          \
+    offset0 + ((-stride) >> 1) * ((__bits_image->height) >> 1) :	\
+	offset0 + (offset0 >> 2)
+
 /* Note no trailing semicolon on the above macro; if it's there, then
- * the typical usage of YV12_SETUP(pict); will have an extra trailing ;
+ * the typical usage of YV12_SETUP(image); will have an extra trailing ;
  * that some compilers will interpret as a statement -- and then any further
  * variable declarations will cause an error.
  */
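
A minimal sketch of the pitfall this note describes (illustrative only, not part of the patch; SETUP and example are made-up names). With a trailing semicolon inside the macro, the usual call site expands to an empty statement between declarations, which C89-style compilers reject:

    #define SETUP()  int first = 1;   /* hypothetical macro WITH a trailing ';' */

    void
    example (void)
    {
        SETUP ();               /* expands to "int first = 1;;" -- the second ';'
                                 * is an empty statement */
        int second = first;     /* C89: declaration after a statement -> error */
        (void) second;
    }

Leaving the semicolon out of the macro keeps the expansion a single declaration, so the caller's own ';' terminates it and any later declarations remain legal.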
 
-#define YV12_Y(line)		\
+#define YV12_Y(line)                                                    \
     ((uint8_t *) ((bits) + (stride) * (line)))
 
-#define YV12_U(line)	      \
-    ((uint8_t *) ((bits) + offset1 + \
-		((stride) >> 1) * ((line) >> 1)))
+#define YV12_U(line)                                                    \
+    ((uint8_t *) ((bits) + offset1 +                                    \
+                  ((stride) >> 1) * ((line) >> 1)))
 
-#define YV12_V(line)	      \
-    ((uint8_t *) ((bits) + offset0 + \
-		((stride) >> 1) * ((line) >> 1)))
+#define YV12_V(line)                                                    \
+    ((uint8_t *) ((bits) + offset0 +                                    \
+                  ((stride) >> 1) * ((line) >> 1)))
+
+/********************************** Fetch ************************************/
 
-/*********************************** Fetch ************************************/
-
-static FASTCALL void
-fbFetch_a8r8g8b8 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_a8r8g8b8 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
-    MEMCPY_WRAPPED(pict,
-                   buffer, (const uint32_t *)bits + x,
-		   width*sizeof(uint32_t));
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
+    
+    MEMCPY_WRAPPED (image,
+                    buffer, (const uint32_t *)bits + x,
+                    width * sizeof(uint32_t));
 }
 
-static FASTCALL void
-fbFetch_x8r8g8b8 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_x8r8g8b8 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint32_t *pixel = (const uint32_t *)bits + x;
     const uint32_t *end = pixel + width;
-    while (pixel < end) {
-	*buffer++ = READ(pict, pixel++) | 0xff000000;
+    
+    while (pixel < end)
+	*buffer++ = READ (image, pixel++) | 0xff000000;
+}
+
+static void
+fetch_scanline_a8b8g8r8 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
+{
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
+    const uint32_t *pixel = (uint32_t *)bits + x;
+    const uint32_t *end = pixel + width;
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	
+	*buffer++ = (p & 0xff00ff00)	|
+	    ((p >> 16) & 0xff)		|
+	    ((p & 0xff) << 16);
     }
 }
 
-static FASTCALL void
-fbFetch_a8b8g8r8 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_x8b8g8r8 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint32_t *pixel = (uint32_t *)bits + x;
     const uint32_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t p = READ(pict, pixel++);
-	*buffer++ = (p & 0xff00ff00) |
-	            ((p >> 16) & 0xff) |
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	
+	*buffer++ = 0xff000000		|
+	    (p & 0x0000ff00)		|
+	    ((p >> 16) & 0xff)		|
 	    ((p & 0xff) << 16);
     }
 }
 
-static FASTCALL void
-fbFetch_x8b8g8r8 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_b8g8r8a8 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
+    const uint32_t *pixel = (uint32_t *)bits + x;
+    const uint32_t *end = pixel + width;
+
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+
+	*buffer++ = (((p & 0xff000000) >> 24)	|
+	             ((p & 0x00ff0000) >> 8)	|
+	             ((p & 0x0000ff00) << 8)	|
+	             ((p & 0x000000ff) << 24));
+    }
+}
+
+static void
+fetch_scanline_b8g8r8x8 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
+{
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint32_t *pixel = (uint32_t *)bits + x;
     const uint32_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t p = READ(pict, pixel++);
-	*buffer++ = 0xff000000 |
-	    (p & 0x0000ff00) |
-	    ((p >> 16) & 0xff) |
-	    ((p & 0xff) << 16);
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	
+	*buffer++ = (0xff000000 |
+	             ((p & 0xff000000) >> 24)	|
+	             ((p & 0x00ff0000) >> 8)	|
+	             ((p & 0x0000ff00) << 8));
+    }
+}
+
+/* Expects a uint64_t buffer */
+static void
+fetch_scanline_a2r10g10b10 (pixman_image_t *image,
+                            int             x,
+                            int             y,
+                            int             width,
+                            uint32_t *      b,
+                            const uint32_t *mask,
+                            uint32_t        mask_bits)
+{
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
+    const uint32_t *pixel = bits + x;
+    const uint32_t *end = pixel + width;
+    uint64_t *buffer = (uint64_t *)b;
+
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint64_t a = p >> 30;
+	uint64_t r = (p >> 20) & 0x3ff;
+	uint64_t g = (p >> 10) & 0x3ff;
+	uint64_t b = p & 0x3ff;
+
+	r = r << 6 | r >> 4;
+	g = g << 6 | g >> 4;
+	b = b << 6 | b >> 4;
+
+	a <<= 14;
+	a |= a >> 2;
+	a |= a >> 4;
+	a |= a >> 8;
+
+	*buffer++ = a << 48 | r << 32 | g << 16 | b;
     }
 }
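
As a worked example (editor's illustration, not from the patch) of the channel widening used by the wide fetchers above: each 10-bit channel is expanded to 16 bits with c << 6 | c >> 4, and the 2-bit alpha is replicated out to 16 bits:

    c = 0x3ff:  (0x3ff << 6) | (0x3ff >> 4) = 0xffc0 | 0x003f = 0xffff   (full scale stays full scale)
    c = 0x200:  (0x200 << 6) | (0x200 >> 4) = 0x8000 | 0x0020 = 0x8020   (roughly half of 0xffff)
    a = 0x3:    0x3 << 14 = 0xc000; OR-ing in a >> 2, a >> 4 and a >> 8 fills it out to 0xffff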
 
-static FASTCALL void
-fbFetch_a2b10g10r10 (bits_image_t *pict, int x, int y, int width, uint64_t *buffer)
+/* Expects a uint64_t buffer */
+static void
+fetch_scanline_x2r10g10b10 (pixman_image_t *image,
+                            int             x,
+                            int             y,
+                            int             width,
+                            uint32_t *      b,
+                            const uint32_t *mask,
+                            uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
-    const uint32_t *pixel = bits + x;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
+    const uint32_t *pixel = (uint32_t *)bits + x;
     const uint32_t *end = pixel + width;
-    while (pixel < end) {
-        uint32_t p = READ(pict, pixel++);
-        uint64_t a = p >> 30;
-        uint64_t b = (p >> 20) & 0x3ff;
-        uint64_t g = (p >> 10) & 0x3ff;
-        uint64_t r = p & 0x3ff;
-
-        r = r << 6 | r >> 4;
-        g = g << 6 | g >> 4;
-        b = b << 6 | b >> 4;
-
-        a <<= 62;
-        a |= a >> 2;
-        a |= a >> 4;
-        a |= a >> 8;
-
-        *buffer++ = a << 48 | r << 32 | g << 16 | b;
+    uint64_t *buffer = (uint64_t *)b;
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint64_t r = (p >> 20) & 0x3ff;
+	uint64_t g = (p >> 10) & 0x3ff;
+	uint64_t b = p & 0x3ff;
+	
+	r = r << 6 | r >> 4;
+	g = g << 6 | g >> 4;
+	b = b << 6 | b >> 4;
+	
+	*buffer++ = 0xffffULL << 48 | r << 32 | g << 16 | b;
     }
 }
 
-static FASTCALL void
-fbFetch_x2b10g10r10 (bits_image_t *pict, int x, int y, int width, uint64_t *buffer)
+/* Expects a uint64_t buffer */
+static void
+fetch_scanline_a2b10g10r10 (pixman_image_t *image,
+                            int             x,
+                            int             y,
+                            int             width,
+                            uint32_t *      b,
+                            const uint32_t *mask,
+                            uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
-    const uint32_t *pixel = (uint32_t *)bits + x;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
+    const uint32_t *pixel = bits + x;
     const uint32_t *end = pixel + width;
-    while (pixel < end) {
-        uint32_t p = READ(pict, pixel++);
-        uint64_t b = (p >> 20) & 0x3ff;
-        uint64_t g = (p >> 10) & 0x3ff;
-        uint64_t r = p & 0x3ff;
+    uint64_t *buffer = (uint64_t *)b;
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint64_t a = p >> 30;
+	uint64_t b = (p >> 20) & 0x3ff;
+	uint64_t g = (p >> 10) & 0x3ff;
+	uint64_t r = p & 0x3ff;
+	
+	r = r << 6 | r >> 4;
+	g = g << 6 | g >> 4;
+	b = b << 6 | b >> 4;
+	
+	a <<= 14;
+	a |= a >> 2;
+	a |= a >> 4;
+	a |= a >> 8;
 
-        r = r << 6 | r >> 4;
-        g = g << 6 | g >> 4;
-        b = b << 6 | b >> 4;
-
-        *buffer++ = 0xffffULL << 48 | r << 32 | g << 16 | b;
+	*buffer++ = a << 48 | r << 32 | g << 16 | b;
     }
 }
 
-static FASTCALL void
-fbFetch_r8g8b8 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+/* Expects a uint64_t buffer */
+static void
+fetch_scanline_x2b10g10r10 (pixman_image_t *image,
+                            int             x,
+                            int             y,
+                            int             width,
+                            uint32_t *      b,
+                            const uint32_t *mask,
+                            uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
-    const uint8_t *pixel = (const uint8_t *)bits + 3*x;
-    const uint8_t *end = pixel + 3*width;
-    while (pixel < end) {
-	uint32_t b = Fetch24(pict, pixel) | 0xff000000;
-	pixel += 3;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
+    const uint32_t *pixel = (uint32_t *)bits + x;
+    const uint32_t *end = pixel + width;
+    uint64_t *buffer = (uint64_t *)b;
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint64_t b = (p >> 20) & 0x3ff;
+	uint64_t g = (p >> 10) & 0x3ff;
+	uint64_t r = p & 0x3ff;
+	
+	r = r << 6 | r >> 4;
+	g = g << 6 | g >> 4;
+	b = b << 6 | b >> 4;
+	
+	*buffer++ = 0xffffULL << 48 | r << 32 | g << 16 | b;
+    }
+}
+
+static void
+fetch_scanline_r8g8b8 (pixman_image_t *image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       uint32_t *      buffer,
+                       const uint32_t *mask,
+                       uint32_t        mask_bits)
+{
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
+    const uint8_t *pixel = (const uint8_t *)bits + 3 * x;
+    const uint8_t *end = pixel + 3 * width;
+    
+    while (pixel < end)
+    {
+	uint32_t b = 0xff000000;
+	
+#ifdef WORDS_BIGENDIAN
+	b |= (READ (image, pixel++) << 16);
+	b |= (READ (image, pixel++) << 8);
+	b |= (READ (image, pixel++));
+#else
+	b |= (READ (image, pixel++));
+	b |= (READ (image, pixel++) << 8);
+	b |= (READ (image, pixel++) << 16);
+#endif
+	
 	*buffer++ = b;
     }
 }
 
-static FASTCALL void
-fbFetch_b8g8r8 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_b8g8r8 (pixman_image_t *image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       uint32_t *      buffer,
+                       const uint32_t *mask,
+                       uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
-    const uint8_t *pixel = (const uint8_t *)bits + 3*x;
-    const uint8_t *end = pixel + 3*width;
-    while (pixel < end) {
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
+    const uint8_t *pixel = (const uint8_t *)bits + 3 * x;
+    const uint8_t *end = pixel + 3 * width;
+    
+    while (pixel < end)
+    {
 	uint32_t b = 0xff000000;
-#if IMAGE_BYTE_ORDER == MSBFirst
-	b |= (READ(pict, pixel++));
-	b |= (READ(pict, pixel++) << 8);
-	b |= (READ(pict, pixel++) << 16);
+#ifdef WORDS_BIGENDIAN
+	b |= (READ (image, pixel++));
+	b |= (READ (image, pixel++) << 8);
+	b |= (READ (image, pixel++) << 16);
 #else
-	b |= (READ(pict, pixel++) << 16);
-	b |= (READ(pict, pixel++) << 8);
-	b |= (READ(pict, pixel++));
+	b |= (READ (image, pixel++) << 16);
+	b |= (READ (image, pixel++) << 8);
+	b |= (READ (image, pixel++));
 #endif
 	*buffer++ = b;
     }
 }
 
-static FASTCALL void
-fbFetch_r5g6b5 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_r5g6b5 (pixman_image_t *image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       uint32_t *      buffer,
+                       const uint32_t *mask,
+                       uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint16_t *pixel = (const uint16_t *)bits + x;
     const uint16_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t p = READ(pict, pixel++);
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
 	uint32_t r = (((p) << 3) & 0xf8) |
 	    (((p) << 5) & 0xfc00) |
 	    (((p) << 8) & 0xf80000);
+	
 	r |= (r >> 5) & 0x70007;
 	r |= (r >> 6) & 0x300;
+	
 	*buffer++ = 0xff000000 | r;
     }
 }
 
-static FASTCALL void
-fbFetch_b5g6r5 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_b5g6r5 (pixman_image_t *image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       uint32_t *      buffer,
+                       const uint32_t *mask,
+                       uint32_t        mask_bits)
 {
-    uint32_t  r,g,b;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint16_t *pixel = (const uint16_t *)bits + x;
     const uint16_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t  p = READ(pict, pixel++);
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint32_t r, g, b;
+	
 	b = ((p & 0xf800) | ((p & 0xe000) >> 5)) >> 8;
 	g = ((p & 0x07e0) | ((p & 0x0600) >> 6)) << 5;
 	r = ((p & 0x001c) | ((p & 0x001f) << 5)) << 14;
+	
 	*buffer++ = 0xff000000 | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_a1r5g5b5 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_a1r5g5b5 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    uint32_t  r,g,b, a;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint16_t *pixel = (const uint16_t *)bits + x;
     const uint16_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t  p = READ(pict, pixel++);
-
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint32_t r, g, b, a;
+	
 	a = (uint32_t) ((uint8_t) (0 - ((p & 0x8000) >> 15))) << 24;
 	r = ((p & 0x7c00) | ((p & 0x7000) >> 5)) << 9;
 	g = ((p & 0x03e0) | ((p & 0x0380) >> 5)) << 6;
 	b = ((p & 0x001c) | ((p & 0x001f) << 5)) >> 2;
+	
 	*buffer++ = a | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_x1r5g5b5 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_x1r5g5b5 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    uint32_t  r,g,b;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint16_t *pixel = (const uint16_t *)bits + x;
     const uint16_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t  p = READ(pict, pixel++);
-
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint32_t r, g, b;
+	
 	r = ((p & 0x7c00) | ((p & 0x7000) >> 5)) << 9;
 	g = ((p & 0x03e0) | ((p & 0x0380) >> 5)) << 6;
 	b = ((p & 0x001c) | ((p & 0x001f) << 5)) >> 2;
+	
 	*buffer++ = 0xff000000 | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_a1b5g5r5 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_a1b5g5r5 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    uint32_t  r,g,b, a;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint16_t *pixel = (const uint16_t *)bits + x;
     const uint16_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t  p = READ(pict, pixel++);
-
+    uint32_t r, g, b, a;
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	
 	a = (uint32_t) ((uint8_t) (0 - ((p & 0x8000) >> 15))) << 24;
 	b = ((p & 0x7c00) | ((p & 0x7000) >> 5)) >> 7;
 	g = ((p & 0x03e0) | ((p & 0x0380) >> 5)) << 6;
 	r = ((p & 0x001c) | ((p & 0x001f) << 5)) << 14;
+	
 	*buffer++ = a | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_x1b5g5r5 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_x1b5g5r5 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    uint32_t  r,g,b;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint16_t *pixel = (const uint16_t *)bits + x;
     const uint16_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t  p = READ(pict, pixel++);
-
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint32_t r, g, b;
+	
 	b = ((p & 0x7c00) | ((p & 0x7000) >> 5)) >> 7;
 	g = ((p & 0x03e0) | ((p & 0x0380) >> 5)) << 6;
 	r = ((p & 0x001c) | ((p & 0x001f) << 5)) << 14;
+	
 	*buffer++ = 0xff000000 | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_a4r4g4b4 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_a4r4g4b4 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    uint32_t  r,g,b, a;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint16_t *pixel = (const uint16_t *)bits + x;
     const uint16_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t  p = READ(pict, pixel++);
-
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint32_t r, g, b, a;
+	
 	a = ((p & 0xf000) | ((p & 0xf000) >> 4)) << 16;
 	r = ((p & 0x0f00) | ((p & 0x0f00) >> 4)) << 12;
 	g = ((p & 0x00f0) | ((p & 0x00f0) >> 4)) << 8;
 	b = ((p & 0x000f) | ((p & 0x000f) << 4));
+	
 	*buffer++ = a | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_x4r4g4b4 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_x4r4g4b4 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    uint32_t  r,g,b;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint16_t *pixel = (const uint16_t *)bits + x;
     const uint16_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t  p = READ(pict, pixel++);
-
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint32_t r, g, b;
+	
 	r = ((p & 0x0f00) | ((p & 0x0f00) >> 4)) << 12;
 	g = ((p & 0x00f0) | ((p & 0x00f0) >> 4)) << 8;
 	b = ((p & 0x000f) | ((p & 0x000f) << 4));
+	
 	*buffer++ = 0xff000000 | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_a4b4g4r4 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_a4b4g4r4 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    uint32_t  r,g,b, a;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint16_t *pixel = (const uint16_t *)bits + x;
     const uint16_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t  p = READ(pict, pixel++);
-
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint32_t r, g, b, a;
+	
 	a = ((p & 0xf000) | ((p & 0xf000) >> 4)) << 16;
 	b = ((p & 0x0f00) | ((p & 0x0f00) >> 4)) >> 4;
 	g = ((p & 0x00f0) | ((p & 0x00f0) >> 4)) << 8;
 	r = ((p & 0x000f) | ((p & 0x000f) << 4)) << 16;
+	
 	*buffer++ = a | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_x4b4g4r4 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_x4b4g4r4 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    uint32_t  r,g,b;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint16_t *pixel = (const uint16_t *)bits + x;
     const uint16_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t  p = READ(pict, pixel++);
-
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint32_t r, g, b;
+	
 	b = ((p & 0x0f00) | ((p & 0x0f00) >> 4)) >> 4;
 	g = ((p & 0x00f0) | ((p & 0x00f0) >> 4)) << 8;
 	r = ((p & 0x000f) | ((p & 0x000f) << 4)) << 16;
+	
 	*buffer++ = 0xff000000 | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_a8 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_a8 (pixman_image_t *image,
+                   int             x,
+                   int             y,
+                   int             width,
+                   uint32_t *      buffer,
+                   const uint32_t *mask,
+                   uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint8_t *pixel = (const uint8_t *)bits + x;
     const uint8_t *end = pixel + width;
-    while (pixel < end) {
-	*buffer++ = READ(pict, pixel++) << 24;
-    }
+    
+    while (pixel < end)
+	*buffer++ = READ (image, pixel++) << 24;
 }
 
-static FASTCALL void
-fbFetch_r3g3b2 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_r3g3b2 (pixman_image_t *image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       uint32_t *      buffer,
+                       const uint32_t *mask,
+                       uint32_t        mask_bits)
 {
-    uint32_t  r,g,b;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint8_t *pixel = (const uint8_t *)bits + x;
     const uint8_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t  p = READ(pict, pixel++);
-
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint32_t r, g, b;
+	
 	r = ((p & 0xe0) | ((p & 0xe0) >> 3) | ((p & 0xc0) >> 6)) << 16;
 	g = ((p & 0x1c) | ((p & 0x18) >> 3) | ((p & 0x1c) << 3)) << 8;
 	b = (((p & 0x03)     ) |
 	     ((p & 0x03) << 2) |
 	     ((p & 0x03) << 4) |
 	     ((p & 0x03) << 6));
+	
 	*buffer++ = 0xff000000 | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_b2g3r3 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_b2g3r3 (pixman_image_t *image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       uint32_t *      buffer,
+                       const uint32_t *mask,
+                       uint32_t        mask_bits)
 {
-    uint32_t  r,g,b;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint8_t *pixel = (const uint8_t *)bits + x;
     const uint8_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t  p = READ(pict, pixel++);
+
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint32_t r, g, b;
+
+	b  = p & 0xc0;
+	b |= b >> 2;
+	b |= b >> 4;
+	b &= 0xff;
 
-	b = (((p & 0xc0)     ) |
-	     ((p & 0xc0) >> 2) |
-	     ((p & 0xc0) >> 4) |
-	     ((p & 0xc0) >> 6));
-	g = ((p & 0x38) | ((p & 0x38) >> 3) | ((p & 0x30) << 2)) << 8;
-	r = (((p & 0x07)     ) |
-	     ((p & 0x07) << 3) |
-	     ((p & 0x06) << 6)) << 16;
+	g  = (p & 0x38) << 10;
+	g |= g >> 3;
+	g |= g >> 6;
+	g &= 0xff00;
+
+	r  = (p & 0x7) << 21;
+	r |= r >> 3;
+	r |= r >> 6;
+	r &= 0xff0000;
+
 	*buffer++ = 0xff000000 | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_a2r2g2b2 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_a2r2g2b2 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    uint32_t   a,r,g,b;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint8_t *pixel = (const uint8_t *)bits + x;
     const uint8_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t  p = READ(pict, pixel++);
-
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint32_t a, r, g, b;
+	
 	a = ((p & 0xc0) * 0x55) << 18;
 	r = ((p & 0x30) * 0x55) << 12;
 	g = ((p & 0x0c) * 0x55) << 6;
 	b = ((p & 0x03) * 0x55);
-	*buffer++ = a|r|g|b;
+	
+	*buffer++ = a | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_a2b2g2r2 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_a2b2g2r2 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    uint32_t   a,r,g,b;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint8_t *pixel = (const uint8_t *)bits + x;
     const uint8_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t  p = READ(pict, pixel++);
-
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	uint32_t a, r, g, b;
+	
 	a = ((p & 0xc0) * 0x55) << 18;
-	b = ((p & 0x30) * 0x55) >> 6;
+	b = ((p & 0x30) * 0x55) >> 4;
 	g = ((p & 0x0c) * 0x55) << 6;
 	r = ((p & 0x03) * 0x55) << 16;
-	*buffer++ = a|r|g|b;
+	
+	*buffer++ = a | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_c8 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_c8 (pixman_image_t *image,
+                   int             x,
+                   int             y,
+                   int             width,
+                   uint32_t *      buffer,
+                   const uint32_t *mask,
+                   uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
-    const pixman_indexed_t * indexed = pict->indexed;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
+    const pixman_indexed_t * indexed = image->bits.indexed;
     const uint8_t *pixel = (const uint8_t *)bits + x;
     const uint8_t *end = pixel + width;
-    while (pixel < end) {
-	uint32_t  p = READ(pict, pixel++);
+    
+    while (pixel < end)
+    {
+	uint32_t p = READ (image, pixel++);
+	
 	*buffer++ = indexed->rgba[p];
     }
 }
 
-static FASTCALL void
-fbFetch_x4a4 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_x4a4 (pixman_image_t *image,
+                     int             x,
+                     int             y,
+                     int             width,
+                     uint32_t *      buffer,
+                     const uint32_t *mask,
+                     uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     const uint8_t *pixel = (const uint8_t *)bits + x;
     const uint8_t *end = pixel + width;
-    while (pixel < end) {
-	uint8_t p = READ(pict, pixel++) & 0xf;
+   
+    while (pixel < end)
+    {
+	uint8_t p = READ (image, pixel++) & 0xf;
+
 	*buffer++ = (p | (p << 4)) << 24;
     }
 }
 
-#define Fetch8(img,l,o)    (READ(img, (uint8_t *)(l) + ((o) >> 2)))
-#if IMAGE_BYTE_ORDER == MSBFirst
-#define Fetch4(img,l,o)    ((o) & 2 ? Fetch8(img,l,o) & 0xf : Fetch8(img,l,o) >> 4)
+#define FETCH_8(img,l,o)    (READ (img, (((uint8_t *)(l)) + ((o) >> 3))))
+#ifdef WORDS_BIGENDIAN
+#define FETCH_4(img,l,o)						\
+    (((4 * (o)) & 4) ? (FETCH_8 (img,l, 4 * (o)) & 0xf) : (FETCH_8 (img,l,(4 * (o))) >> 4))
 #else
-#define Fetch4(img,l,o)    ((o) & 2 ? Fetch8(img,l,o) >> 4 : Fetch8(img,l,o) & 0xf)
+#define FETCH_4(img,l,o)						\
+    (((4 * (o)) & 4) ? (FETCH_8 (img, l, 4 * (o)) >> 4) : (FETCH_8 (img, l, (4 * (o))) & 0xf))
 #endif
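
For illustration (editor's note, not part of the patch), how FETCH_4 addresses a 4bpp pixel: the pixel index o becomes the bit offset 4*o, FETCH_8 reads byte (4*o) >> 3, and bit 2 of the bit offset selects the nibble:

    o = 4:  bit offset 16, byte 2, (16 & 4) == 0  ->  low nibble on little-endian, high nibble on big-endian
    o = 5:  bit offset 20, byte 2, (20 & 4) != 0  ->  high nibble on little-endian, low nibble on big-endian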
 
-static FASTCALL void
-fbFetch_a4 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_a4 (pixman_image_t *image,
+                   int             x,
+                   int             y,
+                   int             width,
+                   uint32_t *      buffer,
+                   const uint32_t *mask,
+                   uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t  p = Fetch4(pict, bits, i + x);
+
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t p = FETCH_4 (image, bits, i + x);
 
 	p |= p << 4;
+
 	*buffer++ = p << 24;
     }
 }
 
-static FASTCALL void
-fbFetch_r1g2b1 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_r1g2b1 (pixman_image_t *image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       uint32_t *      buffer,
+                       const uint32_t *mask,
+                       uint32_t        mask_bits)
 {
-    uint32_t  r,g,b;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t  p = Fetch4(pict, bits, i + x);
-
+    
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t p = FETCH_4 (image, bits, i + x);
+	uint32_t r, g, b;
+	
 	r = ((p & 0x8) * 0xff) << 13;
 	g = ((p & 0x6) * 0x55) << 7;
 	b = ((p & 0x1) * 0xff);
-	*buffer++ = 0xff000000|r|g|b;
+	
+	*buffer++ = 0xff000000 | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_b1g2r1 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_b1g2r1 (pixman_image_t *image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       uint32_t *      buffer,
+                       const uint32_t *mask,
+                       uint32_t        mask_bits)
 {
-    uint32_t  r,g,b;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t  p = Fetch4(pict, bits, i + x);
-
+    
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t p = FETCH_4 (image, bits, i + x);
+	uint32_t r, g, b;
+	
 	b = ((p & 0x8) * 0xff) >> 3;
 	g = ((p & 0x6) * 0x55) << 7;
 	r = ((p & 0x1) * 0xff) << 16;
-	*buffer++ = 0xff000000|r|g|b;
+
+	*buffer++ = 0xff000000 | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_a1r1g1b1 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_a1r1g1b1 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    uint32_t  a,r,g,b;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    uint32_t a, r, g, b;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t  p = Fetch4(pict, bits, i + x);
+
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t p = FETCH_4 (image, bits, i + x);
 
 	a = ((p & 0x8) * 0xff) << 21;
 	r = ((p & 0x4) * 0xff) << 14;
 	g = ((p & 0x2) * 0xff) << 7;
 	b = ((p & 0x1) * 0xff);
-	*buffer++ = a|r|g|b;
+
+	*buffer++ = a | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_a1b1g1r1 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_a1b1g1r1 (pixman_image_t *image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         uint32_t *      buffer,
+                         const uint32_t *mask,
+                         uint32_t        mask_bits)
 {
-    uint32_t  a,r,g,b;
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t  p = Fetch4(pict, bits, i + x);
+
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t p = FETCH_4 (image, bits, i + x);
+	uint32_t a, r, g, b;
 
 	a = ((p & 0x8) * 0xff) << 21;
-	r = ((p & 0x4) * 0xff) >> 3;
+	b = ((p & 0x4) * 0xff) >> 2;
 	g = ((p & 0x2) * 0xff) << 7;
-	b = ((p & 0x1) * 0xff) << 16;
-	*buffer++ = a|r|g|b;
+	r = ((p & 0x1) * 0xff) << 16;
+
+	*buffer++ = a | r | g | b;
     }
 }
 
-static FASTCALL void
-fbFetch_c4 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_c4 (pixman_image_t *image,
+                   int             x,
+                   int             y,
+                   int             width,
+                   uint32_t *      buffer,
+                   const uint32_t *mask,
+                   uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
-    const pixman_indexed_t * indexed = pict->indexed;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
+    const pixman_indexed_t * indexed = image->bits.indexed;
     int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t  p = Fetch4(pict, bits, i + x);
-
+    
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t p = FETCH_4 (image, bits, i + x);
+	
 	*buffer++ = indexed->rgba[p];
     }
 }
 
-
-static FASTCALL void
-fbFetch_a1 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_a1 (pixman_image_t *image,
+                   int             x,
+                   int             y,
+                   int             width,
+                   uint32_t *      buffer,
+                   const uint32_t *mask,
+                   uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
     int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t  p = READ(pict, bits + ((i + x) >> 5));
-	uint32_t  a;
-#if BITMAP_BIT_ORDER == MSBFirst
-	a = p >> (0x1f - ((i+x) & 0x1f));
+    
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t p = READ (image, bits + ((i + x) >> 5));
+	uint32_t a;
+	
+#ifdef WORDS_BIGENDIAN
+	a = p >> (0x1f - ((i + x) & 0x1f));
 #else
-	a = p >> ((i+x) & 0x1f);
+	a = p >> ((i + x) & 0x1f);
 #endif
 	a = a & 1;
 	a |= a << 1;
 	a |= a << 2;
 	a |= a << 4;
+	
 	*buffer++ = a << 24;
     }
 }
 
-static FASTCALL void
-fbFetch_g1 (bits_image_t *pict, int x, int y, int width, uint32_t *buffer)
+static void
+fetch_scanline_g1 (pixman_image_t *image,
+                   int             x,
+                   int             y,
+                   int             width,
+                   uint32_t *      buffer,
+                   const uint32_t *mask,
+                   uint32_t        mask_bits)
 {
-    const uint32_t *bits = pict->bits + y*pict->rowstride;
-    const pixman_indexed_t * indexed = pict->indexed;
+    const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
+    const pixman_indexed_t * indexed = image->bits.indexed;
     int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t p = READ(pict, bits + ((i+x) >> 5));
+    
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t p = READ (image, bits + ((i + x) >> 5));
 	uint32_t a;
-#if BITMAP_BIT_ORDER == MSBFirst
-	a = p >> (0x1f - ((i+x) & 0x1f));
+	
+#ifdef WORDS_BIGENDIAN
+	a = p >> (0x1f - ((i + x) & 0x1f));
 #else
-	a = p >> ((i+x) & 0x1f);
+	a = p >> ((i + x) & 0x1f);
 #endif
 	a = a & 1;
+	
 	*buffer++ = indexed->rgba[a];
     }
 }
 
-static FASTCALL void
-fbFetch_yuy2 (bits_image_t *pict, int x, int line, int width, uint32_t *buffer)
+static void
+fetch_scanline_yuy2 (pixman_image_t *image,
+                     int             x,
+                     int             line,
+                     int             width,
+                     uint32_t *      buffer,
+                     const uint32_t *mask,
+                     uint32_t        mask_bits)
 {
-    int16_t y, u, v;
-    int32_t r, g, b;
-    int   i;
-
-    const uint32_t *bits = pict->bits + pict->rowstride * line;
-
+    const uint32_t *bits = image->bits.bits + image->bits.rowstride * line;
+    int i;
+    
     for (i = 0; i < width; i++)
     {
+	int16_t y, u, v;
+	int32_t r, g, b;
+	
 	y = ((uint8_t *) bits)[(x + i) << 1] - 16;
-	u = ((uint8_t *) bits)[(((x + i) << 1) & -4) + 1] - 128;
-	v = ((uint8_t *) bits)[(((x + i) << 1) & -4) + 3] - 128;
-
+	u = ((uint8_t *) bits)[(((x + i) << 1) & - 4) + 1] - 128;
+	v = ((uint8_t *) bits)[(((x + i) << 1) & - 4) + 3] - 128;
+	
 	/* R = 1.164(Y - 16) + 1.596(V - 128) */
 	r = 0x012b27 * y + 0x019a2e * v;
 	/* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */
 	g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u;
 	/* B = 1.164(Y - 16) + 2.018(U - 128) */
 	b = 0x012b27 * y + 0x0206a2 * u;
-
-    WRITE(pict, buffer++, 0xff000000 |
-	(r >= 0 ? r < 0x1000000 ? r         & 0xff0000 : 0xff0000 : 0) |
-	(g >= 0 ? g < 0x1000000 ? (g >> 8)  & 0x00ff00 : 0x00ff00 : 0) |
-	(b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0));
+	
+	*buffer++ = 0xff000000 |
+	    (r >= 0 ? r < 0x1000000 ? r         & 0xff0000 : 0xff0000 : 0) |
+	    (g >= 0 ? g < 0x1000000 ? (g >> 8)  & 0x00ff00 : 0x00ff00 : 0) |
+	    (b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0);
     }
 }
 
-static FASTCALL void
-fbFetch_yv12 (bits_image_t *pict, int x, int line, int width, uint32_t *buffer)
+static void
+fetch_scanline_yv12 (pixman_image_t *image,
+                     int             x,
+                     int             line,
+                     int             width,
+                     uint32_t *      buffer,
+                     const uint32_t *mask,
+                     uint32_t        mask_bits)
 {
-    YV12_SETUP(pict);
-    uint8_t *pY = YV12_Y (line);
-    uint8_t *pU = YV12_U (line);
-    uint8_t *pV = YV12_V (line);
-    int16_t y, u, v;
-    int32_t r, g, b;
-    int   i;
-
+    YV12_SETUP (image);
+    uint8_t *y_line = YV12_Y (line);
+    uint8_t *u_line = YV12_U (line);
+    uint8_t *v_line = YV12_V (line);
+    int i;
+    
     for (i = 0; i < width; i++)
     {
-	y = pY[x + i] - 16;
-	u = pU[(x + i) >> 1] - 128;
-	v = pV[(x + i) >> 1] - 128;
+	int16_t y, u, v;
+	int32_t r, g, b;
+
+	y = y_line[x + i] - 16;
+	u = u_line[(x + i) >> 1] - 128;
+	v = v_line[(x + i) >> 1] - 128;
 
 	/* R = 1.164(Y - 16) + 1.596(V - 128) */
 	r = 0x012b27 * y + 0x019a2e * v;
 	/* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */
 	g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u;
 	/* B = 1.164(Y - 16) + 2.018(U - 128) */
 	b = 0x012b27 * y + 0x0206a2 * u;
 
-	WRITE(pict, buffer++, 0xff000000 |
+	*buffer++ = 0xff000000 |
 	    (r >= 0 ? r < 0x1000000 ? r         & 0xff0000 : 0xff0000 : 0) |
 	    (g >= 0 ? g < 0x1000000 ? (g >> 8)  & 0x00ff00 : 0x00ff00 : 0) |
-	    (b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0));
-    }
-}
-
-fetchProc32 ACCESS(pixman_fetchProcForPicture32) (bits_image_t * pict)
-{
-    switch(pict->format) {
-    case PIXMAN_a8r8g8b8: return fbFetch_a8r8g8b8;
-    case PIXMAN_x8r8g8b8: return fbFetch_x8r8g8b8;
-    case PIXMAN_a8b8g8r8: return fbFetch_a8b8g8r8;
-    case PIXMAN_x8b8g8r8: return fbFetch_x8b8g8r8;
-    /* These two require wide compositing */
-    case PIXMAN_a2b10g10r10: return NULL;
-    case PIXMAN_x2b10g10r10: return NULL;
-
-        /* 24bpp formats */
-    case PIXMAN_r8g8b8: return fbFetch_r8g8b8;
-    case PIXMAN_b8g8r8: return fbFetch_b8g8r8;
-
-        /* 16bpp formats */
-    case PIXMAN_r5g6b5: return fbFetch_r5g6b5;
-    case PIXMAN_b5g6r5: return fbFetch_b5g6r5;
-
-    case PIXMAN_a1r5g5b5: return fbFetch_a1r5g5b5;
-    case PIXMAN_x1r5g5b5: return fbFetch_x1r5g5b5;
-    case PIXMAN_a1b5g5r5: return fbFetch_a1b5g5r5;
-    case PIXMAN_x1b5g5r5: return fbFetch_x1b5g5r5;
-    case PIXMAN_a4r4g4b4: return fbFetch_a4r4g4b4;
-    case PIXMAN_x4r4g4b4: return fbFetch_x4r4g4b4;
-    case PIXMAN_a4b4g4r4: return fbFetch_a4b4g4r4;
-    case PIXMAN_x4b4g4r4: return fbFetch_x4b4g4r4;
-
-        /* 8bpp formats */
-    case PIXMAN_a8: return  fbFetch_a8;
-    case PIXMAN_r3g3b2: return fbFetch_r3g3b2;
-    case PIXMAN_b2g3r3: return fbFetch_b2g3r3;
-    case PIXMAN_a2r2g2b2: return fbFetch_a2r2g2b2;
-    case PIXMAN_a2b2g2r2: return fbFetch_a2b2g2r2;
-    case PIXMAN_c8: return  fbFetch_c8;
-    case PIXMAN_g8: return  fbFetch_c8;
-    case PIXMAN_x4a4: return fbFetch_x4a4;
-
-        /* 4bpp formats */
-    case PIXMAN_a4: return  fbFetch_a4;
-    case PIXMAN_r1g2b1: return fbFetch_r1g2b1;
-    case PIXMAN_b1g2r1: return fbFetch_b1g2r1;
-    case PIXMAN_a1r1g1b1: return fbFetch_a1r1g1b1;
-    case PIXMAN_a1b1g1r1: return fbFetch_a1b1g1r1;
-    case PIXMAN_c4: return  fbFetch_c4;
-    case PIXMAN_g4: return  fbFetch_c4;
-
-        /* 1bpp formats */
-    case PIXMAN_a1: return  fbFetch_a1;
-    case PIXMAN_g1: return  fbFetch_g1;
-
-        /* YUV formats */
-    case PIXMAN_yuy2: return fbFetch_yuy2;
-    case PIXMAN_yv12: return fbFetch_yv12;
-    }
-
-    return NULL;
-}
-
-static FASTCALL void
-fbFetch64_generic (bits_image_t *pict, int x, int y, int width, uint64_t *buffer)
-{
-    fetchProc32 fetch32 = ACCESS(pixman_fetchProcForPicture32) (pict);
-
-    // Fetch the pixels into the first half of buffer and then expand them in
-    // place.
-    fetch32(pict, x, y, width, (uint32_t*)buffer);
-    pixman_expand(buffer, (uint32_t*)buffer, pict->format, width);
-}
-
-fetchProc64 ACCESS(pixman_fetchProcForPicture64) (bits_image_t * pict)
-{
-    switch(pict->format) {
-    case PIXMAN_a2b10g10r10: return fbFetch_a2b10g10r10;
-    case PIXMAN_x2b10g10r10: return fbFetch_x2b10g10r10;
-    default: return fbFetch64_generic;
+	    (b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0);
     }
 }
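
For reference, the conversion implemented by the two YUV fetchers above, written out in plain floating point (an editor's sketch using the coefficients quoted in the comments; yuv_to_argb_ref is a made-up name and no such helper exists in pixman). The real code keeps each product in 16.16 fixed point, which is why the clamped red term can be masked with 0xff0000 directly: bits 16-23 hold the integer part, already in the a8r8g8b8 red position.

    #include <stdint.h>

    static uint32_t
    yuv_to_argb_ref (uint8_t y8, uint8_t u8, uint8_t v8)
    {
        double y = y8 - 16, u = u8 - 128, v = v8 - 128;
        double r = 1.164 * y + 1.596 * v;
        double g = 1.164 * y - 0.813 * v - 0.391 * u;
        double b = 1.164 * y + 2.018 * u;

        /* Clamp each channel to [0, 255] and pack as a8r8g8b8. */
        uint32_t ri = r < 0 ? 0 : r > 255 ? 255 : (uint32_t) r;
        uint32_t gi = g < 0 ? 0 : g > 255 ? 255 : (uint32_t) g;
        uint32_t bi = b < 0 ? 0 : b > 255 ? 255 : (uint32_t) b;

        return 0xff000000 | (ri << 16) | (gi << 8) | bi;
    }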
 
 /**************************** Pixel wise fetching *****************************/
 
-static FASTCALL uint64_t
-fbFetchPixel_a2b10g10r10 (bits_image_t *pict, int offset, int line)
+/* Despite the type, expects a uint64_t buffer */
+static uint64_t
+fetch_pixel_a2r10g10b10 (bits_image_t *image,
+			 int		  offset,
+			 int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t p = READ(pict, bits + offset);
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t p = READ (image, bits + offset);
     uint64_t a = p >> 30;
-    uint64_t b = (p >> 20) & 0x3ff;
+    uint64_t r = (p >> 20) & 0x3ff;
     uint64_t g = (p >> 10) & 0x3ff;
-    uint64_t r = p & 0x3ff;
+    uint64_t b = p & 0x3ff;
 
     r = r << 6 | r >> 4;
     g = g << 6 | g >> 4;
     b = b << 6 | b >> 4;
 
-    a <<= 62;
+    a <<= 14;
     a |= a >> 2;
     a |= a >> 4;
     a |= a >> 8;
 
     return a << 48 | r << 32 | g << 16 | b;
 }
 
-static FASTCALL uint64_t
-fbFetchPixel_x2b10g10r10 (bits_image_t *pict, int offset, int line)
+/* Despite the type, this function expects a uint64_t buffer */
+static uint64_t
+fetch_pixel_x2r10g10b10 (bits_image_t *image,
+			 int	   offset,
+			 int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t p = READ(pict, bits + offset);
-    uint64_t b = (p >> 20) & 0x3ff;
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t p = READ (image, bits + offset);
+    uint64_t r = (p >> 20) & 0x3ff;
     uint64_t g = (p >> 10) & 0x3ff;
-    uint64_t r = p & 0x3ff;
-
+    uint64_t b = p & 0x3ff;
+    
     r = r << 6 | r >> 4;
     g = g << 6 | g >> 4;
     b = b << 6 | b >> 4;
-
+    
     return 0xffffULL << 48 | r << 32 | g << 16 | b;
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_a8r8g8b8 (bits_image_t *pict, int offset, int line)
+/* Despite the type, expects a uint64_t buffer */
+static uint64_t
+fetch_pixel_a2b10g10r10 (bits_image_t *image,
+			 int           offset,
+			 int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    return READ(pict, (uint32_t *)bits + offset);
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t p = READ (image, bits + offset);
+    uint64_t a = p >> 30;
+    uint64_t b = (p >> 20) & 0x3ff;
+    uint64_t g = (p >> 10) & 0x3ff;
+    uint64_t r = p & 0x3ff;
+    
+    r = r << 6 | r >> 4;
+    g = g << 6 | g >> 4;
+    b = b << 6 | b >> 4;
+    
+    a <<= 14;
+    a |= a >> 2;
+    a |= a >> 4;
+    a |= a >> 8;
+    
+    return a << 48 | r << 32 | g << 16 | b;
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_x8r8g8b8 (bits_image_t *pict, int offset, int line)
+/* Despite the type, this function expects a uint64_t buffer */
+static uint64_t
+fetch_pixel_x2b10g10r10 (bits_image_t *image,
+			 int           offset,
+			 int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    return READ(pict, (uint32_t *)bits + offset) | 0xff000000;
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t p = READ (image, bits + offset);
+    uint64_t b = (p >> 20) & 0x3ff;
+    uint64_t g = (p >> 10) & 0x3ff;
+    uint64_t r = p & 0x3ff;
+    
+    r = r << 6 | r >> 4;
+    g = g << 6 | g >> 4;
+    b = b << 6 | b >> 4;
+    
+    return 0xffffULL << 48 | r << 32 | g << 16 | b;
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_a8b8g8r8 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_a8r8g8b8 (bits_image_t *image,
+		      int           offset,
+		      int           line)
+{
+    uint32_t *bits = image->bits + line * image->rowstride;
+    return READ (image, (uint32_t *)bits + offset);
+}
+
+static uint32_t
+fetch_pixel_x8r8g8b8 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = READ(pict, (uint32_t *)bits + offset);
+    uint32_t *bits = image->bits + line * image->rowstride;
+
+    return READ (image, (uint32_t *)bits + offset) | 0xff000000;
+}
 
+static uint32_t
+fetch_pixel_a8b8g8r8 (bits_image_t *image,
+		      int           offset,
+		      int           line)
+{
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint32_t *)bits + offset);
+    
     return ((pixel & 0xff000000) |
 	    ((pixel >> 16) & 0xff) |
 	    (pixel & 0x0000ff00) |
 	    ((pixel & 0xff) << 16));
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_x8b8g8r8 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_x8b8g8r8 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = READ(pict, (uint32_t *)bits + offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint32_t *)bits + offset);
+    
     return ((0xff000000) |
 	    ((pixel >> 16) & 0xff) |
 	    (pixel & 0x0000ff00) |
 	    ((pixel & 0xff) << 16));
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_r8g8b8 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_b8g8r8a8 (bits_image_t *image,
+		      int           offset,
+		      int           line)
+{
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint32_t *)bits + offset);
+    
+    return ((pixel & 0xff000000) >> 24 |
+	    (pixel & 0x00ff0000) >> 8 |
+	    (pixel & 0x0000ff00) << 8 |
+	    (pixel & 0x000000ff) << 24);
+}
+
+static uint32_t
+fetch_pixel_b8g8r8x8 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint8_t   *pixel = ((uint8_t *) bits) + (offset*3);
-#if IMAGE_BYTE_ORDER == MSBFirst
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint32_t *)bits + offset);
+    
+    return ((0xff000000) |
+	    (pixel & 0xff000000) >> 24 |
+	    (pixel & 0x00ff0000) >> 8 |
+	    (pixel & 0x0000ff00) << 8);
+}
+
+static uint32_t
+fetch_pixel_r8g8b8 (bits_image_t *image,
+		    int           offset,
+		    int           line)
+{
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint8_t   *pixel = ((uint8_t *) bits) + (offset * 3);
+    
+#ifdef WORDS_BIGENDIAN
     return (0xff000000 |
-	    (READ(pict, pixel + 0) << 16) |
-	    (READ(pict, pixel + 1) << 8) |
-	    (READ(pict, pixel + 2)));
+	    (READ (image, pixel + 0) << 16) |
+	    (READ (image, pixel + 1) << 8) |
+	    (READ (image, pixel + 2)));
 #else
     return (0xff000000 |
-	    (READ(pict, pixel + 2) << 16) |
-	    (READ(pict, pixel + 1) << 8) |
-	    (READ(pict, pixel + 0)));
+	    (READ (image, pixel + 2) << 16) |
+	    (READ (image, pixel + 1) << 8) |
+	    (READ (image, pixel + 0)));
 #endif
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_b8g8r8 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_b8g8r8 (bits_image_t *image,
+		    int           offset,
+		    int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint8_t   *pixel = ((uint8_t *) bits) + (offset*3);
-#if IMAGE_BYTE_ORDER == MSBFirst
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint8_t   *pixel = ((uint8_t *) bits) + (offset * 3);
+#ifdef WORDS_BIGENDIAN
     return (0xff000000 |
-	    (READ(pict, pixel + 2) << 16) |
-	    (READ(pict, pixel + 1) << 8) |
-	    (READ(pict, pixel + 0)));
+	    (READ (image, pixel + 2) << 16) |
+	    (READ (image, pixel + 1) << 8) |
+	    (READ (image, pixel + 0)));
 #else
     return (0xff000000 |
-	    (READ(pict, pixel + 0) << 16) |
-	    (READ(pict, pixel + 1) << 8) |
-	    (READ(pict, pixel + 2)));
+	    (READ (image, pixel + 0) << 16) |
+	    (READ (image, pixel + 1) << 8) |
+	    (READ (image, pixel + 2)));
 #endif
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_r5g6b5 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_r5g6b5 (bits_image_t *image,
+		    int           offset,
+		    int           line)
 {
-    uint32_t  r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = READ(pict, (uint16_t *) bits + offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+    uint32_t r, g, b;
+    
     r = ((pixel & 0xf800) | ((pixel & 0xe000) >> 5)) << 8;
     g = ((pixel & 0x07e0) | ((pixel & 0x0600) >> 6)) << 5;
     b = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) >> 2;
+    
     return (0xff000000 | r | g | b);
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_b5g6r5 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_b5g6r5 (bits_image_t *image,
+		    int           offset,
+		    int           line)
 {
-    uint32_t  r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = READ(pict, (uint16_t *) bits + offset);
-
+    uint32_t r, g, b;
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+    
     b = ((pixel & 0xf800) | ((pixel & 0xe000) >> 5)) >> 8;
     g = ((pixel & 0x07e0) | ((pixel & 0x0600) >> 6)) << 5;
     r = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) << 14;
+    
     return (0xff000000 | r | g | b);
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_a1r5g5b5 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_a1r5g5b5 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t  a,r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = READ(pict, (uint16_t *) bits + offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+    uint32_t a, r, g, b;
+    
     a = (uint32_t) ((uint8_t) (0 - ((pixel & 0x8000) >> 15))) << 24;
     r = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) << 9;
     g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
     b = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) >> 2;
+    
     return (a | r | g | b);
 }
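
The alpha computation in the a1r5g5b5/a1b5g5r5 fetchers relies on unsigned
wrap-around: subtracting the 1-bit alpha from zero in 8-bit arithmetic turns 1
into 0xff and leaves 0 as 0x00, so the single bit becomes a full alpha byte
without a branch. A short standalone sketch (the helper name is illustrative):

    #include <stdint.h>
    #include <assert.h>

    static uint32_t
    a1_to_a8 (uint32_t pixel_1555)
    {
        uint32_t bit = (pixel_1555 & 0x8000) >> 15;       /* 0 or 1 */
        return (uint32_t) ((uint8_t) (0 - bit)) << 24;    /* 0x00000000 or 0xff000000 */
    }

    int
    main (void)
    {
        assert (a1_to_a8 (0x8000) == 0xff000000);
        assert (a1_to_a8 (0x7fff) == 0x00000000);
        return 0;
    }
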
 
-static FASTCALL uint32_t
-fbFetchPixel_x1r5g5b5 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_x1r5g5b5 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t  r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = READ(pict, (uint16_t *) bits + offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+    uint32_t r, g, b;
+    
     r = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) << 9;
     g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
     b = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) >> 2;
+    
     return (0xff000000 | r | g | b);
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_a1b5g5r5 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_a1b5g5r5 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t  a,r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = READ(pict, (uint16_t *) bits + offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+    uint32_t a, r, g, b;
+    
     a = (uint32_t) ((uint8_t) (0 - ((pixel & 0x8000) >> 15))) << 24;
     b = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) >> 7;
     g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
     r = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) << 14;
+    
     return (a | r | g | b);
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_x1b5g5r5 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_x1b5g5r5 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t  r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = READ(pict, (uint16_t *) bits + offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+    uint32_t r, g, b;
+    
     b = ((pixel & 0x7c00) | ((pixel & 0x7000) >> 5)) >> 7;
     g = ((pixel & 0x03e0) | ((pixel & 0x0380) >> 5)) << 6;
     r = ((pixel & 0x001c) | ((pixel & 0x001f) << 5)) << 14;
+    
     return (0xff000000 | r | g | b);
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_a4r4g4b4 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_a4r4g4b4 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t  a,r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = READ(pict, (uint16_t *) bits + offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+    uint32_t a, r, g, b;
+    
     a = ((pixel & 0xf000) | ((pixel & 0xf000) >> 4)) << 16;
     r = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) << 12;
     g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
     b = ((pixel & 0x000f) | ((pixel & 0x000f) << 4));
+    
     return (a | r | g | b);
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_x4r4g4b4 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_x4r4g4b4 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t  r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = READ(pict, (uint16_t *) bits + offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+    uint32_t r, g, b;
+    
     r = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) << 12;
     g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
     b = ((pixel & 0x000f) | ((pixel & 0x000f) << 4));
+    
     return (0xff000000 | r | g | b);
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_a4b4g4r4 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_a4b4g4r4 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t  a,r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = READ(pict, (uint16_t *) bits + offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+    uint32_t a, r, g, b;
+    
     a = ((pixel & 0xf000) | ((pixel & 0xf000) >> 4)) << 16;
     b = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) >> 4;
     g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
     r = ((pixel & 0x000f) | ((pixel & 0x000f) << 4)) << 16;
+    
     return (a | r | g | b);
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_x4b4g4r4 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_x4b4g4r4 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t  r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = READ(pict, (uint16_t *) bits + offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint16_t *) bits + offset);
+    uint32_t r, g, b;
+    
     b = ((pixel & 0x0f00) | ((pixel & 0x0f00) >> 4)) >> 4;
     g = ((pixel & 0x00f0) | ((pixel & 0x00f0) >> 4)) << 8;
     r = ((pixel & 0x000f) | ((pixel & 0x000f) << 4)) << 16;
+    
     return (0xff000000 | r | g | b);
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_a8 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_a8 (bits_image_t *image,
+		int           offset,
+		int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t   pixel = READ(pict, (uint8_t *) bits + offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint8_t *) bits + offset);
+    
     return pixel << 24;
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_r3g3b2 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_r3g3b2 (bits_image_t *image,
+		    int           offset,
+		    int           line)
 {
-    uint32_t  r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t   pixel = READ(pict, (uint8_t *) bits + offset);
-
-    r = ((pixel & 0xe0) | ((pixel & 0xe0) >> 3) | ((pixel & 0xc0) >> 6)) << 16;
-    g = ((pixel & 0x1c) | ((pixel & 0x18) >> 3) | ((pixel & 0x1c) << 3)) << 8;
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint8_t *) bits + offset);
+    uint32_t r, g, b;
+    
+    r = ((pixel & 0xe0) |
+	 ((pixel & 0xe0) >> 3) |
+	 ((pixel & 0xc0) >> 6)) << 16;
+    
+    g = ((pixel & 0x1c) |
+	 ((pixel & 0x18) >> 3) |
+	 ((pixel & 0x1c) << 3)) << 8;
+    
     b = (((pixel & 0x03)     ) |
 	 ((pixel & 0x03) << 2) |
 	 ((pixel & 0x03) << 4) |
 	 ((pixel & 0x03) << 6));
+    
     return (0xff000000 | r | g | b);
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_b2g3r3 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_b2g3r3 (bits_image_t *image,
+		    int           offset,
+		    int           line)
 {
-    uint32_t  r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t   pixel = READ(pict, (uint8_t *) bits + offset);
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t p = READ (image, (uint8_t *) bits + offset);
+    uint32_t r, g, b;
 
-    b = (((pixel & 0xc0)     ) |
-	 ((pixel & 0xc0) >> 2) |
-	 ((pixel & 0xc0) >> 4) |
-	 ((pixel & 0xc0) >> 6));
-    g = ((pixel & 0x38) | ((pixel & 0x38) >> 3) | ((pixel & 0x30) << 2)) << 8;
-    r = (((pixel & 0x07)     ) |
-	 ((pixel & 0x07) << 3) |
-	 ((pixel & 0x06) << 6)) << 16;
-    return (0xff000000 | r | g | b);
+    b  = p & 0xc0;
+    b |= b >> 2;
+    b |= b >> 4;
+    b &= 0xff;
+
+    g  = (p & 0x38) << 10;
+    g |= g >> 3;
+    g |= g >> 6;
+    g &= 0xff00;
+
+    r  = (p & 0x7) << 21;
+    r |= r >> 3;
+    r |= r >> 6;
+    r &= 0xff0000;
+
+    return 0xff000000 | r | g | b;
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_a2r2g2b2 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_a2r2g2b2 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t   a,r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t   pixel = READ(pict, (uint8_t *) bits + offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint8_t *) bits + offset);
+    uint32_t a, r, g, b;
+    
     a = ((pixel & 0xc0) * 0x55) << 18;
     r = ((pixel & 0x30) * 0x55) << 12;
     g = ((pixel & 0x0c) * 0x55) << 6;
     b = ((pixel & 0x03) * 0x55);
-    return a|r|g|b;
+    
+    return a | r | g | b;
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_a2b2g2r2 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_a2b2g2r2 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t   a,r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t   pixel = READ(pict, (uint8_t *) bits + offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint8_t *) bits + offset);
+    uint32_t a, r, g, b;
+    
     a = ((pixel & 0xc0) * 0x55) << 18;
-    b = ((pixel & 0x30) * 0x55) >> 6;
+    b = ((pixel & 0x30) * 0x55) >> 4;
     g = ((pixel & 0x0c) * 0x55) << 6;
     r = ((pixel & 0x03) * 0x55) << 16;
-    return a|r|g|b;
+    
+    return a | r | g | b;
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_c8 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_c8 (bits_image_t *image,
+		int           offset,
+		int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t   pixel = READ(pict, (uint8_t *) bits + offset);
-    const pixman_indexed_t * indexed = pict->indexed;
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint8_t *) bits + offset);
+    const pixman_indexed_t * indexed = image->indexed;
+    
     return indexed->rgba[pixel];
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_x4a4 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_x4a4 (bits_image_t *image,
+		  int           offset,
+		  int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t   pixel = READ(pict, (uint8_t *) bits + offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, (uint8_t *) bits + offset);
+    
     return ((pixel & 0xf) | ((pixel & 0xf) << 4)) << 24;
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_a4 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_a4 (bits_image_t *image,
+		int           offset,
+		int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = Fetch4(pict, bits, offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = FETCH_4 (image, bits, offset);
+    
     pixel |= pixel << 4;
     return pixel << 24;
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_r1g2b1 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_r1g2b1 (bits_image_t *image,
+		    int           offset,
+		    int           line)
 {
-    uint32_t  r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = Fetch4(pict, bits, offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = FETCH_4 (image, bits, offset);
+    uint32_t r, g, b;
+    
     r = ((pixel & 0x8) * 0xff) << 13;
     g = ((pixel & 0x6) * 0x55) << 7;
     b = ((pixel & 0x1) * 0xff);
-    return 0xff000000|r|g|b;
+    
+    return 0xff000000 | r | g | b;
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_b1g2r1 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_b1g2r1 (bits_image_t *image,
+		    int           offset,
+		    int           line)
 {
-    uint32_t  r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = Fetch4(pict, bits, offset);
-
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = FETCH_4 (image, bits, offset);
+    uint32_t r, g, b;
+    
     b = ((pixel & 0x8) * 0xff) >> 3;
     g = ((pixel & 0x6) * 0x55) << 7;
     r = ((pixel & 0x1) * 0xff) << 16;
-    return 0xff000000|r|g|b;
+    
+    return 0xff000000 | r | g | b;
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_a1r1g1b1 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_a1r1g1b1 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t  a,r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = Fetch4(pict, bits, offset);
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = FETCH_4 (image, bits, offset);
+    uint32_t a, r, g, b;
 
     a = ((pixel & 0x8) * 0xff) << 21;
     r = ((pixel & 0x4) * 0xff) << 14;
     g = ((pixel & 0x2) * 0xff) << 7;
     b = ((pixel & 0x1) * 0xff);
-    return a|r|g|b;
+
+    return a | r | g | b;
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_a1b1g1r1 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_a1b1g1r1 (bits_image_t *image,
+		      int           offset,
+		      int           line)
 {
-    uint32_t  a,r,g,b;
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = Fetch4(pict, bits, offset);
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = FETCH_4 (image, bits, offset);
+    uint32_t a, r, g, b;
 
     a = ((pixel & 0x8) * 0xff) << 21;
-    r = ((pixel & 0x4) * 0xff) >> 3;
+    b = ((pixel & 0x4) * 0xff) >> 2;
     g = ((pixel & 0x2) * 0xff) << 7;
-    b = ((pixel & 0x1) * 0xff) << 16;
-    return a|r|g|b;
+    r = ((pixel & 0x1) * 0xff) << 16;
+
+    return a | r | g | b;
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_c4 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_c4 (bits_image_t *image,
+		int           offset,
+		int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = Fetch4(pict, bits, offset);
-    const pixman_indexed_t * indexed = pict->indexed;
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = FETCH_4 (image, bits, offset);
+    const pixman_indexed_t * indexed = image->indexed;
 
     return indexed->rgba[pixel];
 }
 
-
-static FASTCALL uint32_t
-fbFetchPixel_a1 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_a1 (bits_image_t *image,
+		int           offset,
+		int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t  pixel = READ(pict, bits + (offset >> 5));
-    uint32_t  a;
-#if BITMAP_BIT_ORDER == MSBFirst
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, bits + (offset >> 5));
+    uint32_t a;
+    
+#ifdef WORDS_BIGENDIAN
     a = pixel >> (0x1f - (offset & 0x1f));
 #else
     a = pixel >> (offset & 0x1f);
 #endif
     a = a & 1;
     a |= a << 1;
     a |= a << 2;
     a |= a << 4;
+    
     return a << 24;
 }
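
fetch_pixel_a1 reads the 32-bit word containing the pixel, selects the right
bit (from the opposite end on big-endian builds), and then broadens it to an
8-bit alpha with a shift-or cascade. A minimal sketch of that expansion (the
helper name is illustrative):

    #include <stdint.h>
    #include <assert.h>

    static uint32_t
    expand_a1 (uint32_t bit)
    {
        uint32_t a = bit & 1;
        a |= a << 1;      /* 0x1 -> 0x3  */
        a |= a << 2;      /* 0x3 -> 0xf  */
        a |= a << 4;      /* 0xf -> 0xff */
        return a << 24;
    }

    int
    main (void)
    {
        assert (expand_a1 (1) == 0xff000000);
        assert (expand_a1 (0) == 0x00000000);
        return 0;
    }
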
 
-static FASTCALL uint32_t
-fbFetchPixel_g1 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_g1 (bits_image_t *image,
+		int           offset,
+		int           line)
 {
-    uint32_t *bits = pict->bits + line*pict->rowstride;
-    uint32_t pixel = READ(pict, bits + (offset >> 5));
-    const pixman_indexed_t * indexed = pict->indexed;
+    uint32_t *bits = image->bits + line * image->rowstride;
+    uint32_t pixel = READ (image, bits + (offset >> 5));
+    const pixman_indexed_t * indexed = image->indexed;
     uint32_t a;
-#if BITMAP_BIT_ORDER == MSBFirst
+    
+#ifdef WORDS_BIGENDIAN
     a = pixel >> (0x1f - (offset & 0x1f));
 #else
     a = pixel >> (offset & 0x1f);
 #endif
     a = a & 1;
+    
     return indexed->rgba[a];
 }
 
-static FASTCALL uint32_t
-fbFetchPixel_yuy2 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_yuy2 (bits_image_t *image,
+		  int           offset,
+		  int           line)
 {
+    const uint32_t *bits = image->bits + image->rowstride * line;
+    
     int16_t y, u, v;
     int32_t r, g, b;
-
-    const uint32_t *bits = pict->bits + pict->rowstride * line;
-
+    
     y = ((uint8_t *) bits)[offset << 1] - 16;
-    u = ((uint8_t *) bits)[((offset << 1) & -4) + 1] - 128;
-    v = ((uint8_t *) bits)[((offset << 1) & -4) + 3] - 128;
-
+    u = ((uint8_t *) bits)[((offset << 1) & - 4) + 1] - 128;
+    v = ((uint8_t *) bits)[((offset << 1) & - 4) + 3] - 128;
+    
     /* R = 1.164(Y - 16) + 1.596(V - 128) */
     r = 0x012b27 * y + 0x019a2e * v;
+    
     /* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */
     g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u;
+    
     /* B = 1.164(Y - 16) + 2.018(U - 128) */
     b = 0x012b27 * y + 0x0206a2 * u;
-
+    
     return 0xff000000 |
 	(r >= 0 ? r < 0x1000000 ? r         & 0xff0000 : 0xff0000 : 0) |
 	(g >= 0 ? g < 0x1000000 ? (g >> 8)  & 0x00ff00 : 0x00ff00 : 0) |
 	(b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0);
 }
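
Both YUV fetchers do the conversion in 16.16 fixed point (the hex constants
are fixed-point forms of the coefficients quoted in the comments) and then
clamp each channel to [0, 255] with the nested conditionals before placing it
in its byte of the ARGB result. A standalone restatement of that clamp-and-pack
step, with illustrative helper names:

    #include <stdint.h>

    /* v is a 16.16 fixed-point channel value; shift positions the 8-bit
     * integer part at bits [shift, shift+7] of the ARGB word. */
    static uint32_t
    clamp_channel_16_16 (int32_t v, int shift)
    {
        if (v < 0)
            return 0;
        if (v >= 0x1000000)                /* >= 256.0 in 16.16 */
            return 0xffu << shift;
        return ((uint32_t) v >> 16) << shift;
    }

    /* y, u, v are already offset as in the fetchers above:
     * y - 16, u - 128, v - 128. */
    static uint32_t
    yuv_to_argb (int16_t y, int16_t u, int16_t v)
    {
        int32_t r = 0x012b27 * y + 0x019a2e * v;
        int32_t g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u;
        int32_t b = 0x012b27 * y + 0x0206a2 * u;

        return 0xff000000                  |
               clamp_channel_16_16 (r, 16) |
               clamp_channel_16_16 (g, 8)  |
               clamp_channel_16_16 (b, 0);
    }
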
 
-static FASTCALL uint32_t
-fbFetchPixel_yv12 (bits_image_t *pict, int offset, int line)
+static uint32_t
+fetch_pixel_yv12 (bits_image_t *image,
+		  int           offset,
+		  int           line)
 {
-    YV12_SETUP(pict);
+    YV12_SETUP (image);
     int16_t y = YV12_Y (line)[offset] - 16;
     int16_t u = YV12_U (line)[offset >> 1] - 128;
     int16_t v = YV12_V (line)[offset >> 1] - 128;
     int32_t r, g, b;
-
+    
     /* R = 1.164(Y - 16) + 1.596(V - 128) */
     r = 0x012b27 * y + 0x019a2e * v;
+    
     /* G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) */
     g = 0x012b27 * y - 0x00d0f2 * v - 0x00647e * u;
+    
     /* B = 1.164(Y - 16) + 2.018(U - 128) */
     b = 0x012b27 * y + 0x0206a2 * u;
-
+    
     return 0xff000000 |
 	(r >= 0 ? r < 0x1000000 ? r         & 0xff0000 : 0xff0000 : 0) |
 	(g >= 0 ? g < 0x1000000 ? (g >> 8)  & 0x00ff00 : 0x00ff00 : 0) |
 	(b >= 0 ? b < 0x1000000 ? (b >> 16) & 0x0000ff : 0x0000ff : 0);
 }
 
-/*
- * XXX: The transformed fetch path only works at 32-bpp so far.  When all paths
- * have wide versions, this can be removed.
- *
- * WARNING: This function loses precision!
- */
-static FASTCALL uint32_t
-fbFetchPixel32_generic_lossy (bits_image_t *pict, int offset, int line)
+/*********************************** Store ************************************/
+
+#define SPLIT_A(v)              \
+    uint32_t a = ((v) >> 24),   \
+	r = ((v) >> 16) & 0xff, \
+	g = ((v) >> 8) & 0xff,  \
+	b = (v) & 0xff
+
+#define SPLIT(v)                     \
+    uint32_t r = ((v) >> 16) & 0xff, \
+	g = ((v) >> 8) & 0xff,       \
+	b = (v) & 0xff
+
+static void
+store_scanline_a2r10g10b10 (bits_image_t *  image,
+                            int             x,
+                            int             y,
+                            int             width,
+                            const uint32_t *v)
 {
-    fetchPixelProc64 fetchPixel64 = ACCESS(pixman_fetchPixelProcForPicture64) (pict);
-    const uint64_t argb16Pixel = fetchPixel64(pict, offset, line);
-    uint32_t argb8Pixel;
-
-    pixman_contract(&argb8Pixel, &argb16Pixel, 1);
-
-    return argb8Pixel;
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint32_t *pixel = bits + x;
+    uint64_t *values = (uint64_t *)v;
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	WRITE (image, pixel++,
+	       ((values[i] >> 32) & 0xc0000000) |
+	       ((values[i] >> 18) & 0x3ff00000) |
+	       ((values[i] >> 12) & 0xffc00) | 
+	       ((values[i] >> 6) & 0x3ff));    
+    }
 }
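
The store routines for the 10-bit formats take 64-bit wide pixels laid out the
same way the fetchers above produce them (a in bits 48-63, r in 32-47, g in
16-31, b in 0-15) and keep only the top 2 or 10 bits of each channel. A
single-pixel sketch of the a2r10g10b10 packing (the helper name is
illustrative):

    #include <stdint.h>

    static uint32_t
    pack_a2r10g10b10 (uint64_t wide)
    {
        return (uint32_t)
            (((wide >> 32) & 0xc0000000) |   /* top 2 bits of a  -> bits 30-31 */
             ((wide >> 18) & 0x3ff00000) |   /* top 10 bits of r -> bits 20-29 */
             ((wide >> 12) & 0x000ffc00) |   /* top 10 bits of g -> bits 10-19 */
             ((wide >>  6) & 0x000003ff));   /* top 10 bits of b -> bits 0-9   */
    }
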
 
-fetchPixelProc32 ACCESS(pixman_fetchPixelProcForPicture32) (bits_image_t * pict)
+static void
+store_scanline_x2r10g10b10 (bits_image_t *  image,
+                            int             x,
+                            int             y,
+                            int             width,
+                            const uint32_t *v)
 {
-    switch(pict->format) {
-    case PIXMAN_a8r8g8b8: return fbFetchPixel_a8r8g8b8;
-    case PIXMAN_x8r8g8b8: return fbFetchPixel_x8r8g8b8;
-    case PIXMAN_a8b8g8r8: return fbFetchPixel_a8b8g8r8;
-    case PIXMAN_x8b8g8r8: return fbFetchPixel_x8b8g8r8;
-    /* These two require wide compositing */
-    case PIXMAN_a2b10g10r10: return fbFetchPixel32_generic_lossy;
-    case PIXMAN_x2b10g10r10: return fbFetchPixel32_generic_lossy;
-
-        /* 24bpp formats */
-    case PIXMAN_r8g8b8: return fbFetchPixel_r8g8b8;
-    case PIXMAN_b8g8r8: return fbFetchPixel_b8g8r8;
-
-        /* 16bpp formats */
-    case PIXMAN_r5g6b5: return fbFetchPixel_r5g6b5;
-    case PIXMAN_b5g6r5: return fbFetchPixel_b5g6r5;
-
-    case PIXMAN_a1r5g5b5: return fbFetchPixel_a1r5g5b5;
-    case PIXMAN_x1r5g5b5: return fbFetchPixel_x1r5g5b5;
-    case PIXMAN_a1b5g5r5: return fbFetchPixel_a1b5g5r5;
-    case PIXMAN_x1b5g5r5: return fbFetchPixel_x1b5g5r5;
-    case PIXMAN_a4r4g4b4: return fbFetchPixel_a4r4g4b4;
-    case PIXMAN_x4r4g4b4: return fbFetchPixel_x4r4g4b4;
-    case PIXMAN_a4b4g4r4: return fbFetchPixel_a4b4g4r4;
-    case PIXMAN_x4b4g4r4: return fbFetchPixel_x4b4g4r4;
-
-        /* 8bpp formats */
-    case PIXMAN_a8: return  fbFetchPixel_a8;
-    case PIXMAN_r3g3b2: return fbFetchPixel_r3g3b2;
-    case PIXMAN_b2g3r3: return fbFetchPixel_b2g3r3;
-    case PIXMAN_a2r2g2b2: return fbFetchPixel_a2r2g2b2;
-    case PIXMAN_a2b2g2r2: return fbFetchPixel_a2b2g2r2;
-    case PIXMAN_c8: return  fbFetchPixel_c8;
-    case PIXMAN_g8: return  fbFetchPixel_c8;
-    case PIXMAN_x4a4: return fbFetchPixel_x4a4;
-
-        /* 4bpp formats */
-    case PIXMAN_a4: return  fbFetchPixel_a4;
-    case PIXMAN_r1g2b1: return fbFetchPixel_r1g2b1;
-    case PIXMAN_b1g2r1: return fbFetchPixel_b1g2r1;
-    case PIXMAN_a1r1g1b1: return fbFetchPixel_a1r1g1b1;
-    case PIXMAN_a1b1g1r1: return fbFetchPixel_a1b1g1r1;
-    case PIXMAN_c4: return  fbFetchPixel_c4;
-    case PIXMAN_g4: return  fbFetchPixel_c4;
-
-        /* 1bpp formats */
-    case PIXMAN_a1: return  fbFetchPixel_a1;
-    case PIXMAN_g1: return  fbFetchPixel_g1;
-
-        /* YUV formats */
-    case PIXMAN_yuy2: return fbFetchPixel_yuy2;
-    case PIXMAN_yv12: return fbFetchPixel_yv12;
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint64_t *values = (uint64_t *)v;
+    uint32_t *pixel = bits + x;
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	WRITE (image, pixel++,
+	       ((values[i] >> 18) & 0x3ff00000) | 
+	       ((values[i] >> 12) & 0xffc00) |
+	       ((values[i] >> 6) & 0x3ff));
     }
-
-    return NULL;
 }
 
-static FASTCALL uint64_t
-fbFetchPixel64_generic (bits_image_t *pict, int offset, int line)
+static void
+store_scanline_a2b10g10r10 (bits_image_t *  image,
+                            int             x,
+                            int             y,
+                            int             width,
+                            const uint32_t *v)
 {
-    fetchPixelProc32 fetchPixel32 = ACCESS(pixman_fetchPixelProcForPicture32) (pict);
-    uint32_t argb8Pixel = fetchPixel32(pict, offset, line);
-    uint64_t argb16Pixel;
-
-    pixman_expand(&argb16Pixel, &argb8Pixel, pict->format, 1);
-
-    return argb16Pixel;
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint32_t *pixel = bits + x;
+    uint64_t *values = (uint64_t *)v;
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	WRITE (image, pixel++,
+	       ((values[i] >> 32) & 0xc0000000) |
+	       ((values[i] >> 38) & 0x3ff) |
+	       ((values[i] >> 12) & 0xffc00) |
+	       ((values[i] << 14) & 0x3ff00000));
+    }
 }
 
-fetchPixelProc64 ACCESS(pixman_fetchPixelProcForPicture64) (bits_image_t * pict)
+static void
+store_scanline_x2b10g10r10 (bits_image_t *  image,
+                            int             x,
+                            int             y,
+                            int             width,
+                            const uint32_t *v)
 {
-    switch(pict->format) {
-    case PIXMAN_a2b10g10r10: return fbFetchPixel_a2b10g10r10;
-    case PIXMAN_x2b10g10r10: return fbFetchPixel_x2b10g10r10;
-    default: return fbFetchPixel64_generic;
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint64_t *values = (uint64_t *)v;
+    uint32_t *pixel = bits + x;
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	WRITE (image, pixel++,
+	       ((values[i] >> 38) & 0x3ff) |
+	       ((values[i] >> 12) & 0xffc00) |
+	       ((values[i] << 14) & 0x3ff00000));
     }
 }
 
-/*********************************** Store ************************************/
+static void
+store_scanline_a8r8g8b8 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
+{
+    uint32_t *bits = image->bits + image->rowstride * y;
+    
+    MEMCPY_WRAPPED (image, ((uint32_t *)bits) + x, values,
+                    width * sizeof(uint32_t));
+}
 
-#define Splita(v)	uint32_t	a = ((v) >> 24), r = ((v) >> 16) & 0xff, g = ((v) >> 8) & 0xff, b = (v) & 0xff
-#define Split(v)	uint32_t	r = ((v) >> 16) & 0xff, g = ((v) >> 8) & 0xff, b = (v) & 0xff
-
-static FASTCALL void
-fbStore_a2b10g10r10 (pixman_image_t *image,
-		     uint32_t *bits, const uint64_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_x8r8g8b8 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint32_t *pixel = (uint32_t *)bits + x;
     int i;
-    uint32_t *pixel = bits + x;
-    for (i = 0; i < width; ++i) {
-        WRITE(image, pixel++,
-            ((values[i] >> 32) & 0xc0000000) | // A
-            ((values[i] >> 38) & 0x3ff) |      // R
-            ((values[i] >> 12) & 0xffc00) |    // G
-            ((values[i] << 14) & 0x3ff00000)); // B
+    
+    for (i = 0; i < width; ++i)
+	WRITE (image, pixel++, values[i] & 0xffffff);
+}
+
+static void
+store_scanline_a8b8g8r8 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
+{
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint32_t *pixel = (uint32_t *)bits + x;
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	WRITE (image, pixel++,
+	       (values[i] & 0xff00ff00)         |
+	       ((values[i] >> 16) & 0xff)       |
+	       ((values[i] & 0xff) << 16));
     }
 }
 
-static FASTCALL void
-fbStore_x2b10g10r10 (pixman_image_t *image,
-		     uint32_t *bits, const uint64_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_x8b8g8r8 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint32_t *pixel = (uint32_t *)bits + x;
     int i;
-    uint32_t *pixel = bits + x;
-    for (i = 0; i < width; ++i) {
-        WRITE(image, pixel++,
-            ((values[i] >> 38) & 0x3ff) |      // R
-            ((values[i] >> 12) & 0xffc00) |    // G
-            ((values[i] << 14) & 0x3ff00000)); // B
+    
+    for (i = 0; i < width; ++i)
+    {
+	WRITE (image, pixel++,
+	       (values[i] & 0x0000ff00)         |
+	       ((values[i] >> 16) & 0xff)       |
+	       ((values[i] & 0xff) << 16));
     }
 }
 
-static FASTCALL void
-fbStore_a8r8g8b8 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_b8g8r8a8 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
-    MEMCPY_WRAPPED(image, ((uint32_t *)bits) + x, values, width*sizeof(uint32_t));
-}
-
-static FASTCALL void
-fbStore_x8r8g8b8 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
-{
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint32_t *pixel = (uint32_t *)bits + x;
     int i;
-    uint32_t *pixel = (uint32_t *)bits + x;
+    
     for (i = 0; i < width; ++i)
-	WRITE(image, pixel++, values[i] & 0xffffff);
+    {
+	WRITE (image, pixel++,
+	       ((values[i] >> 24) & 0x000000ff) |
+	       ((values[i] >>  8) & 0x0000ff00) |
+	       ((values[i] <<  8) & 0x00ff0000) |
+	       ((values[i] << 24) & 0xff000000));
+    }
 }
 
-static FASTCALL void
-fbStore_a8b8g8r8 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_b8g8r8x8 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
-    int i;
+    uint32_t *bits = image->bits + image->rowstride * y;
     uint32_t *pixel = (uint32_t *)bits + x;
-    for (i = 0; i < width; ++i)
-	WRITE(image, pixel++, (values[i] & 0xff00ff00) | ((values[i] >> 16) & 0xff) | ((values[i] & 0xff) << 16));
-}
-
-static FASTCALL void
-fbStore_x8b8g8r8 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
-{
     int i;
-    uint32_t *pixel = (uint32_t *)bits + x;
+    
     for (i = 0; i < width; ++i)
-	WRITE(image, pixel++, (values[i] & 0x0000ff00) | ((values[i] >> 16) & 0xff) | ((values[i] & 0xff) << 16));
-}
-
-static FASTCALL void
-fbStore_r8g8b8 (pixman_image_t *image,
-		uint32_t *bits, const uint32_t *values, int x, int width,
-		const pixman_indexed_t * indexed)
-{
-    int i;
-    uint8_t *pixel = ((uint8_t *) bits) + 3*x;
-    for (i = 0; i < width; ++i) {
-	Store24(image, pixel, values[i]);
-	pixel += 3;
+    {
+	WRITE (image, pixel++,
+	       ((values[i] >>  8) & 0x0000ff00) |
+	       ((values[i] <<  8) & 0x00ff0000) |
+	       ((values[i] << 24) & 0xff000000));
     }
 }
 
-static FASTCALL void
-fbStore_b8g8r8 (pixman_image_t *image,
-		uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_r8g8b8 (bits_image_t *  image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       const uint32_t *values)
 {
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint8_t *pixel = ((uint8_t *) bits) + 3 * x;
     int i;
-    uint8_t *pixel = ((uint8_t *) bits) + 3*x;
-    for (i = 0; i < width; ++i) {
+    
+    for (i = 0; i < width; ++i)
+    {
 	uint32_t val = values[i];
-#if IMAGE_BYTE_ORDER == MSBFirst
-	WRITE(image, pixel++, Blue(val));
-	WRITE(image, pixel++, Green(val));
-	WRITE(image, pixel++, Red(val));
+	
+#ifdef WORDS_BIGENDIAN
+	WRITE (image, pixel++, (val & 0x00ff0000) >> 16);
+	WRITE (image, pixel++, (val & 0x0000ff00) >>  8);
+	WRITE (image, pixel++, (val & 0x000000ff) >>  0);
 #else
-	WRITE(image, pixel++, Red(val));
-	WRITE(image, pixel++, Green(val));
-	WRITE(image, pixel++, Blue(val));
+	WRITE (image, pixel++, (val & 0x000000ff) >>  0);
+	WRITE (image, pixel++, (val & 0x0000ff00) >>  8);
+	WRITE (image, pixel++, (val & 0x00ff0000) >> 16);
+#endif
+    }
+}
+
+static void
+store_scanline_b8g8r8 (bits_image_t *  image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       const uint32_t *values)
+{
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint8_t *pixel = ((uint8_t *) bits) + 3 * x;
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t val = values[i];
+	
+#ifdef WORDS_BIGENDIAN
+	WRITE (image, pixel++, (val & 0x000000ff) >>  0);
+	WRITE (image, pixel++, (val & 0x0000ff00) >>  8);
+	WRITE (image, pixel++, (val & 0x00ff0000) >> 16);
+#else
+	WRITE (image, pixel++, (val & 0x00ff0000) >> 16);
+	WRITE (image, pixel++, (val & 0x0000ff00) >>  8);
+	WRITE (image, pixel++, (val & 0x000000ff) >>  0);
 #endif
     }
 }
 
-static FASTCALL void
-fbStore_r5g6b5 (pixman_image_t *image,
-		uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_r5g6b5 (bits_image_t *  image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       const uint32_t *values)
 {
-    int i;
+    uint32_t *bits = image->bits + image->rowstride * y;
     uint16_t *pixel = ((uint16_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
 	uint32_t s = values[i];
-	WRITE(image, pixel++, ((s >> 3) & 0x001f) |
-	      ((s >> 5) & 0x07e0) |
-	      ((s >> 8) & 0xf800));
+	
+	WRITE (image, pixel++,
+	       ((s >> 3) & 0x001f) |
+	       ((s >> 5) & 0x07e0) |
+	       ((s >> 8) & 0xf800));
     }
 }
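
store_scanline_r5g6b5 packs straight from the 32-bit value, but the same
result can be expressed with the SPLIT macro defined above, which is how the
b5g6r5 variant that follows is written. A self-contained sketch of that
equivalent form (the macro body is copied from above; the function name is
illustrative):

    #include <stdint.h>

    #define SPLIT(v)                     \
        uint32_t r = ((v) >> 16) & 0xff, \
            g = ((v) >> 8) & 0xff,       \
            b = (v) & 0xff

    static uint16_t
    pack_r5g6b5 (uint32_t v)
    {
        SPLIT (v);
        return (uint16_t) (((r << 8) & 0xf800) |
                           ((g << 3) & 0x07e0) |
                           ( b >> 3           ));
    }
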
 
-static FASTCALL void
-fbStore_b5g6r5 (pixman_image_t *image,
-		uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_b5g6r5 (bits_image_t *  image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       const uint32_t *values)
 {
-    int i;
+    uint32_t *bits = image->bits + image->rowstride * y;
     uint16_t  *pixel = ((uint16_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	Split(values[i]);
-	WRITE(image, pixel++, ((b << 8) & 0xf800) |
-	      ((g << 3) & 0x07e0) |
-	      ((r >> 3)         ));
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	SPLIT (values[i]);
+	
+	WRITE (image, pixel++,
+	       ((b << 8) & 0xf800) |
+	       ((g << 3) & 0x07e0) |
+	       ((r >> 3)         ));
     }
 }
 
-static FASTCALL void
-fbStore_a1r5g5b5 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_a1r5g5b5 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
-    int i;
+    uint32_t *bits = image->bits + image->rowstride * y;
     uint16_t  *pixel = ((uint16_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	Splita(values[i]);
-	WRITE(image, pixel++, ((a << 8) & 0x8000) |
-	      ((r << 7) & 0x7c00) |
-	      ((g << 2) & 0x03e0) |
-	      ((b >> 3)         ));
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	SPLIT_A (values[i]);
+	
+	WRITE (image, pixel++,
+	       ((a << 8) & 0x8000) |
+	       ((r << 7) & 0x7c00) |
+	       ((g << 2) & 0x03e0) |
+	       ((b >> 3)         ));
     }
 }
 
-static FASTCALL void
-fbStore_x1r5g5b5 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_x1r5g5b5 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
-    int i;
+    uint32_t *bits = image->bits + image->rowstride * y;
     uint16_t  *pixel = ((uint16_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	Split(values[i]);
-	WRITE(image, pixel++, ((r << 7) & 0x7c00) |
-	      ((g << 2) & 0x03e0) |
-	      ((b >> 3)         ));
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	SPLIT (values[i]);
+	
+	WRITE (image, pixel++,
+	       ((r << 7) & 0x7c00) |
+	       ((g << 2) & 0x03e0) |
+	       ((b >> 3)         ));
     }
 }
 
-static FASTCALL void
-fbStore_a1b5g5r5 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_a1b5g5r5 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
-    int i;
+    uint32_t *bits = image->bits + image->rowstride * y;
     uint16_t  *pixel = ((uint16_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	Splita(values[i]);
-	WRITE(image, pixel++, ((a << 8) & 0x8000) |
-	      ((b << 7) & 0x7c00) |
-	      ((g << 2) & 0x03e0) |
-	      ((r >> 3)         ));
-    }
-}
-
-static FASTCALL void
-fbStore_x1b5g5r5 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
-{
     int i;
-    uint16_t  *pixel = ((uint16_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	Split(values[i]);
-	WRITE(image, pixel++, ((b << 7) & 0x7c00) |
-	      ((g << 2) & 0x03e0) |
-	      ((r >> 3)         ));
+    
+    for (i = 0; i < width; ++i)
+    {
+	SPLIT_A (values[i]);
+	
+	WRITE (image, pixel++,
+	       ((a << 8) & 0x8000) |
+	       ((b << 7) & 0x7c00) |
+	       ((g << 2) & 0x03e0) |
+	       ((r >> 3)         ));
     }
 }
 
-static FASTCALL void
-fbStore_a4r4g4b4 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_x1b5g5r5 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
-    int i;
+    uint32_t *bits = image->bits + image->rowstride * y;
     uint16_t  *pixel = ((uint16_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	Splita(values[i]);
-	WRITE(image, pixel++, ((a << 8) & 0xf000) |
-	      ((r << 4) & 0x0f00) |
-	      ((g     ) & 0x00f0) |
-	      ((b >> 4)         ));
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	SPLIT (values[i]);
+	
+	WRITE (image, pixel++, ((b << 7) & 0x7c00) |
+	       ((g << 2) & 0x03e0) |
+	       ((r >> 3)         ));
     }
 }
 
-static FASTCALL void
-fbStore_x4r4g4b4 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_a4r4g4b4 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
-    int i;
+    uint32_t *bits = image->bits + image->rowstride * y;
     uint16_t  *pixel = ((uint16_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	Split(values[i]);
-	WRITE(image, pixel++, ((r << 4) & 0x0f00) |
-	      ((g     ) & 0x00f0) |
-	      ((b >> 4)         ));
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	SPLIT_A (values[i]);
+	
+	WRITE (image, pixel++,
+	       ((a << 8) & 0xf000) |
+	       ((r << 4) & 0x0f00) |
+	       ((g     ) & 0x00f0) |
+	       ((b >> 4)         ));
     }
 }
 
-static FASTCALL void
-fbStore_a4b4g4r4 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_x4r4g4b4 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
-    int i;
+    uint32_t *bits = image->bits + image->rowstride * y;
     uint16_t  *pixel = ((uint16_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	Splita(values[i]);
-	WRITE(image, pixel++, ((a << 8) & 0xf000) |
-	      ((b << 4) & 0x0f00) |
-	      ((g     ) & 0x00f0) |
-	      ((r >> 4)         ));
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	SPLIT (values[i]);
+	
+	WRITE (image, pixel++,
+	       ((r << 4) & 0x0f00) |
+	       ((g     ) & 0x00f0) |
+	       ((b >> 4)         ));
     }
 }
 
-static FASTCALL void
-fbStore_x4b4g4r4 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_a4b4g4r4 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
-    int i;
+    uint32_t *bits = image->bits + image->rowstride * y;
     uint16_t  *pixel = ((uint16_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	Split(values[i]);
-	WRITE(image, pixel++, ((b << 4) & 0x0f00) |
-	      ((g     ) & 0x00f0) |
-	      ((r >> 4)         ));
-    }
-}
-
-static FASTCALL void
-fbStore_a8 (pixman_image_t *image,
-	    uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
-{
     int i;
-    uint8_t   *pixel = ((uint8_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	WRITE(image, pixel++, values[i] >> 24);
+    
+    for (i = 0; i < width; ++i)
+    {
+	SPLIT_A (values[i]);
+	WRITE (image, pixel++, ((a << 8) & 0xf000) |
+	       ((b << 4) & 0x0f00) |
+	       ((g     ) & 0x00f0) |
+	       ((r >> 4)         ));
     }
 }
 
-static FASTCALL void
-fbStore_r3g3b2 (pixman_image_t *image,
-		uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_x4b4g4r4 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint16_t  *pixel = ((uint16_t *) bits) + x;
     int i;
-    uint8_t   *pixel = ((uint8_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	Split(values[i]);
-	WRITE(image, pixel++,
-	      ((r     ) & 0xe0) |
-	      ((g >> 3) & 0x1c) |
-	      ((b >> 6)       ));
+    
+    for (i = 0; i < width; ++i)
+    {
+	SPLIT (values[i]);
+	
+	WRITE (image, pixel++,
+	       ((b << 4) & 0x0f00) |
+	       ((g     ) & 0x00f0) |
+	       ((r >> 4)         ));
     }
 }
 
-static FASTCALL void
-fbStore_b2g3r3 (pixman_image_t *image,
-		uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_a8 (bits_image_t *  image,
+                   int             x,
+                   int             y,
+                   int             width,
+                   const uint32_t *values)
 {
-    int i;
+    uint32_t *bits = image->bits + image->rowstride * y;
     uint8_t   *pixel = ((uint8_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	Split(values[i]);
-	WRITE(image, pixel++,
-	      ((b     ) & 0xc0) |
-	      ((g >> 2) & 0x38) |
-	      ((r >> 5)       ));
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	WRITE (image, pixel++, values[i] >> 24);
+    }
+}
+
+static void
+store_scanline_r3g3b2 (bits_image_t *  image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       const uint32_t *values)
+{
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint8_t   *pixel = ((uint8_t *) bits) + x;
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	SPLIT (values[i]);
+	
+	WRITE (image, pixel++,
+	       ((r     ) & 0xe0) |
+	       ((g >> 3) & 0x1c) |
+	       ((b >> 6)       ));
     }
 }
 
-static FASTCALL void
-fbStore_a2r2g2b2 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_b2g3r3 (bits_image_t *  image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       const uint32_t *values)
 {
-    int i;
+    uint32_t *bits = image->bits + image->rowstride * y;
     uint8_t   *pixel = ((uint8_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	Splita(values[i]);
-	WRITE(image, pixel++, ((a     ) & 0xc0) |
-	      ((r >> 2) & 0x30) |
-	      ((g >> 4) & 0x0c) |
-	      ((b >> 6)       ));
-    }
-}
-
-static FASTCALL void
-fbStore_c8 (pixman_image_t *image,
-	    uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
-{
     int i;
-    uint8_t   *pixel = ((uint8_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	WRITE(image, pixel++, miIndexToEnt24(indexed,values[i]));
-    }
-}
-
-static FASTCALL void
-fbStore_x4a4 (pixman_image_t *image,
-	      uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
-{
-    int i;
-    uint8_t   *pixel = ((uint8_t *) bits) + x;
-    for (i = 0; i < width; ++i) {
-	WRITE(image, pixel++, values[i] >> 28);
+    
+    for (i = 0; i < width; ++i)
+    {
+	SPLIT (values[i]);
+	
+	WRITE (image, pixel++,
+	       ((b     ) & 0xc0) |
+	       ((g >> 2) & 0x38) |
+	       ((r >> 5)       ));
     }
 }
 
-#define Store8(img,l,o,v)  (WRITE(img, (uint8_t *)(l) + ((o) >> 3), (v)))
-#if IMAGE_BYTE_ORDER == MSBFirst
-#define Store4(img,l,o,v)  Store8(img,l,o,((o) & 4 ?				\
-				   (Fetch8(img,l,o) & 0xf0) | (v) :		\
-				   (Fetch8(img,l,o) & 0x0f) | ((v) << 4)))
-#else
-#define Store4(img,l,o,v)  Store8(img,l,o,((o) & 4 ?			       \
-				   (Fetch8(img,l,o) & 0x0f) | ((v) << 4) : \
-				   (Fetch8(img,l,o) & 0xf0) | (v)))
-#endif
-
-static FASTCALL void
-fbStore_a4 (pixman_image_t *image,
-	    uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_a2r2g2b2 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint8_t   *pixel = ((uint8_t *) bits) + x;
     int i;
-    for (i = 0; i < width; ++i) {
-	Store4(image, bits, i + x, values[i]>>28);
+    
+    for (i = 0; i < width; ++i)
+    {
+	SPLIT_A (values[i]);
+	
+	WRITE (image, pixel++,
+	       ((a     ) & 0xc0) |
+	       ((r >> 2) & 0x30) |
+	       ((g >> 4) & 0x0c) |
+	       ((b >> 6)       ));
     }
 }
 
-static FASTCALL void
-fbStore_r1g2b1 (pixman_image_t *image,
-		uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_a2b2g2r2 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
-    int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t  pixel;
-
-	Split(values[i]);
-	pixel = (((r >> 4) & 0x8) |
-		 ((g >> 5) & 0x6) |
-		 ((b >> 7)      ));
-	Store4(image, bits, i + x, pixel);
-    }
-}
-
-static FASTCALL void
-fbStore_b1g2r1 (pixman_image_t *image,
-		uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
-{
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint8_t   *pixel = ((uint8_t *) bits) + x;
     int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t  pixel;
-
-	Split(values[i]);
-	pixel = (((b >> 4) & 0x8) |
-		 ((g >> 5) & 0x6) |
-		 ((r >> 7)      ));
-	Store4(image, bits, i + x, pixel);
-    }
-}
-
-static FASTCALL void
-fbStore_a1r1g1b1 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
-{
-    int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t  pixel;
-	Splita(values[i]);
-	pixel = (((a >> 4) & 0x8) |
-		 ((r >> 5) & 0x4) |
-		 ((g >> 6) & 0x2) |
-		 ((b >> 7)      ));
-	Store4(image, bits, i + x, pixel);
+    
+    for (i = 0; i < width; ++i)
+    {
+	SPLIT_A (values[i]);
+	
+	WRITE (image, pixel++,
+	       ((a     ) & 0xc0) |
+	       ((b >> 2) & 0x30) |
+	       ((g >> 4) & 0x0c) |
+	       ((r >> 6)       ));
     }
 }
 
-static FASTCALL void
-fbStore_a1b1g1r1 (pixman_image_t *image,
-		  uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_c8 (bits_image_t *  image,
+                   int             x,
+                   int             y,
+                   int             width,
+                   const uint32_t *values)
 {
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint8_t *pixel = ((uint8_t *) bits) + x;
+    const pixman_indexed_t *indexed = image->indexed;
+    int i;
+    
+    for (i = 0; i < width; ++i)
+	WRITE (image, pixel++, RGB24_TO_ENTRY (indexed,values[i]));
+}
+
+static void
+store_scanline_x4a4 (bits_image_t *  image,
+                     int             x,
+                     int             y,
+                     int             width,
+                     const uint32_t *values)
+{
+    uint32_t *bits = image->bits + image->rowstride * y;
+    uint8_t   *pixel = ((uint8_t *) bits) + x;
     int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t  pixel;
-	Splita(values[i]);
-	pixel = (((a >> 4) & 0x8) |
-		 ((b >> 5) & 0x4) |
-		 ((g >> 6) & 0x2) |
-		 ((r >> 7)      ));
-	Store4(image, bits, i + x, pixel);
+
+    for (i = 0; i < width; ++i)
+	WRITE (image, pixel++, values[i] >> 28);
+}
+
+#define STORE_8(img,l,o,v)  (WRITE (img, (uint8_t *)(l) + ((o) >> 3), (v)))
+#ifdef WORDS_BIGENDIAN
+
+#define STORE_4(img,l,o,v)						\
+    do									\
+    {									\
+	int bo = 4 * (o);						\
+	STORE_8 (img, l, bo, (bo & 4 ?					\
+			      (FETCH_8 (img, l, bo) & 0xf0) | (v) :	\
+			      (FETCH_8 (img, l, bo) & 0x0f) | ((v) << 4))); \
+    } while (0)
+#else
+
+#define STORE_4(img,l,o,v)						\
+    do									\
+    {									\
+	int bo = 4 * (o);						\
+	STORE_8 (img, l, bo, (bo & 4 ?					\
+			      (FETCH_8 (img, l, bo) & 0x0f) | ((v) << 4) : \
+			      (FETCH_8 (img, l, bo) & 0xf0) | (v)));	\
+    } while (0)
+#endif
+
+static void
+store_scanline_a4 (bits_image_t *  image,
+                   int             x,
+                   int             y,
+                   int             width,
+                   const uint32_t *values)
+{
+    uint32_t *bits = image->bits + image->rowstride * y;
+    int i;
+
+    for (i = 0; i < width; ++i)
+	STORE_4 (image, bits, i + x, values[i] >> 28);
+}
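
(A minimal standalone C sketch, not part of the patch, of what the little-endian STORE_4 above does: two 4bpp pixels share one byte, with even pixel offsets landing in the low nibble and odd offsets in the high nibble. The helper name store_4bpp is hypothetical.)

    #include <stdint.h>
    #include <stdio.h>

    static void
    store_4bpp (uint8_t *line, int offset, uint8_t v)
    {
        int byte = offset >> 1;                                 /* two pixels per byte */

        if (offset & 1)
            line[byte] = (line[byte] & 0x0f) | (uint8_t) (v << 4);  /* odd: high nibble */
        else
            line[byte] = (line[byte] & 0xf0) | v;                   /* even: low nibble */
    }

    int
    main (void)
    {
        uint8_t line[1] = { 0 };

        store_4bpp (line, 0, 0xa);
        store_4bpp (line, 1, 0x5);
        printf ("%02x\n", line[0]);   /* prints "5a" */
        return 0;
    }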
+
+static void
+store_scanline_r1g2b1 (bits_image_t *  image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       const uint32_t *values)
+{
+    uint32_t *bits = image->bits + image->rowstride * y;
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t pixel;
+	
+	SPLIT (values[i]);
+	pixel = (((r >> 4) & 0x8) |
+	         ((g >> 5) & 0x6) |
+	         ((b >> 7)      ));
+	STORE_4 (image, bits, i + x, pixel);
     }
 }
 
-static FASTCALL void
-fbStore_c4 (pixman_image_t *image,
-	    uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_b1g2r1 (bits_image_t *  image,
+                       int             x,
+                       int             y,
+                       int             width,
+                       const uint32_t *values)
 {
+    uint32_t *bits = image->bits + image->rowstride * y;
     int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t  pixel;
 
-	pixel = miIndexToEnt24(indexed, values[i]);
-	Store4(image, bits, i + x, pixel);
-    }
-}
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t pixel;
 
-static FASTCALL void
-fbStore_a1 (pixman_image_t *image,
-	    uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
-{
-    int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t  *pixel = ((uint32_t *) bits) + ((i+x) >> 5);
-	uint32_t  mask = FbStipMask((i+x) & 0x1f, 1);
-
-	uint32_t v = values[i] & 0x80000000 ? mask : 0;
-	WRITE(image, pixel, (READ(image, pixel) & ~mask) | v);
+	SPLIT (values[i]);
+	pixel = (((b >> 4) & 0x8) |
+	         ((g >> 5) & 0x6) |
+	         ((r >> 7)      ));
+	STORE_4 (image, bits, i + x, pixel);
     }
 }
 
-static FASTCALL void
-fbStore_g1 (pixman_image_t *image,
-	    uint32_t *bits, const uint32_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_a1r1g1b1 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
 {
+    uint32_t *bits = image->bits + image->rowstride * y;
     int i;
-    for (i = 0; i < width; ++i) {
-	uint32_t  *pixel = ((uint32_t *) bits) + ((i+x) >> 5);
-	uint32_t  mask = FbStipMask((i+x) & 0x1f, 1);
+
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t pixel;
+
+	SPLIT_A (values[i]);
+	pixel = (((a >> 4) & 0x8) |
+	         ((r >> 5) & 0x4) |
+	         ((g >> 6) & 0x2) |
+	         ((b >> 7)      ));
+
+	STORE_4 (image, bits, i + x, pixel);
+    }
+}
 
-	uint32_t v = miIndexToEntY24(indexed,values[i]) ? mask : 0;
-	WRITE(image, pixel, (READ(image, pixel) & ~mask) | v);
+static void
+store_scanline_a1b1g1r1 (bits_image_t *  image,
+                         int             x,
+                         int             y,
+                         int             width,
+                         const uint32_t *values)
+{
+    uint32_t *bits = image->bits + image->rowstride * y;
+    int i;
+
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t pixel;
+
+	SPLIT_A (values[i]);
+	pixel = (((a >> 4) & 0x8) |
+	         ((b >> 5) & 0x4) |
+	         ((g >> 6) & 0x2) |
+	         ((r >> 7)      ));
+
+	STORE_4 (image, bits, i + x, pixel);
     }
 }
 
-
-storeProc32 ACCESS(pixman_storeProcForPicture32) (bits_image_t * pict)
+static void
+store_scanline_c4 (bits_image_t *  image,
+                   int             x,
+                   int             y,
+                   int             width,
+                   const uint32_t *values)
 {
-    switch(pict->format) {
-    case PIXMAN_a8r8g8b8: return fbStore_a8r8g8b8;
-    case PIXMAN_x8r8g8b8: return fbStore_x8r8g8b8;
-    case PIXMAN_a8b8g8r8: return fbStore_a8b8g8r8;
-    case PIXMAN_x8b8g8r8: return fbStore_x8b8g8r8;
-
-        /* 24bpp formats */
-    case PIXMAN_r8g8b8: return fbStore_r8g8b8;
-    case PIXMAN_b8g8r8: return fbStore_b8g8r8;
-
-        /* 16bpp formats */
-    case PIXMAN_r5g6b5: return fbStore_r5g6b5;
-    case PIXMAN_b5g6r5: return fbStore_b5g6r5;
+    uint32_t *bits = image->bits + image->rowstride * y;
+    const pixman_indexed_t *indexed = image->indexed;
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t pixel;
+	
+	pixel = RGB24_TO_ENTRY (indexed, values[i]);
+	STORE_4 (image, bits, i + x, pixel);
+    }
+}
 
-    case PIXMAN_a1r5g5b5: return fbStore_a1r5g5b5;
-    case PIXMAN_x1r5g5b5: return fbStore_x1r5g5b5;
-    case PIXMAN_a1b5g5r5: return fbStore_a1b5g5r5;
-    case PIXMAN_x1b5g5r5: return fbStore_x1b5g5r5;
-    case PIXMAN_a4r4g4b4: return fbStore_a4r4g4b4;
-    case PIXMAN_x4r4g4b4: return fbStore_x4r4g4b4;
-    case PIXMAN_a4b4g4r4: return fbStore_a4b4g4r4;
-    case PIXMAN_x4b4g4r4: return fbStore_x4b4g4r4;
+static void
+store_scanline_a1 (bits_image_t *  image,
+                   int             x,
+                   int             y,
+                   int             width,
+                   const uint32_t *values)
+{
+    uint32_t *bits = image->bits + image->rowstride * y;
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t  *pixel = ((uint32_t *) bits) + ((i + x) >> 5);
+	uint32_t mask, v;
+	
+#ifdef WORDS_BIGENDIAN
+	mask = 1 << (0x1f - ((i + x) & 0x1f));
+#else
+	mask = 1 << ((i + x) & 0x1f);
+#endif
+	v = values[i] & 0x80000000 ? mask : 0;
+	
+	WRITE (image, pixel, (READ (image, pixel) & ~mask) | v);
+    }
+}
 
-        /* 8bpp formats */
-    case PIXMAN_a8: return  fbStore_a8;
-    case PIXMAN_r3g3b2: return fbStore_r3g3b2;
-    case PIXMAN_b2g3r3: return fbStore_b2g3r3;
-    case PIXMAN_a2r2g2b2: return fbStore_a2r2g2b2;
-    case PIXMAN_c8: return  fbStore_c8;
-    case PIXMAN_g8: return  fbStore_c8;
-    case PIXMAN_x4a4: return fbStore_x4a4;
-
-        /* 4bpp formats */
-    case PIXMAN_a4: return  fbStore_a4;
-    case PIXMAN_r1g2b1: return fbStore_r1g2b1;
-    case PIXMAN_b1g2r1: return fbStore_b1g2r1;
-    case PIXMAN_a1r1g1b1: return fbStore_a1r1g1b1;
-    case PIXMAN_a1b1g1r1: return fbStore_a1b1g1r1;
-    case PIXMAN_c4: return  fbStore_c4;
-    case PIXMAN_g4: return  fbStore_c4;
-
-        /* 1bpp formats */
-    case PIXMAN_a1: return  fbStore_a1;
-    case PIXMAN_g1: return  fbStore_g1;
-    default:
-        return NULL;
+static void
+store_scanline_g1 (bits_image_t *  image,
+                   int             x,
+                   int             y,
+                   int             width,
+                   const uint32_t *values)
+{
+    uint32_t *bits = image->bits + image->rowstride * y;
+    const pixman_indexed_t *indexed = image->indexed;
+    int i;
+    
+    for (i = 0; i < width; ++i)
+    {
+	uint32_t  *pixel = ((uint32_t *) bits) + ((i + x) >> 5);
+	uint32_t mask, v;
+	
+#ifdef WORDS_BIGENDIAN
+	mask = 1 << (0x1f - ((i + x) & 0x1f));
+#else
+	mask = 1 << ((i + x) & 0x1f);
+#endif
+	v = RGB24_TO_ENTRY_Y (indexed, values[i]) ? mask : 0;
+	
+	WRITE (image, pixel, (READ (image, pixel) & ~mask) | v);
     }
 }
 
 /*
  * Contracts a 64bpp image to 32bpp and then stores it using a regular 32-bit
- * store proc.
+ * store proc. Despite the type, this function expects a uint64_t buffer.
  */
-static FASTCALL void
-fbStore64_generic (pixman_image_t *image,
-		   uint32_t *bits, const uint64_t *values, int x, int width, const pixman_indexed_t * indexed)
+static void
+store_scanline_generic_64 (bits_image_t *  image,
+                           int             x,
+                           int             y,
+                           int             width,
+                           const uint32_t *values)
 {
-    bits_image_t *pict = (bits_image_t*)image;
-    storeProc32 store32 = ACCESS(pixman_storeProcForPicture32) (pict);
-    uint32_t *argb8Pixels;
-
-    assert(image->common.type == BITS);
-    assert(store32);
-
-    argb8Pixels = pixman_malloc_ab (width, sizeof(uint32_t));
-    if (!argb8Pixels) return;
-
-    // Contract the scanline.  We could do this in place if values weren't
-    // const.
-    pixman_contract(argb8Pixels, values, width);
-    store32(image, bits, argb8Pixels, x, width, indexed);
-
-    free(argb8Pixels);
+    uint32_t *argb8_pixels;
+    
+    assert (image->common.type == BITS);
+    
+    argb8_pixels = pixman_malloc_ab (width, sizeof(uint32_t));
+    if (!argb8_pixels)
+	return;
+    
+    /* Contract the scanline.  We could do this in place if values weren't
+     * const.
+     */
+    pixman_contract (argb8_pixels, (uint64_t *)values, width);
+    
+    image->store_scanline_raw_32 (image, x, y, width, argb8_pixels);
+    
+    free (argb8_pixels);
 }
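
(For reference, pixman_contract simply keeps the top 8 bits of each 16-bit component when narrowing a wide scanline back to 32bpp; a rough single-pixel sketch of that truncation, with a hypothetical helper name, is:)

    #include <stdint.h>

    /* truncate one a16r16g16b16 pixel to a8r8g8b8 */
    static uint32_t
    contract_one_pixel (uint64_t wide)
    {
        uint32_t a = (uint32_t) (wide >> 56) & 0xff;
        uint32_t r = (uint32_t) (wide >> 40) & 0xff;
        uint32_t g = (uint32_t) (wide >> 24) & 0xff;
        uint32_t b = (uint32_t) (wide >>  8) & 0xff;

        return (a << 24) | (r << 16) | (g << 8) | b;
    }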
 
-storeProc64 ACCESS(pixman_storeProcForPicture64) (bits_image_t * pict)
+/* Despite the type, this function expects both buffer
+ * and mask to be uint64_t
+ */
+static void
+fetch_scanline_generic_64 (pixman_image_t *image,
+                           int             x,
+                           int             y,
+                           int             width,
+                           uint32_t *      buffer,
+                           const uint32_t *mask,
+                           uint32_t        mask_bits)
 {
-    switch(pict->format) {
-    case PIXMAN_a2b10g10r10: return fbStore_a2b10g10r10;
-    case PIXMAN_x2b10g10r10: return fbStore_x2b10g10r10;
-    default: return fbStore64_generic;
-    }
+    /* Fetch the pixels into the first half of buffer and then expand them in
+     * place.
+     */
+    image->bits.fetch_scanline_raw_32 (image, x, y, width, buffer, NULL, 0);
+    
+    pixman_expand ((uint64_t *)buffer, buffer, image->bits.format, width);
 }
 
-#ifndef PIXMAN_FB_ACCESSORS
-/*
- * Helper routine to expand a color component from 0 < n <= 8 bits to 16 bits by
- * replication.
- */
-static inline uint64_t expand16(const uint8_t val, int nbits)
+/* Despite the type, this function expects a uint64_t *buffer */
+static uint64_t
+fetch_pixel_generic_64 (bits_image_t *image,
+			int	      offset,
+			int           line)
 {
-    // Start out with the high bit of val in the high bit of result.
-    uint16_t result = (uint16_t)val << (16 - nbits);
-
-    if (nbits == 0)
-        return 0;
-
-    // Copy the bits in result, doubling the number of bits each time, until we
-    // fill all 16 bits.
-    while (nbits < 16) {
-        result |= result >> nbits;
-        nbits *= 2;
-    }
+    uint32_t pixel32 = image->fetch_pixel_raw_32 (image, offset, line);
+    uint64_t result;
+    
+    pixman_expand ((uint64_t *)&result, &pixel32, image->format, 1);
 
     return result;
 }
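
(The expansion in the other direction widens each component to 16 bits by bit replication, so that a 5-bit value 10101 becomes 1010110101101011 rather than 1010100000000000. A per-channel sketch, mirroring the expand16 helper that this patch removes further down in the file:)

    #include <stdint.h>

    /* replicate an nbits-wide value (0 < nbits <= 8) up to 16 bits */
    static uint16_t
    expand_channel (uint8_t val, int nbits)
    {
        uint16_t result = (uint16_t) (val << (16 - nbits));

        while (nbits < 16)
        {
            result |= result >> nbits;  /* double the replicated width each pass */
            nbits *= 2;
        }
        return result;
    }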
 
 /*
- * This function expands images from ARGB8 format to ARGB16.  To preserve
- * precision, it needs to know the original source format.  For example, if the
- * source was PIXMAN_x1r5g5b5 and the red component contained bits 12345, then
- * the expanded value is 12345123.  To correctly expand this to 16 bits, it
- * should be 1234512345123451 and not 1234512312345123.
+ * XXX: The transformed fetch path only works at 32-bpp so far.  When all
+ * paths have wide versions, this can be removed.
+ *
+ * WARNING: This function loses precision!
  */
-void pixman_expand(uint64_t *dst, const uint32_t *src,
-                   pixman_format_code_t format, int width)
+static uint32_t
+fetch_pixel_generic_lossy_32 (bits_image_t *image,
+			      int           offset,
+			      int           line)
+{
+    uint64_t pixel64 = image->fetch_pixel_raw_64 (image, offset, line);
+    uint32_t result;
+    
+    pixman_contract (&result, &pixel64, 1);
+
+    return result;
+}
+
+typedef struct
+{
+    pixman_format_code_t	format;
+    fetch_scanline_t		fetch_scanline_raw_32;
+    fetch_scanline_t		fetch_scanline_raw_64;
+    fetch_pixel_32_t		fetch_pixel_raw_32;
+    fetch_pixel_64_t		fetch_pixel_raw_64;
+    store_scanline_t		store_scanline_raw_32;
+    store_scanline_t		store_scanline_raw_64;
+} format_info_t;
+
+#define FORMAT_INFO(format) 						\
+    {									\
+	PIXMAN_ ## format,						\
+	    fetch_scanline_ ## format,					\
+	    fetch_scanline_generic_64,					\
+	    fetch_pixel_ ## format, fetch_pixel_generic_64,		\
+	    store_scanline_ ## format, store_scanline_generic_64	\
+    }
+
+static const format_info_t accessors[] =
 {
-    /*
-     * Determine the sizes of each component and the masks and shifts required
-     * to extract them from the source pixel.
-     */
-    const int a_size = PIXMAN_FORMAT_A(format),
-              r_size = PIXMAN_FORMAT_R(format),
-              g_size = PIXMAN_FORMAT_G(format),
-              b_size = PIXMAN_FORMAT_B(format);
-    const int a_shift = 32 - a_size,
-              r_shift = 24 - r_size,
-              g_shift = 16 - g_size,
-              b_shift =  8 - b_size;
-    const uint8_t a_mask = ~(~0 << a_size),
-                  r_mask = ~(~0 << r_size),
-                  g_mask = ~(~0 << g_size),
-                  b_mask = ~(~0 << b_size);
-    int i;
+/* 32 bpp formats */
+    FORMAT_INFO (a8r8g8b8),
+    FORMAT_INFO (x8r8g8b8),
+    FORMAT_INFO (a8b8g8r8),
+    FORMAT_INFO (x8b8g8r8),
+    FORMAT_INFO (b8g8r8a8),
+    FORMAT_INFO (b8g8r8x8),
+    
+/* 24bpp formats */
+    FORMAT_INFO (r8g8b8),
+    FORMAT_INFO (b8g8r8),
+    
+/* 16bpp formats */
+    FORMAT_INFO (r5g6b5),
+    FORMAT_INFO (b5g6r5),
+    
+    FORMAT_INFO (a1r5g5b5),
+    FORMAT_INFO (x1r5g5b5),
+    FORMAT_INFO (a1b5g5r5),
+    FORMAT_INFO (x1b5g5r5),
+    FORMAT_INFO (a4r4g4b4),
+    FORMAT_INFO (x4r4g4b4),
+    FORMAT_INFO (a4b4g4r4),
+    FORMAT_INFO (x4b4g4r4),
+    
+/* 8bpp formats */
+    FORMAT_INFO (a8),
+    FORMAT_INFO (r3g3b2),
+    FORMAT_INFO (b2g3r3),
+    FORMAT_INFO (a2r2g2b2),
+    FORMAT_INFO (a2b2g2r2),
+    
+    FORMAT_INFO (c8),
+    
+#define fetch_scanline_g8 fetch_scanline_c8
+#define fetch_pixel_g8 fetch_pixel_c8
+#define store_scanline_g8 store_scanline_c8
+    FORMAT_INFO (g8),
+    
+#define fetch_scanline_x4c4 fetch_scanline_c8
+#define fetch_pixel_x4c4 fetch_pixel_c8
+#define store_scanline_x4c4 store_scanline_c8
+    FORMAT_INFO (x4c4),
+    
+#define fetch_scanline_x4g4 fetch_scanline_c8
+#define fetch_pixel_x4g4 fetch_pixel_c8
+#define store_scanline_x4g4 store_scanline_c8
+    FORMAT_INFO (x4g4),
+    
+    FORMAT_INFO (x4a4),
+    
+/* 4bpp formats */
+    FORMAT_INFO (a4),
+    FORMAT_INFO (r1g2b1),
+    FORMAT_INFO (b1g2r1),
+    FORMAT_INFO (a1r1g1b1),
+    FORMAT_INFO (a1b1g1r1),
+    
+    FORMAT_INFO (c4),
+    
+#define fetch_scanline_g4 fetch_scanline_c4
+#define fetch_pixel_g4 fetch_pixel_c4
+#define store_scanline_g4 store_scanline_c4
+    FORMAT_INFO (g4),
+    
+/* 1bpp formats */
+    FORMAT_INFO (a1),
+    FORMAT_INFO (g1),
+    
+/* Wide formats */
+    
+    { PIXMAN_a2r10g10b10,
+      NULL, fetch_scanline_a2r10g10b10,
+      fetch_pixel_generic_lossy_32, fetch_pixel_a2r10g10b10,
+      NULL, store_scanline_a2r10g10b10 },
+    
+    { PIXMAN_x2r10g10b10,
+      NULL, fetch_scanline_x2r10g10b10,
+      fetch_pixel_generic_lossy_32, fetch_pixel_x2r10g10b10,
+      NULL, store_scanline_x2r10g10b10 },
+    
+    { PIXMAN_a2b10g10r10,
+      NULL, fetch_scanline_a2b10g10r10,
+      fetch_pixel_generic_lossy_32, fetch_pixel_a2b10g10r10,
+      NULL, store_scanline_a2b10g10r10 },
+    
+    { PIXMAN_x2b10g10r10,
+      NULL, fetch_scanline_x2b10g10r10,
+      fetch_pixel_generic_lossy_32, fetch_pixel_x2b10g10r10,
+      NULL, store_scanline_x2b10g10r10 },
+    
+/* YUV formats */
+    { PIXMAN_yuy2,
+      fetch_scanline_yuy2, fetch_scanline_generic_64,
+      fetch_pixel_yuy2, fetch_pixel_generic_64,
+      NULL, NULL },
+    
+    { PIXMAN_yv12,
+      fetch_scanline_yv12, fetch_scanline_generic_64,
+      fetch_pixel_yv12, fetch_pixel_generic_64,
+      NULL, NULL },
+    
+    { PIXMAN_null },
+};
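
(For example, FORMAT_INFO (a8r8g8b8) in the table above expands to roughly the following initializer:)

    { PIXMAN_a8r8g8b8,
      fetch_scanline_a8r8g8b8, fetch_scanline_generic_64,
      fetch_pixel_a8r8g8b8, fetch_pixel_generic_64,
      store_scanline_a8r8g8b8, store_scanline_generic_64 },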
 
-    /* Start at the end so that we can do the expansion in place when src == dst */
-    for (i = width - 1; i >= 0; i--)
+static void
+setup_accessors (bits_image_t *image)
+{
+    const format_info_t *info = accessors;
+    
+    while (info->format != PIXMAN_null)
     {
-        const uint32_t pixel = src[i];
-        // Extract the components.
-        const uint8_t a = (pixel >> a_shift) & a_mask,
-                      r = (pixel >> r_shift) & r_mask,
-                      g = (pixel >> g_shift) & g_mask,
-                      b = (pixel >> b_shift) & b_mask;
-        const uint64_t a16 = a_size ? expand16(a, a_size) : 0xffff,
-                       r16 = expand16(r, r_size),
-                       g16 = expand16(g, g_size),
-                       b16 = expand16(b, b_size);
-
-        dst[i] = a16 << 48 | r16 << 32 | g16 << 16 | b16;
+	if (info->format == image->format)
+	{
+	    image->fetch_scanline_raw_32 = info->fetch_scanline_raw_32;
+	    image->fetch_scanline_raw_64 = info->fetch_scanline_raw_64;
+	    image->fetch_pixel_raw_32 = info->fetch_pixel_raw_32;
+	    image->fetch_pixel_raw_64 = info->fetch_pixel_raw_64;
+	    image->store_scanline_raw_32 = info->store_scanline_raw_32;
+	    image->store_scanline_raw_64 = info->store_scanline_raw_64;
+	    
+	    return;
+	}
+	
+	info++;
     }
 }
 
-/*
- * Contracting is easier than expanding.  We just need to truncate the
- * components.
- */
-void pixman_contract(uint32_t *dst, const uint64_t *src, int width)
-{
-    int i;
+#ifndef PIXMAN_FB_ACCESSORS
+void
+_pixman_bits_image_setup_raw_accessors_accessors (bits_image_t *image);
 
-    /* Start at the beginning so that we can do the contraction in place when
-     * src == dst */
-    for (i = 0; i < width; i++)
-    {
-        const uint8_t a = src[i] >> 56,
-                      r = src[i] >> 40,
-                      g = src[i] >> 24,
-                      b = src[i] >> 8;
-        dst[i] = a << 24 | r << 16 | g << 8 | b;
-    }
+void
+_pixman_bits_image_setup_raw_accessors (bits_image_t *image)
+{
+    if (image->read_func || image->write_func)
+	_pixman_bits_image_setup_raw_accessors_accessors (image);
+    else
+	setup_accessors (image);
 }
-#endif // PIXMAN_FB_ACCESSORS
+
+#else
+
+void
+_pixman_bits_image_setup_raw_accessors_accessors (bits_image_t *image)
+{
+    setup_accessors (image);
+}
+
+#endif
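
(The read_func/write_func fields checked above are the per-image accessors that callers can install through the public pixman_image_set_accessors API; a minimal, hypothetical usage sketch, assuming that API and the my_read/my_write names:)

    #include <stdint.h>
    #include <string.h>
    #include "pixman.h"

    /* stand-in accessors, e.g. for memory that must not be touched directly */
    static uint32_t
    my_read (const void *src, int size)
    {
        uint32_t v = 0;
        memcpy (&v, src, size);        /* illustrative only */
        return v;
    }

    static void
    my_write (void *dst, uint32_t value, int size)
    {
        memcpy (dst, &value, size);    /* illustrative only */
    }

    static void
    install_accessors (pixman_image_t *image)
    {
        pixman_image_set_accessors (image, my_read, my_write);
    }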
new file mode 100644
--- /dev/null
+++ b/gfx/cairo/libpixman/src/pixman-accessor.h
@@ -0,0 +1,40 @@
+#ifdef PIXMAN_FB_ACCESSORS
+
+#define ACCESS(sym) sym##_accessors
+
+#define READ(img, ptr)							\
+    (((bits_image_t *)(img))->read_func ((ptr), sizeof(*(ptr))))
+#define WRITE(img, ptr,val)						\
+    (((bits_image_t *)(img))->write_func ((ptr), (val), sizeof (*(ptr))))
+
+#define MEMCPY_WRAPPED(img, dst, src, size)				\
+    do {								\
+	size_t _i;							\
+	uint8_t *_dst = (uint8_t*)(dst), *_src = (uint8_t*)(src);	\
+	for(_i = 0; _i < size; _i++) {					\
+	    WRITE((img), _dst +_i, READ((img), _src + _i));		\
+	}								\
+    } while (0)
+
+#define MEMSET_WRAPPED(img, dst, val, size)				\
+    do {								\
+	size_t _i;							\
+	uint8_t *_dst = (uint8_t*)(dst);				\
+	for(_i = 0; _i < (size_t) size; _i++) {				\
+	    WRITE((img), _dst +_i, (val));				\
+	}								\
+    } while (0)
+
+#else
+
+#define ACCESS(sym) sym
+
+#define READ(img, ptr)		(*(ptr))
+#define WRITE(img, ptr, val)	(*(ptr) = (val))
+#define MEMCPY_WRAPPED(img, dst, src, size)				\
+    memcpy(dst, src, size)
+#define MEMSET_WRAPPED(img, dst, val, size)				\
+    memset(dst, val, size)
+
+#endif
+
new file mode 100644
--- /dev/null
+++ b/gfx/cairo/libpixman/src/pixman-arm-neon-asm.S
@@ -0,0 +1,1511 @@
+/*
+ * Copyright © 2009 Nokia Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author:  Siarhei Siamashka (siarhei.siamashka@nokia.com)
+ */
+
+/*
+ * This file contains implementations of NEON optimized pixel processing
+ * functions. There is no full and detailed tutorial, but some functions
+ * (those which are exposing some new or interesting features) are
+ * extensively commented and can be used as examples.
+ *
+ * You may want to have a look at the comments for following functions:
+ *  - pixman_composite_over_8888_0565_asm_neon
+ *  - pixman_composite_over_n_8_0565_asm_neon
+ */
+
+/* Prevent the stack from becoming executable for no reason... */
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+    .text
+    .fpu neon
+    .arch armv7a
+    .altmacro
+
+#include "pixman-arm-neon-asm.h"
+
+/* Global configuration options and preferences */
+
+/*
+ * The code can optionally make use of unaligned memory accesses to improve
+ * performance of handling leading/trailing pixels for each scanline.
+ * The configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0, for
+ * example on Linux, if unaligned memory accesses are not configured to
+ * generate exceptions.
+ */
+.set RESPECT_STRICT_ALIGNMENT, 1
+
+/*
+ * Set default prefetch type. There is a choice between the following options:
+ *
+ * PREFETCH_TYPE_NONE (may be useful for ARM cores where PLD is set to work
+ * as a NOP to work around some HW bugs, or for whatever other reason)
+ *
+ * PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where
+ * advanced prefetch introduces heavy overhead)
+ *
+ * PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8
+ * which can run ARM and NEON instructions simultaneously so that extra ARM
+ * instructions do not add (many) extra cycles, but improve prefetch efficiency)
+ *
+ * Note: some types of function can't support advanced prefetch and fall back
+ *       to the simple one (those which handle 24bpp pixels)
+ */
+.set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED
+
+/* Prefetch distance in pixels for simple prefetch */
+.set PREFETCH_DISTANCE_SIMPLE, 64
+
+/*
+ * Implementation of pixman_composite_over_8888_0565_asm_neon
+ *
+ * This function takes a8r8g8b8 source buffer, r5g6b5 destination buffer and
+ * performs OVER compositing operation. Function fast_composite_over_8888_0565
+ * from pixman-fast-path.c does the same in C and can be used as a reference.
+ *
+ * First we need to have some NEON assembly code which can do the actual
+ * operation on the pixels and provide it to the template macro.
+ *
+ * Template macro quite conveniently takes care of emitting all the necessary
+ * code for memory reading and writing (including quite tricky cases of
+ * handling unaligned leading/trailing pixels), so we only need to deal with
+ * the data in NEON registers.
+ *
+ * The recommended NEON register allocation, in general, is the following:
+ * d0,  d1,  d2,  d3  - contain loaded source pixel data
+ * d4,  d5,  d6,  d7  - contain loaded destination pixels (if they are needed)
+ * d24, d25, d26, d27 - contain loaded mask pixel data (if a mask is used)
+ * d28, d29, d30, d31 - place for storing the result (destination pixels)
+ *
+ * As can be seen above, four 64-bit NEON registers are used for keeping
+ * intermediate pixel data and up to 8 pixels can be processed in one step
+ * for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp).
+ *
+ * This particular function uses the following register allocation:
+ * d0,  d1,  d2,  d3  - contain loaded source pixel data
+ * d4,  d5            - contain loaded destination pixels (they are needed)
+ * d28, d29           - place for storing the result (destination pixels)
+ */
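
(For readers who prefer C, a rough per-pixel equivalent of what this function computes. This is only a sketch: it widens r5g6b5 with exact scaling and divides by 255, whereas the NEON code below widens by bit replication and rounds with shifts; the real C reference is fast_composite_over_8888_0565 in pixman-fast-path.c, as noted above.)

    #include <stdint.h>

    /* hypothetical helper: composite one premultiplied a8r8g8b8 source pixel
     * OVER one r5g6b5 destination pixel */
    static uint16_t
    over_8888_0565_pixel (uint32_t src, uint16_t dst)
    {
        uint32_t ia = 255 - (src >> 24);                /* inverted source alpha */

        /* widen the r5g6b5 destination to 8 bits per channel */
        uint32_t dr = ((dst >> 11) & 0x1f) * 255 / 31;
        uint32_t dg = ((dst >>  5) & 0x3f) * 255 / 63;
        uint32_t db = ( dst        & 0x1f) * 255 / 31;

        /* dest = src + dest * (1 - alpha), per channel */
        uint32_t r = ((src >> 16) & 0xff) + dr * ia / 255;
        uint32_t g = ((src >>  8) & 0xff) + dg * ia / 255;
        uint32_t b = ( src        & 0xff) + db * ia / 255;

        /* saturate, as the vqadd.u8 instructions do */
        if (r > 255) r = 255;
        if (g > 255) g = 255;
        if (b > 255) b = 255;

        return (uint16_t) (((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
    }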
+
+/*
+ * Step one. We need to have some code to do some arithmetic on pixel data.
+ * This is implemented as a pair of macros: '*_head' and '*_tail'. When used
+ * back-to-back, they take pixel data from {d0, d1, d2, d3} and {d4, d5},
+ * perform all the needed calculations and write the result to {d28, d29}.
+ * The rationale for having two macros and not just one will be explained
+ * later. In practice, any single monolithic function which does the work can
+ * be split into two parts in any arbitrary way without affecting correctness.
+ *
+ * There is one special trick here too. The common template macro can
+ * optionally make our life a bit easier by deinterleaving the R, G, B, A
+ * color components for 32bpp pixel formats (this feature is used in the
+ * 'pixman_composite_over_8888_0565_asm_neon' function). It means that
+ * instead of having 8 packed pixels in the {d0, d1, d2, d3} registers, we
+ * actually use the d0 register for the blue channel (a vector of eight 8-bit
+ * values), d1 for green, d2 for red and d3 for alpha. This simple
+ * conversion can also be done with a few NEON instructions:
+ *
+ * Packed to planar conversion:
+ *  vuzp.8 d0, d1
+ *  vuzp.8 d2, d3
+ *  vuzp.8 d1, d3
+ *  vuzp.8 d0, d2
+ *
+ * Planar to packed conversion:
+ *  vzip.8 d0, d2
+ *  vzip.8 d1, d3
+ *  vzip.8 d2, d3
+ *  vzip.8 d0, d1
+ *
+ * But pixels can be loaded directly in planar format using the VLD4.8 NEON
+ * instruction. It is 1 cycle slower than VLD1.32, so this is not always
+ * desirable; that's why deinterleaving is optional.
+ *
+ * But anyway, here is the code:
+ */
+.macro pixman_composite_over_8888_0565_process_pixblock_head
+    /* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format
+       and put data into d6 - red, d7 - green, d30 - blue */
+    vshrn.u16   d6, q2, #8
+    vshrn.u16   d7, q2, #3
+    vsli.u16    q2, q2, #5
+    vsri.u8     d6, d6, #5
+    vmvn.8      d3, d3      /* invert source alpha */
+    vsri.u8     d7, d7, #6
+    vshrn.u16   d30, q2, #2
+    /* now do alpha blending, storing results in 8-bit planar format
+       into d16 - red, d19 - green, d18 - blue */
+    vmull.u8    q10, d3, d6
+    vmull.u8    q11, d3, d7
+    vmull.u8    q12, d3, d30
+    vrshr.u16   q13, q10, #8
+    vrshr.u16   q3, q11, #8
+    vrshr.u16   q15, q12, #8
+    vraddhn.u16 d20, q10, q13
+    vraddhn.u16 d23, q11, q3
+    vraddhn.u16 d22, q12, q15
+.endm
+
+.macro pixman_composite_over_8888_0565_process_pixblock_tail
+    /* ... continue alpha blending */
+    vqadd.u8    d16, d2, d20
+    vqadd.u8    q9, q0, q11
+    /* convert the result to r5g6b5 and store it into {d28, d29} */
+    vshll.u8    q14, d16, #8
+    vshll.u8    q8, d19, #8
+    vshll.u8    q9, d18, #8
+    vsri.u16    q14, q8, #5
+    vsri.u16    q14, q9, #11
+.endm
+
+/*
+ * OK, now we have almost everything that we need. Using the above two
+ * macros, the work can be done correctly. But now we want to optimize
+ * it a bit. ARM Cortex-A8 is an in-order core, and it benefits
+ * a lot from good code scheduling and software pipelining.
+ *
+ * Let's construct some code, which will run in the core main loop.
+ * Some pseudo-code of the main loop will look like this:
+ *   head
+ *   while (...) {
+ *     tail
+ *     head
+ *   }
+ *   tail
+ *
+ * It may look a bit weird, but this setup allows us to hide instruction
+ * latencies better and also to utilize the dual-issue capability more
+ * efficiently (by pairing load-store and ALU instructions).
+ *
+ * So what we need now is a '*_tail_head' macro, which will be used
+ * in the core main loop. A trivial straightforward implementation
+ * of this macro would look like this:
+ *
+ *   pixman_composite_over_8888_0565_process_pixblock_tail
+ *   vst1.16     {d28, d29}, [DST_W, :128]!
+ *   vld1.16     {d4, d5}, [DST_R, :128]!
+ *   vld4.32     {d0, d1, d2, d3}, [SRC]!
+ *   pixman_composite_over_8888_0565_process_pixblock_head
+ *   cache_preload 8, 8
+ *
+ * Now it also contains some VLD/VST instructions. We simply can't move from
+ * processing one block of pixels to the next with just arithmetic.
+ * The previously processed data needs to be written to memory and new
+ * data needs to be fetched. Fortunately, this main loop does not deal
+ * with partial leading/trailing pixels and can load/store a full block
+ * of pixels in bulk. Additionally, the destination buffer is already
+ * 16-byte aligned here (which is good for performance).
+ *
+ * New things here are DST_R, DST_W, SRC and MASK identifiers. These
+ * are the aliases for ARM registers which are used as pointers for
+ * accessing data. We maintain separate pointers for reading and writing
+ * destination buffer (DST_R and DST_W).
+ *
+ * Another new thing is the 'cache_preload' macro. It is used to prefetch
+ * data into the CPU L2 cache and improve performance when dealing with
+ * images which are far larger than the cache. It takes one argument
+ * (actually two, but they need to be the same here) - the number of pixels
+ * in a block. Looking into 'pixman-arm-neon-asm.h' provides more
+ * details about this macro. Moreover, if good performance is needed,
+ * the code from this macro needs to be copied into the '*_tail_head' macro
+ * and mixed with the rest of the code for optimal instruction scheduling.
+ * We actually do that below.
+ *
+ * Now after all the explanations, here is the optimized code.
+ * Different instruction streams (originating from '*_head', '*_tail'
+ * and 'cache_preload' macro) use different indentation levels for
+ * better readability. Actually taking the code from one of these
+ * indentation levels and ignoring a few VLD/VST instructions would
+ * result in exactly the code from '*_head', '*_tail' or 'cache_preload'
+ * macro!
+ */
+
+#if 1
+
+.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
+        vqadd.u8    d16, d2, d20
+    vld1.16     {d4, d5}, [DST_R, :128]!
+        vqadd.u8    q9, q0, q11
+    vshrn.u16   d6, q2, #8
+    vld4.8      {d0, d1, d2, d3}, [SRC]!
+    vshrn.u16   d7, q2, #3
+    vsli.u16    q2, q2, #5
+        vshll.u8    q14, d16, #8
+                                    PF add PF_X, PF_X, #8
+        vshll.u8    q8, d19, #8
+                                    PF tst PF_CTL, #0xF
+    vsri.u8     d6, d6, #5
+                                    PF addne PF_X, PF_X, #8
+    vmvn.8      d3, d3
+                                    PF subne PF_CTL, PF_CTL, #1
+    vsri.u8     d7, d7, #6
+    vshrn.u16   d30, q2, #2
+    vmull.u8    q10, d3, d6
+                                    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+    vmull.u8    q11, d3, d7
+    vmull.u8    q12, d3, d30
+                                    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+        vsri.u16    q14, q8, #5
+                                    PF cmp PF_X, ORIG_W
+        vshll.u8    q9, d18, #8
+    vrshr.u16   q13, q10, #8
+                                    PF subge PF_X, PF_X, ORIG_W
+    vrshr.u16   q3, q11, #8
+    vrshr.u16   q15, q12, #8
+                                    PF subges PF_CTL, PF_CTL, #0x10
+        vsri.u16    q14, q9, #11
+                                    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+    vraddhn.u16 d20, q10, q13
+    vraddhn.u16 d23, q11, q3
+                                    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+    vraddhn.u16 d22, q12, q15
+        vst1.16     {d28, d29}, [DST_W, :128]!
+.endm
+
+#else
+
+/* If we did not care much about the performance, we would just use this... */
+.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
+    pixman_composite_over_8888_0565_process_pixblock_tail
+    vst1.16     {d28, d29}, [DST_W, :128]!
+    vld1.16     {d4, d5}, [DST_R, :128]!
+    vld4.32     {d0, d1, d2, d3}, [SRC]!
+    pixman_composite_over_8888_0565_process_pixblock_head
+    cache_preload 8, 8
+.endm
+
+#endif
+
+/*
+ * And now the final part. We use the 'generate_composite_function' macro
+ * to put all the pieces together. We specify the name of the function
+ * we want to get, the number of bits per pixel for the source, mask and
+ * destination (0 if unused, like the mask in this case). Next come some bit
+ * flags:
+ *   FLAG_DST_READWRITE      - tells that the destination buffer is both read
+ *                             and written, for write-only buffer we would use
+ *                             FLAG_DST_WRITEONLY flag instead
+ *   FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data
+ *                             and separate color channels for 32bpp format.
+ * The next things are:
+ *  - the number of pixels processed per iteration (8 in this case, because
+ *    that's the maximum that can fit into four 64-bit NEON registers).
+ *  - the prefetch distance, measured in pixel blocks. In this case it is 5
+ *    blocks of 8 pixels, i.e. 40 pixels, or up to 160 bytes. The optimal
+ *    prefetch distance can be selected by running some benchmarks.
+ *
+ * After that we specify some macros: here these are 'default_init' and
+ * 'default_cleanup', which are empty (but it is possible to have custom
+ * init/cleanup macros that save/restore some extra NEON registers,
+ * like d8-d15, or do anything else), followed by
+ * 'pixman_composite_over_8888_0565_process_pixblock_head',
+ * 'pixman_composite_over_8888_0565_process_pixblock_tail' and
+ * 'pixman_composite_over_8888_0565_process_pixblock_tail_head'
+ * which we got implemented above.
+ *
+ * The last part is the NEON registers allocation scheme.
+ */
+generate_composite_function \
+    pixman_composite_over_8888_0565_asm_neon, 32, 0, 16, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_over_8888_0565_process_pixblock_head, \
+    pixman_composite_over_8888_0565_process_pixblock_tail, \
+    pixman_composite_over_8888_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    24  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_over_n_0565_process_pixblock_head
+    /* convert 8 r5g6b5 pixel data from {d4, d5} to planar 8-bit format
+       and put data into d6 - red, d7 - green, d30 - blue */
+    vshrn.u16   d6, q2, #8
+    vshrn.u16   d7, q2, #3
+    vsli.u16    q2, q2, #5
+    vsri.u8     d6, d6, #5
+    vsri.u8     d7, d7, #6
+    vshrn.u16   d30, q2, #2
+    /* now do alpha blending, storing results in 8-bit planar format
+       into d16 - red, d19 - green, d18 - blue */
+    vmull.u8    q10, d3, d6
+    vmull.u8    q11, d3, d7
+    vmull.u8    q12, d3, d30
+    vrshr.u16   q13, q10, #8
+    vrshr.u16   q3, q11, #8
+    vrshr.u16   q15, q12, #8
+    vraddhn.u16 d20, q10, q13
+    vraddhn.u16 d23, q11, q3
+    vraddhn.u16 d22, q12, q15
+.endm
+
+.macro pixman_composite_over_n_0565_process_pixblock_tail
+    /* ... continue alpha blending */
+    vqadd.u8    d16, d2, d20
+    vqadd.u8    q9, q0, q11
+    /* convert the result to r5g6b5 and store it into {d28, d29} */
+    vshll.u8    q14, d16, #8
+    vshll.u8    q8, d19, #8
+    vshll.u8    q9, d18, #8
+    vsri.u16    q14, q8, #5
+    vsri.u16    q14, q9, #11
+.endm
+
+/* TODO: expand macros and do better instructions scheduling */
+.macro pixman_composite_over_n_0565_process_pixblock_tail_head
+    pixman_composite_over_n_0565_process_pixblock_tail
+    vld1.16     {d4, d5}, [DST_R, :128]!
+    vst1.16     {d28, d29}, [DST_W, :128]!
+    pixman_composite_over_n_0565_process_pixblock_head
+.endm
+
+.macro pixman_composite_over_n_0565_init
+    add         DUMMY, sp, #ARGS_STACK_OFFSET
+    vld1.32     {d3[0]}, [DUMMY]
+    vdup.8      d0, d3[0]
+    vdup.8      d1, d3[1]
+    vdup.8      d2, d3[2]
+    vdup.8      d3, d3[3]
+    vmvn.8      d3, d3      /* invert source alpha */
+.endm
+
+generate_composite_function \
+    pixman_composite_over_n_0565_asm_neon, 0, 0, 16, \
+    FLAG_DST_READWRITE, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_n_0565_init, \
+    default_cleanup, \
+    pixman_composite_over_n_0565_process_pixblock_head, \
+    pixman_composite_over_n_0565_process_pixblock_tail, \
+    pixman_composite_over_n_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    24  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_8888_0565_process_pixblock_head
+    vshll.u8    q8, d1, #8
+    vshll.u8    q14, d2, #8
+    vshll.u8    q9, d0, #8
+.endm
+
+.macro pixman_composite_src_8888_0565_process_pixblock_tail
+    vsri.u16    q14, q8, #5
+    vsri.u16    q14, q9, #11
+.endm
+
+.macro pixman_composite_src_8888_0565_process_pixblock_tail_head
+        vsri.u16    q14, q8, #5
+                                    PF add PF_X, PF_X, #8
+                                    PF tst PF_CTL, #0xF
+    vld4.8      {d0, d1, d2, d3}, [SRC]!
+                                    PF addne PF_X, PF_X, #8
+                                    PF subne PF_CTL, PF_CTL, #1
+        vsri.u16    q14, q9, #11
+                                    PF cmp PF_X, ORIG_W
+                                    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+    vshll.u8    q8, d1, #8
+        vst1.16     {d28, d29}, [DST_W, :128]!
+                                    PF subge PF_X, PF_X, ORIG_W
+                                    PF subges PF_CTL, PF_CTL, #0x10
+    vshll.u8    q14, d2, #8
+                                    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+    vshll.u8    q9, d0, #8
+.endm
+
+generate_composite_function \
+    pixman_composite_src_8888_0565_asm_neon, 32, 0, 16, \
+    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_8888_0565_process_pixblock_head, \
+    pixman_composite_src_8888_0565_process_pixblock_tail, \
+    pixman_composite_src_8888_0565_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_src_0565_8888_process_pixblock_head
+    vshrn.u16   d30, q0, #8
+    vshrn.u16   d29, q0, #3
+    vsli.u16    q0, q0, #5
+    vmov.u8     d31, #255
+    vsri.u8     d30, d30, #5
+    vsri.u8     d29, d29, #6
+    vshrn.u16   d28, q0, #2
+.endm
+
+.macro pixman_composite_src_0565_8888_process_pixblock_tail
+.endm
+
+/* TODO: expand macros and do better instructions scheduling */
+.macro pixman_composite_src_0565_8888_process_pixblock_tail_head
+    pixman_composite_src_0565_8888_process_pixblock_tail
+    vst4.8     {d28, d29, d30, d31}, [DST_W, :128]!
+    vld1.16    {d0, d1}, [SRC]!
+    pixman_composite_src_0565_8888_process_pixblock_head
+    cache_preload 8, 8
+.endm
+
+generate_composite_function \
+    pixman_composite_src_0565_8888_asm_neon, 16, 0, 32, \
+    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_0565_8888_process_pixblock_head, \
+    pixman_composite_src_0565_8888_process_pixblock_tail, \
+    pixman_composite_src_0565_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_8000_8000_process_pixblock_head
+    vqadd.u8    q14, q0, q2
+    vqadd.u8    q15, q1, q3
+.endm
+
+.macro pixman_composite_add_8000_8000_process_pixblock_tail
+.endm
+
+.macro pixman_composite_add_8000_8000_process_pixblock_tail_head
+    vld1.8      {d0, d1, d2, d3}, [SRC]!
+                                    PF add PF_X, PF_X, #32
+                                    PF tst PF_CTL, #0xF
+    vld1.8      {d4, d5, d6, d7}, [DST_R, :128]!
+                                    PF addne PF_X, PF_X, #32
+                                    PF subne PF_CTL, PF_CTL, #1
+        vst1.8      {d28, d29, d30, d31}, [DST_W, :128]!
+                                    PF cmp PF_X, ORIG_W
+                                    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+                                    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+                                    PF subge PF_X, PF_X, ORIG_W
+                                    PF subges PF_CTL, PF_CTL, #0x10
+    vqadd.u8    q14, q0, q2
+                                    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+                                    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+    vqadd.u8    q15, q1, q3
+.endm
+
+generate_composite_function \
+    pixman_composite_add_8000_8000_asm_neon, 8, 0, 8, \
+    FLAG_DST_READWRITE, \
+    32, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_add_8000_8000_process_pixblock_head, \
+    pixman_composite_add_8000_8000_process_pixblock_tail, \
+    pixman_composite_add_8000_8000_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_8888_8888_process_pixblock_tail_head
+    vld1.8      {d0, d1, d2, d3}, [SRC]!
+                                    PF add PF_X, PF_X, #8
+                                    PF tst PF_CTL, #0xF
+    vld1.8      {d4, d5, d6, d7}, [DST_R, :128]!
+                                    PF addne PF_X, PF_X, #8
+                                    PF subne PF_CTL, PF_CTL, #1
+        vst1.8      {d28, d29, d30, d31}, [DST_W, :128]!
+                                    PF cmp PF_X, ORIG_W
+                                    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+                                    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+                                    PF subge PF_X, PF_X, ORIG_W
+                                    PF subges PF_CTL, PF_CTL, #0x10
+    vqadd.u8    q14, q0, q2
+                                    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+                                    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+    vqadd.u8    q15, q1, q3
+.endm
+
+generate_composite_function \
+    pixman_composite_add_8888_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_add_8000_8000_process_pixblock_head, \
+    pixman_composite_add_8000_8000_process_pixblock_tail, \
+    pixman_composite_add_8888_8888_process_pixblock_tail_head
+
+generate_composite_function_single_scanline \
+    pixman_composite_scanline_add_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_add_8000_8000_process_pixblock_head, \
+    pixman_composite_add_8000_8000_process_pixblock_tail, \
+    pixman_composite_add_8888_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_8888_8888_process_pixblock_head
+    vmvn.8      d24, d3  /* get inverted alpha */
+    /* do alpha blending */
+    vmull.u8    q8, d24, d4
+    vmull.u8    q9, d24, d5
+    vmull.u8    q10, d24, d6
+    vmull.u8    q11, d24, d7
+.endm
+
+.macro pixman_composite_over_8888_8888_process_pixblock_tail
+    vrshr.u16   q14, q8, #8
+    vrshr.u16   q15, q9, #8
+    vrshr.u16   q12, q10, #8
+    vrshr.u16   q13, q11, #8
+    vraddhn.u16 d28, q14, q8
+    vraddhn.u16 d29, q15, q9
+    vraddhn.u16 d30, q12, q10
+    vraddhn.u16 d31, q13, q11
+    vqadd.u8    q14, q0, q14
+    vqadd.u8    q15, q1, q15
+.endm
+
+.macro pixman_composite_over_8888_8888_process_pixblock_tail_head
+    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
+        vrshr.u16   q14, q8, #8
+                                    PF add PF_X, PF_X, #8
+                                    PF tst PF_CTL, #0xF
+        vrshr.u16   q15, q9, #8
+        vrshr.u16   q12, q10, #8
+        vrshr.u16   q13, q11, #8
+                                    PF addne PF_X, PF_X, #8
+                                    PF subne PF_CTL, PF_CTL, #1
+        vraddhn.u16 d28, q14, q8
+        vraddhn.u16 d29, q15, q9
+                                    PF cmp PF_X, ORIG_W
+        vraddhn.u16 d30, q12, q10
+        vraddhn.u16 d31, q13, q11
+        vqadd.u8    q14, q0, q14
+        vqadd.u8    q15, q1, q15
+    vld4.8      {d0, d1, d2, d3}, [SRC]!
+                                    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+    vmvn.8      d22, d3
+                                    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+        vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
+                                    PF subge PF_X, PF_X, ORIG_W
+    vmull.u8    q8, d22, d4
+                                    PF subges PF_CTL, PF_CTL, #0x10
+    vmull.u8    q9, d22, d5
+                                    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+    vmull.u8    q10, d22, d6
+                                    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+    vmull.u8    q11, d22, d7
+.endm
+
+generate_composite_function \
+    pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_over_8888_8888_process_pixblock_head, \
+    pixman_composite_over_8888_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_8888_process_pixblock_tail_head
+
+generate_composite_function_single_scanline \
+    pixman_composite_scanline_over_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_over_8888_8888_process_pixblock_head, \
+    pixman_composite_over_8888_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+/* TODO: expand macros and do better instructions scheduling */
+.macro pixman_composite_over_n_8888_process_pixblock_tail_head
+    pixman_composite_over_8888_8888_process_pixblock_tail
+    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
+    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
+    pixman_composite_over_8888_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_over_n_8888_init
+    add         DUMMY, sp, #ARGS_STACK_OFFSET
+    vld1.32     {d3[0]}, [DUMMY]
+    vdup.8      d0, d3[0]
+    vdup.8      d1, d3[1]
+    vdup.8      d2, d3[2]
+    vdup.8      d3, d3[3]
+.endm
+
+generate_composite_function \
+    pixman_composite_over_n_8888_asm_neon, 0, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_n_8888_init, \
+    default_cleanup, \
+    pixman_composite_over_8888_8888_process_pixblock_head, \
+    pixman_composite_over_8888_8888_process_pixblock_tail, \
+    pixman_composite_over_n_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_n_8_0565_process_pixblock_head
+    /* in */
+    vmull.u8    q0, d24, d8
+    vmull.u8    q1, d24, d9
+    vmull.u8    q6, d24, d10
+    vmull.u8    q7, d24, d11
+    vrshr.u16   q10, q0, #8
+    vrshr.u16   q11, q1, #8
+    vrshr.u16   q12, q6, #8
+    vrshr.u16   q13, q7, #8
+    vraddhn.u16 d0, q0, q10
+    vraddhn.u16 d1, q1, q11
+    vraddhn.u16 d2, q6, q12
+    vraddhn.u16 d3, q7, q13
+
+    vshrn.u16   d6, q2, #8
+    vshrn.u16   d7, q2, #3
+    vsli.u16    q2, q2, #5
+    vsri.u8     d6, d6, #5
+    vmvn.8      d3, d3
+    vsri.u8     d7, d7, #6
+    vshrn.u16   d30, q2, #2
+    /* now do alpha blending */
+    vmull.u8    q10, d3, d6
+    vmull.u8    q11, d3, d7
+    vmull.u8    q12, d3, d30
+    vrshr.u16   q13, q10, #8
+    vrshr.u16   q3, q11, #8
+    vrshr.u16   q15, q12, #8
+    vraddhn.u16 d20, q10, q13
+    vraddhn.u16 d23, q11, q3
+    vraddhn.u16 d22, q12, q15
+.endm
+
+.macro pixman_composite_over_n_8_0565_process_pixblock_tail
+    vqadd.u8    d16, d2, d20
+    vqadd.u8    q9, q0, q11
+    /* convert to r5g6b5 */
+    vshll.u8    q14, d16, #8
+    vshll.u8    q8, d19, #8
+    vshll.u8    q9, d18, #8
+    vsri.u16    q14, q8, #5
+    vsri.u16    q14, q9, #11
+.endm
+
+/* TODO: expand macros and do better instructions scheduling */
+.macro pixman_composite_over_n_8_0565_process_pixblock_tail_head
+    pixman_composite_over_n_8_0565_process_pixblock_tail
+    vst1.16     {d28, d29}, [DST_W, :128]!
+    vld1.16     {d4, d5}, [DST_R, :128]!
+    vld1.8      {d24}, [MASK]!
+    cache_preload 8, 8
+    pixman_composite_over_n_8_0565_process_pixblock_head
+.endm
+
+/*
+ * This function needs a special initialization of the solid source color.
+ * The solid source pixel data is fetched from the stack at offset
+ * ARGS_STACK_OFFSET, split into color components and replicated into the
+ * d8-d11 registers. Additionally, this function needs all the NEON
+ * registers, so it has to save the d8-d15 registers, which are callee-saved
+ * according to the ABI. These registers are restored in the 'cleanup'
+ * macro. All the other NEON registers are caller-saved and can be
+ * clobbered freely without introducing any problems.
+ */
+.macro pixman_composite_over_n_8_0565_init
+    add         DUMMY, sp, #ARGS_STACK_OFFSET
+    vpush       {d8-d15}
+    vld1.32     {d11[0]}, [DUMMY]
+    vdup.8      d8, d11[0]
+    vdup.8      d9, d11[1]
+    vdup.8      d10, d11[2]
+    vdup.8      d11, d11[3]
+.endm
+
+.macro pixman_composite_over_n_8_0565_cleanup
+    vpop        {d8-d15}
+.endm
+
+generate_composite_function \
+    pixman_composite_over_n_8_0565_asm_neon, 0, 8, 16, \
+    FLAG_DST_READWRITE, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_n_8_0565_init, \
+    pixman_composite_over_n_8_0565_cleanup, \
+    pixman_composite_over_n_8_0565_process_pixblock_head, \
+    pixman_composite_over_n_8_0565_process_pixblock_tail, \
+    pixman_composite_over_n_8_0565_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_src_0565_0565_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_0565_0565_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_0565_0565_process_pixblock_tail_head
+    vst1.16 {d0, d1, d2, d3}, [DST_W, :128]!
+    vld1.16 {d0, d1, d2, d3}, [SRC]!
+    cache_preload 16, 16
+.endm
+
+generate_composite_function \
+    pixman_composite_src_0565_0565_asm_neon, 16, 0, 16, \
+    FLAG_DST_WRITEONLY, \
+    16, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_0565_0565_process_pixblock_head, \
+    pixman_composite_src_0565_0565_process_pixblock_tail, \
+    pixman_composite_src_0565_0565_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_8_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_n_8_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_n_8_process_pixblock_tail_head
+    vst1.8  {d0, d1, d2, d3}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_src_n_8_init
+    add         DUMMY, sp, #ARGS_STACK_OFFSET
+    vld1.32     {d0[0]}, [DUMMY]
+    vsli.u64    d0, d0, #8
+    vsli.u64    d0, d0, #16
+    vsli.u64    d0, d0, #32
+    vmov        d1, d0
+    vmov        q1, q0
+.endm
+
+.macro pixman_composite_src_n_8_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_src_n_8_asm_neon, 0, 0, 8, \
+    FLAG_DST_WRITEONLY, \
+    32, /* number of pixels, processed in a single block */ \
+    0,  /* prefetch distance */ \
+    pixman_composite_src_n_8_init, \
+    pixman_composite_src_n_8_cleanup, \
+    pixman_composite_src_n_8_process_pixblock_head, \
+    pixman_composite_src_n_8_process_pixblock_tail, \
+    pixman_composite_src_n_8_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_0565_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_n_0565_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_n_0565_process_pixblock_tail_head
+    vst1.16 {d0, d1, d2, d3}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_src_n_0565_init
+    add         DUMMY, sp, #ARGS_STACK_OFFSET
+    vld1.32     {d0[0]}, [DUMMY]
+    vsli.u64    d0, d0, #16
+    vsli.u64    d0, d0, #32
+    vmov        d1, d0
+    vmov        q1, q0
+.endm
+
+.macro pixman_composite_src_n_0565_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_src_n_0565_asm_neon, 0, 0, 16, \
+    FLAG_DST_WRITEONLY, \
+    16, /* number of pixels, processed in a single block */ \
+    0,  /* prefetch distance */ \
+    pixman_composite_src_n_0565_init, \
+    pixman_composite_src_n_0565_cleanup, \
+    pixman_composite_src_n_0565_process_pixblock_head, \
+    pixman_composite_src_n_0565_process_pixblock_tail, \
+    pixman_composite_src_n_0565_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_n_8888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_n_8888_process_pixblock_tail_head
+    vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_src_n_8888_init
+    add         DUMMY, sp, #ARGS_STACK_OFFSET
+    vld1.32     {d0[0]}, [DUMMY]
+    vsli.u64    d0, d0, #32
+    vmov        d1, d0
+    vmov        q1, q0
+.endm
+
+.macro pixman_composite_src_n_8888_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \
+    FLAG_DST_WRITEONLY, \
+    8, /* number of pixels, processed in a single block */ \
+    0, /* prefetch distance */ \
+    pixman_composite_src_n_8888_init, \
+    pixman_composite_src_n_8888_cleanup, \
+    pixman_composite_src_n_8888_process_pixblock_head, \
+    pixman_composite_src_n_8888_process_pixblock_tail, \
+    pixman_composite_src_n_8888_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_8888_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_8888_8888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_8888_8888_process_pixblock_tail_head
+    vst1.32 {d0, d1, d2, d3}, [DST_W, :128]!
+    vld1.32 {d0, d1, d2, d3}, [SRC]!
+    cache_preload 8, 8
+.endm
+
+generate_composite_function \
+    pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_WRITEONLY, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_8888_8888_process_pixblock_head, \
+    pixman_composite_src_8888_8888_process_pixblock_tail, \
+    pixman_composite_src_8888_8888_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_over_n_8_8888_process_pixblock_head
+    /* expecting deinterleaved source data in {d8, d9, d10, d11} */
+    /* d8 - blue, d9 - green, d10 - red, d11 - alpha */
+    /* and destination data in {d4, d5, d6, d7} */
+    /* mask is in d24 (d25, d26, d27 are unused) */
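+    /*
+     * Note: here and throughout this file, the vmull.u8 + vrshr.u16 +
+     * vraddhn.u16 pattern computes (a * b + 128 + ((a * b + 128) >> 8)) >> 8
+     * per channel, i.e. an exact, correctly rounded division of a * b by 255.
+     */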
+
+    /* in */
+    vmull.u8    q0, d24, d8
+    vmull.u8    q1, d24, d9
+    vmull.u8    q6, d24, d10
+    vmull.u8    q7, d24, d11
+    vrshr.u16   q10, q0, #8
+    vrshr.u16   q11, q1, #8
+    vrshr.u16   q12, q6, #8
+    vrshr.u16   q13, q7, #8
+    vraddhn.u16 d0, q0, q10
+    vraddhn.u16 d1, q1, q11
+    vraddhn.u16 d2, q6, q12
+    vraddhn.u16 d3, q7, q13
+    vmvn.8      d24, d3  /* get inverted alpha */
+    /* source:      d0 - blue, d1 - green, d2 - red, d3 - alpha */
+    /* destination: d4 - blue, d5 - green, d6 - red, d7 - alpha */
+    /* now do alpha blending */
+    vmull.u8    q8, d24, d4
+    vmull.u8    q9, d24, d5
+    vmull.u8    q10, d24, d6
+    vmull.u8    q11, d24, d7
+.endm
+
+.macro pixman_composite_over_n_8_8888_process_pixblock_tail
+    vrshr.u16   q14, q8, #8
+    vrshr.u16   q15, q9, #8
+    vrshr.u16   q12, q10, #8
+    vrshr.u16   q13, q11, #8
+    vraddhn.u16 d28, q14, q8
+    vraddhn.u16 d29, q15, q9
+    vraddhn.u16 d30, q12, q10
+    vraddhn.u16 d31, q13, q11
+    vqadd.u8    q14, q0, q14
+    vqadd.u8    q15, q1, q15
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_n_8_8888_process_pixblock_tail_head
+    pixman_composite_over_n_8_8888_process_pixblock_tail
+    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
+    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
+    vld1.8      {d24}, [MASK]!
+    cache_preload 8, 8
+    pixman_composite_over_n_8_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_over_n_8_8888_init
+    add         DUMMY, sp, #ARGS_STACK_OFFSET
+    vpush       {d8-d15}
+    vld1.32     {d11[0]}, [DUMMY]
+    vdup.8      d8, d11[0]
+    vdup.8      d9, d11[1]
+    vdup.8      d10, d11[2]
+    vdup.8      d11, d11[3]
+.endm
+
+.macro pixman_composite_over_n_8_8888_cleanup
+    vpop        {d8-d15}
+.endm
+
+generate_composite_function \
+    pixman_composite_over_n_8_8888_asm_neon, 0, 8, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_n_8_8888_init, \
+    pixman_composite_over_n_8_8888_cleanup, \
+    pixman_composite_over_n_8_8888_process_pixblock_head, \
+    pixman_composite_over_n_8_8888_process_pixblock_tail, \
+    pixman_composite_over_n_8_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_n_8_8_process_pixblock_head
+    /* expecting source data in {d8, d9, d10, d11} */
+    /* d8 - blue, d9 - green, d10 - red, d11 - alpha */
+    /* and destination data in {d4, d5, d6, d7} */
+    /* mask is in d24, d25, d26, d27 */
+    vmull.u8    q0, d24, d11
+    vmull.u8    q1, d25, d11
+    vmull.u8    q6, d26, d11
+    vmull.u8    q7, d27, d11
+    vrshr.u16   q10, q0, #8
+    vrshr.u16   q11, q1, #8
+    vrshr.u16   q12, q6, #8
+    vrshr.u16   q13, q7, #8
+    vraddhn.u16 d0, q0, q10
+    vraddhn.u16 d1, q1, q11
+    vraddhn.u16 d2, q6, q12
+    vraddhn.u16 d3, q7, q13
+    vqadd.u8    q14, q0, q2
+    vqadd.u8    q15, q1, q3
+.endm
+
+.macro pixman_composite_add_n_8_8_process_pixblock_tail
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_add_n_8_8_process_pixblock_tail_head
+    pixman_composite_add_n_8_8_process_pixblock_tail
+    vst1.8      {d28, d29, d30, d31}, [DST_W, :128]!
+    vld1.8      {d4, d5, d6, d7}, [DST_R, :128]!
+    vld1.8      {d24, d25, d26, d27}, [MASK]!
+    cache_preload 32, 32
+    pixman_composite_add_n_8_8_process_pixblock_head
+.endm
+
+.macro pixman_composite_add_n_8_8_init
+    add         DUMMY, sp, #ARGS_STACK_OFFSET
+    vpush       {d8-d15}
+    vld1.32     {d11[0]}, [DUMMY]
+    vdup.8      d11, d11[3]
+.endm
+
+.macro pixman_composite_add_n_8_8_cleanup
+    vpop        {d8-d15}
+.endm
+
+generate_composite_function \
+    pixman_composite_add_n_8_8_asm_neon, 0, 8, 8, \
+    FLAG_DST_READWRITE, \
+    32, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_add_n_8_8_init, \
+    pixman_composite_add_n_8_8_cleanup, \
+    pixman_composite_add_n_8_8_process_pixblock_head, \
+    pixman_composite_add_n_8_8_process_pixblock_tail, \
+    pixman_composite_add_n_8_8_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_8_8_8_process_pixblock_head
+    /* expecting source data in {d0, d1, d2, d3} */
+    /* destination data in {d4, d5, d6, d7} */
+    /* mask in {d24, d25, d26, d27} */
+    vmull.u8    q8, d24, d0
+    vmull.u8    q9, d25, d1
+    vmull.u8    q10, d26, d2
+    vmull.u8    q11, d27, d3
+    vrshr.u16   q0, q8, #8
+    vrshr.u16   q1, q9, #8
+    vrshr.u16   q12, q10, #8
+    vrshr.u16   q13, q11, #8
+    vraddhn.u16 d0, q0, q8
+    vraddhn.u16 d1, q1, q9
+    vraddhn.u16 d2, q12, q10
+    vraddhn.u16 d3, q13, q11
+    vqadd.u8    q14, q0, q2
+    vqadd.u8    q15, q1, q3
+.endm
+
+.macro pixman_composite_add_8_8_8_process_pixblock_tail
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_add_8_8_8_process_pixblock_tail_head
+    pixman_composite_add_8_8_8_process_pixblock_tail
+    vst1.8      {d28, d29, d30, d31}, [DST_W, :128]!
+    vld1.8      {d4, d5, d6, d7}, [DST_R, :128]!
+    vld1.8      {d24, d25, d26, d27}, [MASK]!
+    vld1.8      {d0, d1, d2, d3}, [SRC]!
+    cache_preload 32, 32
+    pixman_composite_add_8_8_8_process_pixblock_head
+.endm
+
+.macro pixman_composite_add_8_8_8_init
+.endm
+
+.macro pixman_composite_add_8_8_8_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_add_8_8_8_asm_neon, 8, 8, 8, \
+    FLAG_DST_READWRITE, \
+    32, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_add_8_8_8_init, \
+    pixman_composite_add_8_8_8_cleanup, \
+    pixman_composite_add_8_8_8_process_pixblock_head, \
+    pixman_composite_add_8_8_8_process_pixblock_tail, \
+    pixman_composite_add_8_8_8_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_8888_8888_8888_process_pixblock_head
+    /* expecting source data in {d0, d1, d2, d3} */
+    /* destination data in {d4, d5, d6, d7} */
+    /* mask in {d24, d25, d26, d27} */
+    vmull.u8    q8, d27, d0
+    vmull.u8    q9, d27, d1
+    vmull.u8    q10, d27, d2
+    vmull.u8    q11, d27, d3
+    vrshr.u16   q0, q8, #8
+    vrshr.u16   q1, q9, #8
+    vrshr.u16   q12, q10, #8
+    vrshr.u16   q13, q11, #8
+    vraddhn.u16 d0, q0, q8
+    vraddhn.u16 d1, q1, q9
+    vraddhn.u16 d2, q12, q10
+    vraddhn.u16 d3, q13, q11
+    vqadd.u8    q14, q0, q2
+    vqadd.u8    q15, q1, q3
+.endm
+
+.macro pixman_composite_add_8888_8888_8888_process_pixblock_tail
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_add_8888_8888_8888_process_pixblock_tail_head
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail
+    vst4.8      {d28, d29, d30, d31}, [DST_W, :128]!
+    vld4.8      {d4, d5, d6, d7}, [DST_R, :128]!
+    vld4.8      {d24, d25, d26, d27}, [MASK]!
+    vld4.8      {d0, d1, d2, d3}, [SRC]!
+    cache_preload 8, 8
+    pixman_composite_add_8888_8888_8888_process_pixblock_head
+.endm
+
+generate_composite_function \
+    pixman_composite_add_8888_8888_8888_asm_neon, 32, 32, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head
+
+generate_composite_function_single_scanline \
+    pixman_composite_scanline_add_mask_asm_neon, 32, 32, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_8888_n_8888_process_pixblock_head
+    /* expecting source data in {d0, d1, d2, d3} */
+    /* destination data in {d4, d5, d6, d7} */
+    /* solid mask is in d15 */
+
+    /* 'in' */
+    vmull.u8    q8, d15, d3
+    vmull.u8    q6, d15, d2
+    vmull.u8    q5, d15, d1
+    vmull.u8    q4, d15, d0
+    vrshr.u16   q13, q8, #8
+    vrshr.u16   q12, q6, #8
+    vrshr.u16   q11, q5, #8
+    vrshr.u16   q10, q4, #8
+    vraddhn.u16 d3, q8, q13
+    vraddhn.u16 d2, q6, q12
+    vraddhn.u16 d1, q5, q11
+    vraddhn.u16 d0, q4, q10
+    vmvn.8      d24, d3  /* get inverted alpha */
+    /* now do alpha blending */
+    vmull.u8    q8, d24, d4
+    vmull.u8    q9, d24, d5
+    vmull.u8    q10, d24, d6
+    vmull.u8    q11, d24, d7
+.endm
+
+.macro pixman_composite_over_8888_n_8888_process_pixblock_tail
+    vrshr.u16   q14, q8, #8
+    vrshr.u16   q15, q9, #8
+    vrshr.u16   q12, q10, #8
+    vrshr.u16   q13, q11, #8
+    vraddhn.u16 d28, q14, q8
+    vraddhn.u16 d29, q15, q9
+    vraddhn.u16 d30, q12, q10
+    vraddhn.u16 d31, q13, q11
+    vqadd.u8    q14, q0, q14
+    vqadd.u8    q15, q1, q15
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_8888_n_8888_process_pixblock_tail_head
+    vld4.8     {d4, d5, d6, d7}, [DST_R, :128]!
+    pixman_composite_over_8888_n_8888_process_pixblock_tail
+    vld4.8     {d0, d1, d2, d3}, [SRC]!
+    cache_preload 8, 8
+    pixman_composite_over_8888_n_8888_process_pixblock_head
+    vst4.8     {d28, d29, d30, d31}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_over_8888_n_8888_init
+    add         DUMMY, sp, #48
+    vpush       {d8-d15}
+    vld1.32     {d15[0]}, [DUMMY]
+    vdup.8      d15, d15[3]
+.endm
+
+.macro pixman_composite_over_8888_n_8888_cleanup
+    vpop        {d8-d15}
+.endm
+
+generate_composite_function \
+    pixman_composite_over_8888_n_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_8888_n_8888_init, \
+    pixman_composite_over_8888_n_8888_cleanup, \
+    pixman_composite_over_8888_n_8888_process_pixblock_head, \
+    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_n_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_8888_8888_8888_process_pixblock_tail_head
+    vld4.8     {d4, d5, d6, d7}, [DST_R, :128]!
+    pixman_composite_over_8888_n_8888_process_pixblock_tail
+    vld4.8     {d0, d1, d2, d3}, [SRC]!
+    cache_preload 8, 8
+    vld4.8     {d12, d13, d14, d15}, [MASK]!
+    pixman_composite_over_8888_n_8888_process_pixblock_head
+    vst4.8     {d28, d29, d30, d31}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_over_8888_8888_8888_init
+    vpush       {d8-d15}
+.endm
+
+.macro pixman_composite_over_8888_8888_8888_cleanup
+    vpop        {d8-d15}
+.endm
+
+generate_composite_function \
+    pixman_composite_over_8888_8888_8888_asm_neon, 32, 32, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_8888_8888_8888_init, \
+    pixman_composite_over_8888_8888_8888_cleanup, \
+    pixman_composite_over_8888_n_8888_process_pixblock_head, \
+    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_8888_8888_process_pixblock_tail_head \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    12  /* mask_basereg  */
+
+generate_composite_function_single_scanline \
+    pixman_composite_scanline_over_mask_asm_neon, 32, 32, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    pixman_composite_over_8888_8888_8888_init, \
+    pixman_composite_over_8888_8888_8888_cleanup, \
+    pixman_composite_over_8888_n_8888_process_pixblock_head, \
+    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_8888_8888_process_pixblock_tail_head \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    12  /* mask_basereg  */
+
+/******************************************************************************/
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_8888_8_8888_process_pixblock_tail_head
+    vld4.8     {d4, d5, d6, d7}, [DST_R, :128]!
+    pixman_composite_over_8888_n_8888_process_pixblock_tail
+    vld4.8     {d0, d1, d2, d3}, [SRC]!
+    cache_preload 8, 8
+    vld1.8     {d15}, [MASK]!
+    pixman_composite_over_8888_n_8888_process_pixblock_head
+    vst4.8     {d28, d29, d30, d31}, [DST_W, :128]!
+.endm
+
+.macro pixman_composite_over_8888_8_8888_init
+    vpush       {d8-d15}
+.endm
+
+.macro pixman_composite_over_8888_8_8888_cleanup
+    vpop        {d8-d15}
+.endm
+
+generate_composite_function \
+    pixman_composite_over_8888_8_8888_asm_neon, 32, 8, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_8888_8_8888_init, \
+    pixman_composite_over_8888_8_8888_cleanup, \
+    pixman_composite_over_8888_n_8888_process_pixblock_head, \
+    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_8_8888_process_pixblock_tail_head \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    15  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_0888_0888_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_0888_0888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_0888_0888_process_pixblock_tail_head
+    vst3.8 {d0, d1, d2}, [DST_W]!
+    vld3.8 {d0, d1, d2}, [SRC]!
+    cache_preload 8, 8
+.endm
+
+generate_composite_function \
+    pixman_composite_src_0888_0888_asm_neon, 24, 0, 24, \
+    FLAG_DST_WRITEONLY, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_0888_0888_process_pixblock_head, \
+    pixman_composite_src_0888_0888_process_pixblock_tail, \
+    pixman_composite_src_0888_0888_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_0888_8888_rev_process_pixblock_head
+    vswp   d0, d2
+.endm
+
+.macro pixman_composite_src_0888_8888_rev_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_0888_8888_rev_process_pixblock_tail_head
+    vst4.8 {d0, d1, d2, d3}, [DST_W]!
+    vld3.8 {d0, d1, d2}, [SRC]!
+    vswp   d0, d2
+    cache_preload 8, 8
+.endm
+
+.macro pixman_composite_src_0888_8888_rev_init
+    veor   d3, d3, d3
+.endm
+
+generate_composite_function \
+    pixman_composite_src_0888_8888_rev_asm_neon, 24, 0, 32, \
+    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    pixman_composite_src_0888_8888_rev_init, \
+    default_cleanup, \
+    pixman_composite_src_0888_8888_rev_process_pixblock_head, \
+    pixman_composite_src_0888_8888_rev_process_pixblock_tail, \
+    pixman_composite_src_0888_8888_rev_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_0888_0565_rev_process_pixblock_head
+    vshll.u8    q8, d1, #8
+    vshll.u8    q9, d2, #8
+.endm
+
+.macro pixman_composite_src_0888_0565_rev_process_pixblock_tail
+    vshll.u8    q14, d0, #8
+    vsri.u16    q14, q8, #5
+    vsri.u16    q14, q9, #11
+.endm
+
+.macro pixman_composite_src_0888_0565_rev_process_pixblock_tail_head
+        vshll.u8    q14, d0, #8
+    vld3.8 {d0, d1, d2}, [SRC]!
+        vsri.u16    q14, q8, #5
+        vsri.u16    q14, q9, #11
+    vshll.u8    q8, d1, #8
+        vst1.16 {d28, d29}, [DST_W, :128]!
+    vshll.u8    q9, d2, #8
+.endm
+
+generate_composite_function \
+    pixman_composite_src_0888_0565_rev_asm_neon, 24, 0, 16, \
+    FLAG_DST_WRITEONLY, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_0888_0565_rev_process_pixblock_head, \
+    pixman_composite_src_0888_0565_rev_process_pixblock_tail, \
+    pixman_composite_src_0888_0565_rev_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_pixbuf_8888_process_pixblock_head
+    vmull.u8    q8, d3, d0
+    vmull.u8    q9, d3, d1
+    vmull.u8    q10, d3, d2
+.endm
+
+.macro pixman_composite_src_pixbuf_8888_process_pixblock_tail
+    vrshr.u16   q11, q8, #8
+    vswp        d3, d31
+    vrshr.u16   q12, q9, #8
+    vrshr.u16   q13, q10, #8
+    vraddhn.u16 d30, q11, q8
+    vraddhn.u16 d29, q12, q9
+    vraddhn.u16 d28, q13, q10
+.endm
+
+.macro pixman_composite_src_pixbuf_8888_process_pixblock_tail_head
+        vrshr.u16   q11, q8, #8
+        vswp        d3, d31
+        vrshr.u16   q12, q9, #8
+        vrshr.u16   q13, q10, #8
+    vld4.8 {d0, d1, d2, d3}, [SRC]!
+        vraddhn.u16 d30, q11, q8
+                                    PF add PF_X, PF_X, #8
+                                    PF tst PF_CTL, #0xF
+                                    PF addne PF_X, PF_X, #8
+                                    PF subne PF_CTL, PF_CTL, #1
+        vraddhn.u16 d29, q12, q9
+        vraddhn.u16 d28, q13, q10
+    vmull.u8    q8, d3, d0
+    vmull.u8    q9, d3, d1
+    vmull.u8    q10, d3, d2
+        vst4.8 {d28, d29, d30, d31}, [DST_W, :128]!
+                                    PF cmp PF_X, ORIG_W
+                                    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+                                    PF subge PF_X, PF_X, ORIG_W
+                                    PF subges PF_CTL, PF_CTL, #0x10
+                                    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+.endm
+
+generate_composite_function \
+    pixman_composite_src_pixbuf_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_pixbuf_8888_process_pixblock_head, \
+    pixman_composite_src_pixbuf_8888_process_pixblock_tail, \
+    pixman_composite_src_pixbuf_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
new file mode 100644
--- /dev/null
+++ b/gfx/cairo/libpixman/src/pixman-arm-neon-asm.h
@@ -0,0 +1,906 @@
+/*
+ * Copyright © 2009 Nokia Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author:  Siarhei Siamashka (siarhei.siamashka@nokia.com)
+ */
+
+/*
+ * This file contains a macro ('generate_composite_function') which can
+ * construct 2D image processing functions, based on a common template.
+ * Any combination of source, destination and mask images with 8bpp,
+ * 16bpp, 24bpp or 32bpp color formats is supported.
+ *
+ * This macro takes care of:
+ *  - handling of leading and trailing unaligned pixels
+ *  - doing most of the work related to L2 cache preload
+ *  - encouraging the use of software pipelining for better instruction
+ *    scheduling
+ *
+ * The user of this macro has to provide some configuration parameters
+ * (bit depths for the images, prefetch distance, etc.) and a set of
+ * macros which implement the basic code chunks responsible for pixel
+ * processing. See the 'pixman-arm-neon-asm.S' file for usage examples.
+ *
+ * TODO:
+ *  - try overlapped pixel method (from Ian Rickards) when processing
+ *    exactly two blocks of pixels
+ *  - maybe add an option to do reverse scanline processing
+ */
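+
+/*
+ * As a rough illustration (all 'example_copy' names below are invented for
+ * this comment and do not exist in pixman), a plain 32bpp copy function
+ * could be instantiated from this template as follows; compare with the
+ * real pixman_composite_src_8888_8888 code in pixman-arm-neon-asm.S:
+ *
+ *     .macro example_copy_process_pixblock_head
+ *     .endm
+ *     .macro example_copy_process_pixblock_tail
+ *     .endm
+ *     .macro example_copy_process_pixblock_tail_head
+ *         vst1.32     {d0, d1, d2, d3}, [DST_W, :128]!
+ *         vld1.32     {d0, d1, d2, d3}, [SRC]!
+ *         cache_preload 8, 8
+ *     .endm
+ *
+ *     generate_composite_function \
+ *         example_copy_asm_neon, 32, 0, 32, \
+ *         FLAG_DST_WRITEONLY, \
+ *         8, 10, \
+ *         default_init, default_cleanup, \
+ *         example_copy_process_pixblock_head, \
+ *         example_copy_process_pixblock_tail, \
+ *         example_copy_process_pixblock_tail_head, \
+ *         0, 0, 0, 0
+ *
+ * The numeric arguments are source/mask/destination bpp (32, 0, 32),
+ * pixels per block (8), prefetch distance (10) and the four base
+ * register numbers.
+ */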
+
+/*
+ * Bit flags for the 'generate_composite_function' macro which are used
+ * to tune the behavior of the generated functions.
+ */
+.set FLAG_DST_WRITEONLY,       0
+.set FLAG_DST_READWRITE,       1
+.set FLAG_DEINTERLEAVE_32BPP,  2
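+/* e.g. the OVER fast paths in pixman-arm-neon-asm.S pass
+   FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP */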
+
+/*
+ * Offset in the stack where the mask and source pointers/strides can be
+ * accessed from the 'init' macro. This is useful for doing special
+ * handling of a solid mask.
+ */
+.set ARGS_STACK_OFFSET,        40
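+
+/*
+ * For example (sketch only, mirroring the real 'init' macros in
+ * pixman-arm-neon-asm.S), an 'init' macro for a solid source can fetch
+ * the color value passed on the stack like this:
+ *
+ *     add         DUMMY, sp, #ARGS_STACK_OFFSET
+ *     vld1.32     {d0[0]}, [DUMMY]
+ */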
+
+/*
+ * Constants for selecting preferable prefetch type.
+ */
+.set PREFETCH_TYPE_NONE,       0 /* No prefetch at all */
+.set PREFETCH_TYPE_SIMPLE,     1 /* A simple, fixed-distance-ahead prefetch */
+.set PREFETCH_TYPE_ADVANCED,   2 /* Advanced fine-grained prefetch */
+
+/*
+ * Definitions of supplementary pixld/pixst macros (for partial load/store of
+ * pixel data).
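+ *
+ * For example, 'pixld 8, 32, 0, SRC' loads eight 32bpp pixels (32 bytes)
+ * into d4-d7, using vld4.8 when 32bpp deinterleaving is enabled and a
+ * plain vld1.32 otherwise.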
+ */
+
+.macro pixldst1 op, elem_size, reg1, mem_operand, abits
+.if abits > 0
+    op&.&elem_size {d&reg1}, [&mem_operand&, :&abits&]!
+.else
+    op&.&elem_size {d&reg1}, [&mem_operand&]!
+.endif
+.endm
+
+.macro pixldst2 op, elem_size, reg1, reg2, mem_operand, abits
+.if abits > 0
+    op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&, :&abits&]!
+.else
+    op&.&elem_size {d&reg1, d&reg2}, [&mem_operand&]!
+.endif
+.endm
+
+.macro pixldst4 op, elem_size, reg1, reg2, reg3, reg4, mem_operand, abits
+.if abits > 0
+    op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&, :&abits&]!
+.else
+    op&.&elem_size {d&reg1, d&reg2, d&reg3, d&reg4}, [&mem_operand&]!
+.endif
+.endm
+
+.macro pixldst0 op, elem_size, reg1, idx, mem_operand, abits
+    op&.&elem_size {d&reg1[idx]}, [&mem_operand&]!
+.endm
+
+.macro pixldst3 op, elem_size, reg1, reg2, reg3, mem_operand
+    op&.&elem_size {d&reg1, d&reg2, d&reg3}, [&mem_operand&]!
+.endm
+
+.macro pixldst30 op, elem_size, reg1, reg2, reg3, idx, mem_operand
+    op&.&elem_size {d&reg1[idx], d&reg2[idx], d&reg3[idx]}, [&mem_operand&]!
+.endm
+
+.macro pixldst numbytes, op, elem_size, basereg, mem_operand, abits
+.if numbytes == 32
+    pixldst4 op, elem_size, %(basereg+4), %(basereg+5), \
+                              %(basereg+6), %(basereg+7), mem_operand, abits
+.elseif numbytes == 16
+    pixldst2 op, elem_size, %(basereg+2), %(basereg+3), mem_operand, abits
+.elseif numbytes == 8
+    pixldst1 op, elem_size, %(basereg+1), mem_operand, abits
+.elseif numbytes == 4
+    .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 32)
+        pixldst0 op, 32, %(basereg+0), 1, mem_operand, abits
+    .elseif elem_size == 16
+        pixldst0 op, 16, %(basereg+0), 2, mem_operand, abits
+        pixldst0 op, 16, %(basereg+0), 3, mem_operand, abits
+    .else
+        pixldst0 op, 8, %(basereg+0), 4, mem_operand, abits
+        pixldst0 op, 8, %(basereg+0), 5, mem_operand, abits
+        pixldst0 op, 8, %(basereg+0), 6, mem_operand, abits
+        pixldst0 op, 8, %(basereg+0), 7, mem_operand, abits
+    .endif
+.elseif numbytes == 2
+    .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 16)
+        pixldst0 op, 16, %(basereg+0), 1, mem_operand, abits
+    .else
+        pixldst0 op, 8, %(basereg+0), 2, mem_operand, abits
+        pixldst0 op, 8, %(basereg+0), 3, mem_operand, abits
+    .endif
+.elseif numbytes == 1
+    pixldst0 op, 8, %(basereg+0), 1, mem_operand, abits
+.else
+    .error "unsupported size: numbytes"
+.endif
+.endm
+
+.macro pixld numpix, bpp, basereg, mem_operand, abits=0
+.if bpp > 0
+.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+    pixldst4 vld4, 8, %(basereg+4), %(basereg+5), \
+                      %(basereg+6), %(basereg+7), mem_operand, abits
+.elseif (bpp == 24) && (numpix == 8)
+    pixldst3 vld3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
+.elseif (bpp == 24) && (numpix == 4)
+    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
+    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
+    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
+    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
+.elseif (bpp == 24) && (numpix == 2)
+    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
+    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
+.elseif (bpp == 24) && (numpix == 1)
+    pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
+.else
+    pixldst %(numpix * bpp / 8), vld1, %(bpp), basereg, mem_operand, abits
+.endif
+.endif
+.endm
+
+.macro pixst numpix, bpp, basereg, mem_operand, abits=0
+.if bpp > 0
+.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+    pixldst4 vst4, 8, %(basereg+4), %(basereg+5), \
+                      %(basereg+6), %(basereg+7), mem_operand, abits
+.elseif (bpp == 24) && (numpix == 8)
+    pixldst3 vst3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
+.elseif (bpp == 24) && (numpix == 4)
+    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
+    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
+    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
+    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
+.elseif (bpp == 24) && (numpix == 2)
+    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
+    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
+.elseif (bpp == 24) && (numpix == 1)
+    pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
+.else
+    pixldst %(numpix * bpp / 8), vst1, %(bpp), basereg, mem_operand, abits
+.endif
+.endif
+.endm
+
+.macro pixld_a numpix, bpp, basereg, mem_operand
+.if (bpp * numpix) <= 128
+    pixld numpix, bpp, basereg, mem_operand, %(bpp * numpix)
+.else
+    pixld numpix, bpp, basereg, mem_operand, 128
+.endif
+.endm
+
+.macro pixst_a numpix, bpp, basereg, mem_operand
+.if (bpp * numpix) <= 128
+    pixst numpix, bpp, basereg, mem_operand, %(bpp * numpix)
+.else
+    pixst numpix, bpp, basereg, mem_operand, 128
+.endif
+.endm
+
+.macro vuzp8 reg1, reg2
+    vuzp.8 d&reg1, d&reg2
+.endm
+
+.macro vzip8 reg1, reg2
+    vzip.8 d&reg1, d&reg2
+.endm
+
+/* deinterleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
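+/* (the result matches the register layout produced by a vld4.8 load:
+   one color channel per d register) */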
+.macro pixdeinterleave bpp, basereg
+.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+    vuzp8 %(basereg+0), %(basereg+1)
+    vuzp8 %(basereg+2), %(basereg+3)
+    vuzp8 %(basereg+1), %(basereg+3)
+    vuzp8 %(basereg+0), %(basereg+2)
+.endif
+.endm
+
+/* interleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
+.macro pixinterleave bpp, basereg
+.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+    vzip8 %(basereg+0), %(basereg+2)
+    vzip8 %(basereg+1), %(basereg+3)
+    vzip8 %(basereg+2), %(basereg+3)
+    vzip8 %(basereg+0), %(basereg+1)
+.endif
+.endm
+
+/*
+ * This is a macro for implementing cache preload. The main idea is that
+ * cache preload logic is mostly independent of the rest of the pixel
+ * processing code. It starts at the top left pixel and moves forward
+ * across pixels, and can jump across scanlines. Prefetch distance is
+ * handled in an 'incremental' way: it starts from 0 and advances to the
+ * optimal distance over time. After reaching the optimal prefetch
+ * distance, it is kept constant. There are some checks which prevent
+ * prefetching unneeded pixel lines below the image (but it can still
+ * prefetch a bit more data on the right side of the image - not a big
+ * issue, and it may actually be helpful when rendering text glyphs). An
+ * additional trick is the use of the LDR instruction for prefetch instead
+ * of PLD when moving to the next line; the point is that we have a high
+ * chance of getting a TLB miss in this case, and PLD would be useless.
+ *
+ * This sounds like it may introduce a noticeable overhead (when working
+ * with fully cached data). But in reality, thanks to the NEON unit in the
+ * ARM Cortex-A8 having a separate pipeline and instruction queue, normal
+ * ARM code can execute simultaneously with NEON code and be completely
+ * shadowed by it. Thus we get no performance overhead at all (*). This is
+ * a very nice feature of the Cortex-A8, if used wisely. We don't have a
+ * hardware prefetcher, but we can still implement some rather advanced
+ * prefetch logic in software for almost zero cost!
+ *
+ * (*) The overhead of the prefetcher is visible when running some trivial
+ * pixel processing such as a simple copy. Anyway, having prefetch is a
+ * must when working with graphics data.
+ */
+.macro PF a, x:vararg
+.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_ADVANCED)
+    a x
+.endif
+.endm
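+
+/*
+ * For example, 'PF add PF_X, PF_X, #pixblock_size' emits the add
+ * instruction only when the advanced prefetcher is in use; with simple
+ * or disabled prefetch the whole line expands to nothing.
+ */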
+
+.macro cache_preload std_increment, boost_increment
+.if (src_bpp_shift >= 0) || (dst_r_bpp != 0) || (mask_bpp_shift >= 0)
+.if regs_shortage
+    PF ldr ORIG_W, [sp] /* If we are short on regs, ORIG_W is kept on stack */
+.endif
+.if std_increment != 0
+    PF add PF_X, PF_X, #std_increment
+.endif
+    PF tst PF_CTL, #0xF
+    PF addne PF_X, PF_X, #boost_increment
+    PF subne PF_CTL, PF_CTL, #1
+    PF cmp PF_X, ORIG_W
+.if src_bpp_shift >= 0
+    PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift]
+.endif
+.if dst_r_bpp != 0
+    PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift]
+.endif
+.if mask_bpp_shift >= 0
+    PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift]
+.endif
+    PF subge PF_X, PF_X, ORIG_W
+    PF subges PF_CTL, PF_CTL, #0x10
+.if src_bpp_shift >= 0
+    PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]!
+.endif
+.if dst_r_bpp != 0
+    PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]!
+.endif
+.if mask_bpp_shift >= 0
+    PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]!
+.endif
+.endif
+.endm
+
+.macro cache_preload_simple
+.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_SIMPLE)
+.if src_bpp > 0
+    pld [SRC, #(PREFETCH_DISTANCE_SIMPLE * src_bpp / 8)]
+.endif
+.if dst_r_bpp > 0
+    pld [DST_R, #(PREFETCH_DISTANCE_SIMPLE * dst_r_bpp / 8)]
+.endif
+.if mask_bpp > 0
+    pld [MASK, #(PREFETCH_DISTANCE_SIMPLE * mask_bpp / 8)]
+.endif
+.endif
+.endm
+
+/*
+ * Macro which is used to process leading pixels until the destination
+ * pointer is properly aligned (at a 16 byte boundary). When the
+ * destination buffer uses a 24bpp format, this step is skipped entirely.
+ */
+.macro ensure_destination_ptr_alignment process_pixblock_head, \
+                                        process_pixblock_tail, \
+                                        process_pixblock_tail_head
+.if dst_w_bpp != 24
+    tst         DST_R, #0xF
+    beq         2f
+
+.irp lowbit, 1, 2, 4, 8, 16
+local skip1
+.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
+.if lowbit < 16 /* we don't need more than 16-byte alignment */
+    tst         DST_R, #lowbit
+    beq         1f
+.endif
+    pixld       (lowbit * 8 / dst_w_bpp), src_bpp, src_basereg, SRC
+    pixld       (lowbit * 8 / dst_w_bpp), mask_bpp, mask_basereg, MASK
+.if dst_r_bpp > 0
+    pixld_a     (lowbit * 8 / dst_r_bpp), dst_r_bpp, dst_r_basereg, DST_R
+.else
+    add         DST_R, DST_R, #lowbit
+.endif
+    PF add      PF_X, PF_X, #(lowbit * 8 / dst_w_bpp)
+    sub         W, W, #(lowbit * 8 / dst_w_bpp)
+1:
+.endif
+.endr
+    pixdeinterleave src_bpp, src_basereg
+    pixdeinterleave mask_bpp, mask_basereg
+    pixdeinterleave dst_r_bpp, dst_r_basereg
+
+    process_pixblock_head
+    cache_preload 0, pixblock_size
+    cache_preload_simple
+    process_pixblock_tail
+
+    pixinterleave dst_w_bpp, dst_w_basereg
+.irp lowbit, 1, 2, 4, 8, 16
+.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
+.if lowbit < 16 /* we don't need more than 16-byte alignment */
+    tst         DST_W, #lowbit
+    beq         1f
+.endif
+    pixst_a     (lowbit * 8 / dst_w_bpp), dst_w_bpp, dst_w_basereg, DST_W
+1:
+.endif
+.endr
+.endif
+2:
+.endm
+
+/*
+ * Special code for processing up to (pixblock_size - 1) remaining
+ * trailing pixels. As SIMD processing operates on pixblock_size pixels
+ * at a time, anything smaller than this has to be loaded and stored in
+ * a special way. Loading and storing of pixel data is performed in such
+ * a way that we fill some 'slots' in the NEON registers (some slots
+ * naturally remain unused), then perform the compositing operation as
+ * usual. In the end, the data is taken from these 'slots' and saved to
+ * memory.
+ *
+ * cache_preload_flag - set to 0 to suppress the cache
+ *                      preload (prefetch)
+ * dst_aligned_flag   - selects whether destination buffer
+ *                      is aligned
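+ *
+ * For example, with pixblock_size == 8 and 5 trailing pixels left, chunks
+ * of 4 and 1 pixels are loaded into separate register 'slots', processed
+ * as one full pixel block, and stored back with the same 4 + 1 split.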
+ */
+.macro process_trailing_pixels cache_preload_flag, \
+                               dst_aligned_flag, \
+                               process_pixblock_head, \
+                               process_pixblock_tail, \
+                               process_pixblock_tail_head
+    tst         W, #(pixblock_size - 1)
+    beq         2f
+.irp chunk_size, 16, 8, 4, 2, 1
+.if pixblock_size > chunk_size
+    tst         W, #chunk_size
+    beq         1f
+    pixld       chunk_size, src_bpp, src_basereg, SRC
+    pixld       chunk_size, mask_bpp, mask_basereg, MASK
+.if dst_aligned_flag != 0
+    pixld_a     chunk_size, dst_r_bpp, dst_r_basereg, DST_R
+.else
+    pixld       chunk_size, dst_r_bpp, dst_r_basereg, DST_R
+.endif
+.if cache_preload_flag != 0
+    PF add      PF_X, PF_X, #chunk_size
+.endif
+1:
+.endif
+.endr
+    pixdeinterleave src_bpp, src_basereg
+    pixdeinterleave mask_bpp, mask_basereg
+    pixdeinterleave dst_r_bpp, dst_r_basereg
+
+    process_pixblock_head
+.if cache_preload_flag != 0
+    cache_preload 0, pixblock_size
+    cache_preload_simple
+.endif
+    process_pixblock_tail
+    pixinterleave dst_w_bpp, dst_w_basereg
+.irp chunk_size, 16, 8, 4, 2, 1
+.if pixblock_size > chunk_size
+    tst         W, #chunk_size
+    beq         1f
+.if dst_aligned_flag != 0
+    pixst_a     chunk_size, dst_w_bpp, dst_w_basereg, DST_W
+.else
+    pixst       chunk_size, dst_w_bpp, dst_w_basereg, DST_W
+.endif
+1:
+.endif
+.endr
+2:
+.endm
+
+/*
+ * Macro which performs all the operations needed to switch to the next
+ * scanline and start the next loop iteration, unless all the scanlines
+ * have already been processed.
+ */
+.macro advance_to_next_scanline start_of_loop_label
+.if regs_shortage
+    ldrd        W, [sp] /* load W and H (width and height) from stack */
+.else
+    mov         W, ORIG_W
+.endif
+    add         DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift
+.if src_bpp != 0
+    add         SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift
+.endif
+.if mask_bpp != 0
+    add         MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift
+.endif
+.if (dst_w_bpp != 24)
+    sub         DST_W, DST_W, W, lsl #dst_bpp_shift
+.endif
+.if (src_bpp != 24) && (src_bpp != 0)
+    sub         SRC, SRC, W, lsl #src_bpp_shift
+.endif
+.if (mask_bpp != 24) && (mask_bpp != 0)
+    sub         MASK, MASK, W, lsl #mask_bpp_shift
+.endif
+    subs        H, H, #1
+    mov         DST_R, DST_W
+.if regs_shortage
+    str         H, [sp, #4] /* save updated height to stack */
+.endif
+    bge         start_of_loop_label
+.endm
+
+/*
+ * Registers are allocated in the following way by default:
+ * d0, d1, d2, d3     - reserved for loading source pixel data
+ * d4, d5, d6, d7     - reserved for loading destination pixel data
+ * d24, d25, d26, d27 - reserved for loading mask pixel data
+ * d28, d29, d30, d31 - final destination pixel data for writeback to memory
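+ *
+ * Individual fast paths may override these defaults through the
+ * dst_w_basereg / dst_r_basereg / src_basereg / mask_basereg parameters
+ * below (for instance, pixman_composite_over_8888_8888_8888_asm_neon
+ * keeps its mask in d12-d15).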
+ */
+.macro generate_composite_function fname, \
+                                   src_bpp_, \
+                                   mask_bpp_, \
+                                   dst_w_bpp_, \
+                                   flags, \
+                                   pixblock_size_, \
+                                   prefetch_distance, \
+                                   init, \
+                                   cleanup, \
+                                   process_pixblock_head, \
+                                   process_pixblock_tail, \
+                                   process_pixblock_tail_head, \
+                                   dst_w_basereg_ = 28, \
+                                   dst_r_basereg_ = 4, \
+                                   src_basereg_   = 0, \
+                                   mask_basereg_  = 24
+
+    .func fname
+    .global fname
+    /* For ELF format also set function visibility to hidden */
+#ifdef __ELF__
+    .hidden fname
+    .type fname, %function
+#endif
+fname:
+    push        {r4-r12, lr}        /* save all registers */
+
+/*
+ * Select prefetch type for this function. If prefetch distance is
+ * set to 0 or one of the color formats is 24bpp, SIMPLE prefetch
+ * has to be used instead of ADVANCED.
+ */
+    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_DEFAULT
+.if prefetch_distance == 0
+    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE
+.elseif (PREFETCH_TYPE_CURRENT > PREFETCH_TYPE_SIMPLE) && \
+        ((src_bpp_ == 24) || (mask_bpp_ == 24) || (dst_w_bpp_ == 24))
+    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_SIMPLE
+.endif
+
+/*
+ * Make some macro arguments globally visible and accessible
+ * from other macros
+ */
+    .set src_bpp, src_bpp_
+    .set mask_bpp, mask_bpp_
+    .set dst_w_bpp, dst_w_bpp_
+    .set pixblock_size, pixblock_size_
+    .set dst_w_basereg, dst_w_basereg_
+    .set dst_r_basereg, dst_r_basereg_
+    .set src_basereg, src_basereg_
+    .set mask_basereg, mask_basereg_
+
+/*
+ * Assign symbolic names to registers
+ */
+    W           .req        r0      /* width (is updated during processing) */
+    H           .req        r1      /* height (is updated during processing) */
+    DST_W       .req        r2      /* destination buffer pointer for writes */
+    DST_STRIDE  .req        r3      /* destination image stride */
+    SRC         .req        r4      /* source buffer pointer */
+    SRC_STRIDE  .req        r5      /* source image stride */
+    DST_R       .req        r6      /* destination buffer pointer for reads */
+
+    MASK        .req        r7      /* mask pointer */
+    MASK_STRIDE .req        r8      /* mask stride */
+
+    PF_CTL      .req        r9      /* combined lines counter and prefetch */
+                                    /* distance increment counter */
+    PF_X        .req        r10     /* pixel index in a scanline for current */
                                    /* prefetch position */
+    PF_SRC      .req        r11     /* pointer to source scanline start */
+                                    /* for prefetch purposes */
+    PF_DST      .req        r12     /* pointer to destination scanline start */
+                                    /* for prefetch purposes */
+    PF_MASK     .req        r14     /* pointer to mask scanline start */
+                                    /* for prefetch purposes */
+/*
+ * Check whether we have enough registers for all the local variables.
+ * If we don't have enough registers, the original width and height are
+ * kept on top of the stack (and the 'regs_shortage' variable is set to
+ * indicate this for the rest of the code). Even if there are enough
+ * registers, the allocation scheme may differ slightly depending on
+ * whether the source or the mask is unused.
+ */
+.if (PREFETCH_TYPE_CURRENT < PREFETCH_TYPE_ADVANCED)
+    ORIG_W      .req        r10     /* saved original width */
+    DUMMY       .req        r12     /* temporary register */
+    .set        regs_shortage, 0
+.elseif mask_bpp == 0
+    ORIG_W      .req        r7      /* saved original width */
+    DUMMY       .req        r8      /* temporary register */
+    .set        regs_shortage, 0
+.elseif src_bpp == 0
+    ORIG_W      .req        r4      /* saved original width */
+    DUMMY       .req        r5      /* temporary register */
+    .set        regs_shortage, 0
+.else
+    ORIG_W      .req        r1      /* saved original width */
+    DUMMY       .req        r1      /* temporary register */
+    .set        regs_shortage, 1
+.endif
+
+    .set mask_bpp_shift, -1
+.if src_bpp == 32
+    .set src_bpp_shift, 2
+.elseif src_bpp == 24
+    .set src_bpp_shift, 0
+.elseif src_bpp == 16
+    .set src_bpp_shift, 1
+.elseif src_bpp == 8
+    .set src_bpp_shift, 0
+.elseif src_bpp == 0
+    .set src_bpp_shift, -1
+.else
+    .error "requested src bpp (src_bpp) is not supported"
+.endif
+.if mask_bpp == 32
+    .set mask_bpp_shift, 2
+.elseif mask_bpp == 24
+    .set mask_bpp_shift, 0
+.elseif mask_bpp == 8
+    .set mask_bpp_shift, 0
+.elseif mask_bpp == 0
+    .set mask_bpp_shift, -1
+.else
+    .error "requested mask bpp (mask_bpp) is not supported"
+.endif
+.if dst_w_bpp == 32
+    .set dst_bpp_shift, 2
+.elseif dst_w_bpp == 24
+    .set dst_bpp_shift, 0
+.elseif dst_w_bpp == 16
+    .set dst_bpp_shift, 1
+.elseif dst_w_bpp == 8
+    .set dst_bpp_shift, 0
+.else
+    .error "requested dst bpp (dst_w_bpp) is not supported"
+.endif
+
+.if (((flags) & FLAG_DST_READWRITE) != 0)
+    .set dst_r_bpp, dst_w_bpp
+.else
+    .set dst_r_bpp, 0
+.endif
+.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
+    .set DEINTERLEAVE_32BPP_ENABLED, 1
+.else
+    .set DEINTERLEAVE_32BPP_ENABLED, 0
+.endif
+
+.if prefetch_distance < 0 || prefetch_distance > 15
+    .error "invalid prefetch distance (prefetch_distance)"
+.endif
+
+.if src_bpp > 0
+    ldr         SRC, [sp, #40]
+.endif
+.if mask_bpp > 0
+    ldr         MASK, [sp, #48]
+.endif
+    PF mov      PF_X, #0
+.if src_bpp > 0
+    ldr         SRC_STRIDE, [sp, #44]
+.endif
+.if mask_bpp > 0
+    ldr         MASK_STRIDE, [sp, #52]
+.endif
+    mov         DST_R, DST_W
+
+.if src_bpp == 24
+    sub         SRC_STRIDE, SRC_STRIDE, W
+    sub         SRC_STRIDE, SRC_STRIDE, W, lsl #1
+.endif
+.if mask_bpp == 24
+    sub         MASK_STRIDE, MASK_STRIDE, W
+    sub         MASK_STRIDE, MASK_STRIDE, W, lsl #1
+.endif
+.if dst_w_bpp == 24
+    sub         DST_STRIDE, DST_STRIDE, W
+    sub         DST_STRIDE, DST_STRIDE, W, lsl #1
+.endif
+
+/*
+ * Setup advanced prefetcher initial state
+ */
+    PF mov      PF_SRC, SRC
+    PF mov      PF_DST, DST_R
+    PF mov      PF_MASK, MASK
+    /* PF_CTL = prefetch_distance | ((h - 1) << 4) */
+    PF mov      PF_CTL, H, lsl #4
+    PF add      PF_CTL, #(prefetch_distance - 0x10)
+
+    init
+.if regs_shortage
+    push        {r0, r1}
+.endif
+    subs        H, H, #1
+.if regs_shortage
+    str         H, [sp, #4] /* save updated height to stack */
+.else
+    mov         ORIG_W, W
+.endif
+    blt         9f
+    cmp         W, #(pixblock_size * 2)
+    blt         8f
+/*
+ * This is the start of the pipelined loop, which is optimized for
+ * long scanlines.
+ */
+0:
+    ensure_destination_ptr_alignment process_pixblock_head, \
+                                     process_pixblock_tail, \
+                                     process_pixblock_tail_head
+
+    /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */
+    pixld_a     pixblock_size, dst_r_bpp, \
+                (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
+    pixld       pixblock_size, src_bpp, \
+                (src_basereg - pixblock_size * src_bpp / 64), SRC
+    pixld       pixblock_size, mask_bpp, \
+                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+    PF add      PF_X, PF_X, #pixblock_size
+    process_pixblock_head
+    cache_preload 0, pixblock_size
+    cache_preload_simple
+    subs        W, W, #(pixblock_size * 2)
+    blt         2f
+1:
+    process_pixblock_tail_head
+    cache_preload_simple
+    subs        W, W, #pixblock_size
+    bge         1b
+2:
+    process_pixblock_tail
+    pixst_a     pixblock_size, dst_w_bpp, \
+                (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
+
+    /* Process the remaining trailing pixels in the scanline */
+    process_trailing_pixels 1, 1, \
+                            process_pixblock_head, \
+                            process_pixblock_tail, \
+                            process_pixblock_tail_head
+    advance_to_next_scanline 0b
+
+.if regs_shortage
+    pop         {r0, r1}
+.endif
+    cleanup
+    pop         {r4-r12, pc}  /* exit */
+/*
+ * This is the start of the loop, designed to process images with small width
+ * (less than pixblock_size * 2 pixels). In this case neither pipelining
+ * nor prefetch is used.
+ */
+8:
+    /* Process exactly pixblock_size pixels if needed */
+    tst         W, #pixblock_size
+    beq         1f
+    pixld       pixblock_size, dst_r_bpp, \
+                (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
+    pixld       pixblock_size, src_bpp, \
+                (src_basereg - pixblock_size * src_bpp / 64), SRC
+    pixld       pixblock_size, mask_bpp, \
+                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+    process_pixblock_head
+    process_pixblock_tail
+    pixst       pixblock_size, dst_w_bpp, \
+                (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
+1:
+    /* Process the remaining trailing pixels in the scanline */
+    process_trailing_pixels 0, 0, \
+                            process_pixblock_head, \
+                            process_pixblock_tail, \
+                            process_pixblock_tail_head
+    advance_to_next_scanline 8b
+9:
+.if regs_shortage
+    pop         {r0, r1}
+.endif
+    cleanup
+    pop         {r4-r12, pc}  /* exit */
+
+    .unreq      SRC
+    .unreq      MASK
+    .unreq      DST_R
+    .unreq      DST_W
+    .unreq      ORIG_W
+    .unreq      W
+    .unreq      H
+    .unreq      SRC_STRIDE
+    .unreq      DST_STRIDE
+    .unreq      MASK_STRIDE
+    .unreq      PF_CTL
+    .unreq      PF_X
+    .unreq      PF_SRC
+    .unreq      PF_DST
+    .unreq      PF_MASK
+    .unreq      DUMMY
+    .endfunc
+.endm
+
+/*
+ * A simplified variant of the function generation template for single
+ * scanline processing (used for implementing the pixman combine functions).
+ */
+.macro generate_composite_function_single_scanline fname, \
+                                                   src_bpp_, \
+                                                   mask_bpp_, \
+                                                   dst_w_bpp_, \
+                                                   flags, \
+                                                   pixblock_size_, \
+                                                   init, \
+                                                   cleanup, \
+                                                   process_pixblock_head, \
+                                                   process_pixblock_tail, \
+                                                   process_pixblock_tail_head, \
+                                                   dst_w_basereg_ = 28, \
+                                                   dst_r_basereg_ = 4, \
+                                                   src_basereg_   = 0, \
+                                                   mask_basereg_  = 24
+
+    .func fname
+    .global fname
+    /* For ELF format also set function visibility to hidden */
+#ifdef __ELF__
+    .hidden fname
+    .type fname, %function
+#endif
+fname:
+    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE
+/*
+ * Make some macro arguments globally visible and accessible
+ * from other macros
+ */
+    .set src_bpp, src_bpp_
+    .set mask_bpp, mask_bpp_
+    .set dst_w_bpp, dst_w_bpp_
+    .set pixblock_size, pixblock_size_
+    .set dst_w_basereg, dst_w_basereg_
+    .set dst_r_basereg, dst_r_basereg_
+    .set src_basereg, src_basereg_
+    .set mask_basereg, mask_basereg_
+/*
+ * Assign symbolic names to registers
+ */
+    W           .req        r0      /* width (is updated during processing) */
+    DST_W       .req        r1      /* destination buffer pointer for writes */
+    SRC         .req        r2      /* source buffer pointer */
+    DST_R       .req        ip      /* destination buffer pointer for reads */
+    MASK        .req        r3      /* mask pointer */
+
+.if (((flags) & FLAG_DST_READWRITE) != 0)
+    .set dst_r_bpp, dst_w_bpp
+.else
+    .set dst_r_bpp, 0
+.endif
+.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
+    .set DEINTERLEAVE_32BPP_ENABLED, 1
+.else
+    .set DEINTERLEAVE_32BPP_ENABLED, 0
+.endif
+
+    init
+    mov         DST_R, DST_W
+
+    cmp         W, #pixblock_size
+    blt         8f
+
+    ensure_destination_ptr_alignment process_pixblock_head, \
+                                     process_pixblock_tail, \
+                                     process_pixblock_tail_head
+
+    subs        W, W, #pixblock_size
+    blt         7f
+
+    /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */
+    pixld_a     pixblock_size, dst_r_bpp, \
+                (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
+    pixld       pixblock_size, src_bpp, \
+                (src_basereg - pixblock_size * src_bpp / 64), SRC
+    pixld       pixblock_size, mask_bpp, \
+                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+    process_pixblock_head
+    subs        W, W, #pixblock_size
+    blt         2f
+1:
+    process_pixblock_tail_head
+    subs        W, W, #pixblock_size
+    bge         1b
+2:
+    process_pixblock_tail
+    pixst_a     pixblock_size, dst_w_bpp, \
+                (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
+7:
+    /* Process the remaining trailing pixels in the scanline (dst aligned) */
+    process_trailing_pixels 0, 1, \
+                            process_pixblock_head, \
+                            process_pixblock_tail, \
+                            process_pixblock_tail_head
+
+    cleanup
+    bx         lr  /* exit */
+8:
+    /* Process the remaining trailing pixels in the scanline (dst unaligned) */
+    process_trailing_pixels 0, 0, \
+                            process_pixblock_head, \
+                            process_pixblock_tail, \
+                            process_pixblock_tail_head
+
+    cleanup
+    bx          lr  /* exit */
+
+    .unreq      SRC
+    .unreq      MASK
+    .unreq      DST_R
+    .unreq      DST_W
+    .unreq      W
+    .endfunc
+.endm
+
+.macro default_init
+.endm
+
+.macro default_cleanup
+.endm
--- a/gfx/cairo/libpixman/src/pixman-arm-neon.c
+++ b/gfx/cairo/libpixman/src/pixman-arm-neon.c
@@ -1,1387 +1,563 @@
 /*
- * Copyright © 2009 Mozilla Corporation
+ * Copyright © 2009 ARM Ltd, Movial Creative Technologies Oy
  *
  * Permission to use, copy, modify, distribute, and sell this software and its
  * documentation for any purpose is hereby granted without fee, provided that
  * the above copyright notice appear in all copies and that both that
  * copyright notice and this permission notice appear in supporting
- * documentation, and that the name of Mozilla Corporation not be used in
+ * documentation, and that the name of ARM Ltd not be used in
  * advertising or publicity pertaining to distribution of the software without
- * specific, written prior permission.  Mozilla Corporation makes no
+ * specific, written prior permission.  ARM Ltd makes no
  * representations about the suitability of this software for any purpose.  It
  * is provided "as is" without express or implied warranty.
  *
  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
  * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
  * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
  * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
  * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
  * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
  * SOFTWARE.
  *
- * Author:  Ian Rickards (ian.rickards@arm.com) 
+ * Author:  Ian Rickards (ian.rickards@arm.com)
+ * Author:  Jonathan Morton (jonathan.morton@movial.com)
+ * Author:  Markku Vire (markku.vire@movial.com)
  *
  */
 
 #ifdef HAVE_CONFIG_H
 #include <config.h>
 #endif
 
-#include "pixman-arm-neon.h"
-
-#include <arm_neon.h>
-
-
-#if !defined(__ARMCC_VERSION) && !defined(FORCE_NO_NEON_INLINE_ASM)
-// [both armcc & gcc set __GNUC__]
-// Use GNU style inline asm on gcc, for best performance
-// Use intrinsics on armcc
-// This switch determines if any GNU style inline asm is allowed
-#define USE_NEON_INLINE_ASM
-#endif
-
-
-static force_inline uint8x8x4_t unpack0565(uint16x8_t rgb)
-{
-    uint16x8_t gb, b;
-    uint8x8x4_t res;
-
-    res.val[3] = vdup_n_u8(0);
-    gb = vshrq_n_u16(rgb, 5);
-    b = vshrq_n_u16(rgb, 5+6);
-    res.val[0] = vmovn_u16(rgb);  // get low 5 bits
-    res.val[1] = vmovn_u16(gb);   // get mid 6 bits
-    res.val[2] = vmovn_u16(b);    // get top 5 bits
-
-    res.val[0] = vshl_n_u8(res.val[0], 3); // shift to top
-    res.val[1] = vshl_n_u8(res.val[1], 2); // shift to top
-    res.val[2] = vshl_n_u8(res.val[2], 3); // shift to top
-
-    res.val[0] = vsri_n_u8(res.val[0], res.val[0], 5); 
-    res.val[1] = vsri_n_u8(res.val[1], res.val[1], 6);
-    res.val[2] = vsri_n_u8(res.val[2], res.val[2], 5);
-
-    return res;
-}
-
-static force_inline uint16x8_t pack0565(uint8x8x4_t s)
-{
-    uint16x8_t rgb, val_g, val_r;
-
-    rgb = vshll_n_u8(s.val[2],8);
-    val_g = vshll_n_u8(s.val[1],8);
-    val_r = vshll_n_u8(s.val[0],8);
-    rgb = vsriq_n_u16(rgb, val_g, 5);
-    rgb = vsriq_n_u16(rgb, val_r, 5+6);
+#include <string.h>
+#include "pixman-private.h"
 
-    return rgb;
-}
-
-static force_inline uint8x8_t neon2mul(uint8x8_t x, uint8x8_t alpha)
-{
-    uint16x8_t tmp,tmp2;
-    uint8x8_t res;
-
-    tmp = vmull_u8(x,alpha);
-    tmp2 = vrshrq_n_u16(tmp,8);
-    res = vraddhn_u16(tmp,tmp2);
-
-    return res;
-}
-
-static force_inline uint8x8x4_t neon8mul(uint8x8x4_t x, uint8x8_t alpha)
-{
-    uint16x8x4_t tmp;
-    uint8x8x4_t res;
-    uint16x8_t qtmp1,qtmp2;
-
-    tmp.val[0] = vmull_u8(x.val[0],alpha);
-    tmp.val[1] = vmull_u8(x.val[1],alpha);
-    tmp.val[2] = vmull_u8(x.val[2],alpha);
-    tmp.val[3] = vmull_u8(x.val[3],alpha);
-
-    qtmp1 = vrshrq_n_u16(tmp.val[0],8);
-    qtmp2 = vrshrq_n_u16(tmp.val[1],8);
-    res.val[0] = vraddhn_u16(tmp.val[0],qtmp1);
-    qtmp1 = vrshrq_n_u16(tmp.val[2],8);
-    res.val[1] = vraddhn_u16(tmp.val[1],qtmp2);
-    qtmp2 = vrshrq_n_u16(tmp.val[3],8);
-    res.val[2] = vraddhn_u16(tmp.val[2],qtmp1);
-    res.val[3] = vraddhn_u16(tmp.val[3],qtmp2);
-
-    return res;
-}
-
-static force_inline uint8x8x4_t neon8qadd(uint8x8x4_t x, uint8x8x4_t y)
-{
-    uint8x8x4_t res;
-
-    res.val[0] = vqadd_u8(x.val[0],y.val[0]);
-    res.val[1] = vqadd_u8(x.val[1],y.val[1]);
-    res.val[2] = vqadd_u8(x.val[2],y.val[2]);
-    res.val[3] = vqadd_u8(x.val[3],y.val[3]);
-
-    return res;
+#define BIND_SRC_NULL_DST(name, src_type, src_cnt, dst_type, dst_cnt)   \
+void                                                                    \
+pixman_composite_##name##_asm_neon (int32_t   w,                        \
+                                    int32_t   h,                        \
+                                    dst_type *dst,                      \
+                                    int32_t   dst_stride,               \
+                                    src_type *src,                      \
+                                    int32_t   src_stride);              \
+                                                                        \
+static void                                                             \
+neon_composite_##name (pixman_implementation_t *imp,                    \
+                       pixman_op_t              op,                     \
+                       pixman_image_t *         src_image,              \
+                       pixman_image_t *         mask_image,             \
+                       pixman_image_t *         dst_image,              \
+                       int32_t                  src_x,                  \
+                       int32_t                  src_y,                  \
+                       int32_t                  mask_x,                 \
+                       int32_t                  mask_y,                 \
+                       int32_t                  dest_x,                 \
+                       int32_t                  dest_y,                 \
+                       int32_t                  width,                  \
+                       int32_t                  height)                 \
+{                                                                       \
+    dst_type *dst_line;                                                 \
+    src_type *src_line;                                                 \
+    int32_t dst_stride, src_stride;                                     \
+                                                                        \
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type,           \
+                           src_stride, src_line, src_cnt);              \
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type,         \
+                           dst_stride, dst_line, dst_cnt);              \
+                                                                        \
+    pixman_composite_##name##_asm_neon (width, height,                  \
+                                        dst_line, dst_stride,           \
+                                        src_line, src_stride);          \
 }
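
For readability, here is a hand-expanded sketch of one invocation of the macro above, BIND_SRC_NULL_DST (src_8888_8888, uint32_t, 1, uint32_t, 1). It adds nothing beyond what the macro body already spells out: a declaration of the assembly fast path plus a thin C wrapper that extracts line pointers and strides and forwards them.

/* Illustrative hand expansion of
 * BIND_SRC_NULL_DST (src_8888_8888, uint32_t, 1, uint32_t, 1). */
void
pixman_composite_src_8888_8888_asm_neon (int32_t w, int32_t h,
                                         uint32_t *dst, int32_t dst_stride,
                                         uint32_t *src, int32_t src_stride);

static void
neon_composite_src_8888_8888 (pixman_implementation_t *imp, pixman_op_t op,
                              pixman_image_t *src_image,
                              pixman_image_t *mask_image,
                              pixman_image_t *dst_image,
                              int32_t src_x, int32_t src_y,
                              int32_t mask_x, int32_t mask_y,
                              int32_t dest_x, int32_t dest_y,
                              int32_t width, int32_t height)
{
    uint32_t *dst_line, *src_line;
    int32_t dst_stride, src_stride;

    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t,
                           src_stride, src_line, 1);
    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t,
                           dst_stride, dst_line, 1);

    pixman_composite_src_8888_8888_asm_neon (width, height,
                                             dst_line, dst_stride,
                                             src_line, src_stride);
}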
 
-
-void
-fbCompositeSrcAdd_8000x8000neon (pixman_op_t op,
-                                pixman_image_t * pSrc,
-                                pixman_image_t * pMask,
-                                pixman_image_t * pDst,
-                                int16_t      xSrc,
-                                int16_t      ySrc,
-                                int16_t      xMask,
-                                int16_t      yMask,
-                                int16_t      xDst,
-                                int16_t      yDst,
-                                uint16_t     width,
-                                uint16_t     height)
-{
-    uint8_t     *dstLine, *dst;
-    uint8_t     *srcLine, *src;
-    int dstStride, srcStride;
-    uint16_t    w;
-
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-
-    if (width>=8)
-    {
-        // Use overlapping 8-pixel method
-        while (height--)
-        {
-            dst = dstLine;
-            dstLine += dstStride;
-            src = srcLine;
-            srcLine += srcStride;
-            w = width;
-
-            uint8_t *keep_dst;
-
-#ifndef USE_NEON_INLINE_ASM
-            uint8x8_t sval,dval,temp;
-
-            sval = vld1_u8((void*)src);
-            dval = vld1_u8((void*)dst);
-            keep_dst = dst;
-
-            temp = vqadd_u8(dval,sval);
-
-            src += (w & 7);
-            dst += (w & 7);
-            w -= (w & 7);
-
-            while (w)
-            {
-                sval = vld1_u8((void*)src);
-                dval = vld1_u8((void*)dst);
-
-                vst1_u8((void*)keep_dst,temp);
-                keep_dst = dst;
-
-                temp = vqadd_u8(dval,sval);
-
-                src+=8;
-                dst+=8;
-                w-=8;
-            }
-            vst1_u8((void*)keep_dst,temp);
-#else
-            asm volatile (
-// avoid using d8-d15 (q4-q7) aapcs callee-save registers
-                        "vld1.8  {d0}, [%[src]]\n\t"
-                        "vld1.8  {d4}, [%[dst]]\n\t"
-                        "mov     %[keep_dst], %[dst]\n\t"
-
-                        "and ip, %[w], #7\n\t"
-                        "add %[src], %[src], ip\n\t"
-                        "add %[dst], %[dst], ip\n\t"
-                        "subs %[w], %[w], ip\n\t"
-                        "b 9f\n\t"
-// LOOP
-                        "2:\n\t"
-                        "vld1.8  {d0}, [%[src]]!\n\t"
-                        "vld1.8  {d4}, [%[dst]]!\n\t"
-                        "vst1.8  {d20}, [%[keep_dst]]\n\t"
-                        "sub     %[keep_dst], %[dst], #8\n\t"
-                        "subs %[w], %[w], #8\n\t"
-                        "9:\n\t"
-                        "vqadd.u8 d20, d0, d4\n\t"
-
-                        "bne 2b\n\t"
-
-                        "1:\n\t"
-                        "vst1.8  {d20}, [%[keep_dst]]\n\t"
-
-                        : [w] "+r" (w), [src] "+r" (src), [dst] "+r" (dst), [keep_dst] "+r" (keep_dst)
-                        :
-                        : "ip", "cc", "memory", "d0","d4",
-                          "d20"
-                        );
-#endif
-        }
-    }
-    else
-    {
-        while (height--)
-        {
-            dst = dstLine;
-            dstLine += dstStride;
-            src = srcLine;
-            srcLine += srcStride;
-            w = width;
-            uint8x8_t sval, dval;
-            uint8_t *dst4, *dst2;
-
-            if (w&4)
-            {
-                sval = vreinterpret_u8_u32(vld1_lane_u32((void*)src,vreinterpret_u32_u8(sval),1));
-                dval = vreinterpret_u8_u32(vld1_lane_u32((void*)dst,vreinterpret_u32_u8(dval),1));
-                dst4=dst;
-                src+=4;
-                dst+=4;
-            }
-            if (w&2)
-            {
-                sval = vreinterpret_u8_u16(vld1_lane_u16((void*)src,vreinterpret_u16_u8(sval),1));
-                dval = vreinterpret_u8_u16(vld1_lane_u16((void*)dst,vreinterpret_u16_u8(dval),1));
-                dst2=dst;
-                src+=2;
-                dst+=2;
-            }
-            if (w&1)
-            {
-                sval = vld1_lane_u8((void*)src,sval,1);
-                dval = vld1_lane_u8((void*)dst,dval,1);
-            }
-
-            dval = vqadd_u8(dval,sval);
-
-            if (w&1)
-                vst1_lane_u8((void*)dst,dval,1);
-            if (w&2)
-                vst1_lane_u16((void*)dst2,vreinterpret_u16_u8(dval),1);
-            if (w&4)
-                vst1_lane_u32((void*)dst4,vreinterpret_u32_u8(dval),1);
-        }
-    }
+#define BIND_N_NULL_DST(name, dst_type, dst_cnt)                        \
+void                                                                    \
+pixman_composite_##name##_asm_neon (int32_t    w,                       \
+                                    int32_t    h,                       \
+                                    dst_type  *dst,                     \
+                                    int32_t    dst_stride,              \
+                                    uint32_t   src);                    \
+                                                                        \
+static void                                                             \
+neon_composite_##name (pixman_implementation_t *imp,                    \
+                       pixman_op_t              op,                     \
+                       pixman_image_t *         src_image,              \
+                       pixman_image_t *         mask_image,             \
+                       pixman_image_t *         dst_image,              \
+                       int32_t                  src_x,                  \
+                       int32_t                  src_y,                  \
+                       int32_t                  mask_x,                 \
+                       int32_t                  mask_y,                 \
+                       int32_t                  dest_x,                 \
+                       int32_t                  dest_y,                 \
+                       int32_t                  width,                  \
+                       int32_t                  height)                 \
+{                                                                       \
+    dst_type  *dst_line;                                                \
+    int32_t    dst_stride;                                              \
+    uint32_t   src;                                                     \
+                                                                        \
+    src = _pixman_image_get_solid (src_image, dst_image->bits.format);  \
+                                                                        \
+    if (src == 0)                                                       \
+	return;                                                         \
+                                                                        \
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type,         \
+                           dst_stride, dst_line, dst_cnt);              \
+                                                                        \
+    pixman_composite_##name##_asm_neon (width, height,                  \
+                                        dst_line, dst_stride,           \
+                                        src);                           \
 }
 
-
-void
-fbCompositeSrc_8888x8888neon (pixman_op_t op,
-			 pixman_image_t * pSrc,
-			 pixman_image_t * pMask,
-			 pixman_image_t * pDst,
-			 int16_t      xSrc,
-			 int16_t      ySrc,
-			 int16_t      xMask,
-			 int16_t      yMask,
-			 int16_t      xDst,
-			 int16_t      yDst,
-			 uint16_t     width,
-			 uint16_t     height)
-{
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
-    int	dstStride, srcStride;
-    uint32_t	w;
-
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-
-    if (width>=8)
-    {
-        // Use overlapping 8-pixel method  
-        while (height--)
-        {
-	    dst = dstLine;
-	    dstLine += dstStride;
-	    src = srcLine;
-	    srcLine += srcStride;
-	    w = width;
-
-            uint32_t *keep_dst;
-
-#ifndef USE_NEON_INLINE_ASM
-            uint8x8x4_t sval,dval,temp;
-
-            sval = vld4_u8((void*)src);
-            dval = vld4_u8((void*)dst);
-            keep_dst = dst;
-
-            temp = neon8mul(dval,vmvn_u8(sval.val[3]));
-            temp = neon8qadd(sval,temp);
-
-            src += (w & 7);
-            dst += (w & 7);
-            w -= (w & 7);
-
-            while (w)
-            {
-                sval = vld4_u8((void*)src);
-                dval = vld4_u8((void*)dst);
-
-                vst4_u8((void*)keep_dst,temp);
-                keep_dst = dst;
-
-                temp = neon8mul(dval,vmvn_u8(sval.val[3]));
-                temp = neon8qadd(sval,temp);
-
-                src+=8;
-                dst+=8;
-                w-=8;
-            }
-            vst4_u8((void*)keep_dst,temp);
-#else
-            asm volatile (
-// avoid using d8-d15 (q4-q7) aapcs callee-save registers
-                        "vld4.8  {d0-d3}, [%[src]]\n\t"
-                        "vld4.8  {d4-d7}, [%[dst]]\n\t"
-                        "mov     %[keep_dst], %[dst]\n\t"
-
-                        "and ip, %[w], #7\n\t"
-                        "add %[src], %[src], ip, LSL#2\n\t"
-                        "add %[dst], %[dst], ip, LSL#2\n\t"
-                        "subs %[w], %[w], ip\n\t"
-                        "b 9f\n\t"
-// LOOP
-                        "2:\n\t"
-                        "vld4.8  {d0-d3}, [%[src]]!\n\t"
-                        "vld4.8  {d4-d7}, [%[dst]]!\n\t"
-                        "vst4.8  {d20-d23}, [%[keep_dst]]\n\t"
-                        "sub     %[keep_dst], %[dst], #8*4\n\t"
-                        "subs %[w], %[w], #8\n\t"
-                        "9:\n\t"
-                        "vmvn.8  d31, d3\n\t"
-                        "vmull.u8 q10, d31, d4\n\t"
-                        "vmull.u8 q11, d31, d5\n\t"
-                        "vmull.u8 q12, d31, d6\n\t"
-                        "vmull.u8 q13, d31, d7\n\t"
-                        "vrshr.u16 q8, q10, #8\n\t"
-                        "vrshr.u16 q9, q11, #8\n\t"
-                        "vraddhn.u16 d20, q10, q8\n\t"
-                        "vraddhn.u16 d21, q11, q9\n\t"
-                        "vrshr.u16 q8, q12, #8\n\t"
-                        "vrshr.u16 q9, q13, #8\n\t"
-                        "vraddhn.u16 d22, q12, q8\n\t"
-                        "vraddhn.u16 d23, q13, q9\n\t"
-// result in d20-d23
-                        "vqadd.u8 d20, d0, d20\n\t"
-                        "vqadd.u8 d21, d1, d21\n\t"
-                        "vqadd.u8 d22, d2, d22\n\t"
-                        "vqadd.u8 d23, d3, d23\n\t"
-
-                        "bne 2b\n\t"
-
-                        "1:\n\t"
-                        "vst4.8  {d20-d23}, [%[keep_dst]]\n\t"
-
-                        : [w] "+r" (w), [src] "+r" (src), [dst] "+r" (dst), [keep_dst] "+r" (keep_dst)
-                        : 
-                        : "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7",
-                          "d16","d17","d18","d19","d20","d21","d22","d23"
-                        );
-#endif
-        }
-    }
-    else
-    {
-        uint8x8_t    alpha_selector=vreinterpret_u8_u64(vcreate_u64(0x0707070703030303ULL));
-
-        // Handle width<8
-        while (height--)
-        {
-            dst = dstLine;
-            dstLine += dstStride;
-            src = srcLine;
-            srcLine += srcStride;
-            w = width;
-
-            while (w>=2)
-            {
-                uint8x8_t sval,dval;
-
-                /* two 32-bit pixels packed into D-reg; ad-hoc vectorization */
-                sval = vreinterpret_u8_u32(vld1_u32((void*)src));
-                dval = vreinterpret_u8_u32(vld1_u32((void*)dst));
-                dval = neon2mul(dval,vtbl1_u8(vmvn_u8(sval),alpha_selector));
-                vst1_u8((void*)dst,vqadd_u8(sval,dval));
-
-                src+=2;
-                dst+=2;
-                w-=2;
-            }
-
-            if (w)
-            {
-                uint8x8_t sval,dval;
-
-                /* single 32-bit pixel in lane 0 */
-                sval = vreinterpret_u8_u32(vld1_dup_u32((void*)src));  // only interested in lane 0
-                dval = vreinterpret_u8_u32(vld1_dup_u32((void*)dst));  // only interested in lane 0
-                dval = neon2mul(dval,vtbl1_u8(vmvn_u8(sval),alpha_selector));
-                vst1_lane_u32((void*)dst,vreinterpret_u32_u8(vqadd_u8(sval,dval)),0);
-            }
-        }
-    }
+#define BIND_N_MASK_DST(name, mask_type, mask_cnt, dst_type, dst_cnt)   \
+void                                                                    \
+pixman_composite_##name##_asm_neon (int32_t    w,                       \
+                                    int32_t    h,                       \
+                                    dst_type  *dst,                     \
+                                    int32_t    dst_stride,              \
+                                    uint32_t   src,                     \
+                                    int32_t    unused,                  \
+                                    mask_type *mask,                    \
+                                    int32_t    mask_stride);            \
+                                                                        \
+static void                                                             \
+neon_composite_##name (pixman_implementation_t *imp,                    \
+                       pixman_op_t              op,                     \
+                       pixman_image_t *         src_image,              \
+                       pixman_image_t *         mask_image,             \
+                       pixman_image_t *         dst_image,              \
+                       int32_t                  src_x,                  \
+                       int32_t                  src_y,                  \
+                       int32_t                  mask_x,                 \
+                       int32_t                  mask_y,                 \
+                       int32_t                  dest_x,                 \
+                       int32_t                  dest_y,                 \
+                       int32_t                  width,                  \
+                       int32_t                  height)                 \
+{                                                                       \
+    dst_type  *dst_line;                                                \
+    mask_type *mask_line;                                               \
+    int32_t    dst_stride, mask_stride;                                 \
+    uint32_t   src;                                                     \
+                                                                        \
+    src = _pixman_image_get_solid (src_image, dst_image->bits.format);  \
+                                                                        \
+    if (src == 0)                                                       \
+	return;                                                         \
+                                                                        \
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type,         \
+                           dst_stride, dst_line, dst_cnt);              \
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type,       \
+                           mask_stride, mask_line, mask_cnt);           \
+                                                                        \
+    pixman_composite_##name##_asm_neon (width, height,                  \
+                                        dst_line, dst_stride,           \
+                                        src, 0,                         \
+                                        mask_line, mask_stride);        \
 }
 
-
-
-void
-fbCompositeSrc_x888x0565neon (pixman_op_t op,
-                          pixman_image_t * pSrc,
-                          pixman_image_t * pMask,
-                          pixman_image_t * pDst,
-                          int16_t      xSrc,
-                          int16_t      ySrc,
-                          int16_t      xMask,
-                          int16_t      yMask,
-                          int16_t      xDst,
-                          int16_t      yDst,
-                          uint16_t     width,
-                          uint16_t     height)
-{
-    uint16_t    *dstLine, *dst;
-    uint32_t    *srcLine, *src;
-    int dstStride, srcStride;
-    uint32_t    w;
-
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-
-    if (width>=8)
-    {
-        while (height--)
-        {
-            dst = dstLine;
-            dstLine += dstStride;
-            src = srcLine;
-            srcLine += srcStride;
-            w = width;
+#define BIND_SRC_N_DST(name, src_type, src_cnt, dst_type, dst_cnt)      \
+void                                                                    \
+pixman_composite_##name##_asm_neon (int32_t    w,                       \
+                                    int32_t    h,                       \
+                                    dst_type  *dst,                     \
+                                    int32_t    dst_stride,              \
+                                    src_type  *src,                     \
+                                    int32_t    src_stride,              \
+                                    uint32_t   mask);                   \
+                                                                        \
+static void                                                             \
+neon_composite_##name (pixman_implementation_t *imp,                    \
+                       pixman_op_t              op,                     \
+                       pixman_image_t *         src_image,              \
+                       pixman_image_t *         mask_image,             \
+                       pixman_image_t *         dst_image,              \
+                       int32_t                  src_x,                  \
+                       int32_t                  src_y,                  \
+                       int32_t                  mask_x,                 \
+                       int32_t                  mask_y,                 \
+                       int32_t                  dest_x,                 \
+                       int32_t                  dest_y,                 \
+                       int32_t                  width,                  \
+                       int32_t                  height)                 \
+{                                                                       \
+    dst_type  *dst_line;                                                \
+    src_type  *src_line;                                                \
+    int32_t    dst_stride, src_stride;                                  \
+    uint32_t   mask;                                                    \
+                                                                        \
+    mask = _pixman_image_get_solid (mask_image, dst_image->bits.format);\
+                                                                        \
+    if (mask == 0)                                                      \
+	return;                                                         \
+                                                                        \
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type,         \
+                           dst_stride, dst_line, dst_cnt);              \
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type,           \
+                           src_stride, src_line, src_cnt);              \
+                                                                        \
+    pixman_composite_##name##_asm_neon (width, height,                  \
+                                        dst_line, dst_stride,           \
+                                        src_line, src_stride,           \
+                                        mask);                          \
+}
 
-	    do {
-	        while (w>=8)
-	        {
-#ifndef USE_NEON_INLINE_ASM
-	            vst1q_u16(dst, pack0565(vld4_u8((void*)src)));
-#else
-                    asm volatile (
-                        "vld4.8       {d4-d7}, [%[src]]\n\t"
-                        "vshll.u8     q0, d6, #8\n\t"
-                        "vshll.u8     q1, d5, #8\n\t"
-                        "vsriq.u16    q0, q1, #5\t\n"
-                        "vshll.u8     q1, d4, #8\n\t"
-                        "vsriq.u16    q0, q1, #11\t\n"
-                        "vst1.16      {q0}, [%[dst]]\n\t"
-                        :
-                        : [dst] "r" (dst), [src] "r" (src)
-                        : "memory", "d0","d1","d2","d3","d4","d5","d6","d7"
-                        );
-#endif
-	            src+=8;
-	            dst+=8;
-	            w-=8;
-          	}
-                if (w != 0)
-                {
-                    src -= (8-w);
-                    dst -= (8-w);
-                    w = 8;  // do another vector
-                }
-            } while (w!=0);
-        }
-    }
-    else
-    {
-        // Handle width<8
-        while (height--)
-        {
-            dst = dstLine;
-            dstLine += dstStride;
-            src = srcLine;
-            srcLine += srcStride;
-            w = width;
-
-	    while (w>=2)
-	    {
-	        uint32x2_t sval, rgb, g, b;
-	        sval = vld1_u32(src);
-	        rgb = vshr_n_u32(sval,8-5); // r (5 bits) 
-	        g = vshr_n_u32(sval,8+8-6);  // g to bottom byte
-	        rgb = vsli_n_u32(rgb, g, 5);
-	        b = vshr_n_u32(sval,8+8+8-5);  // b to bottom byte
-                rgb = vsli_n_u32(rgb, b, 11);
-	        vst1_lane_u16(dst++,vreinterpret_u16_u32(rgb),0);
-	        vst1_lane_u16(dst++,vreinterpret_u16_u32(rgb),2);
-	        src+=2;
-	        w-=2;
-	    }
-            if (w)
-            {
-                uint32x2_t sval, rgb, g, b;
-                sval = vld1_dup_u32(src);
-                rgb = vshr_n_u32(sval,8-5); // r (5 bits)
-                g = vshr_n_u32(sval,8+8-6);  // g to bottom byte
-                rgb = vsli_n_u32(rgb, g, 5);
-                b = vshr_n_u32(sval,8+8+8-5);  // b to bottom byte
-                rgb = vsli_n_u32(rgb, b, 11);
-                vst1_lane_u16(dst++,vreinterpret_u16_u32(rgb),0);
-            }
-	}
-    }
+#define BIND_SRC_MASK_DST(name, src_type, src_cnt, mask_type, mask_cnt, \
+                          dst_type, dst_cnt)                            \
+void                                                                    \
+pixman_composite_##name##_asm_neon (int32_t    w,                       \
+                                    int32_t    h,                       \
+                                    dst_type  *dst,                     \
+                                    int32_t    dst_stride,              \
+                                    src_type  *src,                     \
+                                    int32_t    src_stride,              \
+                                    mask_type *mask,                    \
+                                    int32_t    mask_stride);            \
+                                                                        \
+static void                                                             \
+neon_composite_##name (pixman_implementation_t *imp,                    \
+                       pixman_op_t              op,                     \
+                       pixman_image_t *         src_image,              \
+                       pixman_image_t *         mask_image,             \
+                       pixman_image_t *         dst_image,              \
+                       int32_t                  src_x,                  \
+                       int32_t                  src_y,                  \
+                       int32_t                  mask_x,                 \
+                       int32_t                  mask_y,                 \
+                       int32_t                  dest_x,                 \
+                       int32_t                  dest_y,                 \
+                       int32_t                  width,                  \
+                       int32_t                  height)                 \
+{                                                                       \
+    dst_type  *dst_line;                                                \
+    src_type  *src_line;                                                \
+    mask_type *mask_line;                                               \
+    int32_t    dst_stride, src_stride, mask_stride;                     \
+                                                                        \
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, dst_type,         \
+                           dst_stride, dst_line, dst_cnt);              \
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type,           \
+                           src_stride, src_line, src_cnt);              \
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type,       \
+                           mask_stride, mask_line, mask_cnt);           \
+                                                                        \
+    pixman_composite_##name##_asm_neon (width, height,                  \
+                                        dst_line, dst_stride,           \
+                                        src_line, src_stride,           \
+                                        mask_line, mask_stride);        \
 }
 
 
-void
-fbCompositeSrc_8888x8x8888neon (pixman_op_t op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
-			       int16_t	xSrc,
-			       int16_t	ySrc,
-			       int16_t      xMask,
-			       int16_t      yMask,
-			       int16_t      xDst,
-			       int16_t      yDst,
-			       uint16_t     width,
-			       uint16_t     height)
-{
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
-    uint32_t	mask;
-    int	dstStride, srcStride;
-    uint32_t	w;
-    uint8x8_t mask_alpha;
+BIND_SRC_NULL_DST(src_8888_8888, uint32_t, 1, uint32_t, 1)
+BIND_SRC_NULL_DST(src_0565_0565, uint16_t, 1, uint16_t, 1)
+BIND_SRC_NULL_DST(src_0888_0888, uint8_t, 3, uint8_t, 3)
+BIND_SRC_NULL_DST(src_8888_0565, uint32_t, 1, uint16_t, 1)
+BIND_SRC_NULL_DST(src_0565_8888, uint16_t, 1, uint32_t, 1)
+BIND_SRC_NULL_DST(src_0888_8888_rev, uint8_t, 3, uint32_t, 1)
+BIND_SRC_NULL_DST(src_0888_0565_rev, uint8_t, 3, uint16_t, 1)
+BIND_SRC_NULL_DST(src_pixbuf_8888, uint32_t, 1, uint32_t, 1)
+BIND_SRC_NULL_DST(add_8000_8000, uint8_t, 1, uint8_t, 1)
+BIND_SRC_NULL_DST(add_8888_8888, uint32_t, 1, uint32_t, 1)
+
+BIND_N_NULL_DST(over_n_0565, uint16_t, 1)
+BIND_N_NULL_DST(over_n_8888, uint32_t, 1)
 
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
+BIND_SRC_NULL_DST(over_8888_0565, uint32_t, 1, uint16_t, 1)
+BIND_SRC_NULL_DST(over_8888_8888, uint32_t, 1, uint32_t, 1)
 
-    fbComposeGetSolid (pMask, mask, pDst->bits.format);
-    mask_alpha = vdup_n_u8((mask) >> 24);
+BIND_N_MASK_DST(over_n_8_0565, uint8_t, 1, uint16_t, 1)
+BIND_N_MASK_DST(over_n_8_8888, uint8_t, 1, uint32_t, 1)
+BIND_N_MASK_DST(add_n_8_8, uint8_t, 1, uint8_t, 1)
 
-    if (width>=8)
-    {
-        // Use overlapping 8-pixel method
-        while (height--)
-        {
-            dst = dstLine;
-            dstLine += dstStride;
-            src = srcLine;
-            srcLine += srcStride;
-            w = width;
+BIND_SRC_N_DST(over_8888_n_8888, uint32_t, 1, uint32_t, 1)
 
-            uint32_t *keep_dst;
-
-#ifndef USE_NEON_INLINE_ASM
-            uint8x8x4_t sval,dval,temp;
-
-            sval = vld4_u8((void*)src);
-            dval = vld4_u8((void*)dst);
-            keep_dst = dst;
+BIND_SRC_MASK_DST(add_8_8_8, uint8_t, 1, uint8_t, 1, uint8_t, 1)
+BIND_SRC_MASK_DST(add_8888_8888_8888, uint32_t, 1, uint32_t, 1, uint32_t, 1)
+BIND_SRC_MASK_DST(over_8888_8_8888, uint32_t, 1, uint8_t, 1, uint32_t, 1)
+BIND_SRC_MASK_DST(over_8888_8888_8888, uint32_t, 1, uint32_t, 1, uint32_t, 1)
 
-            sval = neon8mul(sval,mask_alpha);
-            temp = neon8mul(dval,vmvn_u8(sval.val[3]));
-            temp = neon8qadd(sval,temp);
-
-            src += (w & 7);
-            dst += (w & 7);
-            w -= (w & 7);
+void
+pixman_composite_src_n_8_asm_neon (int32_t   w,
+                                   int32_t   h,
+                                   uint8_t  *dst,
+                                   int32_t   dst_stride,
+                                   uint8_t   src);
 
-            while (w)
-            {
-                sval = vld4_u8((void*)src);
-                dval = vld4_u8((void*)dst);
-
-                vst4_u8((void*)keep_dst,temp);
-                keep_dst = dst;
-
-                sval = neon8mul(sval,mask_alpha);
-                temp = neon8mul(dval,vmvn_u8(sval.val[3]));
-                temp = neon8qadd(sval,temp);
+void
+pixman_composite_src_n_0565_asm_neon (int32_t   w,
+                                      int32_t   h,
+                                      uint16_t *dst,
+                                      int32_t   dst_stride,
+                                      uint16_t  src);
 
-                src+=8;
-                dst+=8;
-                w-=8;
-            }
-            vst4_u8((void*)keep_dst,temp);
-#else
-            asm volatile (
-// avoid using d8-d15 (q4-q7) aapcs callee-save registers
-                        "vdup.32      d30, %[mask]\n\t"
-                        "vdup.8       d30, d30[3]\n\t"
+void
+pixman_composite_src_n_8888_asm_neon (int32_t   w,
+                                      int32_t   h,
+                                      uint32_t *dst,
+                                      int32_t   dst_stride,
+                                      uint32_t  src);
 
-                        "vld4.8       {d0-d3}, [%[src]]\n\t"
-                        "vld4.8       {d4-d7}, [%[dst]]\n\t"
-                        "mov  %[keep_dst], %[dst]\n\t"
-
-                        "and  ip, %[w], #7\n\t"
-                        "add  %[src], %[src], ip, LSL#2\n\t"
-                        "add  %[dst], %[dst], ip, LSL#2\n\t"
-                        "subs  %[w], %[w], ip\n\t"
-                        "b 9f\n\t"
-// LOOP
-                        "2:\n\t"
-                        "vld4.8       {d0-d3}, [%[src]]!\n\t"
-                        "vld4.8       {d4-d7}, [%[dst]]!\n\t"
-                        "vst4.8       {d20-d23}, [%[keep_dst]]\n\t"
-                        "sub  %[keep_dst], %[dst], #8*4\n\t"
-                        "subs  %[w], %[w], #8\n\t"
+static pixman_bool_t
+pixman_fill_neon (uint32_t *bits,
+                  int       stride,
+                  int       bpp,
+                  int       x,
+                  int       y,
+                  int       width,
+                  int       height,
+                  uint32_t  _xor)
+{
+    /* stride is always a multiple of 32-bit units in pixman */
+    uint32_t byte_stride = stride * sizeof(uint32_t);
 
-                        "9:\n\t"
-                        "vmull.u8     q10, d30, d0\n\t"
-                        "vmull.u8     q11, d30, d1\n\t"
-                        "vmull.u8     q12, d30, d2\n\t"
-                        "vmull.u8     q13, d30, d3\n\t"
-                        "vrshr.u16    q8, q10, #8\n\t"
-                        "vrshr.u16    q9, q11, #8\n\t"
-                        "vraddhn.u16  d0, q10, q8\n\t"
-                        "vraddhn.u16  d1, q11, q9\n\t"
-                        "vrshr.u16    q9, q13, #8\n\t"
-                        "vrshr.u16    q8, q12, #8\n\t"
-                        "vraddhn.u16  d3, q13, q9\n\t"
-                        "vraddhn.u16  d2, q12, q8\n\t"
-
-                        "vmvn.8       d31, d3\n\t"
-                        "vmull.u8     q10, d31, d4\n\t"
-                        "vmull.u8     q11, d31, d5\n\t"
-                        "vmull.u8     q12, d31, d6\n\t"
-                        "vmull.u8     q13, d31, d7\n\t"
-                        "vrshr.u16    q8, q10, #8\n\t"
-                        "vrshr.u16    q9, q11, #8\n\t"
-                        "vraddhn.u16  d20, q10, q8\n\t"
-                        "vrshr.u16    q8, q12, #8\n\t"
-                        "vraddhn.u16  d21, q11, q9\n\t"
-                        "vrshr.u16    q9, q13, #8\n\t"
-                        "vraddhn.u16  d22, q12, q8\n\t"
-                        "vraddhn.u16  d23, q13, q9\n\t"
-// result in d20-d23
-                        "vqadd.u8     d20, d0, d20\n\t"
-                        "vqadd.u8     d21, d1, d21\n\t"
-                        "vqadd.u8     d22, d2, d22\n\t"
-                        "vqadd.u8     d23, d3, d23\n\t"
-
-                        "bne  2b\n\t"
-
-                        "1:\n\t"
-                        "vst4.8       {d20-d23}, [%[keep_dst]]\n\t"
-
-                        : [w] "+r" (w), [src] "+r" (src), [dst] "+r" (dst), [keep_dst] "+r" (keep_dst)
-                        : [mask] "r" (mask)
-                        : "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7",
-                          "d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27",
-                          "d30","d31"
-                        );
-#endif
-        }
+    switch (bpp)
+    {
+    case 8:
+	pixman_composite_src_n_8_asm_neon (
+		width,
+		height,
+		(uint8_t *)(((char *) bits) + y * byte_stride + x),
+		byte_stride,
+		_xor & 0xff);
+	return TRUE;
+    case 16:
+	pixman_composite_src_n_0565_asm_neon (
+		width,
+		height,
+		(uint16_t *)(((char *) bits) + y * byte_stride + x * 2),
+		byte_stride / 2,
+		_xor & 0xffff);
+	return TRUE;
+    case 32:
+	pixman_composite_src_n_8888_asm_neon (
+		width,
+		height,
+		(uint32_t *)(((char *) bits) + y * byte_stride + x * 4),
+		byte_stride / 4,
+		_xor);
+	return TRUE;
+    default:
+	return FALSE;
     }
-    else
-    {
-        uint8x8_t    alpha_selector=vreinterpret_u8_u64(vcreate_u64(0x0707070703030303ULL));
+}
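
A minimal usage sketch for the fill helper above; the buffer, its dimensions and the fill value are hypothetical, and the arithmetic in the comment only restates the 16 bpp case of the switch.

/* Hypothetical 64-row destination, 100 uint32_t (400 bytes) per row. */
static uint32_t example_bits[100 * 64];

static void
fill_example_sketch (void)
{
    /* Fill a 64x32 rectangle at (10, 20) of a 16 bpp image with pure
     * blue (0x001f in r5g6b5).  byte_stride is 100 * 4 = 400, so the
     * first pixel lives at ((char *) example_bits) + 20 * 400 + 10 * 2
     * and the asm routine receives 400 / 2 = 200 as its stride, i.e.
     * the stride re-expressed in uint16_t pixels. */
    pixman_fill_neon (example_bits, 100, 16, 10, 20, 64, 32, 0x001f);
}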
 
-        // Handle width<8
-        while (height--)
-        {
-            dst = dstLine;
-            dstLine += dstStride;
-            src = srcLine;
-            srcLine += srcStride;
-            w = width;
-
-            while (w>=2)
-            {
-                uint8x8_t sval,dval;
-
-                sval = vreinterpret_u8_u32(vld1_u32((void*)src));
-                dval = vreinterpret_u8_u32(vld1_u32((void*)dst));
-
-                /* sval * const alpha_mul */
-                sval = neon2mul(sval,mask_alpha);
+static pixman_bool_t
+pixman_blt_neon (uint32_t *src_bits,
+                 uint32_t *dst_bits,
+                 int       src_stride,
+                 int       dst_stride,
+                 int       src_bpp,
+                 int       dst_bpp,
+                 int       src_x,
+                 int       src_y,
+                 int       dst_x,
+                 int       dst_y,
+                 int       width,
+                 int       height)
+{
+    if (src_bpp != dst_bpp)
+	return FALSE;
 
-                /* dval * 255-(src alpha) */
-                dval = neon2mul(dval,vtbl1_u8(vmvn_u8(sval), alpha_selector));
-
-                vst1_u8((void*)dst,vqadd_u8(sval,dval));
-
-                src+=2;
-                dst+=2;
-                w-=2;
-            }
-
-            if (w)
-            {
-                uint8x8_t sval,dval;
-
-                sval = vreinterpret_u8_u32(vld1_dup_u32((void*)src));
-                dval = vreinterpret_u8_u32(vld1_dup_u32((void*)dst));
-
-                /* sval * const alpha_mul */
-                sval = neon2mul(sval,mask_alpha);
-
-                /* dval * 255-(src alpha) */
-                dval = neon2mul(dval,vtbl1_u8(vmvn_u8(sval), alpha_selector));
-
-                vst1_lane_u32((void*)dst,vreinterpret_u32_u8(vqadd_u8(sval,dval)),0);
-            }
-        }
+    switch (src_bpp)
+    {
+    case 16:
+	pixman_composite_src_0565_0565_asm_neon (
+		width, height,
+		(uint16_t *)(((char *) dst_bits) +
+		dst_y * dst_stride * 4 + dst_x * 2), dst_stride * 2,
+		(uint16_t *)(((char *) src_bits) +
+		src_y * src_stride * 4 + src_x * 2), src_stride * 2);
+	return TRUE;
+    case 32:
+	pixman_composite_src_8888_8888_asm_neon (
+		width, height,
+		(uint32_t *)(((char *) dst_bits) +
+		dst_y * dst_stride * 4 + dst_x * 4), dst_stride,
+		(uint32_t *)(((char *) src_bits) +
+		src_y * src_stride * 4 + src_x * 4), src_stride);
+	return TRUE;
+    default:
+	return FALSE;
     }
 }
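
A worked example of the unit conversions in pixman_blt_neon above (the numbers are hypothetical): pixman hands both strides in uint32_t units, so the byte pitch is stride * 4 regardless of depth, while the assembly routines expect the stride in destination pixels.

/* Hypothetical 16 bpp blit with dst_stride == 100 uint32_t units:
 *   row pitch in bytes       : 100 * 4 = 400
 *   pointer for (10, 20)     : ((char *) dst_bits) + 20 * 400 + 10 * 2
 *   stride passed to the asm : 100 * 2 = 200 (uint16_t pixels)
 * For 32 bpp the stride is already a pixel count and is passed as-is. */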
 
-
-
-void
-fbCompositeSolidMask_nx8x0565neon (pixman_op_t op,
-                               pixman_image_t * pSrc,
-                               pixman_image_t * pMask,
-                               pixman_image_t * pDst,
-                               int16_t      xSrc,
-                               int16_t      ySrc,
-                               int16_t      xMask,
-                               int16_t      yMask,
-                               int16_t      xDst,
-                               int16_t      yDst,
-                               uint16_t     width,
-                               uint16_t     height)
+static const pixman_fast_path_t arm_neon_fast_path_array[] =
 {
-    uint32_t     src, srca;
-    uint16_t    *dstLine, *dst;
-    uint8_t     *maskLine, *mask;
-    int          dstStride, maskStride;
-    uint32_t     w;
-    uint8x8_t    sval2;
-    uint8x8x4_t  sval8;
-
-    fbComposeGetSolid(pSrc, src, pDst->bits.format);
-
-    srca = src >> 24;
-    if (src == 0)
-        return;
-
-    sval2=vreinterpret_u8_u32(vdup_n_u32(src));
-    sval8.val[0]=vdup_lane_u8(sval2,0);
-    sval8.val[1]=vdup_lane_u8(sval2,1);
-    sval8.val[2]=vdup_lane_u8(sval2,2);
-    sval8.val[3]=vdup_lane_u8(sval2,3);
-
-    fbComposeGetStart (pDst, xDst, yDst, uint16_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
-
-    if (width>=8)
-    {
-        // Use overlapping 8-pixel method, modified to avoid rewritten dest being reused
-        while (height--)
-        {
-            uint16_t *keep_dst;
-
-            dst = dstLine;
-            dstLine += dstStride;
-            mask = maskLine;
-            maskLine += maskStride;
-            w = width;
-
-#ifndef USE_NEON_INLINE_ASM
-            uint8x8_t alpha;
-            uint16x8_t dval, temp; 
-            uint8x8x4_t sval8temp;
-
-            alpha = vld1_u8((void*)mask);
-            dval = vld1q_u16((void*)dst);
-            keep_dst = dst;
-
-            sval8temp = neon8mul(sval8,alpha);
-            temp = pack0565(neon8qadd(sval8temp,neon8mul(unpack0565(dval),vmvn_u8(sval8temp.val[3]))));
-
-            mask += (w & 7);
-            dst += (w & 7);
-            w -= (w & 7);
-
-            while (w)
-            {
-                dval = vld1q_u16((void*)dst);
-	        alpha = vld1_u8((void*)mask);
-
-                vst1q_u16((void*)keep_dst,temp);
-                keep_dst = dst;
-
-                sval8temp = neon8mul(sval8,alpha);
-                temp = pack0565(neon8qadd(sval8temp,neon8mul(unpack0565(dval),vmvn_u8(sval8temp.val[3]))));
-
-                mask+=8;
-                dst+=8;
-                w-=8;
-            }
-            vst1q_u16((void*)keep_dst,temp);
-#else
-        asm volatile (
-                        "vdup.32      d0, %[src]\n\t"
-                        "vdup.8       d1, d0[1]\n\t"
-                        "vdup.8       d2, d0[2]\n\t"
-                        "vdup.8       d3, d0[3]\n\t"
-                        "vdup.8       d0, d0[0]\n\t"
-
-                        "vld1.8       {q12}, [%[dst]]\n\t"
-                        "vld1.8       {d31}, [%[mask]]\n\t"
-                        "mov  %[keep_dst], %[dst]\n\t"
-
-                        "and  ip, %[w], #7\n\t"
-                        "add  %[mask], %[mask], ip\n\t"
-                        "add  %[dst], %[dst], ip, LSL#1\n\t"
-                        "subs  %[w], %[w], ip\n\t"
-                        "b  9f\n\t"
-// LOOP
-                        "2:\n\t"
-
-                        "vld1.16      {q12}, [%[dst]]!\n\t"
-                        "vld1.8       {d31}, [%[mask]]!\n\t"
-                        "vst1.16      {q10}, [%[keep_dst]]\n\t"
-                        "sub  %[keep_dst], %[dst], #8*2\n\t"
-                        "subs  %[w], %[w], #8\n\t"
-                        "9:\n\t"
-// expand 0565 q12 to 8888 {d4-d7}
-                        "vmovn.u16    d4, q12\t\n"
-                        "vshr.u16     q11, q12, #5\t\n"
-                        "vshr.u16     q10, q12, #6+5\t\n"
-                        "vmovn.u16    d5, q11\t\n"
-                        "vmovn.u16    d6, q10\t\n"
-                        "vshl.u8      d4, d4, #3\t\n"
-                        "vshl.u8      d5, d5, #2\t\n"
-                        "vshl.u8      d6, d6, #3\t\n"
-                        "vsri.u8      d4, d4, #5\t\n"
-                        "vsri.u8      d5, d5, #6\t\n"
-                        "vsri.u8      d6, d6, #5\t\n"
-
-                        "vmull.u8     q10, d31, d0\n\t"
-                        "vmull.u8     q11, d31, d1\n\t"
-                        "vmull.u8     q12, d31, d2\n\t"
-                        "vmull.u8     q13, d31, d3\n\t"
-                        "vrshr.u16    q8, q10, #8\n\t"
-                        "vrshr.u16    q9, q11, #8\n\t"
-                        "vraddhn.u16  d20, q10, q8\n\t"
-                        "vraddhn.u16  d21, q11, q9\n\t"
-                        "vrshr.u16    q9, q13, #8\n\t"
-                        "vrshr.u16    q8, q12, #8\n\t"
-                        "vraddhn.u16  d23, q13, q9\n\t"
-                        "vraddhn.u16  d22, q12, q8\n\t"
-
-// duplicate in 4/2/1 & 8pix vsns
-                        "vmvn.8       d30, d23\n\t"
-                        "vmull.u8     q14, d30, d6\n\t"
-                        "vmull.u8     q13, d30, d5\n\t"
-                        "vmull.u8     q12, d30, d4\n\t"
-                        "vrshr.u16    q8, q14, #8\n\t"
-                        "vrshr.u16    q9, q13, #8\n\t"
-                        "vraddhn.u16  d6, q14, q8\n\t"
-                        "vrshr.u16    q8, q12, #8\n\t"
-                        "vraddhn.u16  d5, q13, q9\n\t"
-                        "vqadd.u8     d6, d6, d22\n\t"  // moved up
-                        "vraddhn.u16  d4, q12, q8\n\t"
-// intentionally don't calculate alpha
-// result in d4-d6
-
-//                      "vqadd.u8     d6, d6, d22\n\t"  ** moved up
-                        "vqadd.u8     d5, d5, d21\n\t"
-                        "vqadd.u8     d4, d4, d20\n\t"
-
-// pack 8888 {d20-d23} to 0565 q10
-                        "vshll.u8     q10, d6, #8\n\t"
-                        "vshll.u8     q3, d5, #8\n\t"
-                        "vshll.u8     q2, d4, #8\n\t"
-                        "vsri.u16     q10, q3, #5\t\n"
-                        "vsri.u16     q10, q2, #11\t\n"
-
-                        "bne 2b\n\t"
+    { PIXMAN_OP_SRC,  PIXMAN_r5g6b5,   PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_0565_0565    },
+    { PIXMAN_OP_SRC,  PIXMAN_b5g6r5,   PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_0565_0565    },
+    { PIXMAN_OP_SRC,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_8888_0565    },
+    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_8888_0565    },
+    { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_8888_0565    },
+    { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_src_8888_0565    },
+    { PIXMAN_OP_SRC,  PIXMAN_r5g6b5,   PIXMAN_null,     PIXMAN_a8r8g8b8, neon_composite_src_0565_8888    },
+    { PIXMAN_OP_SRC,  PIXMAN_r5g6b5,   PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_src_0565_8888    },
+    { PIXMAN_OP_SRC,  PIXMAN_b5g6r5,   PIXMAN_null,     PIXMAN_a8b8g8r8, neon_composite_src_0565_8888    },
+    { PIXMAN_OP_SRC,  PIXMAN_b5g6r5,   PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_src_0565_8888    },
+    { PIXMAN_OP_SRC,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_src_8888_8888    },
+    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_src_8888_8888    },
+    { PIXMAN_OP_SRC,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_src_8888_8888    },
+    { PIXMAN_OP_SRC,  PIXMAN_x8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_src_8888_8888    },
+    { PIXMAN_OP_SRC,  PIXMAN_r8g8b8,   PIXMAN_null,     PIXMAN_r8g8b8,   neon_composite_src_0888_0888    },
+    { PIXMAN_OP_SRC,  PIXMAN_b8g8r8,   PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_src_0888_8888_rev },
+    { PIXMAN_OP_SRC,  PIXMAN_b8g8r8,   PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_src_0888_0565_rev },
+    { PIXMAN_OP_SRC,  PIXMAN_pixbuf,   PIXMAN_pixbuf,   PIXMAN_a8r8g8b8, neon_composite_src_pixbuf_8888  },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_r5g6b5,   neon_composite_over_n_8_0565    },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_b5g6r5,   neon_composite_over_n_8_0565    },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_composite_over_n_8_8888    },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_n_8_8888    },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, neon_composite_over_n_8_8888    },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, neon_composite_over_n_8_8888    },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_over_n_0565      },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_a8r8g8b8, neon_composite_over_n_8888      },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_over_n_8888      },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid,    PIXMAN_a8r8g8b8, neon_composite_over_8888_n_8888 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid,    PIXMAN_x8r8g8b8, neon_composite_over_8888_n_8888 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_a8r8g8b8, neon_composite_over_8888_8_8888 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8,       PIXMAN_x8r8g8b8, neon_composite_over_8888_8_8888 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_a8b8g8r8, neon_composite_over_8888_8_8888 },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_a8,       PIXMAN_x8b8g8r8, neon_composite_over_8888_8_8888 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, neon_composite_over_8888_8888_8888 },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   neon_composite_over_8888_0565   },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_b5g6r5,   neon_composite_over_8888_0565   },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, neon_composite_over_8888_8888   },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, neon_composite_over_8888_8888   },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, neon_composite_over_8888_8888   },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, neon_composite_over_8888_8888   },
+    { PIXMAN_OP_ADD,  PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8,       neon_composite_add_n_8_8        },
+    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_a8,       PIXMAN_a8,       neon_composite_add_8_8_8        },
+    { PIXMAN_OP_ADD,  PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, PIXMAN_a8r8g8b8, neon_composite_add_8888_8888_8888 },
+    { PIXMAN_OP_ADD,  PIXMAN_a8,       PIXMAN_null,     PIXMAN_a8,       neon_composite_add_8000_8000    },
+    { PIXMAN_OP_ADD,  PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, neon_composite_add_8888_8888    },
+    { PIXMAN_OP_ADD,  PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, neon_composite_add_8888_8888    },
+    { PIXMAN_OP_NONE },
+};
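
Each row of the table above names an operator plus the source, mask and destination formats that a NEON routine handles directly. The loop below only illustrates how such a table might be scanned for an exact format match; the helper name and the struct field names are assumptions made for the sketch, and the real matching is done by _pixman_run_fast_path (), which also considers image flags and transformations.

/* Illustrative scan of a fast-path table; not the actual pixman
 * matcher.  Field names (src_format, mask_format, dest_format) are
 * assumed for this sketch. */
static const pixman_fast_path_t *
find_neon_fast_path_sketch (const pixman_fast_path_t *paths,
                            pixman_op_t               op,
                            pixman_format_code_t      src,
                            pixman_format_code_t      mask,
                            pixman_format_code_t      dest)
{
    const pixman_fast_path_t *p;

    for (p = paths; p->op != PIXMAN_OP_NONE; p++)
    {
        if (p->op == op &&
            p->src_format == src &&
            p->mask_format == mask &&
            p->dest_format == dest)
        {
            return p;
        }
    }

    return NULL;
}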
 
-                        "1:\n\t"
-                        "vst1.16      {q10}, [%[keep_dst]]\n\t"
-
-                        : [w] "+r" (w), [dst] "+r" (dst), [mask] "+r" (mask), [keep_dst] "+r" (keep_dst)
-                        : [src] "r" (src)
-                        : "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7",
-                          "d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27","d28","d29",
-                          "d30","d31"
-                        );
-#endif
-        }
-    }
-    else
-    {
-        while (height--)
-        {
-            void *dst4, *dst2;
-
-            dst = dstLine;
-            dstLine += dstStride;
-            mask = maskLine;
-            maskLine += maskStride;
-            w = width;
-
-
-#ifndef USE_NEON_INLINE_ASM
-            uint8x8_t alpha;
-            uint16x8_t dval, temp;
-            uint8x8x4_t sval8temp;
-
-            if (w&4)
-            {
-                alpha = vreinterpret_u8_u32(vld1_lane_u32((void*)mask,vreinterpret_u32_u8(alpha),1));
-                dval = vreinterpretq_u16_u64(vld1q_lane_u64((void*)dst,vreinterpretq_u64_u16(dval),1));
-                dst4=dst;
-                mask+=4;
-                dst+=4;
-            }
-            if (w&2)
-            {
-                alpha = vreinterpret_u8_u16(vld1_lane_u16((void*)mask,vreinterpret_u16_u8(alpha),1));
-                dval = vreinterpretq_u16_u32(vld1q_lane_u32((void*)dst,vreinterpretq_u32_u16(dval),1));
-                dst2=dst;
-                mask+=2;
-                dst+=2;
-            }
-            if (w&1)
-            {
-                alpha = vld1_lane_u8((void*)mask,alpha,1);
-                dval = vld1q_lane_u16((void*)dst,dval,1);
-            }
-
-            sval8temp = neon8mul(sval8,alpha);
-            temp = pack0565(neon8qadd(sval8temp,neon8mul(unpack0565(dval),vmvn_u8(sval8temp.val[3]))));
-
-            if (w&1)
-                vst1q_lane_u16((void*)dst,temp,1);
-            if (w&2)
-                vst1q_lane_u32((void*)dst2,vreinterpretq_u32_u16(temp),1);
-            if (w&4)
-                vst1q_lane_u64((void*)dst4,vreinterpretq_u64_u16(temp),1);
-#else
-            asm volatile (
-                        "vdup.32      d0, %[src]\n\t"
-                        "vdup.8       d1, d0[1]\n\t"
-                        "vdup.8       d2, d0[2]\n\t"
-                        "vdup.8       d3, d0[3]\n\t"
-                        "vdup.8       d0, d0[0]\n\t"
-
-                        "tst  %[w], #4\t\n"
-                        "beq  skip_load4\t\n"
-
-                        "vld1.64      {d25}, [%[dst]]\n\t"
-                        "vld1.32      {d31[1]}, [%[mask]]\n\t"
-                        "mov  %[dst4], %[dst]\t\n"
-                        "add  %[mask], %[mask], #4\t\n"
-                        "add  %[dst], %[dst], #4*2\t\n"
+const pixman_fast_path_t *const arm_neon_fast_paths = arm_neon_fast_path_array;
 
-                        "skip_load4:\t\n"
-                        "tst  %[w], #2\t\n"
-                        "beq  skip_load2\t\n"
-                        "vld1.32      {d24[1]}, [%[dst]]\n\t"
-                        "vld1.16      {d31[1]}, [%[mask]]\n\t"
-                        "mov  %[dst2], %[dst]\t\n"
-                        "add  %[mask], %[mask], #2\t\n"
-                        "add  %[dst], %[dst], #2*2\t\n"
-
-                        "skip_load2:\t\n"
-                        "tst  %[w], #1\t\n"
-                        "beq  skip_load1\t\n"
-                        "vld1.16      {d24[1]}, [%[dst]]\n\t"
-                        "vld1.8       {d31[1]}, [%[mask]]\n\t"
-
-                        "skip_load1:\t\n"
-// expand 0565 q12 to 8888 {d4-d7}
-                        "vmovn.u16    d4, q12\t\n"
-                        "vshr.u16     q11, q12, #5\t\n"
-                        "vshr.u16     q10, q12, #6+5\t\n"
-                        "vmovn.u16    d5, q11\t\n"
-                        "vmovn.u16    d6, q10\t\n"
-                        "vshl.u8      d4, d4, #3\t\n"
-                        "vshl.u8      d5, d5, #2\t\n"
-                        "vshl.u8      d6, d6, #3\t\n"
-                        "vsri.u8      d4, d4, #5\t\n"
-                        "vsri.u8      d5, d5, #6\t\n"
-                        "vsri.u8      d6, d6, #5\t\n"
-
-                        "vmull.u8     q10, d31, d0\n\t"
-                        "vmull.u8     q11, d31, d1\n\t"
-                        "vmull.u8     q12, d31, d2\n\t"
-                        "vmull.u8     q13, d31, d3\n\t"
-                        "vrshr.u16    q8, q10, #8\n\t"
-                        "vrshr.u16    q9, q11, #8\n\t"
-                        "vraddhn.u16  d20, q10, q8\n\t"
-                        "vraddhn.u16  d21, q11, q9\n\t"
-                        "vrshr.u16    q9, q13, #8\n\t"
-                        "vrshr.u16    q8, q12, #8\n\t"
-                        "vraddhn.u16  d23, q13, q9\n\t"
-                        "vraddhn.u16  d22, q12, q8\n\t"
+static void
+arm_neon_composite (pixman_implementation_t *imp,
+                    pixman_op_t              op,
+                    pixman_image_t *         src,
+                    pixman_image_t *         mask,
+                    pixman_image_t *         dest,
+                    int32_t                  src_x,
+                    int32_t                  src_y,
+                    int32_t                  mask_x,
+                    int32_t                  mask_y,
+                    int32_t                  dest_x,
+                    int32_t                  dest_y,
+                    int32_t                  width,
+                    int32_t                  height)
+{
+    if (_pixman_run_fast_path (arm_neon_fast_paths, imp,
+                               op, src, mask, dest,
+                               src_x, src_y,
+                               mask_x, mask_y,
+                               dest_x, dest_y,
+                               width, height))
+    {
+	return;
+    }
 
-// duplicate in 4/2/1 & 8pix vsns
-                        "vmvn.8       d30, d23\n\t"
-                        "vmull.u8     q14, d30, d6\n\t"
-                        "vmull.u8     q13, d30, d5\n\t"
-                        "vmull.u8     q12, d30, d4\n\t"
-                        "vrshr.u16    q8, q14, #8\n\t"
-                        "vrshr.u16    q9, q13, #8\n\t"
-                        "vraddhn.u16  d6, q14, q8\n\t"
-                        "vrshr.u16    q8, q12, #8\n\t"
-                        "vraddhn.u16  d5, q13, q9\n\t"
-                        "vqadd.u8     d6, d6, d22\n\t"  // moved up
-                        "vraddhn.u16  d4, q12, q8\n\t"
-// intentionally don't calculate alpha
-// result in d4-d6
-
-//                      "vqadd.u8     d6, d6, d22\n\t"  ** moved up
-                        "vqadd.u8     d5, d5, d21\n\t"
-                        "vqadd.u8     d4, d4, d20\n\t"
-
-// pack 8888 {d20-d23} to 0565 q10
-                        "vshll.u8     q10, d6, #8\n\t"
-                        "vshll.u8     q3, d5, #8\n\t"
-                        "vshll.u8     q2, d4, #8\n\t"
-                        "vsri.u16     q10, q3, #5\t\n"
-                        "vsri.u16     q10, q2, #11\t\n"
-
-                        "tst  %[w], #1\n\t"
-                        "beq skip_store1\t\n"
-                        "vst1.16      {d20[1]}, [%[dst]]\t\n"
-                        "skip_store1:\t\n"
-                        "tst  %[w], #2\n\t"
-                        "beq  skip_store2\t\n"
-                        "vst1.32      {d20[1]}, [%[dst2]]\t\n"
-                        "skip_store2:\t\n"
-                        "tst  %[w], #4\n\t"
-                        "beq skip_store4\t\n"
-                        "vst1.16      {d21}, [%[dst4]]\t\n"
-                        "skip_store4:\t\n"
-
-                        : [w] "+r" (w), [dst] "+r" (dst), [mask] "+r" (mask), [dst4] "+r" (dst4), [dst2] "+r" (dst2)
-                        : [src] "r" (src)
-                        : "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7",
-                          "d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27","d28","d29",
-                          "d30","d31"
-                        );
-#endif
-        }
-    }
+    _pixman_implementation_composite (imp->delegate, op,
+                                      src, mask, dest,
+                                      src_x, src_y,
+                                      mask_x, mask_y,
+                                      dest_x, dest_y,
+                                      width, height);
 }
 
-
-void
-fbCompositeSolidMask_nx8x8888neon (pixman_op_t      op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
-			       int16_t      xSrc,
-			       int16_t      ySrc,
-			       int16_t      xMask,
-			       int16_t      yMask,
-			       int16_t      xDst,
-			       int16_t      yDst,
-			       uint16_t     width,
-			       uint16_t     height)
+static pixman_bool_t
+arm_neon_blt (pixman_implementation_t *imp,
+              uint32_t *               src_bits,
+              uint32_t *               dst_bits,
+              int                      src_stride,
+              int                      dst_stride,
+              int                      src_bpp,
+              int                      dst_bpp,
+              int                      src_x,
+              int                      src_y,
+              int                      dst_x,
+              int                      dst_y,
+              int                      width,
+              int                      height)
 {
-    uint32_t	 src, srca;
-    uint32_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask;
-    int		 dstStride, maskStride;
-    uint32_t	 w;
-    uint8x8_t    sval2;
-    uint8x8x4_t  sval8;
-    uint8x8_t    mask_selector=vreinterpret_u8_u64(vcreate_u64(0x0101010100000000ULL));
-    uint8x8_t    alpha_selector=vreinterpret_u8_u64(vcreate_u64(0x0707070703030303ULL));
-
-    fbComposeGetSolid(pSrc, src, pDst->bits.format);
-
-    srca = src >> 24;
-    if (src == 0)
-	return;
-
-    sval2=vreinterpret_u8_u32(vdup_n_u32(src));
-    sval8.val[0]=vdup_lane_u8(sval2,0);
-    sval8.val[1]=vdup_lane_u8(sval2,1);
-    sval8.val[2]=vdup_lane_u8(sval2,2);
-    sval8.val[3]=vdup_lane_u8(sval2,3);
-
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
-
-    if (width>=8)
-    {
-        // Use overlapping 8-pixel method, modified to avoid rewritten dest being reused
-        while (height--)
-        {
-            uint32_t *keep_dst;
-
-            dst = dstLine;
-            dstLine += dstStride;
-            mask = maskLine;
-            maskLine += maskStride;
-            w = width;
-
-#ifndef USE_NEON_INLINE_ASM
-            uint8x8_t alpha;
-            uint8x8x4_t dval, temp;
-
-            alpha = vld1_u8((void*)mask);
-            dval = vld4_u8((void*)dst);
-            keep_dst = dst;
-
-            temp = neon8mul(sval8,alpha);
-            dval = neon8mul(dval,vmvn_u8(temp.val[3]));
-            temp = neon8qadd(temp,dval);
-
-            mask += (w & 7);
-            dst += (w & 7);
-            w -= (w & 7);
-
-            while (w)
-            {
-                alpha = vld1_u8((void*)mask);
-                dval = vld4_u8((void*)dst);
-
-                vst4_u8((void*)keep_dst,temp);
-                keep_dst = dst;
-
-                temp = neon8mul(sval8,alpha);
-                dval = neon8mul(dval,vmvn_u8(temp.val[3]));
-                temp = neon8qadd(temp,dval);
-
-                mask+=8;
-                dst+=8;
-                w-=8;
-            }
-            vst4_u8((void*)keep_dst,temp);
-#else
-        asm volatile (
-                        "vdup.32      d0, %[src]\n\t"
-                        "vdup.8       d1, d0[1]\n\t"
-                        "vdup.8       d2, d0[2]\n\t"
-                        "vdup.8       d3, d0[3]\n\t"
-                        "vdup.8       d0, d0[0]\n\t"
-
-                        "vld4.8       {d4-d7}, [%[dst]]\n\t"
-                        "vld1.8       {d31}, [%[mask]]\n\t"
-                        "mov  %[keep_dst], %[dst]\n\t"
+    if (!pixman_blt_neon (
+            src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
+            src_x, src_y, dst_x, dst_y, width, height))
 
-                        "and  ip, %[w], #7\n\t"
-                        "add  %[mask], %[mask], ip\n\t"
-                        "add  %[dst], %[dst], ip, LSL#2\n\t"
-                        "subs  %[w], %[w], ip\n\t"
-                        "b 9f\n\t"
-// LOOP
-                        "2:\n\t" 
-                        "vld4.8       {d4-d7}, [%[dst]]!\n\t"
-                        "vld1.8       {d31}, [%[mask]]!\n\t"
-                        "vst4.8       {d20-d23}, [%[keep_dst]]\n\t"
-                        "sub  %[keep_dst], %[dst], #8*4\n\t"
-                        "subs  %[w], %[w], #8\n\t"
-                        "9:\n\t"
-
-                        "vmull.u8     q10, d31, d0\n\t"
-                        "vmull.u8     q11, d31, d1\n\t"
-                        "vmull.u8     q12, d31, d2\n\t"
-                        "vmull.u8     q13, d31, d3\n\t"
-                        "vrshr.u16    q8, q10, #8\n\t"
-                        "vrshr.u16    q9, q11, #8\n\t"
-                        "vraddhn.u16  d20, q10, q8\n\t"
-                        "vraddhn.u16  d21, q11, q9\n\t"
-                        "vrshr.u16    q9, q13, #8\n\t"
-                        "vrshr.u16    q8, q12, #8\n\t"
-                        "vraddhn.u16  d23, q13, q9\n\t"
-                        "vraddhn.u16  d22, q12, q8\n\t"
-
-                        "vmvn.8       d30, d23\n\t"
-                        "vmull.u8     q12, d30, d4\n\t"
-                        "vmull.u8     q13, d30, d5\n\t"
-                        "vmull.u8     q14, d30, d6\n\t"
-                        "vmull.u8     q15, d30, d7\n\t"
-
-                        "vrshr.u16    q8, q12, #8\n\t"
-                        "vrshr.u16    q9, q13, #8\n\t"
-                        "vraddhn.u16  d4, q12, q8\n\t"
-                        "vrshr.u16    q8, q14, #8\n\t"
-                        "vraddhn.u16  d5, q13, q9\n\t"
-                        "vrshr.u16    q9, q15, #8\n\t"
-                        "vraddhn.u16  d6, q14, q8\n\t"
-                        "vraddhn.u16  d7, q15, q9\n\t"
-// result in d4-d7
-
-                        "vqadd.u8     d20, d4, d20\n\t"
-                        "vqadd.u8     d21, d5, d21\n\t"
-                        "vqadd.u8     d22, d6, d22\n\t"
-                        "vqadd.u8     d23, d7, d23\n\t"
-
-                        "bne 2b\n\t"
+    {
+	return _pixman_implementation_blt (
+	    imp->delegate,
+	    src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
+	    src_x, src_y, dst_x, dst_y, width, height);
+    }
 
-                        "1:\n\t"
-                        "vst4.8       {d20-d23}, [%[keep_dst]]\n\t"
-
-                        : [w] "+r" (w), [dst] "+r" (dst), [mask] "+r" (mask), [keep_dst] "+r" (keep_dst)
-                        : [src] "r" (src) 
-                        : "ip", "cc", "memory", "d0","d1","d2","d3","d4","d5","d6","d7",
-                          "d16","d17","d18","d19","d20","d21","d22","d23","d24","d25","d26","d27","d28","d29",
-                          "d30","d31"
-                        );
-#endif
-        }
-    }
-    else
-    {
-        while (height--)
-        {
-            uint8x8_t alpha;
-
-            dst = dstLine;
-            dstLine += dstStride;
-            mask = maskLine;
-            maskLine += maskStride;
-            w = width;
-
-            while (w>=2)
-            {
-                uint8x8_t dval, temp, res;
-
-                alpha = vtbl1_u8(vreinterpret_u8_u16(vld1_dup_u16((void*)mask)), mask_selector);
-                dval = vld1_u8((void*)dst);
-
-                temp = neon2mul(sval2,alpha);
-                res = vqadd_u8(temp,neon2mul(dval,vtbl1_u8(vmvn_u8(temp), alpha_selector)));
-
-                vst1_u8((void*)dst,res);
-
-                mask+=2;
-                dst+=2;
-                w-=2;
-            }
-            if (w)
-            {
-                uint8x8_t dval, temp, res;
-
-                alpha = vtbl1_u8(vld1_dup_u8((void*)mask), mask_selector);
-                dval = vreinterpret_u8_u32(vld1_dup_u32((void*)dst));
-
-                temp = neon2mul(sval2,alpha);
-                res = vqadd_u8(temp,neon2mul(dval,vtbl1_u8(vmvn_u8(temp), alpha_selector)));
-
-                vst1_lane_u32((void*)dst,vreinterpret_u32_u8(res),0);
-            }
-        }
-    }
+    return TRUE;
 }
 
-
-void
-fbCompositeSrcAdd_8888x8x8neon (pixman_op_t op,
-                            pixman_image_t * pSrc,
-                            pixman_image_t * pMask,
-                            pixman_image_t * pDst,
-                            int16_t      xSrc,
-                            int16_t      ySrc,
-                            int16_t      xMask,
-                            int16_t      yMask,
-                            int16_t      xDst,
-                            int16_t      yDst,
-                            uint16_t     width,
-                            uint16_t     height)
+static pixman_bool_t
+arm_neon_fill (pixman_implementation_t *imp,
+               uint32_t *               bits,
+               int                      stride,
+               int                      bpp,
+               int                      x,
+               int                      y,
+               int                      width,
+               int                      height,
+               uint32_t xor)
 {
-    uint8_t     *dstLine, *dst;
-    uint8_t     *maskLine, *mask;
-    int dstStride, maskStride;
-    uint32_t    w;
-    uint32_t    src;
-    uint8x8_t   sa;
-
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
-    fbComposeGetSolid (pSrc, src, pDst->bits.format);
-    sa = vdup_n_u8((src) >> 24);
-
-    if (width>=8)
-    {
-        // Use overlapping 8-pixel method, modified to avoid rewritten dest being reused
-        while (height--)
-        {
-            dst = dstLine;
-            dstLine += dstStride;
-            mask = maskLine;
-            maskLine += maskStride;
-            w = width;
-
-            uint8x8_t mval, dval, res;
-            uint8_t     *keep_dst;
-
-            mval = vld1_u8((void *)mask);
-            dval = vld1_u8((void *)dst);
-            keep_dst = dst;
-
-            res = vqadd_u8(neon2mul(mval,sa),dval);
-
-            mask += (w & 7);
-            dst += (w & 7);
-            w -= w & 7;
+    if (pixman_fill_neon (bits, stride, bpp, x, y, width, height, xor))
+	return TRUE;
 
-            while (w)
-            {
-                mval = vld1_u8((void *)mask);
-                dval = vld1_u8((void *)dst);
-                vst1_u8((void *)keep_dst, res);
-                keep_dst = dst;
-
-                res = vqadd_u8(neon2mul(mval,sa),dval);
-
-                mask += 8;
-                dst += 8;
-                w -= 8;
-            }
-            vst1_u8((void *)keep_dst, res);
-        }
-    }
-    else
-    {
-        // Use 4/2/1 load/store method to handle 1-7 pixels
-        while (height--)
-        {
-            dst = dstLine;
-            dstLine += dstStride;
-            mask = maskLine;
-            maskLine += maskStride;
-            w = width;
-
-            uint8x8_t mval, dval, res;
-            uint8_t *dst4, *dst2;
-
-            if (w&4)
-            {
-                mval = vreinterpret_u8_u32(vld1_lane_u32((void *)mask, vreinterpret_u32_u8(mval), 1));
-                dval = vreinterpret_u8_u32(vld1_lane_u32((void *)dst, vreinterpret_u32_u8(dval), 1));
-
-                dst4 = dst;
-                mask += 4;
-                dst += 4;
-            }
-            if (w&2)
-            {
-                mval = vreinterpret_u8_u16(vld1_lane_u16((void *)mask, vreinterpret_u16_u8(mval), 1));
-                dval = vreinterpret_u8_u16(vld1_lane_u16((void *)dst, vreinterpret_u16_u8(dval), 1));
-                dst2 = dst;
-                mask += 2;
-                dst += 2;
-            }
-            if (w&1)
-            {
-                mval = vld1_lane_u8((void *)mask, mval, 1);
-                dval = vld1_lane_u8((void *)dst, dval, 1);
-            }
-
-            res = vqadd_u8(neon2mul(mval,sa),dval);
-
-            if (w&1)
-                vst1_lane_u8((void *)dst, res, 1);
-            if (w&2)
-                vst1_lane_u16((void *)dst2, vreinterpret_u16_u8(res), 1);
-            if (w&4)
-                vst1_lane_u32((void *)dst4, vreinterpret_u32_u8(res), 1);
-        }
-    }
+    return _pixman_implementation_fill (
+	imp->delegate, bits, stride, bpp, x, y, width, height, xor);
 }
 
+#define BIND_COMBINE_U(name)                                             \
+void                                                                     \
+pixman_composite_scanline_##name##_mask_asm_neon (int32_t         w,     \
+                                                  const uint32_t *dst,   \
+                                                  const uint32_t *src,   \
+                                                  const uint32_t *mask); \
+                                                                         \
+void                                                                     \
+pixman_composite_scanline_##name##_asm_neon (int32_t         w,          \
+                                             const uint32_t *dst,        \
+                                             const uint32_t *src);       \
+                                                                         \
+static void                                                              \
+neon_combine_##name##_u (pixman_implementation_t *imp,                   \
+                         pixman_op_t              op,                    \
+                         uint32_t *               dest,                  \
+                         const uint32_t *         src,                   \
+                         const uint32_t *         mask,                  \
+                         int                      width)                 \
+{                                                                        \
+    if (mask)                                                            \
+	pixman_composite_scanline_##name##_mask_asm_neon (width, dest,   \
+	                                                  src, mask);    \
+    else                                                                 \
+	pixman_composite_scanline_##name##_asm_neon (width, dest, src);  \
+}
+
+BIND_COMBINE_U (over)
+BIND_COMBINE_U (add)
+
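For readability, this is roughly what BIND_COMBINE_U (over) expands to: prototypes for the two assembly scanline routines plus a small C wrapper that picks the masked or unmasked variant. It is shown purely as an illustration of the macro above and is not additional code in the patch.

void
pixman_composite_scanline_over_mask_asm_neon (int32_t         w,
                                              const uint32_t *dst,
                                              const uint32_t *src,
                                              const uint32_t *mask);

void
pixman_composite_scanline_over_asm_neon (int32_t         w,
                                         const uint32_t *dst,
                                         const uint32_t *src);

static void
neon_combine_over_u (pixman_implementation_t *imp,
                     pixman_op_t              op,
                     uint32_t *               dest,
                     const uint32_t *         src,
                     const uint32_t *         mask,
                     int                      width)
{
    /* A combiner with a mask dispatches to the masked scanline routine,
     * otherwise to the plain one. */
    if (mask)
	pixman_composite_scanline_over_mask_asm_neon (width, dest, src, mask);
    else
	pixman_composite_scanline_over_asm_neon (width, dest, src);
}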
+pixman_implementation_t *
+_pixman_implementation_create_arm_neon (void)
+{
+    pixman_implementation_t *general = _pixman_implementation_create_fast_path ();
+    pixman_implementation_t *imp = _pixman_implementation_create (general);
+
+    imp->combine_32[PIXMAN_OP_OVER] = neon_combine_over_u;
+    imp->combine_32[PIXMAN_OP_ADD] = neon_combine_add_u;
+
+    imp->composite = arm_neon_composite;
+    imp->blt = arm_neon_blt;
+    imp->fill = arm_neon_fill;
+
+    return imp;
+}
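The constructor above shows the layering used throughout this update: the NEON implementation is created on top of the fast-path implementation, and each entry point (composite, blt, fill) either handles a request itself or forwards it to imp->delegate. A minimal, self-contained sketch of that delegate pattern follows; the type and function names are simplified stand-ins for illustration, not pixman's actual definitions.

#include <stdbool.h>
#include <stddef.h>

/* Simplified model of a pixman-style implementation chain. */
typedef struct layer layer_t;

struct layer
{
    layer_t *delegate;                        /* next implementation to try */
    bool   (*fill) (layer_t *self, unsigned bpp);
};

/* Forward a request to the next layer in the chain, if any. */
static bool
delegate_fill (layer_t *self, unsigned bpp)
{
    return self->delegate && self->delegate->fill (self->delegate, bpp);
}

/* A specialised layer handles only the cases it is good at and defers the
 * rest, in the same way arm_neon_fill defers to _pixman_implementation_fill. */
static bool
specialised_fill (layer_t *self, unsigned bpp)
{
    if (bpp == 16 || bpp == 32)
	return true;                          /* fast path taken */

    return delegate_fill (self, bpp);         /* fall back to the delegate */
}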
new file mode 100644
--- /dev/null
+++ b/gfx/cairo/libpixman/src/pixman-arm-simd-asm.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright © 2008 Mozilla Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Mozilla Corporation not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Mozilla Corporation makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
+ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ *
+ * Author:  Jeff Muizelaar (jeff@infidigm.net)
+ *
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include "pixman-private.h"
+
+static void
+arm_composite_add_8000_8000 (pixman_implementation_t * impl,
+			     pixman_op_t               op,
+			     pixman_image_t *          src_image,
+			     pixman_image_t *          mask_image,
+			     pixman_image_t *          dst_image,
+			     int32_t                   src_x,
+			     int32_t                   src_y,
+			     int32_t                   mask_x,
+			     int32_t                   mask_y,
+			     int32_t                   dest_x,
+			     int32_t                   dest_y,
+			     int32_t                   width,
+			     int32_t                   height)
+{
+    uint8_t     *dst_line, *dst;
+    uint8_t     *src_line, *src;
+    int dst_stride, src_stride;
+    uint16_t w;
+    uint8_t s, d;
+
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1);
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1);
+
+    while (height--)
+    {
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
+	w = width;
+
+	/* Ensure both src and dst are word aligned before doing 32 bit reads;
+	 * if their alignments differ we stay in this byte-at-a-time loop for
+	 * the whole row.
+	 */
+	while (w && (((unsigned long)dst & 3) || ((unsigned long)src & 3)))
+	{
+	    s = *src;
+	    d = *dst;
+	    asm ("uqadd8 %0, %1, %2" : "+r" (d) : "r" (s));
+	    *dst = d;
+
+	    dst++;
+	    src++;
+	    w--;
+	}
+
+	while (w >= 4)
+	{
+	    asm ("uqadd8 %0, %1, %2"
+		 : "=r" (*(uint32_t*)dst)
+		 : "r" (*(uint32_t*)src), "r" (*(uint32_t*)dst));
+	    dst += 4;
+	    src += 4;
+	    w -= 4;
+	}
+
+	while (w)
+	{
+	    s = *src;
+	    d = *dst;
+	    asm ("uqadd8 %0, %1, %2" : "+r" (d) : "r" (s));
+	    *dst = d;
+
+	    dst++;
+	    src++;
+	    w--;
+	}
+    }
+
+}
+
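The word-at-a-time loop above works because uqadd8 adds the four bytes of two 32-bit words independently, saturating each sum at 0xff, so one instruction performs four byte-wise saturating adds once src and dst are aligned. A standalone C model of the instruction, for reference only (pixman uses the real instruction via inline assembly):

#include <stdint.h>

/* Scalar model of ARMv6 "uqadd8 d, a, b": per-byte unsigned saturating add. */
static uint32_t
uqadd8_model (uint32_t a, uint32_t b)
{
    uint32_t result = 0;
    int i;

    for (i = 0; i < 4; i++)
    {
	uint32_t sum = ((a >> (i * 8)) & 0xff) + ((b >> (i * 8)) & 0xff);

	if (sum > 0xff)
	    sum = 0xff;                       /* saturate instead of wrapping */

	result |= sum << (i * 8);
    }

    return result;
}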
+static void
+arm_composite_over_8888_8888 (pixman_implementation_t * impl,
+			      pixman_op_t               op,
+			      pixman_image_t *          src_image,
+			      pixman_image_t *          mask_image,
+			      pixman_image_t *          dst_image,
+			      int32_t                   src_x,
+			      int32_t                   src_y,
+			      int32_t                   mask_x,
+			      int32_t                   mask_y,
+			      int32_t                   dest_x,
+			      int32_t                   dest_y,
+			      int32_t                   width,
+			      int32_t                   height)
+{
+    uint32_t    *dst_line, *dst;
+    uint32_t    *src_line, *src;
+    int dst_stride, src_stride;
+    uint16_t w;
+    uint32_t component_half = 0x800080;
+    uint32_t upper_component_mask = 0xff00ff00;
+    uint32_t alpha_mask = 0xff;
+
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+    while (height--)
+    {
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
+	w = width;
+
+/* #define inner_branch */
+	asm volatile (
+	    "cmp %[w], #0\n\t"
+	    "beq 2f\n\t"
+	    "1:\n\t"
+	    /* load src */
+	    "ldr r5, [%[src]], #4\n\t"
+#ifdef inner_branch
+	    /* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
+	     * The 0x0 case also allows us to avoid doing an unnecessary data
+	     * write, which is more valuable, so we only check for that case.
+	     */
+	    "cmp r5, #0\n\t"
+	    "beq 3f\n\t"
+
+	    /* = 255 - alpha */
+	    "sub r8, %[alpha_mask], r5, lsr #24\n\t"
+
+	    "ldr r4, [%[dest]] \n\t"
+
+#else
+	    "ldr r4, [%[dest]] \n\t"
+
+	    /* = 255 - alpha */
+	    "sub r8, %[alpha_mask], r5, lsr #24\n\t"
+#endif
+	    "uxtb16 r6, r4\n\t"
+	    "uxtb16 r7, r4, ror #8\n\t"
+
+	    /* multiply by (255 - alpha) in r8, then by 257 and divide by 65536 */
+	    "mla r6, r6, r8, %[component_half]\n\t"
+	    "mla r7, r7, r8, %[component_half]\n\t"
+
+	    "uxtab16 r6, r6, r6, ror #8\n\t"
+	    "uxtab16 r7, r7, r7, ror #8\n\t"
+
+	    /* recombine the 0xff00ff00 bytes of r6 and r7 */
+	    "and r7, r7, %[upper_component_mask]\n\t"
+	    "uxtab16 r6, r7, r6, ror #8\n\t"
+
+	    "uqadd8 r5, r6, r5\n\t"
+
+#ifdef inner_branch
+	    "3:\n\t"
+
+#endif
+	    "str r5, [%[dest]], #4\n\t"
+	    /* decrement counter and branch back to the top */
+	    "subs	%[w], %[w], #1\n\t"
+	    "bne	1b\n\t"
+	    "2:\n\t"
+	    : [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src)
+	    : [component_half] "r" (component_half), [upper_component_mask] "r" (upper_component_mask),
+	      [alpha_mask] "r" (alpha_mask)
+	    : "r4", "r5", "r6", "r7", "r8", "cc", "memory"
+	    );
+    }
+}
+
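The recurring "multiply by 257 and divide by 65536" comments refer to the usual integer approximation of dividing a byte product by 255: compute t = x * a + 0x80, then (t + (t >> 8)) >> 8, which for 8-bit x and a equals x * a / 255 rounded to nearest. In the assembly, the mla adds the 0x80 bias and the following uxtab16/uxtb16 pair performs the add-high-byte-and-shift step per component. A small self-contained check of the identity, illustrative only and not part of the patch:

#include <assert.h>
#include <stdint.h>

/* t = x*a + 0x80; (t + (t >> 8)) >> 8 is the rounded x*a/255 the asm computes. */
static uint8_t
div255 (uint32_t x, uint32_t a)
{
    uint32_t t = x * a + 0x80;

    return (uint8_t) ((t + (t >> 8)) >> 8);
}

int
main (void)
{
    uint32_t x, a;

    /* Exhaustively verify the approximation against rounded division by 255. */
    for (x = 0; x < 256; x++)
	for (a = 0; a < 256; a++)
	    assert (div255 (x, a) == (x * a + 127) / 255);

    return 0;
}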
+static void
+arm_composite_over_8888_n_8888 (pixman_implementation_t * impl,
+				pixman_op_t               op,
+				pixman_image_t *          src_image,
+				pixman_image_t *          mask_image,
+				pixman_image_t *          dst_image,
+				int32_t                   src_x,
+				int32_t                   src_y,
+				int32_t                   mask_x,
+				int32_t                   mask_y,
+				int32_t                   dest_x,
+				int32_t                   dest_y,
+				int32_t                   width,
+				int32_t                   height)
+{
+    uint32_t *dst_line, *dst;
+    uint32_t *src_line, *src;
+    uint32_t mask;
+    int dst_stride, src_stride;
+    uint16_t w;
+    uint32_t component_half = 0x800080;
+    uint32_t alpha_mask = 0xff;
+
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
+
+    mask = _pixman_image_get_solid (mask_image, PIXMAN_a8r8g8b8);
+    mask = (mask) >> 24;
+
+    while (height--)
+    {
+	dst = dst_line;
+	dst_line += dst_stride;
+	src = src_line;
+	src_line += src_stride;
+	w = width;
+
+/* #define inner_branch */
+	asm volatile (
+	    "cmp %[w], #0\n\t"
+	    "beq 2f\n\t"
+	    "1:\n\t"
+	    /* load src */
+	    "ldr r5, [%[src]], #4\n\t"
+#ifdef inner_branch
+	    /* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
+	     * The 0x0 case also allows us to avoid doing an unnecessary data
+	     * write, which is more valuable, so we only check for that case.
+	     */
+	    "cmp r5, #0\n\t"
+	    "beq 3f\n\t"
+
+#endif
+	    "ldr r4, [%[dest]] \n\t"
+
+	    "uxtb16 r6, r5\n\t"
+	    "uxtb16 r7, r5, ror #8\n\t"
+
+	    /* multiply by the mask alpha, then by 257 and divide by 65536 */
+	    "mla r6, r6, %[mask_alpha], %[component_half]\n\t"
+	    "mla r7, r7, %[mask_alpha], %[component_half]\n\t"
+
+	    "uxtab16 r6, r6, r6, ror #8\n\t"
+	    "uxtab16 r7, r7, r7, ror #8\n\t"
+
+	    "uxtb16 r6, r6, ror #8\n\t"
+	    "uxtb16 r7, r7, ror #8\n\t"
+
+	    /* recombine */
+	    "orr r5, r6, r7, lsl #8\n\t"
+
+	    "uxtb16 r6, r4\n\t"
+	    "uxtb16 r7, r4, ror #8\n\t"
+
+	    /* 255 - alpha */
+	    "sub r8, %[alpha_mask], r5, lsr #24\n\t"
+
+	    /* multiply by (255 - alpha) in r8, then by 257 and divide by 65536 */
+	    "mla r6, r6, r8, %[component_half]\n\t"
+	    "mla r7, r7, r8, %[component_half]\n\t"
+
+	    "uxtab16 r6, r6, r6, ror #8\n\t"
+	    "uxtab16 r7, r7, r7, ror #8\n\t"
+
+	    "uxtb16 r6, r6, ror #8\n\t"
+	    "uxtb16 r7, r7, ror #8\n\t"
+
+	    /* recombine */
+	    "orr r6, r6, r7, lsl #8\n\t"
+
+	    "uqadd8 r5, r6, r5\n\t"
+
+#ifdef inner_branch
+	    "3:\n\t"
+
+#endif
+	    "str r5, [%[dest]], #4\n\t"
+	    /* decrement counter and branch back to the top */
+	    "subs	%[w], %[w], #1\n\t"
+	    "bne	1b\n\t"
+	    "2:\n\t"
+	    : [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src)
+	    : [component_half] "r" (component_half), [mask_alpha] "r" (mask),
+	      [alpha_mask] "r" (alpha_mask)
+	    : "r4", "r5", "r6", "r7", "r8", "r9", "cc", "memory"
+	    );
+    }
+}
+
+static void
+arm_composite_over_n_8_8888 (pixman_implementation_t * impl,
+			     pixman_op_t               op,
+			     pixman_image_t *          src_image,
+			     pixman_image_t *          mask_image,
+			     pixman_image_t *          dst_image,
+			     int32_t                   src_x,
+			     int32_t                   src_y,
+			     int32_t                   mask_x,
+			     int32_t                   mask_y,
+			     int32_t                   dest_x,
+			     int32_t                   dest_y,
+			     int32_t                   width,
+			     int32_t                   height)
+{
+    uint32_t src, srca;
+    uint32_t *dst_line, *dst;
+    uint8_t  *mask_line, *mask;
+    int dst_stride, mask_stride;
+    uint16_t w;
+
+    src = _pixman_image_get_solid (src_image, dst_image->bits.format);
+
+    /* bail out if fully transparent */
+    srca = src >> 24;
+    if (src == 0)
+	return;
+
+    uint32_t component_mask = 0xff00ff;
+    uint32_t component_half = 0x800080;
+
+    uint32_t src_hi = (src >> 8) & component_mask;
+    uint32_t src_lo = src & component_mask;
+
+    PIXMAN_IMAGE_GET_LINE (dst_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1);
+    PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1);
+
+    while (height--)
+    {
+	dst = dst_line;
+	dst_line += dst_stride;
+	mask = mask_line;
+	mask_line += mask_stride;
+	w = width;
+
+/* #define inner_branch */
+	asm volatile (
+	    "cmp %[w], #0\n\t"
+	    "beq 2f\n\t"
+	    "1:\n\t"
+	    /* load mask */
+	    "ldrb r5, [%[mask]], #1\n\t"
+#ifdef inner_branch
+	    /* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
+	     * The 0x0 case also allows us to avoid doing an unnecessary data
+	     * write, which is more valuable, so we only check for that case.
+	     */
+	    "cmp r5, #0\n\t"
+	    "beq 3f\n\t"
+
+#endif
+	    "ldr r4, [%[dest]] \n\t"
+
+	    /* multiply by the mask alpha (r5), then by 257 and divide by 65536 */
+	    "mla r6, %[src_lo], r5, %[component_half]\n\t"
+	    "mla r7, %[src_hi], r5, %[component_half]\n\t"
+
+	    "uxtab16 r6, r6, r6, ror #8\n\t"
+	    "uxtab16 r7, r7, r7, ror #8\n\t"
+
+	    "uxtb16 r6, r6, ror #8\n\t"
+	    "uxtb16 r7, r7, ror #8\n\t"
+
+	    /* recombine */
+	    "orr r5, r6, r7, lsl #8\n\t"
+
+	    "uxtb16 r6, r4\n\t"
+	    "uxtb16 r7, r4, ror #8\n\t"
+
+	    /* we could simplify this to use 'sub' if we were
+	     * willing to give up a register for alpha_mask
+	     */
+	    "mvn r8, r5\n\t"
+	    "mov r8, r8, lsr #24\n\t"
+
+	    /* multiply by (255 - alpha) in r8, then by 257 and divide by 65536 */
+	    "mla r6, r6, r8, %[component_half]\n\t"
+	    "mla r7, r7, r8, %[component_half]\n\t"
+
+	    "uxtab16 r6, r6, r6, ror #8\n\t"
+	    "uxtab16 r7, r7, r7, ror #8\n\t"
+
+	    "uxtb16 r6, r6, ror #8\n\t"
+	    "uxtb16 r7, r7, ror #8\n\t"
+
+	    /* recombine */
+	    "orr r6, r6, r7, lsl #8\n\t"
+
+	    "uqadd8 r5, r6, r5\n\t"
+
+#ifdef inner_branch
+	    "3:\n\t"
+
+#endif
+	    "str r5, [%[dest]], #4\n\t"
+	    /* decrement counter and branch back to the top */
+	    "subs	%[w], %[w], #1\n\t"
+	    "bne	1b\n\t"
+	    "2:\n\t"
+	    : [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src), [mask] "+r" (mask)
+	    : [component_half] "r" (component_half),
+	      [src_hi] "r" (src_hi), [src_lo] "r" (src_lo)
+	    : "r4", "r5", "r6", "r7", "r8", "cc", "memory");
+    }
+}
+
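Stripped of the SIMD details, arm_composite_over_n_8_8888 computes, for every destination pixel, OVER of a premultiplied solid source attenuated by an 8-bit mask value. A plain C reference for that per-pixel step may make the assembly easier to follow; it is a sketch with simplified helpers, not pixman's own code, and it treats all four channels uniformly.

#include <stdint.h>

/* Rounded division by 255, the same approximation the assembly uses. */
static uint32_t
div255 (uint32_t v)
{
    v += 0x80;
    return (v + (v >> 8)) >> 8;
}

/* Per-pixel reference for "solid OVER dest through an a8 mask":
 * src is a premultiplied a8r8g8b8 solid, m the 8-bit mask coverage. */
static uint32_t
over_n_8_8888_pixel (uint32_t src, uint8_t m, uint32_t dst)
{
    uint32_t masked_src = 0, result = 0;
    uint32_t s, d, ia, sum;
    int shift;

    /* Attenuate the solid source by the mask coverage, component-wise. */
    for (shift = 0; shift < 32; shift += 8)
	masked_src |= div255 (((src >> shift) & 0xff) * m) << shift;

    ia = 255 - (masked_src >> 24);            /* inverse source alpha */

    /* Premultiplied OVER: result = src + dest * (1 - src.alpha). */
    for (shift = 0; shift < 32; shift += 8)
    {
	s = (masked_src >> shift) & 0xff;
	d = (dst >> shift) & 0xff;
	sum = s + div255 (d * ia);
	if (sum > 0xff)
	    sum = 0xff;                       /* the asm uses uqadd8 for this */
	result |= sum << shift;
    }

    return result;
}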
+static const pixman_fast_path_t arm_simd_fast_path_array[] =
+{
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, arm_composite_over_8888_8888    },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, arm_composite_over_8888_8888    },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, arm_composite_over_8888_8888    },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, arm_composite_over_8888_8888    },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid,    PIXMAN_a8r8g8b8, arm_composite_over_8888_n_8888  },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid,    PIXMAN_x8r8g8b8, arm_composite_over_8888_n_8888  },
+
+    { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       arm_composite_add_8000_8000     },
+
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, arm_composite_over_n_8_8888     },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, arm_composite_over_n_8_8888     },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, arm_composite_over_n_8_8888     },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, arm_composite_over_n_8_8888     },
+
+    { PIXMAN_OP_NONE },
+};
+
+const pixman_fast_path_t *const arm_simd_fast_paths = arm_simd_fast_path_array;
+
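Each fast-path entry above pairs an operator with specific source, mask and destination formats and the routine that handles exactly that combination; _pixman_run_fast_path walks such a table and reports failure when nothing matches, which is what lets the composite wrapper below fall back to its delegate. The sketch that follows shows only the shape of that lookup, under the assumption of op/src_format/mask_format/dest_format/func fields derived from the initialisers above; the real matching logic also deals with solid sources and wildcard formats, so this is an approximation, not pixman's implementation.

#include <stddef.h>

/* Hypothetical mirror of a fast-path entry; field names are assumptions
 * based on the table initialisers, not pixman's actual declaration. */
typedef struct
{
    int         op;             /* PIXMAN_OP_OVER, PIXMAN_OP_ADD, ...       */
    int         src_format;
    int         mask_format;
    int         dest_format;
    void      (*func) (void);   /* the specialised compositing routine      */
} fast_path_entry_t;

#define OP_NONE 0               /* sentinel, playing the role of PIXMAN_OP_NONE */

/* Linear scan: the first entry whose operator and formats all match wins. */
static const fast_path_entry_t *
lookup_fast_path (const fast_path_entry_t *table,
                  int op, int src, int mask, int dest)
{
    const fast_path_entry_t *e;

    for (e = table; e->op != OP_NONE; e++)
    {
	if (e->op == op &&
	    e->src_format == src &&
	    e->mask_format == mask &&
	    e->dest_format == dest)
	{
	    return e;
	}
    }

    return NULL;                /* caller falls back to the delegate */
}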
+static void
+arm_simd_composite (pixman_implementation_t *imp,
+                    pixman_op_t              op,
+                    pixman_image_t *         src,
+                    pixman_image_t *         mask,
+                    pixman_image_t *         dest,
+                    int32_t                  src_x,
+                    int32_t                  src_y,
+                    int32_t                  mask_x,
+                    int32_t                  mask_y,
+                    int32_t                  dest_x,
+                    int32_t                  dest_y,
+                    int32_t                  width,
+                    int32_t                  height)
+{
+    if (_pixman_run_fast_path (arm_simd_fast_paths, imp,
+                               op, src, mask, dest,
+                               src_x, src_y,
+                               mask_x, mask_y,
+                               dest_x, dest_y,
+                               width, height))
+    {
+	return;
+    }
+
+    _pixman_implementation_composite (imp->delegate, op,
+                                      src, mask, dest,
+                                      src_x, src_y,
+                                      mask_x, mask_y,
+                                      dest_x, dest_y,
+                                      width, height);
+}
+
+pixman_implementation_t *
+_pixman_implementation_create_arm_simd (void)
+{
+    pixman_implementation_t *general = _pixman_implementation_create_fast_path ();
+    pixman_implementation_t *imp = _pixman_implementation_create (general);
+
+    imp->composite = arm_simd_composite;
+
+    return imp;
+}
new file mode 100644
--- /dev/null
+++ b/gfx/cairo/libpixman/src/pixman-arm-simd-asm.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright © 2008 Mozilla Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Mozilla Corporation not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Mozilla Corporation makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
+ * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ *
+ * Author:  Jeff Muizelaar (jeff@infidigm.net)
+ *
+ */
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include "pixman-private.h"
+
+void
+arm_composite_add_8000_8000 (pixman_implementation_t * impl,
+			     pixman_op_t               op,
+			     pixman_image_t *          src_image,
+			     pixman_image_t *          mask_image,
+			     pixman_image_t *          dst_image,
+			     int32_t                   src_x,
+			     int32_t                   src_y,
+			     int32_t                   mask_x,
+			     int32_t                   mask_y,
+			     int32_t                   dest_x,
+			     int32_t                   dest_y,
+			     int32_t                   width,
+			     int32_t                   height);
+
+void
+arm_composite_over_8888_8888 (pixman_implementation_t * impl,
+			      pixman_op_t               op,
+			      pixman_image_t *          src_image,
+			      pixman_image_t *          mask_image,
+			      pixman_image_t *          dst_image,
+			      int32_t                   src_x,
+			      int32_t                   src_y,
+			      int32_t                   mask_x,
+			      int32_t                   mask_y,
+			      int32_t                   dest_x,
+			      int32_t                   dest_y,
+			      int32_t                   width,
+			      int32_t                   height);
+
+void
+arm_composite_over_8888_n_8888 (pixman_implementation_t * impl,
+				pixman_op_t               op,
+				pixman_image_t *          src_image,
+				pixman_image_t *          mask_image,
+				pixman_image_t *          dst_image,
+				int32_t                   src_x,
+				int32_t                   src_y,
+				int32_t                   mask_x,
+				int32_t                   mask_y,
+				int32_t                   dest_x,
+				int32_t                   dest_y,
+				int32_t                   width,
+				int32_t                   height);
+
+void
+arm_composite_over_n_8_8888 (pixman_implementation_t * impl,
+			     pixman_op_t               op,
+			     pixman_image_t *          src_image,
+			     pixman_image_t *          mask_image,
+			     pixman_image_t *          dst_image,
+			     int32_t                   src_x,
+			     int32_t                   src_y,
+			     int32_t                   mask_x,
+			     int32_t                   mask_y,
+			     int32_t                   dest_x,
+			     int32_t                   dest_y,
+			     int32_t                   width,
+			     int32_t                   height);
+void
+arm_composite_src_8888_0565 (pixman_implementation_t * impl,
+			     pixman_op_t               op,
+			     pixman_image_t *          src_image,
+			     pixman_image_t *          mask_image,
+			     pixman_image_t *          dst_image,
+			     int32_t                   src_x,
+			     int32_t                   src_y,
+			     int32_t                   mask_x,
+			     int32_t                   mask_y,
+			     int32_t                   dest_x,
+			     int32_t                   dest_y,
+			     int32_t                   width,
+			     int32_t                   height);
--- a/gfx/cairo/libpixman/src/pixman-arm-simd.c
+++ b/gfx/cairo/libpixman/src/pixman-arm-simd.c
@@ -1,11 +1,10 @@
 /*
  * Copyright © 2008 Mozilla Corporation
- * Copyright © 2008 Nokia Corporation
  *
  * Permission to use, copy, modify, distribute, and sell this software and its
  * documentation for any purpose is hereby granted without fee, provided that
  * the above copyright notice appear in all copies and that both that
  * copyright notice and this permission notice appear in supporting
  * documentation, and that the name of Mozilla Corporation not be used in
  * advertising or publicity pertaining to distribution of the software without
  * specific, written prior permission.  Mozilla Corporation makes no
@@ -17,670 +16,84 @@
  * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
  * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
  * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
  * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
  * SOFTWARE.
  *
  * Author:  Jeff Muizelaar (jeff@infidigm.net)
- * Author:  Siarhei Siamashka <siarhei.siamashka@nokia.com>
  *
  */
 #ifdef HAVE_CONFIG_H
 #include <config.h>
 #endif
 
-#include "pixman-arm-simd.h"
+#include "pixman-private.h"
+#include "pixman-arm-simd-asm.h"
 
-void
-fbCompositeSrcAdd_8000x8000arm (pixman_op_t op,
-				pixman_image_t * pSrc,
-				pixman_image_t * pMask,
-				pixman_image_t * pDst,
-				int16_t      xSrc,
-				int16_t      ySrc,
-				int16_t      xMask,
-				int16_t      yMask,
-				int16_t      xDst,
-				int16_t      yDst,
-				uint16_t     width,
-				uint16_t     height)
+static const pixman_fast_path_t arm_simd_fast_path_array[] =
 {
-    uint8_t	*dstLine, *dst;
-    uint8_t	*srcLine, *src;
-    int	dstStride, srcStride;
-    uint16_t	w;
-    uint8_t	s, d;
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_a8r8g8b8, arm_composite_over_8888_8888    },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_null,     PIXMAN_x8r8g8b8, arm_composite_over_8888_8888    },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_a8b8g8r8, arm_composite_over_8888_8888    },
+    { PIXMAN_OP_OVER, PIXMAN_a8b8g8r8, PIXMAN_null,     PIXMAN_x8b8g8r8, arm_composite_over_8888_8888    },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid,    PIXMAN_a8r8g8b8, arm_composite_over_8888_n_8888  },
+    { PIXMAN_OP_OVER, PIXMAN_a8r8g8b8, PIXMAN_solid,    PIXMAN_x8r8g8b8, arm_composite_over_8888_n_8888  },
+
+    { PIXMAN_OP_ADD, PIXMAN_a8,        PIXMAN_null,     PIXMAN_a8,       arm_composite_add_8000_8000     },
 
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint8_t, srcStride, srcLine, 1);
-    fbComposeGetStart (pDst, xDst, yDst, uint8_t, dstStride, dstLine, 1);
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8r8g8b8, arm_composite_over_n_8_8888     },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8r8g8b8, arm_composite_over_n_8_8888     },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_a8b8g8r8, arm_composite_over_n_8_8888     },
+    { PIXMAN_OP_OVER, PIXMAN_solid,    PIXMAN_a8,       PIXMAN_x8b8g8r8, arm_composite_over_n_8_8888     },
 
-    while (height--)
-    {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
-	w = width;
+    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_r5g6b5,   arm_composite_src_8888_0565    },
+    { PIXMAN_OP_SRC,  PIXMAN_x8r8g8b8, PIXMAN_null,     PIXMAN_b5g6r5,   arm_composite_src_8888_0565    },
+
+    { PIXMAN_OP_NONE },
+};
 
-        /* ensure both src and dst are properly aligned before doing 32 bit reads
-         * we'll stay in this loop if src and dst have differing alignments */
-	while (w && (((unsigned long)dst & 3) || ((unsigned long)src & 3)))
-	{
-	    s = *src;
-	    d = *dst;
-	    asm("uqadd8 %0, %1, %2" : "+r"(d) : "r"(s));
-	    *dst = d;
-
-	    dst++;
-	    src++;
-	    w--;
-	}
+const pixman_fast_path_t *const arm_simd_fast_paths = arm_simd_fast_path_array;
 
-	while (w >= 4)
-	{
-	    asm("uqadd8 %0, %1, %2" : "=r"(*(uint32_t*)dst) : "r"(*(uint32_t*)src), "r"(*(uint32_t*)dst));
-	    dst += 4;
-	    src += 4;
-	    w -= 4;
-	}
-
-	while (w)
-	{
-	    s = *src;
-	    d = *dst;
-	    asm("uqadd8 %0, %1, %2" : "+r"(d) : "r"(s));
-	    *dst = d;
-
-	    dst++;
-	    src++;
-	    w--;
-	}
+static void
+arm_simd_composite (pixman_implementation_t *imp,
+                    pixman_op_t              op,
+                    pixman_image_t *         src,
+                    pixman_image_t *         mask,
+                    pixman_image_t *         dest,
+                    int32_t                  src_x,
+                    int32_t                  src_y,
+                    int32_t                  mask_x,
+                    int32_t                  mask_y,
+                    int32_t                  dest_x,
+                    int32_t                  dest_y,
+                    int32_t                  width,
+                    int32_t                  height)
+{
+    if (_pixman_run_fast_path (arm_simd_fast_paths, imp,
+                               op, src, mask, dest,
+                               src_x, src_y,
+                               mask_x, mask_y,
+                               dest_x, dest_y,
+                               width, height))
+    {
+	return;
     }
 
-}
-
-void
-fbCompositeSrc_8888x8888arm (pixman_op_t op,
-			 pixman_image_t * pSrc,
-			 pixman_image_t * pMask,
-			 pixman_image_t * pDst,
-			 int16_t      xSrc,
-			 int16_t      ySrc,
-			 int16_t      xMask,
-			 int16_t      yMask,
-			 int16_t      xDst,
-			 int16_t      yDst,
-			 uint16_t     width,
-			 uint16_t     height)
-{
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
-    int	dstStride, srcStride;
-    uint16_t	w;
-    uint32_t component_half = 0x800080;
-    uint32_t upper_component_mask = 0xff00ff00;
-    uint32_t alpha_mask = 0xff;
-
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-
-    while (height--)
-    {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
-	w = width;
-
-//#define inner_branch
-	asm volatile (
-			"cmp %[w], #0\n\t"
-			"beq 2f\n\t"
-			"1:\n\t"
-			/* load src */
-			"ldr r5, [%[src]], #4\n\t"
-#ifdef inner_branch
-			/* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
-			 * The 0x0 case also allows us to avoid doing an unecessary data
-			 * write which is more valuable so we only check for that */
-			"cmp r5, #0\n\t"
-			"beq 3f\n\t"
-
-			/* = 255 - alpha */
-			"sub r8, %[alpha_mask], r5, lsr #24\n\t"
-
-			"ldr r4, [%[dest]] \n\t"
-
-#else
-			"ldr r4, [%[dest]] \n\t"
-
-			/* = 255 - alpha */
-			"sub r8, %[alpha_mask], r5, lsr #24\n\t"
-#endif
-			"uxtb16 r6, r4\n\t"
-			"uxtb16 r7, r4, ror #8\n\t"
-
-			/* multiply by 257 and divide by 65536 */
-			"mla r6, r6, r8, %[component_half]\n\t"
-			"mla r7, r7, r8, %[component_half]\n\t"
-
-			"uxtab16 r6, r6, r6, ror #8\n\t"
-			"uxtab16 r7, r7, r7, ror #8\n\t"
-
-			/* recombine the 0xff00ff00 bytes of r6 and r7 */
-			"and r7, r7, %[upper_component_mask]\n\t"
-			"uxtab16 r6, r7, r6, ror #8\n\t"
-
-			"uqadd8 r5, r6, r5\n\t"
-
-#ifdef inner_branch
-			"3:\n\t"
-
-#endif
-			"str r5, [%[dest]], #4\n\t"
-			/* increment counter and jmp to top */
-			"subs	%[w], %[w], #1\n\t"
-			"bne	1b\n\t"
-			"2:\n\t"
-			: [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src)
-			: [component_half] "r" (component_half), [upper_component_mask] "r" (upper_component_mask),
-			  [alpha_mask] "r" (alpha_mask)
-			: "r4", "r5", "r6", "r7", "r8", "cc", "memory"
-			);
-    }
-}
-
-void
-fbCompositeSrc_8888x8x8888arm (pixman_op_t op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
-			       int16_t	xSrc,
-			       int16_t	ySrc,
-			       int16_t      xMask,
-			       int16_t      yMask,
-			       int16_t      xDst,
-			       int16_t      yDst,
-			       uint16_t     width,
-			       uint16_t     height)
-{
-    uint32_t	*dstLine, *dst;
-    uint32_t	*srcLine, *src;
-    uint32_t	mask;
-    int	dstStride, srcStride;
-    uint16_t	w;
-    uint32_t component_half = 0x800080;
-    uint32_t alpha_mask = 0xff;
-
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pSrc, xSrc, ySrc, uint32_t, srcStride, srcLine, 1);
-
-    fbComposeGetSolid (pMask, mask, pDst->bits.format);
-    mask = (mask) >> 24;
-
-    while (height--)
-    {
-	dst = dstLine;
-	dstLine += dstStride;
-	src = srcLine;
-	srcLine += srcStride;
-	w = width;
-
-//#define inner_branch
-	asm volatile (
-			"cmp %[w], #0\n\t"
-			"beq 2f\n\t"
-			"1:\n\t"
-			/* load src */
-			"ldr r5, [%[src]], #4\n\t"
-#ifdef inner_branch
-			/* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
-			 * The 0x0 case also allows us to avoid doing an unecessary data
-			 * write which is more valuable so we only check for that */
-			"cmp r5, #0\n\t"
-			"beq 3f\n\t"
-
-#endif
-			"ldr r4, [%[dest]] \n\t"
-
-			"uxtb16 r6, r5\n\t"
-			"uxtb16 r7, r5, ror #8\n\t"
-
-			/* multiply by alpha (r8) then by 257 and divide by 65536 */
-			"mla r6, r6, %[mask_alpha], %[component_half]\n\t"
-			"mla r7, r7, %[mask_alpha], %[component_half]\n\t"
-
-			"uxtab16 r6, r6, r6, ror #8\n\t"
-			"uxtab16 r7, r7, r7, ror #8\n\t"
-
-			"uxtb16 r6, r6, ror #8\n\t"
-			"uxtb16 r7, r7, ror #8\n\t"
-
-			/* recombine */
-			"orr r5, r6, r7, lsl #8\n\t"
-
-			"uxtb16 r6, r4\n\t"
-			"uxtb16 r7, r4, ror #8\n\t"
-
-			/* 255 - alpha */
-			"sub r8, %[alpha_mask], r5, lsr #24\n\t"
-
-			/* multiply by alpha (r8) then by 257 and divide by 65536 */
-			"mla r6, r6, r8, %[component_half]\n\t"
-			"mla r7, r7, r8, %[component_half]\n\t"
-
-			"uxtab16 r6, r6, r6, ror #8\n\t"
-			"uxtab16 r7, r7, r7, ror #8\n\t"
-
-			"uxtb16 r6, r6, ror #8\n\t"
-			"uxtb16 r7, r7, ror #8\n\t"
-
-			/* recombine */
-			"orr r6, r6, r7, lsl #8\n\t"
-
-			"uqadd8 r5, r6, r5\n\t"
-
-#ifdef inner_branch
-			"3:\n\t"
-
-#endif
-			"str r5, [%[dest]], #4\n\t"
-			/* increment counter and jmp to top */
-			"subs	%[w], %[w], #1\n\t"
-			"bne	1b\n\t"
-			"2:\n\t"
-			: [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src)
-			: [component_half] "r" (component_half), [mask_alpha] "r" (mask),
-			  [alpha_mask] "r" (alpha_mask)
-			: "r4", "r5", "r6", "r7", "r8", "r9", "cc", "memory"
-			);
-    }
+    _pixman_implementation_composite (imp->delegate, op,
+                                      src, mask, dest,
+                                      src_x, src_y,
+                                      mask_x, mask_y,
+                                      dest_x, dest_y,
+                                      width, height);
 }
 
-void
-fbCompositeSolidMask_nx8x8888arm (pixman_op_t      op,
-			       pixman_image_t * pSrc,
-			       pixman_image_t * pMask,
-			       pixman_image_t * pDst,
-			       int16_t      xSrc,
-			       int16_t      ySrc,
-			       int16_t      xMask,
-			       int16_t      yMask,
-			       int16_t      xDst,
-			       int16_t      yDst,
-			       uint16_t     width,
-			       uint16_t     height)
-{
-    uint32_t	 src, srca;
-    uint32_t	*dstLine, *dst;
-    uint8_t	*maskLine, *mask;
-    int		 dstStride, maskStride;
-    uint16_t	 w;
-
-    fbComposeGetSolid(pSrc, src, pDst->bits.format);
-
-    srca = src >> 24;
-    if (src == 0)
-	return;
-
-    uint32_t component_mask = 0xff00ff;
-    uint32_t component_half = 0x800080;
-
-    uint32_t src_hi = (src >> 8) & component_mask;
-    uint32_t src_lo = src & component_mask;
-
-    fbComposeGetStart (pDst, xDst, yDst, uint32_t, dstStride, dstLine, 1);
-    fbComposeGetStart (pMask, xMask, yMask, uint8_t, maskStride, maskLine, 1);
-
-    while (height--)
-    {
-	dst = dstLine;
-	dstLine += dstStride;
-	mask = maskLine;
-	maskLine += maskStride;
-	w = width;
-
-//#define inner_branch
-	asm volatile (
-			"cmp %[w], #0\n\t"
-			"beq 2f\n\t"
-			"1:\n\t"
-			/* load mask */
-			"ldrb r5, [%[mask]], #1\n\t"
-#ifdef inner_branch
-			/* We can avoid doing the multiplication in two cases: 0x0 or 0xff.
-			 * The 0x0 case also allows us to avoid doing an unecessary data
-			 * write which is more valuable so we only check for that */
-			"cmp r5, #0\n\t"
-			"beq 3f\n\t"
-
-#endif
-			"ldr r4, [%[dest]] \n\t"
-
-			/* multiply by alpha (r8) then by 257 and divide by 65536 */
-			"mla r6, %[src_lo], r5, %[component_half]\n\t"
-			"mla r7, %[src_hi], r5, %[component_half]\n\t"
-
-			"uxtab16 r6, r6, r6, ror #8\n\t"
-			"uxtab16 r7, r7, r7, ror #8\n\t"
-
-			"uxtb16 r6, r6, ror #8\n\t"
-			"uxtb16 r7, r7, ror #8\n\t"
-
-			/* recombine */
-			"orr r5, r6, r7, lsl #8\n\t"
-
-			"uxtb16 r6, r4\n\t"
-			"uxtb16 r7, r4, ror #8\n\t"
-
-			/* we could simplify this to use 'sub' if we were
-			 * willing to give up a register for alpha_mask */
-			"mvn r8, r5\n\t"
-			"mov r8, r8, lsr #24\n\t"
-
-			/* multiply by alpha (r8) then by 257 and divide by 65536 */
-			"mla r6, r6, r8, %[component_half]\n\t"
-			"mla r7, r7, r8, %[component_half]\n\t"
-
-			"uxtab16 r6, r6, r6, ror #8\n\t"
-			"uxtab16 r7, r7, r7, ror #8\n\t"
-
-			"uxtb16 r6, r6, ror #8\n\t"
-			"uxtb16 r7, r7, ror #8\n\t"
-
-			/* recombine */
-			"orr r6, r6, r7, lsl #8\n\t"
-
-			"uqadd8 r5, r6, r5\n\t"
-
-#ifdef inner_branch
-			"3:\n\t"
-
-#endif
-			"str r5, [%[dest]], #4\n\t"
-			/* increment counter and jmp to top */
-			"subs	%[w], %[w], #1\n\t"
-			"bne	1b\n\t"
-			"2:\n\t"
-			: [w] "+r" (w), [dest] "+r" (dst), [src] "+r" (src), [mask] "+r" (mask)
-			: [component_half] "r" (component_half),
-			  [src_hi] "r" (src_hi), [src_lo] "r" (src_lo)
-			: "r4", "r5", "r6", "r7", "r8", "cc", "memory"
-			);
-    }
-}
-
-/**
- * Conversion x8r8g8b8 -> r5g6b5
- *
- * TODO: optimize more, eliminate stalls, try to use burst writes (4 words aligned 
- * at 16 byte boundary)
- */
-static inline void fbComposite_x8r8g8b8_src_r5g6b5_internal_mixed_armv6_c(
-    uint16_t *dst, uint32_t *src, int w, int dst_stride,
-    int src_stride, int h)
-{
-    uint32_t a, x, y, c1F001F = 0x1F001F;
-    int backup_w = w;
-    while (h--)
-    {
-        w = backup_w;
-        if (w > 0 && (uintptr_t)dst & 2)
-        {
-            x = *src++;
-
-            a = (x >> 3) & c1F001F;
-            x &= 0xFC00;
-            a |= a >> 5;
-            a |= x >> 5;
-
-            *dst++ = a;
-            w--;
-        }
-
-        asm volatile(
-            "subs  %[w], %[w], #2\n"
-            "blt   2f\n"
-        "1:\n"
-            "ldr   %[x], [%[src]], #4\n"
-            "ldr   %[y], [%[src]], #4\n"
-            "subs  %[w], %[w], #2\n"
-            
-            "and   %[a], %[c1F001F], %[x], lsr #3\n"
-            "and   %[x], %[x], #0xFC00\n\n"
-            "orr   %[a], %[a], %[a], lsr #5\n"
-            "orr   %[x], %[a], %[x], lsr #5\n"
-
-            "and   %[a], %[c1F001F], %[y], lsr #3\n"
-            "and   %[y], %[y], #0xFC00\n\n"
-            "orr   %[a], %[a], %[a], lsr #5\n"
-            "orr   %[y], %[a], %[y], lsr #5\n"
-
-            "pkhbt %[x], %[x], %[y], lsl #16\n"
-            "str   %[x], [%[dst]], #4\n"
-            "bge   1b\n"
-        "2:\n"
-        : [c1F001F] "+&r" (c1F001F), [src] "+&r" (src), [dst] "+&r" (dst), [a] "=&r" (a), 
-          [x] "=&r" (x), [y] "=&r" (y), [w] "+&r" (w)
-        );
-
-        if (w & 1)
-        {
-            x = *src++;
-
-            a = (x >> 3) & c1F001F;
-            x = x & 0xFC00;
-            a |= a >> 5;
-            a |= x >> 5;
-
-            *dst++ = a;
-        }
-
-        src += src_stride - backup_w;
-        dst += dst_stride - backup_w;
-    }
-}
-
-/**
- * Conversion x8r8g8b8 -> r5g6b5
- *
- * Note: 'w' must be >= 7
- */
-static void __attribute__((naked)) fbComposite_x8r8g8b8_src_r5g6b5_internal_armv6(
-    uint16_t *dst, uint32_t *src, int w, int dst_stride,
-    int src_stride, int h)
+pixman_implementation_t *
+_pixman_implementation_create_arm_simd (void)
 {
-    asm volatile(
-        /* define supplementary macros */
-        ".macro cvt8888to565 PIX\n"
-            "and   A, C1F001F, \\PIX, lsr #3\n"
-            "and   \\PIX, \\PIX, #0xFC00\n\n"
-            "orr   A, A, A, lsr #5\n"
-            "orr   \\PIX, A, \\PIX, lsr #5\n"
-        ".endm\n"
-
-        ".macro combine_pixels_pair PIX1, PIX2\n"
-            "pkhbt \\PIX1, \\PIX1, \\PIX2, lsl #16\n" /* Note: assume little endian byte order */
-        ".endm\n"
-
-        /* function entry, save all registers (10 words) to stack */
-        "stmdb   sp!, {r4-r11, ip, lr}\n"
-        
-        /* define some aliases */
-        "DST     .req  r0\n"
-        "SRC     .req  r1\n"
-        "W       .req  r2\n"
-        "H       .req  r3\n"
-