Bug 1063356 - Back out partial libvpx update. r=me
author: Ralph Giles <giles@mozilla.com>
date: Thu, 02 Oct 2014 11:20:47 -0700
changeset 231710 61588fcd074fb2961c768b2ace97a4b8bf0e03ea
parent 231709 944246c0d39fea189c9086f68a0d39e5f13cbe67
child 231711 0d55b8666ca673147018dd3e86b7ca36b57026e1
push id: 4187
push user: bhearsum@mozilla.com
push date: Fri, 28 Nov 2014 15:29:12 +0000
reviewers: me
bugs: 1063356
milestone: 35.0a1
Bug 1063356 - Back out partial libvpx update. r=me

It's taking a while to get the new version building on all platforms. Removing the partial commits so we can land cleanly.
media/libvpx/mingw.patch
media/libvpx/moz.build
media/libvpx/stdint.patch
media/libvpx/unified.patch
media/libvpx/update.py
media/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c
media/libvpx/vp8/common/arm/neon/copymem_neon.c
media/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c
media/libvpx/vp8/common/arm/neon/dequant_idct_neon.c
media/libvpx/vp8/common/arm/neon/dequantizeb_neon.c
media/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.c
media/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.c
media/libvpx/vp8/common/arm/neon/iwalsh_neon.c
media/libvpx/vp8/common/arm/neon/loopfilter_neon.c
media/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c
media/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c
media/libvpx/vp8/common/arm/neon/mbloopfilter_neon.c
media/libvpx/vp8/common/arm/neon/reconintra_neon.c
media/libvpx/vp8/common/arm/neon/sad_neon.c
media/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.c
media/libvpx/vp8/common/arm/neon/sixtappredict_neon.c
media/libvpx/vp8/common/arm/neon/variance_neon.c
media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance_neon.c
media/libvpx/vp8/common/x86/loopfilter_block_sse2_x86_64.asm
media/libvpx/vp8/decoder/decodeframe.c
media/libvpx/vp8/encoder/arm/neon/denoising_neon.c
media/libvpx/vp8/encoder/arm/neon/shortfdct_neon.c
media/libvpx/vp8/encoder/arm/neon/subtract_neon.c
media/libvpx/vp8/encoder/arm/neon/vp8_mse16x16_neon.c
media/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.c
media/libvpx/vp8/encoder/x86/quantize_sse4.c
media/libvpx/vp8/encoder/x86/quantize_ssse3.c
media/libvpx/vp8/encoder/x86/ssim_opt_x86_64.asm
media/libvpx/vp9/common/arm/neon/vp9_idct16x16_1_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct16x16_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct32x32_1_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct32x32_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct4x4_1_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct4x4_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct8x8_1_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct8x8_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_iht4x4_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_iht8x8_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_loopfilter_16_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_loopfilter_16_neon.c
media/libvpx/vp9/common/arm/neon/vp9_reconintra_neon.asm
media/libvpx/vp9/common/vp9_blockd.c
media/libvpx/vp9/common/vp9_frame_buffers.c
media/libvpx/vp9/common/vp9_frame_buffers.h
media/libvpx/vp9/common/vp9_prob.c
media/libvpx/vp9/common/vp9_prob.h
media/libvpx/vp9/common/vp9_thread.c
media/libvpx/vp9/common/vp9_thread.h
media/libvpx/vp9/common/x86/vp9_high_intrapred_sse2.asm
media/libvpx/vp9/common/x86/vp9_high_loopfilter_intrin_sse2.c
media/libvpx/vp9/common/x86/vp9_high_subpixel_8t_sse2.asm
media/libvpx/vp9/common/x86/vp9_high_subpixel_bilinear_sse2.asm
media/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.h
media/libvpx/vp9/common/x86/vp9_idct_intrin_ssse3.c
media/libvpx/vp9/common/x86/vp9_idct_ssse3_x86_64.asm
media/libvpx/vp9/common/x86/vp9_subpixel_8t_intrin_avx2.c
media/libvpx/vp9/common/x86/vp9_subpixel_8t_intrin_ssse3.c
media/libvpx/vp9/common/x86/vp9_subpixel_bilinear_sse2.asm
media/libvpx/vp9/common/x86/vp9_subpixel_bilinear_ssse3.asm
media/libvpx/vp9/decoder/vp9_decodeframe.c
media/libvpx/vp9/decoder/vp9_decodeframe.h
media/libvpx/vp9/decoder/vp9_decoder.c
media/libvpx/vp9/decoder/vp9_decoder.h
media/libvpx/vp9/decoder/vp9_dthread.c
media/libvpx/vp9/decoder/vp9_dthread.h
media/libvpx/vp9/decoder/vp9_read_bit_buffer.c
media/libvpx/vp9/decoder/vp9_reader.c
media/libvpx/vp9/decoder/vp9_reader.h
media/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_sad_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_subtract_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_variance_neon.c
media/libvpx/vp9/encoder/vp9_aq_complexity.c
media/libvpx/vp9/encoder/vp9_aq_complexity.h
media/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.c
media/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.h
media/libvpx/vp9/encoder/vp9_aq_variance.c
media/libvpx/vp9/encoder/vp9_aq_variance.h
media/libvpx/vp9/encoder/vp9_context_tree.c
media/libvpx/vp9/encoder/vp9_context_tree.h
media/libvpx/vp9/encoder/vp9_cost.c
media/libvpx/vp9/encoder/vp9_cost.h
media/libvpx/vp9/encoder/vp9_denoiser.c
media/libvpx/vp9/encoder/vp9_denoiser.h
media/libvpx/vp9/encoder/vp9_encoder.c
media/libvpx/vp9/encoder/vp9_encoder.h
media/libvpx/vp9/encoder/vp9_extend.c
media/libvpx/vp9/encoder/vp9_extend.h
media/libvpx/vp9/encoder/vp9_pickmode.c
media/libvpx/vp9/encoder/vp9_pickmode.h
media/libvpx/vp9/encoder/vp9_rd.c
media/libvpx/vp9/encoder/vp9_rd.h
media/libvpx/vp9/encoder/vp9_resize.c
media/libvpx/vp9/encoder/vp9_resize.h
media/libvpx/vp9/encoder/vp9_sad.c
media/libvpx/vp9/encoder/vp9_speed_features.c
media/libvpx/vp9/encoder/vp9_speed_features.h
media/libvpx/vp9/encoder/vp9_ssim.h
media/libvpx/vp9/encoder/vp9_svc_layercontext.c
media/libvpx/vp9/encoder/vp9_svc_layercontext.h
media/libvpx/vp9/encoder/vp9_variance.c
media/libvpx/vp9/encoder/vp9_write_bit_buffer.c
media/libvpx/vp9/encoder/vp9_writer.c
media/libvpx/vp9/encoder/vp9_writer.h
media/libvpx/vp9/encoder/x86/vp9_dct32x32_avx2.c
media/libvpx/vp9/encoder/x86/vp9_dct_avx2.c
media/libvpx/vp9/encoder/x86/vp9_dct_mmx.asm
media/libvpx/vp9/encoder/x86/vp9_dct_ssse3_x86_64.asm
media/libvpx/vp9/encoder/x86/vp9_error_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
media/libvpx/vp9/encoder/x86/vp9_sad4d_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_ssim_opt_x86_64.asm
media/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_variance_avx2.c
media/libvpx/vp9/encoder/x86/vp9_variance_impl_intrin_avx2.c
media/libvpx/vpx/internal/vpx_psnr.h
media/libvpx/vpx/src/vpx_psnr.c
media/libvpx/vpx/vpx_frame_buffer.h
new file mode 100644
--- /dev/null
+++ b/media/libvpx/mingw.patch
@@ -0,0 +1,30 @@
+diff --git a/media/libvpx/vpx/src/svc_encodeframe.c b/media/libvpx/vpx/src/svc_encodeframe.c
+index 57d21dc..2514ad3 100644
+--- a/media/libvpx/vpx/src/svc_encodeframe.c
++++ b/media/libvpx/vpx/src/svc_encodeframe.c
+@@ -18,21 +18,23 @@
+ #include <stdlib.h>
+ #include <string.h>
+ #define VPX_DISABLE_CTRL_TYPECHECKS 1
+ #define VPX_CODEC_DISABLE_COMPAT 1
+ #include "vpx/svc_context.h"
+ #include "vpx/vp8cx.h"
+ #include "vpx/vpx_encoder.h"
+ 
+-#if defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API)
++#ifdef __MINGW32__
+ #define strtok_r strtok_s
++#ifndef MINGW_HAS_SECURE_API
+ // proto from /usr/x86_64-w64-mingw32/include/sec_api/string_s.h
+ _CRTIMP char *__cdecl strtok_s(char *str, const char *delim, char **context);
+-#endif
++#endif  /* MINGW_HAS_SECURE_API */
++#endif  /* __MINGW32__ */
+ 
+ #ifdef _MSC_VER
+ #define strdup _strdup
+ #define strtok_r strtok_s
+ #endif
+ 
+ #define SVC_REFERENCE_FRAMES 8
+ #define SUPERFRAME_SLOTS (8)
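(Aside: the restructured guard above keeps the strtok_r -> strtok_s mapping for all MinGW builds, and only declares the strtok_s prototype when MINGW_HAS_SECURE_API has not already supplied it. A minimal sketch of a call site, which the shim leaves unchanged — the tokenized string here is purely illustrative:)

    #include <stdio.h>
    #include <string.h>

    /* On MinGW the patched header maps strtok_r to strtok_s; on POSIX
       this is the native strtok_r. Call sites look the same either way. */
    int main(void) {
        char options[] = "scale=1:2,frames=10";
        char *save = NULL;
        char *tok;
        for (tok = strtok_r(options, ",", &save); tok != NULL;
             tok = strtok_r(NULL, ",", &save))
            printf("option: %s\n", tok);
        return 0;
    }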
--- a/media/libvpx/moz.build
+++ b/media/libvpx/moz.build
@@ -40,23 +40,16 @@ if CONFIG['VPX_X86_ASM']:
     if '64' in CONFIG['OS_TEST']:
         SOURCES += files['X86-64_ASM']
 
     # AVX2 only supported on
     # Darwin toolchain right now
     if CONFIG['OS_TARGET'] == 'Darwin':
         SOURCES += files['AVX2']
 
-    # Expected support is hard-coded in the various vpx_config files but
-    # we need to propagate the config checks here to get the right flags.
-    if CONFIG['HAVE_TOOLCHAIN_SUPPORT_MSSSE3']:
-        CFLAGS += ['-mssse3']
-    if CONFIG['HAVE_TOOLCHAIN_SUPPORT_MSSE4_1']:
-        CFLAGS += ['-msse4.1']
-
     #postproc is only enabled on x86 with asm
     SOURCES += files['VP8_POSTPROC']
 
 arm_asm_files = []
 if CONFIG['VPX_ARM_ASM']:
     arm_asm_files += files['ARM_ASM']
 
     if CONFIG['VPX_AS_CONVERSION']:
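(Dropping the -mssse3 and -msse4.1 flag propagation here is consistent with the rest of the backout: the SSSE3/SSE4.1 intrinsics sources that would have needed those flags, such as vp8/encoder/x86/quantize_ssse3.c, are removed below.)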
--- a/media/libvpx/stdint.patch
+++ b/media/libvpx/stdint.patch
@@ -1,20 +1,41 @@
---- vpx/vpx_integer.h-	2014-09-17 15:49:58.000000000 -0700
-+++ vpx/vpx_integer.h	2014-09-17 15:52:59.000000000 -0700
-@@ -15,6 +15,8 @@
+diff --git a/media/libvpx/vpx/vpx_integer.h b/media/libvpx/vpx/vpx_integer.h
+--- a/media/libvpx/vpx/vpx_integer.h
++++ b/media/libvpx/vpx/vpx_integer.h
+@@ -10,16 +10,18 @@
+ 
+ 
+ #ifndef VPX_INTEGER_H
+ #define VPX_INTEGER_H
+ 
  /* get ptrdiff_t, size_t, wchar_t, NULL */
  #include <stddef.h>
  
 +#if !defined(VPX_DONT_DEFINE_STDINT_TYPES)
 +
- #if defined(_MSC_VER)
- #define VPX_FORCE_INLINE __forceinline
- #define VPX_INLINE __inline
-@@ -56,6 +58,8 @@
+ #if (defined(_MSC_VER) && (_MSC_VER < 1600)) || defined(VPX_EMULATE_INTTYPES)
+ typedef signed char  int8_t;
+ typedef signed short int16_t;
+ typedef signed int   int32_t;
+ 
+ typedef unsigned char  uint8_t;
+ typedef unsigned short uint16_t;
+ typedef unsigned int   uint32_t;
+@@ -47,16 +49,18 @@ typedef unsigned int   uintptr_t;
+ 
+ #if defined(__cplusplus) && !defined(__STDC_FORMAT_MACROS)
+ #define __STDC_FORMAT_MACROS
+ #endif
+ #include <stdint.h>
  
  #endif
  
-+#endif // VPX_DONT_DEFINE_STDINT_TYPES
++#endif
 +
  /* VS2010 defines stdint.h, but not inttypes.h */
- #if defined(_MSC_VER) && _MSC_VER < 1800
+ #if defined(_MSC_VER)
  #define PRId64 "I64d"
+ #else
+ #include <inttypes.h>
+ #endif
+ 
+ #endif
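The net effect of the rewritten stdint patch is an opt-out for embedders that already have authoritative fixed-width types: defining VPX_DONT_DEFINE_STDINT_TYPES before including the header skips both libvpx's emulated typedefs and its own <stdint.h> include. A hedged sketch of the intended usage (the include path is illustrative):

    /* Define before any vpx header so vpx_integer.h supplies only the
       PRId64-style format macros, not its integer typedefs. */
    #define VPX_DONT_DEFINE_STDINT_TYPES 1

    #include <stdint.h>             /* the embedder's own fixed-width types */
    #include "vpx/vpx_integer.h"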
new file mode 100644
--- /dev/null
+++ b/media/libvpx/unified.patch
@@ -0,0 +1,82 @@
+diff --git a/media/libvpx/vp8/common/setupintrarecon.h b/media/libvpx/vp8/common/setupintrarecon.h
+index e515c3a..9317a6d 100644
+--- a/media/libvpx/vp8/common/setupintrarecon.h
++++ b/media/libvpx/vp8/common/setupintrarecon.h
+@@ -3,16 +3,18 @@
+  *
+  *  Use of this source code is governed by a BSD-style license
+  *  that can be found in the LICENSE file in the root of the source
+  *  tree. An additional intellectual property rights grant can be found
+  *  in the file PATENTS.  All contributing project authors may
+  *  be found in the AUTHORS file in the root of the source tree.
+  */
+ 
++#ifndef SETUPINTRARECON_H
++#define SETUPINTRARECON_H
+ 
+ #include "vpx_scale/yv12config.h"
+ extern void vp8_setup_intra_recon(YV12_BUFFER_CONFIG *ybf);
+ extern void vp8_setup_intra_recon_top_line(YV12_BUFFER_CONFIG *ybf);
+ 
+ static
+ void setup_intra_recon_left(unsigned char *y_buffer,
+                             unsigned char *u_buffer,
+@@ -26,8 +28,10 @@ void setup_intra_recon_left(unsigned char *y_buffer,
+         y_buffer[y_stride *i] = (unsigned char) 129;
+ 
+     for (i = 0; i < 8; i++)
+         u_buffer[uv_stride *i] = (unsigned char) 129;
+ 
+     for (i = 0; i < 8; i++)
+         v_buffer[uv_stride *i] = (unsigned char) 129;
+ }
++
++#endif
+diff --git a/media/libvpx/vpx_ports/vpx_once.h b/media/libvpx/vpx_ports/vpx_once.h
+index 16a735c..0387a71 100644
+--- a/media/libvpx/vpx_ports/vpx_once.h
++++ b/media/libvpx/vpx_ports/vpx_once.h
+@@ -2,16 +2,19 @@
+  *  Copyright (c) 2011 The WebM project authors. All Rights Reserved.
+  *
+  *  Use of this source code is governed by a BSD-style license
+  *  that can be found in the LICENSE file in the root of the source
+  *  tree. An additional intellectual property rights grant can be found
+  *  in the file PATENTS.  All contributing project authors may
+  *  be found in the AUTHORS file in the root of the source tree.
+  */
++#ifndef VPX_ONCE_H
++#define VPX_ONCE_H
++
+ #include "vpx_config.h"
+ 
+ #if CONFIG_MULTITHREAD && defined(_WIN32)
+ #include <windows.h>
+ #include <stdlib.h>
+ static void once(void (*func)(void))
+ {
+     static CRITICAL_SECTION *lock;
+@@ -90,8 +93,10 @@ static void once(void (*func)(void))
+ 
+     if(!done)
+     {
+         func();
+         done = 1;
+     }
+ }
+ #endif
++
++#endif
+diff --git a/media/libvpx/vp8/common/loopfilter.c b/media/libvpx/vp8/common/loopfilter.c
+index 19857a7..3c0fa63 100644
+--- a/media/libvpx/vp8/common/loopfilter.c
++++ b/media/libvpx/vp8/common/loopfilter.c
+@@ -15,8 +15,6 @@
+ #include "onyxc_int.h"
+ #include "vpx_mem/vpx_mem.h"
+ 
+-typedef unsigned char uc;
+-
+ static void lf_init_lut(loop_filter_info_n *lfi)
+ {
+     int filt_lvl;
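unified.patch exists because Mozilla's unified builds concatenate several .c files into one translation unit, so unguarded headers that define static functions (setupintrarecon.h, vpx_once.h) and file-local names like the uc typedef removed above collide when two sources in the same unit pull them in. A minimal illustration of the failure mode the new guards prevent (the second file name is hypothetical):

    /* What a generated unified source effectively looks like: */
    #include "decodeframe.c"  /* includes setupintrarecon.h            */
    #include "threading.c"    /* includes it again; without the new
                                 SETUPINTRARECON_H guard, the static
                                 setup_intra_recon_left() is defined
                                 twice and compilation fails           */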
--- a/media/libvpx/update.py
+++ b/media/libvpx/update.py
@@ -115,48 +115,42 @@ MODULES = {
     ],
     'ARM_ASM': [
         'PORTS_SRCS-$(ARCH_ARM)',
         'SCALE_SRCS-$(HAVE_NEON)',
         'VP8_COMMON_SRCS-$(ARCH_ARM)',
         'VP8_COMMON_SRCS-$(HAVE_MEDIA)',
         'VP8_COMMON_SRCS-$(HAVE_NEON)',
         'VP9_COMMON_SRCS-$(HAVE_NEON)',
-        'VP9_COMMON_SRCS-$(HAVE_NEON_ASM)',
         'VP8_CX_SRCS-$(ARCH_ARM)',
         'VP8_CX_SRCS-$(HAVE_EDSP)',
         'VP8_CX_SRCS-$(HAVE_MEDIA)',
         'VP8_CX_SRCS-$(HAVE_NEON)',
-        'VP8_CX_SRCS-$(HAVE_NEON_ASM)',
-        'VP9_CX_SRCS-$(HAVE_NEON)',
     ],
     'ERROR_CONCEALMENT': [
         'VP8_DX_SRCS-$(CONFIG_ERROR_CONCEALMENT)',
     ],
     'AVX2': [
         'VP9_COMMON_SRCS-$(HAVE_AVX2)',
-        'VP9_CX_SRCS-$(HAVE_AVX2)',
     ],
     'VP8_POSTPROC': [
         'VP8_COMMON_SRCS-$(CONFIG_POSTPROC)',
     ],
     'VP9_POSTPROC': [
         'VP9_COMMON_SRCS-$(CONFIG_VP9_POSTPROC)',
     ]
 }
 
 DISABLED_MODULES = [
-    'API_SRCS-$(CONFIG_SPATIAL_SVC)',
     'MEM_SRCS-$(CONFIG_MEM_MANAGER)',
     'MEM_SRCS-$(CONFIG_MEM_TRACKER)',
     'VP8_COMMON_SRCS-$(CONFIG_POSTPROC_VISUALIZER)',
     'VP9_COMMON_SRCS-$(CONFIG_POSTPROC_VISUALIZER)',
     'VP8_CX_SRCS-$(CONFIG_INTERNAL_STATS)',
     'VP9_CX_SRCS-$(CONFIG_INTERNAL_STATS)',
-    'VP9_CX_SRCS-$(CONFIG_VP9_TEMPORAL_DENOISING)',
 
     # mips files are also ignored via ignored_folders
     'SCALE_SRCS-$(HAVE_DSPR2)',
     'VP8_COMMON_SRCS-$(HAVE_DSPR2)',
     'VP9_COMMON_SRCS-$(HAVE_DSPR2)',
     'VP8_CX_SRCS_REMOVE-$(HAVE_EDSP)',
 ]
 
@@ -206,115 +200,43 @@ files = {
         'vpx_scale/vpx_scale.h',
         'vpx_scale/yv12config.h',
         'vpx/vp8cx.h',
         'vpx/vp8dx.h',
         'vpx/vp8.h',
         'vpx/vpx_codec.h',
         'vpx/vpx_decoder.h',
         'vpx/vpx_encoder.h',
-        'vpx/vpx_frame_buffer.h',
         'vpx/vpx_image.h',
         'vpx/vpx_integer.h',
     ],
     'X86-64_ASM': [
         'third_party/x86inc/x86inc.asm',
-        'vp8/common/x86/loopfilter_block_sse2_x86_64.asm',
-        'vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm',
+        'vp8/common/x86/loopfilter_block_sse2.asm',
+        'vp9/encoder/x86/vp9_quantize_ssse3.asm',
     ],
     'SOURCES': [
         'vp8/common/rtcd.c',
         'vp8/common/sad_c.c',
-        'vp8/encoder/bitstream.c',
-        'vp8/encoder/onyx_if.c',
         'vp8/vp8_dx_iface.c',
-        'vp9/common/vp9_alloccommon.c',
-        'vp9/common/vp9_blockd.c',
-        'vp9/common/vp9_common_data.c',
-        'vp9/common/vp9_convolve.c',
-        'vp9/common/vp9_debugmodes.c',
-        'vp9/common/vp9_entropy.c',
-        'vp9/common/vp9_entropymode.c',
         'vp9/common/vp9_entropymv.c',
-        'vp9/common/vp9_filter.c',
-        'vp9/common/vp9_frame_buffers.c',
-        'vp9/common/vp9_idct.c',
-        'vp9/common/vp9_loopfilter.c',
-        'vp9/common/vp9_loopfilter_filters.c',
-        'vp9/common/vp9_mvref_common.c',
-        'vp9/common/vp9_pred_common.c',
-        'vp9/common/vp9_prob.c',
-        'vp9/common/vp9_quant_common.c',
-        'vp9/common/vp9_reconinter.c',
-        'vp9/common/vp9_reconintra.c',
         'vp9/common/vp9_rtcd.c',
-        'vp9/common/vp9_scale.c',
-        'vp9/common/vp9_scan.c',
-        'vp9/common/vp9_seg_common.c',
-        'vp9/common/vp9_thread.c',
-        'vp9/common/vp9_tile_common.c',
-        'vp9/decoder/vp9_decodeframe.c',
-        'vp9/decoder/vp9_decodemv.c',
-        'vp9/decoder/vp9_decoder.c',
-        'vp9/decoder/vp9_detokenize.c',
-        'vp9/decoder/vp9_dsubexp.c',
-        'vp9/decoder/vp9_dthread.c',
-        'vp9/decoder/vp9_reader.c',
         'vp9/encoder/vp9_bitstream.c',
-        'vp9/encoder/vp9_aq_complexity.c',
-        'vp9/encoder/vp9_aq_cyclicrefresh.c',
-        'vp9/encoder/vp9_aq_variance.c',
-        'vp9/encoder/vp9_context_tree.c',
-        'vp9/encoder/vp9_cost.c',
-        'vp9/encoder/vp9_dct.c',
-        'vp9/encoder/vp9_encodeframe.c',
-        'vp9/encoder/vp9_encodemb.c',
-        'vp9/encoder/vp9_encodemv.c',
-        'vp9/encoder/vp9_encoder.c',
-        'vp9/encoder/vp9_extend.c',
-        'vp9/encoder/vp9_firstpass.c',
-        'vp9/encoder/vp9_lookahead.c',
-        'vp9/encoder/vp9_mbgraph.c',
-        'vp9/encoder/vp9_mcomp.c',
-        'vp9/encoder/vp9_picklpf.c',
-        'vp9/encoder/vp9_pickmode.c',
-        'vp9/encoder/vp9_quantize.c',
-        'vp9/encoder/vp9_ratectrl.c',
-        'vp9/encoder/vp9_rd.c',
-        'vp9/encoder/vp9_rdopt.c',
-        'vp9/encoder/vp9_resize.c',
-        'vp9/encoder/vp9_sad.c',
-        'vp9/encoder/vp9_segmentation.c',
-        'vp9/encoder/vp9_speed_features.c',
-        'vp9/encoder/vp9_subexp.c',
-        'vp9/encoder/vp9_svc_layercontext.c',
-        'vp9/encoder/vp9_temporal_filter.c',
-        'vp9/encoder/vp9_tokenize.c',
-        'vp9/encoder/vp9_treewriter.c',
-        'vp9/encoder/vp9_variance.c',
-        'vp9/encoder/vp9_write_bit_buffer.c',
-        'vp9/encoder/vp9_writer.c',
-        'vp9/vp9_cx_iface.c',
-        'vp9/vp9_dx_iface.c',
         'vpx/src/svc_encodeframe.c',
-        'vpx/src/vpx_encoder.c',
         'vpx_mem/vpx_mem.c',
-        'vpx_scale/vpx_scale_rtcd.c',
-        'vpx_scale/generic/yv12config.c',
-        'vpx_scale/generic/yv12extend.c',
     ]
 }
 
 manual = [
     # special case in moz.build
     'vp8/encoder/boolhuff.c',
 
     # 64bit only
-    'vp8/common/x86/loopfilter_block_sse2_x86_64.asm',
-    'vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm',
+    'vp8/common/x86/loopfilter_block_sse2.asm',
+    'vp9/encoder/x86/vp9_quantize_ssse3.asm',
 
     # offsets are special cased in Makefile.in
     'vp8/encoder/vp8_asm_enc_offsets.c',
     'vpx_scale/vpx_scale_asm_offsets.c',
 
     # ignore while vp9 postproc is not enabled
     'vp9/common/x86/vp9_postproc_mmx.asm',
     'vp9/common/x86/vp9_postproc_sse2.asm',
@@ -523,17 +445,19 @@ def update_and_remove_files(prefix, libv
     if removed_files:
         print "Remove files:"
         for f in removed_files:
             os.unlink(f)
             print '    ', f
 
 def apply_patches():
     # Patch to permit vpx users to specify their own <stdint.h> types.
-    os.system("patch -p0 < stdint.patch")
+    os.system("patch -p3 < stdint.patch")
+    os.system("patch -p3 < unified.patch")
+    os.system("patch -p3 < mingw.patch")
 
 def update_readme(commit):
     with open('README_MOZILLA') as f:
         readme = f.read()
 
     if 'The git commit ID used was' in readme:
         new_readme = re.sub('The git commit ID used was [a-f0-9]+',
             'The git commit ID used was %s' % commit, readme)
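(Note the strip level in apply_patches changes from -p0 to -p3: the regenerated patches use git-style paths rooted at the Mozilla tree, e.g. a/media/libvpx/vpx/vpx_integer.h, so three leading path components must be stripped for the files to resolve when patching from inside media/libvpx.)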
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c
+++ /dev/null
@@ -1,699 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-static const uint8_t bifilter4_coeff[8][2] = {
-    {128,   0},
-    {112,  16},
-    { 96,  32},
-    { 80,  48},
-    { 64,  64},
-    { 48,  80},
-    { 32,  96},
-    { 16, 112}
-};
-
-void vp8_bilinear_predict4x4_neon(
-        unsigned char *src_ptr,
-        int src_pixels_per_line,
-        int xoffset,
-        int yoffset,
-        unsigned char *dst_ptr,
-        int dst_pitch) {
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8;
-    uint8x8_t d26u8, d27u8, d28u8, d29u8, d30u8;
-    uint8x16_t q1u8, q2u8;
-    uint16x8_t q1u16, q2u16;
-    uint16x8_t q7u16, q8u16, q9u16;
-    uint64x2_t q4u64, q5u64;
-    uint64x1_t d12u64;
-    uint32x2x2_t d0u32x2, d1u32x2, d2u32x2, d3u32x2;
-
-    if (xoffset == 0) {  // skip_1stpass_filter
-        uint32x2_t d28u32 = vdup_n_u32(0);
-        uint32x2_t d29u32 = vdup_n_u32(0);
-        uint32x2_t d30u32 = vdup_n_u32(0);
-
-        d28u32 = vld1_lane_u32((const uint32_t *)src_ptr, d28u32, 0);
-        src_ptr += src_pixels_per_line;
-        d28u32 = vld1_lane_u32((const uint32_t *)src_ptr, d28u32, 1);
-        src_ptr += src_pixels_per_line;
-        d29u32 = vld1_lane_u32((const uint32_t *)src_ptr, d29u32, 0);
-        src_ptr += src_pixels_per_line;
-        d29u32 = vld1_lane_u32((const uint32_t *)src_ptr, d29u32, 1);
-        src_ptr += src_pixels_per_line;
-        d30u32 = vld1_lane_u32((const uint32_t *)src_ptr, d30u32, 0);
-        d28u8 = vreinterpret_u8_u32(d28u32);
-        d29u8 = vreinterpret_u8_u32(d29u32);
-        d30u8 = vreinterpret_u8_u32(d30u32);
-    } else {
-        d2u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d3u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d4u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d5u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d6u8 = vld1_u8(src_ptr);
-
-        q1u8 = vcombine_u8(d2u8, d3u8);
-        q2u8 = vcombine_u8(d4u8, d5u8);
-
-        d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
-        d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
-
-        q4u64  = vshrq_n_u64(vreinterpretq_u64_u8(q1u8), 8);
-        q5u64  = vshrq_n_u64(vreinterpretq_u64_u8(q2u8), 8);
-        d12u64 = vshr_n_u64(vreinterpret_u64_u8(d6u8), 8);
-
-        d0u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q1u8)),
-                           vreinterpret_u32_u8(vget_high_u8(q1u8)));
-        d1u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q2u8)),
-                           vreinterpret_u32_u8(vget_high_u8(q2u8)));
-        d2u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q4u64)),
-                           vreinterpret_u32_u64(vget_high_u64(q4u64)));
-        d3u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)),
-                           vreinterpret_u32_u64(vget_high_u64(q5u64)));
-
-        q7u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d0u8);
-        q8u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d0u8);
-        q9u16 = vmull_u8(d6u8, d0u8);
-
-        q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d2u32x2.val[0]), d1u8);
-        q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d3u32x2.val[0]), d1u8);
-        q9u16 = vmlal_u8(q9u16, vreinterpret_u8_u64(d12u64), d1u8);
-
-        d28u8 = vqrshrn_n_u16(q7u16, 7);
-        d29u8 = vqrshrn_n_u16(q8u16, 7);
-        d30u8 = vqrshrn_n_u16(q9u16, 7);
-    }
-
-    // secondpass_filter
-    if (yoffset == 0) {  // skip_2ndpass_filter
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d28u8), 0);
-        dst_ptr += dst_pitch;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d28u8), 1);
-        dst_ptr += dst_pitch;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d29u8), 0);
-        dst_ptr += dst_pitch;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d29u8), 1);
-    } else {
-        d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
-        d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
-
-        q1u16 = vmull_u8(d28u8, d0u8);
-        q2u16 = vmull_u8(d29u8, d0u8);
-
-        d26u8 = vext_u8(d28u8, d29u8, 4);
-        d27u8 = vext_u8(d29u8, d30u8, 4);
-
-        q1u16 = vmlal_u8(q1u16, d26u8, d1u8);
-        q2u16 = vmlal_u8(q2u16, d27u8, d1u8);
-
-        d2u8 = vqrshrn_n_u16(q1u16, 7);
-        d3u8 = vqrshrn_n_u16(q2u16, 7);
-
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 0);
-        dst_ptr += dst_pitch;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 1);
-        dst_ptr += dst_pitch;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 0);
-        dst_ptr += dst_pitch;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 1);
-    }
-    return;
-}
-
-void vp8_bilinear_predict8x4_neon(
-        unsigned char *src_ptr,
-        int src_pixels_per_line,
-        int xoffset,
-        int yoffset,
-        unsigned char *dst_ptr,
-        int dst_pitch) {
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8;
-    uint8x8_t d7u8, d9u8, d11u8, d22u8, d23u8, d24u8, d25u8, d26u8;
-    uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8;
-    uint16x8_t q1u16, q2u16, q3u16, q4u16;
-    uint16x8_t q6u16, q7u16, q8u16, q9u16, q10u16;
-
-    if (xoffset == 0) {  // skip_1stpass_filter
-        d22u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d23u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d24u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d25u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d26u8 = vld1_u8(src_ptr);
-    } else {
-        q1u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-        q2u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-        q3u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-        q4u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-        q5u8 = vld1q_u8(src_ptr);
-
-        d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
-        d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
-
-        q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
-        q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
-        q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
-        q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
-        q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
-
-        d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);
-        d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1);
-        d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
-        d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
-        d11u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
-
-        q6u16 = vmlal_u8(q6u16, d3u8, d1u8);
-        q7u16 = vmlal_u8(q7u16, d5u8, d1u8);
-        q8u16 = vmlal_u8(q8u16, d7u8, d1u8);
-        q9u16 = vmlal_u8(q9u16, d9u8, d1u8);
-        q10u16 = vmlal_u8(q10u16, d11u8, d1u8);
-
-        d22u8 = vqrshrn_n_u16(q6u16, 7);
-        d23u8 = vqrshrn_n_u16(q7u16, 7);
-        d24u8 = vqrshrn_n_u16(q8u16, 7);
-        d25u8 = vqrshrn_n_u16(q9u16, 7);
-        d26u8 = vqrshrn_n_u16(q10u16, 7);
-    }
-
-    // secondpass_filter
-    if (yoffset == 0) {  // skip_2ndpass_filter
-        vst1_u8((uint8_t *)dst_ptr, d22u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d23u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d24u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d25u8);
-    } else {
-        d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
-        d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
-
-        q1u16 = vmull_u8(d22u8, d0u8);
-        q2u16 = vmull_u8(d23u8, d0u8);
-        q3u16 = vmull_u8(d24u8, d0u8);
-        q4u16 = vmull_u8(d25u8, d0u8);
-
-        q1u16 = vmlal_u8(q1u16, d23u8, d1u8);
-        q2u16 = vmlal_u8(q2u16, d24u8, d1u8);
-        q3u16 = vmlal_u8(q3u16, d25u8, d1u8);
-        q4u16 = vmlal_u8(q4u16, d26u8, d1u8);
-
-        d2u8 = vqrshrn_n_u16(q1u16, 7);
-        d3u8 = vqrshrn_n_u16(q2u16, 7);
-        d4u8 = vqrshrn_n_u16(q3u16, 7);
-        d5u8 = vqrshrn_n_u16(q4u16, 7);
-
-        vst1_u8((uint8_t *)dst_ptr, d2u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d3u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d4u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d5u8);
-    }
-    return;
-}
-
-void vp8_bilinear_predict8x8_neon(
-        unsigned char *src_ptr,
-        int src_pixels_per_line,
-        int xoffset,
-        int yoffset,
-        unsigned char *dst_ptr,
-        int dst_pitch) {
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8, d11u8;
-    uint8x8_t d22u8, d23u8, d24u8, d25u8, d26u8, d27u8, d28u8, d29u8, d30u8;
-    uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8;
-    uint16x8_t q1u16, q2u16, q3u16, q4u16, q5u16;
-    uint16x8_t q6u16, q7u16, q8u16, q9u16, q10u16;
-
-    if (xoffset == 0) {  // skip_1stpass_filter
-        d22u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d23u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d24u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d25u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d26u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d27u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d28u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d29u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
-        d30u8 = vld1_u8(src_ptr);
-    } else {
-        q1u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-        q2u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-        q3u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-        q4u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-
-        d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
-        d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
-
-        q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
-        q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
-        q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
-        q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
-
-        d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);
-        d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1);
-        d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
-        d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
-
-        q6u16 = vmlal_u8(q6u16, d3u8, d1u8);
-        q7u16 = vmlal_u8(q7u16, d5u8, d1u8);
-        q8u16 = vmlal_u8(q8u16, d7u8, d1u8);
-        q9u16 = vmlal_u8(q9u16, d9u8, d1u8);
-
-        d22u8 = vqrshrn_n_u16(q6u16, 7);
-        d23u8 = vqrshrn_n_u16(q7u16, 7);
-        d24u8 = vqrshrn_n_u16(q8u16, 7);
-        d25u8 = vqrshrn_n_u16(q9u16, 7);
-
-        // first_pass filtering on the rest 5-line data
-        q1u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-        q2u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-        q3u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-        q4u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-        q5u8 = vld1q_u8(src_ptr);
-
-        q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
-        q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
-        q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
-        q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
-        q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
-
-        d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);
-        d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1);
-        d7u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
-        d9u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
-        d11u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
-
-        q6u16 = vmlal_u8(q6u16, d3u8, d1u8);
-        q7u16 = vmlal_u8(q7u16, d5u8, d1u8);
-        q8u16 = vmlal_u8(q8u16, d7u8, d1u8);
-        q9u16 = vmlal_u8(q9u16, d9u8, d1u8);
-        q10u16 = vmlal_u8(q10u16, d11u8, d1u8);
-
-        d26u8 = vqrshrn_n_u16(q6u16, 7);
-        d27u8 = vqrshrn_n_u16(q7u16, 7);
-        d28u8 = vqrshrn_n_u16(q8u16, 7);
-        d29u8 = vqrshrn_n_u16(q9u16, 7);
-        d30u8 = vqrshrn_n_u16(q10u16, 7);
-    }
-
-    // secondpass_filter
-    if (yoffset == 0) {  // skip_2ndpass_filter
-        vst1_u8((uint8_t *)dst_ptr, d22u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d23u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d24u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d25u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d26u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d27u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d28u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d29u8);
-    } else {
-        d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
-        d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
-
-        q1u16 = vmull_u8(d22u8, d0u8);
-        q2u16 = vmull_u8(d23u8, d0u8);
-        q3u16 = vmull_u8(d24u8, d0u8);
-        q4u16 = vmull_u8(d25u8, d0u8);
-        q5u16 = vmull_u8(d26u8, d0u8);
-        q6u16 = vmull_u8(d27u8, d0u8);
-        q7u16 = vmull_u8(d28u8, d0u8);
-        q8u16 = vmull_u8(d29u8, d0u8);
-
-        q1u16 = vmlal_u8(q1u16, d23u8, d1u8);
-        q2u16 = vmlal_u8(q2u16, d24u8, d1u8);
-        q3u16 = vmlal_u8(q3u16, d25u8, d1u8);
-        q4u16 = vmlal_u8(q4u16, d26u8, d1u8);
-        q5u16 = vmlal_u8(q5u16, d27u8, d1u8);
-        q6u16 = vmlal_u8(q6u16, d28u8, d1u8);
-        q7u16 = vmlal_u8(q7u16, d29u8, d1u8);
-        q8u16 = vmlal_u8(q8u16, d30u8, d1u8);
-
-        d2u8 = vqrshrn_n_u16(q1u16, 7);
-        d3u8 = vqrshrn_n_u16(q2u16, 7);
-        d4u8 = vqrshrn_n_u16(q3u16, 7);
-        d5u8 = vqrshrn_n_u16(q4u16, 7);
-        d6u8 = vqrshrn_n_u16(q5u16, 7);
-        d7u8 = vqrshrn_n_u16(q6u16, 7);
-        d8u8 = vqrshrn_n_u16(q7u16, 7);
-        d9u8 = vqrshrn_n_u16(q8u16, 7);
-
-        vst1_u8((uint8_t *)dst_ptr, d2u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d3u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d4u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d5u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d6u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d7u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d8u8); dst_ptr += dst_pitch;
-        vst1_u8((uint8_t *)dst_ptr, d9u8);
-    }
-    return;
-}
-
-void vp8_bilinear_predict16x16_neon(
-        unsigned char *src_ptr,
-        int src_pixels_per_line,
-        int xoffset,
-        int yoffset,
-        unsigned char *dst_ptr,
-        int dst_pitch) {
-    int i;
-    unsigned char tmp[272];
-    unsigned char *tmpp;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
-    uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d16u8, d17u8, d18u8;
-    uint8x8_t d19u8, d20u8, d21u8;
-    uint8x16_t q1u8, q2u8, q3u8, q4u8, q5u8, q6u8, q7u8, q8u8, q9u8, q10u8;
-    uint8x16_t q11u8, q12u8, q13u8, q14u8, q15u8;
-    uint16x8_t q1u16, q2u16, q3u16, q4u16, q5u16, q6u16, q7u16, q8u16;
-    uint16x8_t q9u16, q10u16, q11u16, q12u16, q13u16, q14u16;
-
-    if (xoffset == 0) {  // secondpass_bfilter16x16_only
-        d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
-        d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
-
-        q11u8 = vld1q_u8(src_ptr);
-        src_ptr += src_pixels_per_line;
-        for (i = 4; i > 0; i--) {
-            q12u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-            q13u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-            q14u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-            q15u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
-
-            q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8);
-            q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8);
-            q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8);
-            q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8);
-            q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8);
-            q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8);
-            q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8);
-            q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8);
-
-            q1u16 = vmlal_u8(q1u16, vget_low_u8(q12u8), d1u8);
-            q2u16 = vmlal_u8(q2u16, vget_high_u8(q12u8), d1u8);
-            q3u16 = vmlal_u8(q3u16, vget_low_u8(q13u8), d1u8);
-            q4u16 = vmlal_u8(q4u16, vget_high_u8(q13u8), d1u8);
-            q5u16 = vmlal_u8(q5u16, vget_low_u8(q14u8), d1u8);
-            q6u16 = vmlal_u8(q6u16, vget_high_u8(q14u8), d1u8);
-            q7u16 = vmlal_u8(q7u16, vget_low_u8(q15u8), d1u8);
-            q8u16 = vmlal_u8(q8u16, vget_high_u8(q15u8), d1u8);
-
-            d2u8 = vqrshrn_n_u16(q1u16, 7);
-            d3u8 = vqrshrn_n_u16(q2u16, 7);
-            d4u8 = vqrshrn_n_u16(q3u16, 7);
-            d5u8 = vqrshrn_n_u16(q4u16, 7);
-            d6u8 = vqrshrn_n_u16(q5u16, 7);
-            d7u8 = vqrshrn_n_u16(q6u16, 7);
-            d8u8 = vqrshrn_n_u16(q7u16, 7);
-            d9u8 = vqrshrn_n_u16(q8u16, 7);
-
-            q1u8 = vcombine_u8(d2u8, d3u8);
-            q2u8 = vcombine_u8(d4u8, d5u8);
-            q3u8 = vcombine_u8(d6u8, d7u8);
-            q4u8 = vcombine_u8(d8u8, d9u8);
-
-            q11u8 = q15u8;
-
-            vst1q_u8((uint8_t *)dst_ptr, q1u8); dst_ptr += dst_pitch;
-            vst1q_u8((uint8_t *)dst_ptr, q2u8); dst_ptr += dst_pitch;
-            vst1q_u8((uint8_t *)dst_ptr, q3u8); dst_ptr += dst_pitch;
-            vst1q_u8((uint8_t *)dst_ptr, q4u8); dst_ptr += dst_pitch;
-        }
-        return;
-    }
-
-    if (yoffset == 0) {  // firstpass_bfilter16x16_only
-        d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
-        d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
-
-        for (i = 4; i > 0 ; i--) {
-            d2u8 = vld1_u8(src_ptr);
-            d3u8 = vld1_u8(src_ptr + 8);
-            d4u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
-            d5u8 = vld1_u8(src_ptr);
-            d6u8 = vld1_u8(src_ptr + 8);
-            d7u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
-            d8u8 = vld1_u8(src_ptr);
-            d9u8 = vld1_u8(src_ptr + 8);
-            d10u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
-            d11u8 = vld1_u8(src_ptr);
-            d12u8 = vld1_u8(src_ptr + 8);
-            d13u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
-
-            q7u16  = vmull_u8(d2u8, d0u8);
-            q8u16  = vmull_u8(d3u8, d0u8);
-            q9u16  = vmull_u8(d5u8, d0u8);
-            q10u16 = vmull_u8(d6u8, d0u8);
-            q11u16 = vmull_u8(d8u8, d0u8);
-            q12u16 = vmull_u8(d9u8, d0u8);
-            q13u16 = vmull_u8(d11u8, d0u8);
-            q14u16 = vmull_u8(d12u8, d0u8);
-
-            d2u8  = vext_u8(d2u8, d3u8, 1);
-            d5u8  = vext_u8(d5u8, d6u8, 1);
-            d8u8  = vext_u8(d8u8, d9u8, 1);
-            d11u8 = vext_u8(d11u8, d12u8, 1);
-
-            q7u16  = vmlal_u8(q7u16, d2u8, d1u8);
-            q9u16  = vmlal_u8(q9u16, d5u8, d1u8);
-            q11u16 = vmlal_u8(q11u16, d8u8, d1u8);
-            q13u16 = vmlal_u8(q13u16, d11u8, d1u8);
-
-            d3u8  = vext_u8(d3u8, d4u8, 1);
-            d6u8  = vext_u8(d6u8, d7u8, 1);
-            d9u8  = vext_u8(d9u8, d10u8, 1);
-            d12u8 = vext_u8(d12u8, d13u8, 1);
-
-            q8u16  = vmlal_u8(q8u16,  d3u8, d1u8);
-            q10u16 = vmlal_u8(q10u16, d6u8, d1u8);
-            q12u16 = vmlal_u8(q12u16, d9u8, d1u8);
-            q14u16 = vmlal_u8(q14u16, d12u8, d1u8);
-
-            d14u8 = vqrshrn_n_u16(q7u16, 7);
-            d15u8 = vqrshrn_n_u16(q8u16, 7);
-            d16u8 = vqrshrn_n_u16(q9u16, 7);
-            d17u8 = vqrshrn_n_u16(q10u16, 7);
-            d18u8 = vqrshrn_n_u16(q11u16, 7);
-            d19u8 = vqrshrn_n_u16(q12u16, 7);
-            d20u8 = vqrshrn_n_u16(q13u16, 7);
-            d21u8 = vqrshrn_n_u16(q14u16, 7);
-
-            q7u8 = vcombine_u8(d14u8, d15u8);
-            q8u8 = vcombine_u8(d16u8, d17u8);
-            q9u8 = vcombine_u8(d18u8, d19u8);
-            q10u8 =vcombine_u8(d20u8, d21u8);
-
-            vst1q_u8((uint8_t *)dst_ptr, q7u8); dst_ptr += dst_pitch;
-            vst1q_u8((uint8_t *)dst_ptr, q8u8); dst_ptr += dst_pitch;
-            vst1q_u8((uint8_t *)dst_ptr, q9u8); dst_ptr += dst_pitch;
-            vst1q_u8((uint8_t *)dst_ptr, q10u8); dst_ptr += dst_pitch;
-        }
-        return;
-    }
-
-    d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
-    d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
-
-    d2u8 = vld1_u8(src_ptr);
-    d3u8 = vld1_u8(src_ptr + 8);
-    d4u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
-    d5u8 = vld1_u8(src_ptr);
-    d6u8 = vld1_u8(src_ptr + 8);
-    d7u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
-    d8u8 = vld1_u8(src_ptr);
-    d9u8 = vld1_u8(src_ptr + 8);
-    d10u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
-    d11u8 = vld1_u8(src_ptr);
-    d12u8 = vld1_u8(src_ptr + 8);
-    d13u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
-
-    // First Pass: output_height lines x output_width columns (17x16)
-    tmpp = tmp;
-    for (i = 3; i > 0; i--) {
-        q7u16  = vmull_u8(d2u8, d0u8);
-        q8u16  = vmull_u8(d3u8, d0u8);
-        q9u16  = vmull_u8(d5u8, d0u8);
-        q10u16 = vmull_u8(d6u8, d0u8);
-        q11u16 = vmull_u8(d8u8, d0u8);
-        q12u16 = vmull_u8(d9u8, d0u8);
-        q13u16 = vmull_u8(d11u8, d0u8);
-        q14u16 = vmull_u8(d12u8, d0u8);
-
-        d2u8  = vext_u8(d2u8, d3u8, 1);
-        d5u8  = vext_u8(d5u8, d6u8, 1);
-        d8u8  = vext_u8(d8u8, d9u8, 1);
-        d11u8 = vext_u8(d11u8, d12u8, 1);
-
-        q7u16  = vmlal_u8(q7u16, d2u8, d1u8);
-        q9u16  = vmlal_u8(q9u16, d5u8, d1u8);
-        q11u16 = vmlal_u8(q11u16, d8u8, d1u8);
-        q13u16 = vmlal_u8(q13u16, d11u8, d1u8);
-
-        d3u8  = vext_u8(d3u8, d4u8, 1);
-        d6u8  = vext_u8(d6u8, d7u8, 1);
-        d9u8  = vext_u8(d9u8, d10u8, 1);
-        d12u8 = vext_u8(d12u8, d13u8, 1);
-
-        q8u16  = vmlal_u8(q8u16,  d3u8, d1u8);
-        q10u16 = vmlal_u8(q10u16, d6u8, d1u8);
-        q12u16 = vmlal_u8(q12u16, d9u8, d1u8);
-        q14u16 = vmlal_u8(q14u16, d12u8, d1u8);
-
-        d14u8 = vqrshrn_n_u16(q7u16, 7);
-        d15u8 = vqrshrn_n_u16(q8u16, 7);
-        d16u8 = vqrshrn_n_u16(q9u16, 7);
-        d17u8 = vqrshrn_n_u16(q10u16, 7);
-        d18u8 = vqrshrn_n_u16(q11u16, 7);
-        d19u8 = vqrshrn_n_u16(q12u16, 7);
-        d20u8 = vqrshrn_n_u16(q13u16, 7);
-        d21u8 = vqrshrn_n_u16(q14u16, 7);
-
-        d2u8 = vld1_u8(src_ptr);
-        d3u8 = vld1_u8(src_ptr + 8);
-        d4u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
-        d5u8 = vld1_u8(src_ptr);
-        d6u8 = vld1_u8(src_ptr + 8);
-        d7u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
-        d8u8 = vld1_u8(src_ptr);
-        d9u8 = vld1_u8(src_ptr + 8);
-        d10u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
-        d11u8 = vld1_u8(src_ptr);
-        d12u8 = vld1_u8(src_ptr + 8);
-        d13u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
-
-        q7u8 = vcombine_u8(d14u8, d15u8);
-        q8u8 = vcombine_u8(d16u8, d17u8);
-        q9u8 = vcombine_u8(d18u8, d19u8);
-        q10u8 = vcombine_u8(d20u8, d21u8);
-
-        vst1q_u8((uint8_t *)tmpp, q7u8); tmpp += 16;
-        vst1q_u8((uint8_t *)tmpp, q8u8); tmpp += 16;
-        vst1q_u8((uint8_t *)tmpp, q9u8); tmpp += 16;
-        vst1q_u8((uint8_t *)tmpp, q10u8); tmpp += 16;
-    }
-
-    // First-pass filtering for rest 5 lines
-    d14u8 = vld1_u8(src_ptr);
-    d15u8 = vld1_u8(src_ptr + 8);
-    d16u8 = vld1_u8(src_ptr + 16); src_ptr += src_pixels_per_line;
-
-    q9u16  = vmull_u8(d2u8, d0u8);
-    q10u16 = vmull_u8(d3u8, d0u8);
-    q11u16 = vmull_u8(d5u8, d0u8);
-    q12u16 = vmull_u8(d6u8, d0u8);
-    q13u16 = vmull_u8(d8u8, d0u8);
-    q14u16 = vmull_u8(d9u8, d0u8);
-
-    d2u8  = vext_u8(d2u8, d3u8, 1);
-    d5u8  = vext_u8(d5u8, d6u8, 1);
-    d8u8  = vext_u8(d8u8, d9u8, 1);
-
-    q9u16  = vmlal_u8(q9u16, d2u8, d1u8);
-    q11u16 = vmlal_u8(q11u16, d5u8, d1u8);
-    q13u16 = vmlal_u8(q13u16, d8u8, d1u8);
-
-    d3u8  = vext_u8(d3u8, d4u8, 1);
-    d6u8  = vext_u8(d6u8, d7u8, 1);
-    d9u8  = vext_u8(d9u8, d10u8, 1);
-
-    q10u16 = vmlal_u8(q10u16, d3u8, d1u8);
-    q12u16 = vmlal_u8(q12u16, d6u8, d1u8);
-    q14u16 = vmlal_u8(q14u16, d9u8, d1u8);
-
-    q1u16 = vmull_u8(d11u8, d0u8);
-    q2u16 = vmull_u8(d12u8, d0u8);
-    q3u16 = vmull_u8(d14u8, d0u8);
-    q4u16 = vmull_u8(d15u8, d0u8);
-
-    d11u8 = vext_u8(d11u8, d12u8, 1);
-    d14u8 = vext_u8(d14u8, d15u8, 1);
-
-    q1u16 = vmlal_u8(q1u16, d11u8, d1u8);
-    q3u16 = vmlal_u8(q3u16, d14u8, d1u8);
-
-    d12u8 = vext_u8(d12u8, d13u8, 1);
-    d15u8 = vext_u8(d15u8, d16u8, 1);
-
-    q2u16 = vmlal_u8(q2u16, d12u8, d1u8);
-    q4u16 = vmlal_u8(q4u16, d15u8, d1u8);
-
-    d10u8 = vqrshrn_n_u16(q9u16, 7);
-    d11u8 = vqrshrn_n_u16(q10u16, 7);
-    d12u8 = vqrshrn_n_u16(q11u16, 7);
-    d13u8 = vqrshrn_n_u16(q12u16, 7);
-    d14u8 = vqrshrn_n_u16(q13u16, 7);
-    d15u8 = vqrshrn_n_u16(q14u16, 7);
-    d16u8 = vqrshrn_n_u16(q1u16, 7);
-    d17u8 = vqrshrn_n_u16(q2u16, 7);
-    d18u8 = vqrshrn_n_u16(q3u16, 7);
-    d19u8 = vqrshrn_n_u16(q4u16, 7);
-
-    q5u8 = vcombine_u8(d10u8, d11u8);
-    q6u8 = vcombine_u8(d12u8, d13u8);
-    q7u8 = vcombine_u8(d14u8, d15u8);
-    q8u8 = vcombine_u8(d16u8, d17u8);
-    q9u8 = vcombine_u8(d18u8, d19u8);
-
-    vst1q_u8((uint8_t *)tmpp, q5u8); tmpp += 16;
-    vst1q_u8((uint8_t *)tmpp, q6u8); tmpp += 16;
-    vst1q_u8((uint8_t *)tmpp, q7u8); tmpp += 16;
-    vst1q_u8((uint8_t *)tmpp, q8u8); tmpp += 16;
-    vst1q_u8((uint8_t *)tmpp, q9u8);
-
-    // secondpass_filter
-    d0u8 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
-    d1u8 = vdup_n_u8(bifilter4_coeff[yoffset][1]);
-
-    tmpp = tmp;
-    q11u8 = vld1q_u8(tmpp);
-    tmpp += 16;
-    for (i = 4; i > 0; i--) {
-        q12u8 = vld1q_u8(tmpp); tmpp += 16;
-        q13u8 = vld1q_u8(tmpp); tmpp += 16;
-        q14u8 = vld1q_u8(tmpp); tmpp += 16;
-        q15u8 = vld1q_u8(tmpp); tmpp += 16;
-
-        q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8);
-        q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8);
-        q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8);
-        q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8);
-        q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8);
-        q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8);
-        q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8);
-        q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8);
-
-        q1u16 = vmlal_u8(q1u16, vget_low_u8(q12u8), d1u8);
-        q2u16 = vmlal_u8(q2u16, vget_high_u8(q12u8), d1u8);
-        q3u16 = vmlal_u8(q3u16, vget_low_u8(q13u8), d1u8);
-        q4u16 = vmlal_u8(q4u16, vget_high_u8(q13u8), d1u8);
-        q5u16 = vmlal_u8(q5u16, vget_low_u8(q14u8), d1u8);
-        q6u16 = vmlal_u8(q6u16, vget_high_u8(q14u8), d1u8);
-        q7u16 = vmlal_u8(q7u16, vget_low_u8(q15u8), d1u8);
-        q8u16 = vmlal_u8(q8u16, vget_high_u8(q15u8), d1u8);
-
-        d2u8 = vqrshrn_n_u16(q1u16, 7);
-        d3u8 = vqrshrn_n_u16(q2u16, 7);
-        d4u8 = vqrshrn_n_u16(q3u16, 7);
-        d5u8 = vqrshrn_n_u16(q4u16, 7);
-        d6u8 = vqrshrn_n_u16(q5u16, 7);
-        d7u8 = vqrshrn_n_u16(q6u16, 7);
-        d8u8 = vqrshrn_n_u16(q7u16, 7);
-        d9u8 = vqrshrn_n_u16(q8u16, 7);
-
-        q1u8 = vcombine_u8(d2u8, d3u8);
-        q2u8 = vcombine_u8(d4u8, d5u8);
-        q3u8 = vcombine_u8(d6u8, d7u8);
-        q4u8 = vcombine_u8(d8u8, d9u8);
-
-        q11u8 = q15u8;
-
-        vst1q_u8((uint8_t *)dst_ptr, q1u8); dst_ptr += dst_pitch;
-        vst1q_u8((uint8_t *)dst_ptr, q2u8); dst_ptr += dst_pitch;
-        vst1q_u8((uint8_t *)dst_ptr, q3u8); dst_ptr += dst_pitch;
-        vst1q_u8((uint8_t *)dst_ptr, q4u8); dst_ptr += dst_pitch;
-    }
-    return;
-}
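All of the deleted predictors above compute the same two-tap bilinear filter: each output pixel is a weighted average of two neighbours with weights from bifilter4_coeff (every row sums to 128), rounded and narrowed by the vqrshrn_n_u16(..., 7) steps. A scalar sketch of one horizontal pass, with a helper name and signature chosen only for illustration:

    /* Weights sum to 128, so (a*w0 + b*w1 + 64) >> 7 stays in [0, 255]
       and matches the NEON rounding narrow by 7. */
    static void bilinear_row(const unsigned char *src, unsigned char *dst,
                             int width, int xoffset) {
        const int w0 = bifilter4_coeff[xoffset][0];
        const int w1 = bifilter4_coeff[xoffset][1];
        int x;
        for (x = 0; x < width; ++x)
            dst[x] = (unsigned char)((src[x] * w0 + src[x + 1] * w1 + 64) >> 7);
    }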
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/copymem_neon.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-void vp8_copy_mem8x4_neon(
-        unsigned char *src,
-        int src_stride,
-        unsigned char *dst,
-        int dst_stride) {
-    uint8x8_t vtmp;
-    int r;
-
-    for (r = 0; r < 4; r++) {
-        vtmp = vld1_u8(src);
-        vst1_u8(dst, vtmp);
-        src += src_stride;
-        dst += dst_stride;
-    }
-}
-
-void vp8_copy_mem8x8_neon(
-        unsigned char *src,
-        int src_stride,
-        unsigned char *dst,
-        int dst_stride) {
-    uint8x8_t vtmp;
-    int r;
-
-    for (r = 0; r < 8; r++) {
-        vtmp = vld1_u8(src);
-        vst1_u8(dst, vtmp);
-        src += src_stride;
-        dst += dst_stride;
-    }
-}
-
-void vp8_copy_mem16x16_neon(
-        unsigned char *src,
-        int src_stride,
-        unsigned char *dst,
-        int dst_stride) {
-    int r;
-    uint8x16_t qtmp;
-
-    for (r = 0; r < 16; r++) {
-        qtmp = vld1q_u8(src);
-        vst1q_u8(dst, qtmp);
-        src += src_stride;
-        dst += dst_stride;
-    }
-}
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-void vp8_dc_only_idct_add_neon(
-        int16_t input_dc,
-        unsigned char *pred_ptr,
-        int pred_stride,
-        unsigned char *dst_ptr,
-        int dst_stride) {
-    int i;
-    uint16_t a1 = ((input_dc + 4) >> 3);
-    uint32x2_t d2u32 = vdup_n_u32(0);
-    uint8x8_t d2u8;
-    uint16x8_t q1u16;
-    uint16x8_t qAdd;
-
-    qAdd = vdupq_n_u16(a1);
-
-    for (i = 0; i < 2; i++) {
-        d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 0);
-        pred_ptr += pred_stride;
-        d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 1);
-        pred_ptr += pred_stride;
-
-        q1u16 = vaddw_u8(qAdd, vreinterpret_u8_u32(d2u32));
-        d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
-
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 0);
-        dst_ptr += dst_stride;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 1);
-        dst_ptr += dst_stride;
-    }
-}
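The DC-only path above reduces to adding one rounded DC term to every predicted pixel, saturating to [0, 255] (the vqmovun_s16 narrow). A scalar equivalent for the 4x4 block, with names chosen for illustration:

    static unsigned char clamp255(int v) {
        return (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    static void dc_only_idct_add(short input_dc, const unsigned char *pred,
                                 int pred_stride, unsigned char *dst,
                                 int dst_stride) {
        const int a1 = (input_dc + 4) >> 3;  /* same rounding as the NEON a1 */
        int r, c;
        for (r = 0; r < 4; ++r) {
            for (c = 0; c < 4; ++c)
                dst[c] = clamp255(pred[c] + a1);
            pred += pred_stride;
            dst += dst_stride;
        }
    }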
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/dequant_idct_neon.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-static const int16_t cospi8sqrt2minus1 = 20091;
-static const int16_t sinpi8sqrt2       = 35468;
-
-void vp8_dequant_idct_add_neon(
-        int16_t *input,
-        int16_t *dq,
-        unsigned char *dst,
-        int stride) {
-    unsigned char *dst0;
-    int32x2_t d14, d15;
-    int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
-    int16x8_t q1, q2, q3, q4, q5, q6;
-    int16x8_t qEmpty = vdupq_n_s16(0);
-    int32x2x2_t d2tmp0, d2tmp1;
-    int16x4x2_t d2tmp2, d2tmp3;
-
-    d14 = d15 = vdup_n_s32(0);
-
-    // load input
-    q3 = vld1q_s16(input);
-    vst1q_s16(input, qEmpty);
-    input += 8;
-    q4 = vld1q_s16(input);
-    vst1q_s16(input, qEmpty);
-
-    // load dq
-    q5 = vld1q_s16(dq);
-    dq += 8;
-    q6 = vld1q_s16(dq);
-
-    // load src from dst
-    dst0 = dst;
-    d14 = vld1_lane_s32((const int32_t *)dst0, d14, 0);
-    dst0 += stride;
-    d14 = vld1_lane_s32((const int32_t *)dst0, d14, 1);
-    dst0 += stride;
-    d15 = vld1_lane_s32((const int32_t *)dst0, d15, 0);
-    dst0 += stride;
-    d15 = vld1_lane_s32((const int32_t *)dst0, d15, 1);
-
-    q1 = vreinterpretq_s16_u16(vmulq_u16(vreinterpretq_u16_s16(q3),
-                                         vreinterpretq_u16_s16(q5)));
-    q2 = vreinterpretq_s16_u16(vmulq_u16(vreinterpretq_u16_s16(q4),
-                                         vreinterpretq_u16_s16(q6)));
-
-    d12 = vqadd_s16(vget_low_s16(q1), vget_low_s16(q2));
-    d13 = vqsub_s16(vget_low_s16(q1), vget_low_s16(q2));
-
-    q2 = vcombine_s16(vget_high_s16(q1), vget_high_s16(q2));
-
-    q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);
-    q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);
-
-    q3 = vshrq_n_s16(q3, 1);
-    q4 = vshrq_n_s16(q4, 1);
-
-    q3 = vqaddq_s16(q3, q2);
-    q4 = vqaddq_s16(q4, q2);
-
-    d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
-    d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));
-
-    d2 = vqadd_s16(d12, d11);
-    d3 = vqadd_s16(d13, d10);
-    d4 = vqsub_s16(d13, d10);
-    d5 = vqsub_s16(d12, d11);
-
-    d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
-    d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
-    d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]),
-                      vreinterpret_s16_s32(d2tmp1.val[0]));
-    d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]),
-                      vreinterpret_s16_s32(d2tmp1.val[1]));
-
-    // loop 2
-    q2 = vcombine_s16(d2tmp2.val[1], d2tmp3.val[1]);
-
-    q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);
-    q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);
-
-    d12 = vqadd_s16(d2tmp2.val[0], d2tmp3.val[0]);
-    d13 = vqsub_s16(d2tmp2.val[0], d2tmp3.val[0]);
-
-    q3 = vshrq_n_s16(q3, 1);
-    q4 = vshrq_n_s16(q4, 1);
-
-    q3 = vqaddq_s16(q3, q2);
-    q4 = vqaddq_s16(q4, q2);
-
-    d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
-    d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));
-
-    d2 = vqadd_s16(d12, d11);
-    d3 = vqadd_s16(d13, d10);
-    d4 = vqsub_s16(d13, d10);
-    d5 = vqsub_s16(d12, d11);
-
-    d2 = vrshr_n_s16(d2, 3);
-    d3 = vrshr_n_s16(d3, 3);
-    d4 = vrshr_n_s16(d4, 3);
-    d5 = vrshr_n_s16(d5, 3);
-
-    d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
-    d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
-    d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]),
-                      vreinterpret_s16_s32(d2tmp1.val[0]));
-    d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]),
-                      vreinterpret_s16_s32(d2tmp1.val[1]));
-
-    q1 = vcombine_s16(d2tmp2.val[0], d2tmp2.val[1]);
-    q2 = vcombine_s16(d2tmp3.val[0], d2tmp3.val[1]);
-
-    q1 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q1),
-                                        vreinterpret_u8_s32(d14)));
-    q2 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2),
-                                        vreinterpret_u8_s32(d15)));
-
-    d14 = vreinterpret_s32_u8(vqmovun_s16(q1));
-    d15 = vreinterpret_s32_u8(vqmovun_s16(q2));
-
-    dst0 = dst;
-    vst1_lane_s32((int32_t *)dst0, d14, 0);
-    dst0 += stride;
-    vst1_lane_s32((int32_t *)dst0, d14, 1);
-    dst0 += stride;
-    vst1_lane_s32((int32_t *)dst0, d15, 0);
-    dst0 += stride;
-    vst1_lane_s32((int32_t *)dst0, d15, 1);
-    return;
-}
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/dequantizeb_neon.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-#include "vp8/common/blockd.h"
-
-void vp8_dequantize_b_neon(BLOCKD *d, short *DQC) {
-    int16x8x2_t qQ, qDQC, qDQ;
-
-    qQ   = vld2q_s16(d->qcoeff);
-    qDQC = vld2q_s16(DQC);
-
-    qDQ.val[0] = vmulq_s16(qQ.val[0], qDQC.val[0]);
-    qDQ.val[1] = vmulq_s16(qQ.val[1], qDQC.val[1]);
-
-    vst2q_s16(d->dqcoeff, qDQ);
-}
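The dequantize kernel above is simply an elementwise product of the 16 quantized coefficients with their dequantization factors (the vld2q_s16/vst2q_s16 pairs only deinterleave and reinterleave the lanes). Scalar form, with the BLOCKD plumbing dropped for illustration:

    static void dequantize_b(const short *qcoeff, const short *dqc,
                             short *dqcoeff) {
        int i;
        for (i = 0; i < 16; ++i)
            dqcoeff[i] = (short)(qcoeff[i] * dqc[i]);
    }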
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-void idct_dequant_0_2x_neon(
-        int16_t *q,
-        int16_t dq,
-        unsigned char *dst,
-        int stride) {
-    unsigned char *dst0;
-    int i, a0, a1;
-    int16x8x2_t q2Add;
-    int32x2_t d2s32 = vdup_n_s32(0), d4s32 = vdup_n_s32(0);
-    uint8x8_t d2u8, d4u8;
-    uint16x8_t q1u16, q2u16;
-
-    a0 = ((q[0] * dq) + 4) >> 3;
-    a1 = ((q[16] * dq) + 4) >> 3;
-    q[0] = q[16] = 0;
-    q2Add.val[0] = vdupq_n_s16((int16_t)a0);
-    q2Add.val[1] = vdupq_n_s16((int16_t)a1);
-
-    for (i = 0; i < 2; i++, dst += 4) {
-        dst0 = dst;
-        d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 0);  // 4 pixels/lane
-        dst0 += stride;
-        d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 1);
-        dst0 += stride;
-        d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 0);
-        dst0 += stride;
-        d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 1);
-
-        q1u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
-                         vreinterpret_u8_s32(d2s32));
-        q2u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
-                         vreinterpret_u8_s32(d4s32));
-
-        d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
-        d4u8 = vqmovun_s16(vreinterpretq_s16_u16(q2u16));
-
-        d2s32 = vreinterpret_s32_u8(d2u8);
-        d4s32 = vreinterpret_s32_u8(d4u8);
-
-        dst0 = dst;
-        vst1_lane_s32((int32_t *)dst0, d2s32, 0);
-        dst0 += stride;
-        vst1_lane_s32((int32_t *)dst0, d2s32, 1);
-        dst0 += stride;
-        vst1_lane_s32((int32_t *)dst0, d4s32, 0);
-        dst0 += stride;
-        vst1_lane_s32((int32_t *)dst0, d4s32, 1);
-    }
-    return;
-}
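
When only the DC coefficient of a block is non-zero, the whole 4x4 IDCT collapses to one rounded constant added to every predicted pixel; the lane loads and stores above just do that for two horizontally adjacent blocks per call (hence the second DC at q[16]). A one-block scalar sketch, illustrative only, where clamp_u8 stands in for the vqmovun_s16 saturation:

    #include <stdint.h>

    static uint8_t clamp_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    static void idct_dequant_0_scalar(int16_t *q, int16_t dq,
                                      uint8_t *dst, int stride) {
        /* dequantized DC term, rounded and scaled exactly as above */
        int a = ((q[0] * dq) + 4) >> 3;
        int r, c;
        q[0] = 0;
        for (r = 0; r < 4; r++)
            for (c = 0; c < 4; c++)
                dst[r * stride + c] = clamp_u8(dst[r * stride + c] + a);
    }
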
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-static const int16_t cospi8sqrt2minus1 = 20091;
-static const int16_t sinpi8sqrt2       = 17734;
-// because the lowest bit in 0x8a8c (35468) is 0, we can pre-shift it to 17734
-
-void idct_dequant_full_2x_neon(
-        int16_t *q,
-        int16_t *dq,
-        unsigned char *dst,
-        int stride) {
-    unsigned char *dst0, *dst1;
-    int32x2_t d28, d29, d30, d31;
-    int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;
-    int16x8_t qEmpty = vdupq_n_s16(0);
-    int32x4x2_t q2tmp0, q2tmp1;
-    int16x8x2_t q2tmp2, q2tmp3;
-    int16x4_t dLow0, dLow1, dHigh0, dHigh1;
-
-    d28 = d29 = d30 = d31 = vdup_n_s32(0);
-
-    // load dq
-    q0 = vld1q_s16(dq);
-    dq += 8;
-    q1 = vld1q_s16(dq);
-
-    // load q
-    q2 = vld1q_s16(q);
-    vst1q_s16(q, qEmpty);
-    q += 8;
-    q3 = vld1q_s16(q);
-    vst1q_s16(q, qEmpty);
-    q += 8;
-    q4 = vld1q_s16(q);
-    vst1q_s16(q, qEmpty);
-    q += 8;
-    q5 = vld1q_s16(q);
-    vst1q_s16(q, qEmpty);
-
-    // load the prediction from dst
-    dst0 = dst;
-    dst1 = dst + 4;
-    d28 = vld1_lane_s32((const int32_t *)dst0, d28, 0);
-    dst0 += stride;
-    d28 = vld1_lane_s32((const int32_t *)dst1, d28, 1);
-    dst1 += stride;
-    d29 = vld1_lane_s32((const int32_t *)dst0, d29, 0);
-    dst0 += stride;
-    d29 = vld1_lane_s32((const int32_t *)dst1, d29, 1);
-    dst1 += stride;
-
-    d30 = vld1_lane_s32((const int32_t *)dst0, d30, 0);
-    dst0 += stride;
-    d30 = vld1_lane_s32((const int32_t *)dst1, d30, 1);
-    dst1 += stride;
-    d31 = vld1_lane_s32((const int32_t *)dst0, d31, 0);
-    d31 = vld1_lane_s32((const int32_t *)dst1, d31, 1);
-
-    q2 = vmulq_s16(q2, q0);
-    q3 = vmulq_s16(q3, q1);
-    q4 = vmulq_s16(q4, q0);
-    q5 = vmulq_s16(q5, q1);
-
-    // vswp: pair the low halves in q2/q3 and the high halves in q4/q5
-    dLow0 = vget_low_s16(q2);
-    dHigh0 = vget_high_s16(q2);
-    dLow1 = vget_low_s16(q4);
-    dHigh1 = vget_high_s16(q4);
-    q2 = vcombine_s16(dLow0, dLow1);
-    q4 = vcombine_s16(dHigh0, dHigh1);
-
-    dLow0 = vget_low_s16(q3);
-    dHigh0 = vget_high_s16(q3);
-    dLow1 = vget_low_s16(q5);
-    dHigh1 = vget_high_s16(q5);
-    q3 = vcombine_s16(dLow0, dLow1);
-    q5 = vcombine_s16(dHigh0, dHigh1);
-
-    q6 = vqdmulhq_n_s16(q4, sinpi8sqrt2);
-    q7 = vqdmulhq_n_s16(q5, sinpi8sqrt2);
-    q8 = vqdmulhq_n_s16(q4, cospi8sqrt2minus1);
-    q9 = vqdmulhq_n_s16(q5, cospi8sqrt2minus1);
-
-    q10 = vqaddq_s16(q2, q3);
-    q11 = vqsubq_s16(q2, q3);
-
-    q8 = vshrq_n_s16(q8, 1);
-    q9 = vshrq_n_s16(q9, 1);
-
-    q4 = vqaddq_s16(q4, q8);
-    q5 = vqaddq_s16(q5, q9);
-
-    q2 = vqsubq_s16(q6, q5);
-    q3 = vqaddq_s16(q7, q4);
-
-    q4 = vqaddq_s16(q10, q3);
-    q5 = vqaddq_s16(q11, q2);
-    q6 = vqsubq_s16(q11, q2);
-    q7 = vqsubq_s16(q10, q3);
-
-    q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
-    q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
-    q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
-                       vreinterpretq_s16_s32(q2tmp1.val[0]));
-    q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
-                       vreinterpretq_s16_s32(q2tmp1.val[1]));
-
-    // second pass, on the transposed data
-    q8  = vqdmulhq_n_s16(q2tmp2.val[1], sinpi8sqrt2);
-    q9  = vqdmulhq_n_s16(q2tmp3.val[1], sinpi8sqrt2);
-    q10 = vqdmulhq_n_s16(q2tmp2.val[1], cospi8sqrt2minus1);
-    q11 = vqdmulhq_n_s16(q2tmp3.val[1], cospi8sqrt2minus1);
-
-    q2 = vqaddq_s16(q2tmp2.val[0], q2tmp3.val[0]);
-    q3 = vqsubq_s16(q2tmp2.val[0], q2tmp3.val[0]);
-
-    q10 = vshrq_n_s16(q10, 1);
-    q11 = vshrq_n_s16(q11, 1);
-
-    q10 = vqaddq_s16(q2tmp2.val[1], q10);
-    q11 = vqaddq_s16(q2tmp3.val[1], q11);
-
-    q8 = vqsubq_s16(q8, q11);
-    q9 = vqaddq_s16(q9, q10);
-
-    q4 = vqaddq_s16(q2, q9);
-    q5 = vqaddq_s16(q3, q8);
-    q6 = vqsubq_s16(q3, q8);
-    q7 = vqsubq_s16(q2, q9);
-
-    q4 = vrshrq_n_s16(q4, 3);
-    q5 = vrshrq_n_s16(q5, 3);
-    q6 = vrshrq_n_s16(q6, 3);
-    q7 = vrshrq_n_s16(q7, 3);
-
-    q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
-    q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
-    q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
-                       vreinterpretq_s16_s32(q2tmp1.val[0]));
-    q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
-                       vreinterpretq_s16_s32(q2tmp1.val[1]));
-
-    q4 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[0]),
-                                          vreinterpret_u8_s32(d28)));
-    q5 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[1]),
-                                          vreinterpret_u8_s32(d29)));
-    q6 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[0]),
-                                          vreinterpret_u8_s32(d30)));
-    q7 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[1]),
-                                          vreinterpret_u8_s32(d31)));
-
-    d28 = vreinterpret_s32_u8(vqmovun_s16(q4));
-    d29 = vreinterpret_s32_u8(vqmovun_s16(q5));
-    d30 = vreinterpret_s32_u8(vqmovun_s16(q6));
-    d31 = vreinterpret_s32_u8(vqmovun_s16(q7));
-
-    dst0 = dst;
-    dst1 = dst + 4;
-    vst1_lane_s32((int32_t *)dst0, d28, 0);
-    dst0 += stride;
-    vst1_lane_s32((int32_t *)dst1, d28, 1);
-    dst1 += stride;
-    vst1_lane_s32((int32_t *)dst0, d29, 0);
-    dst0 += stride;
-    vst1_lane_s32((int32_t *)dst1, d29, 1);
-    dst1 += stride;
-
-    vst1_lane_s32((int32_t *)dst0, d30, 0);
-    dst0 += stride;
-    vst1_lane_s32((int32_t *)dst1, d30, 1);
-    dst1 += stride;
-    vst1_lane_s32((int32_t *)dst0, d31, 0);
-    vst1_lane_s32((int32_t *)dst1, d31, 1);
-    return;
-}
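
The vtrnq_s32/vtrnq_s16 ladders above are in-register 4x4 transposes between the row and column passes, interleaved across the two blocks. The same idiom on a single 4x4 block of int16 (rows 0/1 in *q0, rows 2/3 in *q1), as a standalone illustrative sketch with an invented helper name:

    #include <arm_neon.h>

    static void transpose_4x4_s16(int16x8_t *q0, int16x8_t *q1) {
        int16x4_t d0 = vget_low_s16(*q0);   /* row 0 */
        int16x4_t d1 = vget_high_s16(*q0);  /* row 1 */
        int16x4_t d2 = vget_low_s16(*q1);   /* row 2 */
        int16x4_t d3 = vget_high_s16(*q1);  /* row 3 */

        /* trade 32-bit halves between rows 0/2 and rows 1/3... */
        int32x2x2_t t02 = vtrn_s32(vreinterpret_s32_s16(d0),
                                   vreinterpret_s32_s16(d2));
        int32x2x2_t t13 = vtrn_s32(vreinterpret_s32_s16(d1),
                                   vreinterpret_s32_s16(d3));

        /* ...then trade 16-bit lanes to finish the transpose */
        int16x4x2_t r01 = vtrn_s16(vreinterpret_s16_s32(t02.val[0]),
                                   vreinterpret_s16_s32(t13.val[0]));
        int16x4x2_t r23 = vtrn_s16(vreinterpret_s16_s32(t02.val[1]),
                                   vreinterpret_s16_s32(t13.val[1]));

        *q0 = vcombine_s16(r01.val[0], r01.val[1]);  /* columns 0, 1 */
        *q1 = vcombine_s16(r23.val[0], r23.val[1]);  /* columns 2, 3 */
    }
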
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/iwalsh_neon.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-void vp8_short_inv_walsh4x4_neon(
-        int16_t *input,
-        int16_t *mb_dqcoeff) {
-    int16x8_t q0s16, q1s16, q2s16, q3s16;
-    int16x4_t d4s16, d5s16, d6s16, d7s16;
-    int16x4x2_t v2tmp0, v2tmp1;
-    int32x2x2_t v2tmp2, v2tmp3;
-    int16x8_t qAdd3;
-
-    q0s16 = vld1q_s16(input);
-    q1s16 = vld1q_s16(input + 8);
-
-    // first pass: transform the columns
-    d4s16 = vadd_s16(vget_low_s16(q0s16), vget_high_s16(q1s16));
-    d6s16 = vadd_s16(vget_high_s16(q0s16), vget_low_s16(q1s16));
-    d5s16 = vsub_s16(vget_low_s16(q0s16), vget_high_s16(q1s16));
-    d7s16 = vsub_s16(vget_high_s16(q0s16), vget_low_s16(q1s16));
-
-    q2s16 = vcombine_s16(d4s16, d5s16);
-    q3s16 = vcombine_s16(d6s16, d7s16);
-
-    q0s16 = vaddq_s16(q2s16, q3s16);
-    q1s16 = vsubq_s16(q2s16, q3s16);
-
-    v2tmp2 = vtrn_s32(vreinterpret_s32_s16(vget_low_s16(q0s16)),
-                      vreinterpret_s32_s16(vget_low_s16(q1s16)));
-    v2tmp3 = vtrn_s32(vreinterpret_s32_s16(vget_high_s16(q0s16)),
-                      vreinterpret_s32_s16(vget_high_s16(q1s16)));
-    v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]),
-                      vreinterpret_s16_s32(v2tmp3.val[0]));
-    v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]),
-                      vreinterpret_s16_s32(v2tmp3.val[1]));
-
-    // second pass: transform the rows
-    d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]);
-    d6s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]);
-    d5s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]);
-    d7s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]);
-    q2s16 = vcombine_s16(d4s16, d5s16);
-    q3s16 = vcombine_s16(d6s16, d7s16);
-
-    qAdd3 = vdupq_n_s16(3);
-
-    q0s16 = vaddq_s16(q2s16, q3s16);
-    q1s16 = vsubq_s16(q2s16, q3s16);
-
-    q0s16 = vaddq_s16(q0s16, qAdd3);
-    q1s16 = vaddq_s16(q1s16, qAdd3);
-
-    q0s16 = vshrq_n_s16(q0s16, 3);
-    q1s16 = vshrq_n_s16(q1s16, 3);
-
-    // store
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16),  0);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 0);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16),  0);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 0);
-    mb_dqcoeff += 16;
-
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16),  1);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 1);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16),  1);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 1);
-    mb_dqcoeff += 16;
-
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16),  2);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 2);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16),  2);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 2);
-    mb_dqcoeff += 16;
-
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16),  3);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 3);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16),  3);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 3);
-    mb_dqcoeff += 16;
-    return;
-}
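
A scalar sketch of the same inverse Walsh-Hadamard transform may help when reading the vector code: the first pass works down columns, the second across rows with the +3 bias before the >>3, and each of the 16 results lands 16 coefficients apart, matching the strided lane stores above. Illustrative only:

    #include <stdint.h>

    static void inv_walsh4x4_scalar(const int16_t *input, int16_t *mb_dqcoeff) {
        int16_t t[16];
        int i;
        for (i = 0; i < 4; i++) {                   /* pass 1: columns */
            int a = input[i]     + input[12 + i];
            int b = input[4 + i] + input[8 + i];
            int c = input[4 + i] - input[8 + i];
            int d = input[i]     - input[12 + i];
            t[i]      = (int16_t)(a + b);
            t[4 + i]  = (int16_t)(c + d);
            t[8 + i]  = (int16_t)(a - b);
            t[12 + i] = (int16_t)(d - c);
        }
        for (i = 0; i < 4; i++) {                   /* pass 2: rows */
            int a = t[4 * i]     + t[4 * i + 3];
            int b = t[4 * i + 1] + t[4 * i + 2];
            int c = t[4 * i + 1] - t[4 * i + 2];
            int d = t[4 * i]     - t[4 * i + 3];
            /* each output is the DC of one of the 16 subblocks */
            mb_dqcoeff[(4 * i + 0) * 16] = (int16_t)((a + b + 3) >> 3);
            mb_dqcoeff[(4 * i + 1) * 16] = (int16_t)((c + d + 3) >> 3);
            mb_dqcoeff[(4 * i + 2) * 16] = (int16_t)((a - b + 3) >> 3);
            mb_dqcoeff[(4 * i + 3) * 16] = (int16_t)((d - c + 3) >> 3);
        }
    }
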
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/loopfilter_neon.c
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include "./vpx_config.h"
-
-static INLINE void vp8_loop_filter_neon(
-        uint8x16_t qblimit,  // flimit
-        uint8x16_t qlimit,   // limit
-        uint8x16_t qthresh,  // thresh
-        uint8x16_t q3,       // p3
-        uint8x16_t q4,       // p2
-        uint8x16_t q5,       // p1
-        uint8x16_t q6,       // p0
-        uint8x16_t q7,       // q0
-        uint8x16_t q8,       // q1
-        uint8x16_t q9,       // q2
-        uint8x16_t q10,      // q3
-        uint8x16_t *q5r,     // p1
-        uint8x16_t *q6r,     // p0
-        uint8x16_t *q7r,     // q0
-        uint8x16_t *q8r) {   // q1
-    uint8x16_t q0u8, q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8;
-    int16x8_t q2s16, q11s16;
-    uint16x8_t q4u16;
-    int8x16_t q1s8, q2s8, q10s8, q11s8, q12s8, q13s8;
-    int8x8_t d2s8, d3s8;
-
-    q11u8 = vabdq_u8(q3, q4);
-    q12u8 = vabdq_u8(q4, q5);
-    q13u8 = vabdq_u8(q5, q6);
-    q14u8 = vabdq_u8(q8, q7);
-    q3    = vabdq_u8(q9, q8);
-    q4    = vabdq_u8(q10, q9);
-
-    q11u8 = vmaxq_u8(q11u8, q12u8);
-    q12u8 = vmaxq_u8(q13u8, q14u8);
-    q3    = vmaxq_u8(q3, q4);
-    q15u8 = vmaxq_u8(q11u8, q12u8);
-
-    q9 = vabdq_u8(q6, q7);
-
-    // vp8_hevmask
-    q13u8 = vcgtq_u8(q13u8, qthresh);
-    q14u8 = vcgtq_u8(q14u8, qthresh);
-    q15u8 = vmaxq_u8(q15u8, q3);
-
-    q2u8 = vabdq_u8(q5, q8);
-    q9 = vqaddq_u8(q9, q9);
-
-    q15u8 = vcgeq_u8(qlimit, q15u8);
-
-    // vp8_filter() function
-    // convert to signed
-    q10 = vdupq_n_u8(0x80);
-    q8 = veorq_u8(q8, q10);
-    q7 = veorq_u8(q7, q10);
-    q6 = veorq_u8(q6, q10);
-    q5 = veorq_u8(q5, q10);
-
-    q2u8 = vshrq_n_u8(q2u8, 1);
-    q9 = vqaddq_u8(q9, q2u8);
-
-    q10 = vdupq_n_u8(3);
-
-    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
-                     vget_low_s8(vreinterpretq_s8_u8(q6)));
-    q11s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
-                      vget_high_s8(vreinterpretq_s8_u8(q6)));
-
-    q9 = vcgeq_u8(qblimit, q9);
-
-    q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5),
-                    vreinterpretq_s8_u8(q8));
-
-    q14u8 = vorrq_u8(q13u8, q14u8);
-
-    q4u16 = vmovl_u8(vget_low_u8(q10));
-    q2s16 = vmulq_s16(q2s16, vreinterpretq_s16_u16(q4u16));
-    q11s16 = vmulq_s16(q11s16, vreinterpretq_s16_u16(q4u16));
-
-    q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q14u8);
-    q15u8 = vandq_u8(q15u8, q9);
-
-    q1s8 = vreinterpretq_s8_u8(q1u8);
-    q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8));
-    q11s16 = vaddw_s8(q11s16, vget_high_s8(q1s8));
-
-    q9 = vdupq_n_u8(4);
-    // vp8_filter = clamp(vp8_filter + 3 * (qs0 - ps0))
-    d2s8 = vqmovn_s16(q2s16);
-    d3s8 = vqmovn_s16(q11s16);
-    q1s8 = vcombine_s8(d2s8, d3s8);
-    q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q15u8);
-    q1s8 = vreinterpretq_s8_u8(q1u8);
-
-    q2s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q10));
-    q1s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q9));
-    q2s8 = vshrq_n_s8(q2s8, 3);
-    q1s8 = vshrq_n_s8(q1s8, 3);
-
-    q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q2s8);
-    q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q1s8);
-
-    q1s8 = vrshrq_n_s8(q1s8, 1);
-    q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
-
-    q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8);
-    q12s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q1s8);
-
-    q0u8 = vdupq_n_u8(0x80);
-    *q8r = veorq_u8(vreinterpretq_u8_s8(q12s8), q0u8);
-    *q7r = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
-    *q6r = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
-    *q5r = veorq_u8(vreinterpretq_u8_s8(q13s8), q0u8);
-    return;
-}
-
-void vp8_loop_filter_horizontal_edge_y_neon(
-        unsigned char *src,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh) {
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit  = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-    src -= (pitch << 2);
-
-    q3 = vld1q_u8(src);
-    src += pitch;
-    q4 = vld1q_u8(src);
-    src += pitch;
-    q5 = vld1q_u8(src);
-    src += pitch;
-    q6 = vld1q_u8(src);
-    src += pitch;
-    q7 = vld1q_u8(src);
-    src += pitch;
-    q8 = vld1q_u8(src);
-    src += pitch;
-    q9 = vld1q_u8(src);
-    src += pitch;
-    q10 = vld1q_u8(src);
-
-    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q5, &q6, &q7, &q8);
-
-    src -= (pitch * 5);
-    vst1q_u8(src, q5);
-    src += pitch;
-    vst1q_u8(src, q6);
-    src += pitch;
-    vst1q_u8(src, q7);
-    src += pitch;
-    vst1q_u8(src, q8);
-    return;
-}
-
-void vp8_loop_filter_horizontal_edge_uv_neon(
-        unsigned char *u,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh,
-        unsigned char *v) {
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit  = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    u -= (pitch << 2);
-    v -= (pitch << 2);
-
-    d6  = vld1_u8(u);
-    u += pitch;
-    d7  = vld1_u8(v);
-    v += pitch;
-    d8  = vld1_u8(u);
-    u += pitch;
-    d9  = vld1_u8(v);
-    v += pitch;
-    d10 = vld1_u8(u);
-    u += pitch;
-    d11 = vld1_u8(v);
-    v += pitch;
-    d12 = vld1_u8(u);
-    u += pitch;
-    d13 = vld1_u8(v);
-    v += pitch;
-    d14 = vld1_u8(u);
-    u += pitch;
-    d15 = vld1_u8(v);
-    v += pitch;
-    d16 = vld1_u8(u);
-    u += pitch;
-    d17 = vld1_u8(v);
-    v += pitch;
-    d18 = vld1_u8(u);
-    u += pitch;
-    d19 = vld1_u8(v);
-    v += pitch;
-    d20 = vld1_u8(u);
-    d21 = vld1_u8(v);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q5, &q6, &q7, &q8);
-
-    u -= (pitch * 5);
-    vst1_u8(u, vget_low_u8(q5));
-    u += pitch;
-    vst1_u8(u, vget_low_u8(q6));
-    u += pitch;
-    vst1_u8(u, vget_low_u8(q7));
-    u += pitch;
-    vst1_u8(u, vget_low_u8(q8));
-
-    v -= (pitch * 5);
-    vst1_u8(v, vget_high_u8(q5));
-    v += pitch;
-    vst1_u8(v, vget_high_u8(q6));
-    v += pitch;
-    vst1_u8(v, vget_high_u8(q7));
-    v += pitch;
-    vst1_u8(v, vget_high_u8(q8));
-    return;
-}
-
-static INLINE void write_4x8(unsigned char *dst, int pitch,
-                             const uint8x8x4_t result) {
-#if (__GNUC__ == 4 && (__GNUC_MINOR__ >= 7))
-    vst4_lane_u8(dst, result, 0);
-    dst += pitch;
-    vst4_lane_u8(dst, result, 1);
-    dst += pitch;
-    vst4_lane_u8(dst, result, 2);
-    dst += pitch;
-    vst4_lane_u8(dst, result, 3);
-    dst += pitch;
-    vst4_lane_u8(dst, result, 4);
-    dst += pitch;
-    vst4_lane_u8(dst, result, 5);
-    dst += pitch;
-    vst4_lane_u8(dst, result, 6);
-    dst += pitch;
-    vst4_lane_u8(dst, result, 7);
-#else
-    /*
-     * uint8x8x4_t result
-    00 01 02 03 | 04 05 06 07
-    10 11 12 13 | 14 15 16 17
-    20 21 22 23 | 24 25 26 27
-    30 31 32 33 | 34 35 36 37
-    ---
-    * after vtrn_u16
-    00 01 20 21 | 04 05 24 25
-    02 03 22 23 | 06 07 26 27
-    10 11 30 31 | 14 15 34 35
-    12 13 32 33 | 16 17 36 37
-    ---
-    * after vtrn_u8
-    00 10 20 30 | 04 14 24 34
-    01 11 21 31 | 05 15 25 35
-    02 12 22 32 | 06 16 26 36
-    03 13 23 33 | 07 17 27 37
-    */
-    const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u8(result.val[0]),
-                                          vreinterpret_u16_u8(result.val[2]));
-    const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u8(result.val[1]),
-                                          vreinterpret_u16_u8(result.val[3]));
-    const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
-                                       vreinterpret_u8_u16(r13_u16.val[0]));
-    const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
-                                       vreinterpret_u8_u16(r13_u16.val[1]));
-    const uint32x2_t x_0_4 = vreinterpret_u32_u8(r01_u8.val[0]);
-    const uint32x2_t x_1_5 = vreinterpret_u32_u8(r01_u8.val[1]);
-    const uint32x2_t x_2_6 = vreinterpret_u32_u8(r23_u8.val[0]);
-    const uint32x2_t x_3_7 = vreinterpret_u32_u8(r23_u8.val[1]);
-    vst1_lane_u32((uint32_t *)dst, x_0_4, 0);
-    dst += pitch;
-    vst1_lane_u32((uint32_t *)dst, x_1_5, 0);
-    dst += pitch;
-    vst1_lane_u32((uint32_t *)dst, x_2_6, 0);
-    dst += pitch;
-    vst1_lane_u32((uint32_t *)dst, x_3_7, 0);
-    dst += pitch;
-    vst1_lane_u32((uint32_t *)dst, x_0_4, 1);
-    dst += pitch;
-    vst1_lane_u32((uint32_t *)dst, x_1_5, 1);
-    dst += pitch;
-    vst1_lane_u32((uint32_t *)dst, x_2_6, 1);
-    dst += pitch;
-    vst1_lane_u32((uint32_t *)dst, x_3_7, 1);
-#endif
-}
-
-void vp8_loop_filter_vertical_edge_y_neon(
-        unsigned char *src,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh) {
-    unsigned char *s, *d;
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
-    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
-    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
-    uint8x8x4_t q4ResultH, q4ResultL;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit  = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    s = src - 4;
-    d6  = vld1_u8(s);
-    s += pitch;
-    d8  = vld1_u8(s);
-    s += pitch;
-    d10 = vld1_u8(s);
-    s += pitch;
-    d12 = vld1_u8(s);
-    s += pitch;
-    d14 = vld1_u8(s);
-    s += pitch;
-    d16 = vld1_u8(s);
-    s += pitch;
-    d18 = vld1_u8(s);
-    s += pitch;
-    d20 = vld1_u8(s);
-    s += pitch;
-    d7  = vld1_u8(s);
-    s += pitch;
-    d9  = vld1_u8(s);
-    s += pitch;
-    d11 = vld1_u8(s);
-    s += pitch;
-    d13 = vld1_u8(s);
-    s += pitch;
-    d15 = vld1_u8(s);
-    s += pitch;
-    d17 = vld1_u8(s);
-    s += pitch;
-    d19 = vld1_u8(s);
-    s += pitch;
-    d21 = vld1_u8(s);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
-    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
-    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
-    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
-
-    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
-                       vreinterpretq_u16_u32(q2tmp2.val[0]));
-    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
-                       vreinterpretq_u16_u32(q2tmp3.val[0]));
-    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
-                       vreinterpretq_u16_u32(q2tmp2.val[1]));
-    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
-                       vreinterpretq_u16_u32(q2tmp3.val[1]));
-
-    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
-                       vreinterpretq_u8_u16(q2tmp5.val[0]));
-    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
-                       vreinterpretq_u8_u16(q2tmp5.val[1]));
-    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
-                       vreinterpretq_u8_u16(q2tmp7.val[0]));
-    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
-                       vreinterpretq_u8_u16(q2tmp7.val[1]));
-
-    q3 = q2tmp8.val[0];
-    q4 = q2tmp8.val[1];
-    q5 = q2tmp9.val[0];
-    q6 = q2tmp9.val[1];
-    q7 = q2tmp10.val[0];
-    q8 = q2tmp10.val[1];
-    q9 = q2tmp11.val[0];
-    q10 = q2tmp11.val[1];
-
-    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q5, &q6, &q7, &q8);
-
-    q4ResultL.val[0] = vget_low_u8(q5);   // d10
-    q4ResultL.val[1] = vget_low_u8(q6);   // d12
-    q4ResultL.val[2] = vget_low_u8(q7);   // d14
-    q4ResultL.val[3] = vget_low_u8(q8);   // d16
-    q4ResultH.val[0] = vget_high_u8(q5);  // d11
-    q4ResultH.val[1] = vget_high_u8(q6);  // d13
-    q4ResultH.val[2] = vget_high_u8(q7);  // d15
-    q4ResultH.val[3] = vget_high_u8(q8);  // d17
-
-    d = src - 2;
-    write_4x8(d, pitch, q4ResultL);
-    d += pitch * 8;
-    write_4x8(d, pitch, q4ResultH);
-}
-
-void vp8_loop_filter_vertical_edge_uv_neon(
-        unsigned char *u,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh,
-        unsigned char *v) {
-    unsigned char *us, *ud;
-    unsigned char *vs, *vd;
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
-    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
-    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
-    uint8x8x4_t q4ResultH, q4ResultL;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit  = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    us = u - 4;
-    d6 = vld1_u8(us);
-    us += pitch;
-    d8 = vld1_u8(us);
-    us += pitch;
-    d10 = vld1_u8(us);
-    us += pitch;
-    d12 = vld1_u8(us);
-    us += pitch;
-    d14 = vld1_u8(us);
-    us += pitch;
-    d16 = vld1_u8(us);
-    us += pitch;
-    d18 = vld1_u8(us);
-    us += pitch;
-    d20 = vld1_u8(us);
-
-    vs = v - 4;
-    d7 = vld1_u8(vs);
-    vs += pitch;
-    d9 = vld1_u8(vs);
-    vs += pitch;
-    d11 = vld1_u8(vs);
-    vs += pitch;
-    d13 = vld1_u8(vs);
-    vs += pitch;
-    d15 = vld1_u8(vs);
-    vs += pitch;
-    d17 = vld1_u8(vs);
-    vs += pitch;
-    d19 = vld1_u8(vs);
-    vs += pitch;
-    d21 = vld1_u8(vs);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
-    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
-    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
-    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
-
-    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
-                       vreinterpretq_u16_u32(q2tmp2.val[0]));
-    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
-                       vreinterpretq_u16_u32(q2tmp3.val[0]));
-    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
-                       vreinterpretq_u16_u32(q2tmp2.val[1]));
-    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
-                       vreinterpretq_u16_u32(q2tmp3.val[1]));
-
-    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
-                       vreinterpretq_u8_u16(q2tmp5.val[0]));
-    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
-                       vreinterpretq_u8_u16(q2tmp5.val[1]));
-    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
-                       vreinterpretq_u8_u16(q2tmp7.val[0]));
-    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
-                       vreinterpretq_u8_u16(q2tmp7.val[1]));
-
-    q3 = q2tmp8.val[0];
-    q4 = q2tmp8.val[1];
-    q5 = q2tmp9.val[0];
-    q6 = q2tmp9.val[1];
-    q7 = q2tmp10.val[0];
-    q8 = q2tmp10.val[1];
-    q9 = q2tmp11.val[0];
-    q10 = q2tmp11.val[1];
-
-    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q5, &q6, &q7, &q8);
-
-    q4ResultL.val[0] = vget_low_u8(q5);   // d10
-    q4ResultL.val[1] = vget_low_u8(q6);   // d12
-    q4ResultL.val[2] = vget_low_u8(q7);   // d14
-    q4ResultL.val[3] = vget_low_u8(q8);   // d16
-    ud = u - 2;
-    write_4x8(ud, pitch, q4ResultL);
-
-    q4ResultH.val[0] = vget_high_u8(q5);  // d11
-    q4ResultH.val[1] = vget_high_u8(q6);  // d13
-    q4ResultH.val[2] = vget_high_u8(q7);  // d15
-    q4ResultH.val[3] = vget_high_u8(q8);  // d17
-    vd = v - 2;
-    write_4x8(vd, pitch, q4ResultH);
-}
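
The masked arithmetic in vp8_loop_filter_neon is easier to follow against a per-pixel scalar model. Here ps1/ps0/qs0/qs1 are the 0x80-biased signed pixels and mask/hev are 0 or -1 byte masks, exactly as the vector code represents them; an illustrative sketch, not code from the tree:

    #include <stdint.h>

    static int8_t sclamp8(int v) { return (int8_t)(v < -128 ? -128 : v > 127 ? 127 : v); }

    static void loop_filter_scalar(int8_t mask, int8_t hev,
                                   int8_t *ps1, int8_t *ps0,
                                   int8_t *qs0, int8_t *qs1) {
        int8_t f, f1, f2;

        f = sclamp8(*ps1 - *qs1);            /* outer tap, only where hev */
        f &= hev;
        f = sclamp8(f + 3 * (*qs0 - *ps0));  /* f = clamp(f + 3*(qs0 - ps0)) */
        f &= mask;

        f1 = (int8_t)(sclamp8(f + 4) >> 3);  /* Filter1 */
        f2 = (int8_t)(sclamp8(f + 3) >> 3);  /* Filter2 */
        *qs0 = sclamp8(*qs0 - f1);
        *ps0 = sclamp8(*ps0 + f2);

        f = (int8_t)((f1 + 1) >> 1);         /* vrshrq_n_s8(Filter1, 1) */
        f &= (int8_t)~hev;                   /* outer taps skip hev pixels */
        *qs1 = sclamp8(*qs1 - f);
        *ps1 = sclamp8(*ps1 + f);
    }
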
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include "./vpx_config.h"
-
-static INLINE void vp8_loop_filter_simple_horizontal_edge_neon(
-        unsigned char *s,
-        int p,
-        const unsigned char *blimit) {
-    uint8_t *sp;
-    uint8x16_t qblimit, q0u8;
-    uint8x16_t q5u8, q6u8, q7u8, q8u8, q9u8, q10u8, q14u8, q15u8;
-    int16x8_t q2s16, q3s16, q13s16;
-    int8x8_t d8s8, d9s8;
-    int8x16_t q2s8, q3s8, q4s8, q10s8, q11s8, q14s8;
-
-    qblimit = vdupq_n_u8(*blimit);
-
-    sp = s - (p << 1);
-    q5u8 = vld1q_u8(sp);
-    sp += p;
-    q6u8 = vld1q_u8(sp);
-    sp += p;
-    q7u8 = vld1q_u8(sp);
-    sp += p;
-    q8u8 = vld1q_u8(sp);
-
-    q15u8 = vabdq_u8(q6u8, q7u8);
-    q14u8 = vabdq_u8(q5u8, q8u8);
-
-    q15u8 = vqaddq_u8(q15u8, q15u8);
-    q14u8 = vshrq_n_u8(q14u8, 1);
-    q0u8 = vdupq_n_u8(0x80);
-    q13s16 = vdupq_n_s16(3);
-    q15u8 = vqaddq_u8(q15u8, q14u8);
-
-    q5u8 = veorq_u8(q5u8, q0u8);
-    q6u8 = veorq_u8(q6u8, q0u8);
-    q7u8 = veorq_u8(q7u8, q0u8);
-    q8u8 = veorq_u8(q8u8, q0u8);
-
-    q15u8 = vcgeq_u8(qblimit, q15u8);
-
-    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7u8)),
-                     vget_low_s8(vreinterpretq_s8_u8(q6u8)));
-    q3s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7u8)),
-                     vget_high_s8(vreinterpretq_s8_u8(q6u8)));
-
-    q4s8 = vqsubq_s8(vreinterpretq_s8_u8(q5u8),
-                     vreinterpretq_s8_u8(q8u8));
-
-    q2s16 = vmulq_s16(q2s16, q13s16);
-    q3s16 = vmulq_s16(q3s16, q13s16);
-
-    q10u8 = vdupq_n_u8(3);
-    q9u8 = vdupq_n_u8(4);
-
-    q2s16 = vaddw_s8(q2s16, vget_low_s8(q4s8));
-    q3s16 = vaddw_s8(q3s16, vget_high_s8(q4s8));
-
-    d8s8 = vqmovn_s16(q2s16);
-    d9s8 = vqmovn_s16(q3s16);
-    q4s8 = vcombine_s8(d8s8, d9s8);
-
-    q14s8 = vandq_s8(q4s8, vreinterpretq_s8_u8(q15u8));
-
-    q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q10u8));
-    q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q9u8));
-    q2s8 = vshrq_n_s8(q2s8, 3);
-    q3s8 = vshrq_n_s8(q3s8, 3);
-
-    q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6u8), q2s8);
-    q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7u8), q3s8);
-
-    q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
-    q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
-
-    vst1q_u8(s, q7u8);
-    s -= p;
-    vst1q_u8(s, q6u8);
-    return;
-}
-
-void vp8_loop_filter_bhs_neon(
-        unsigned char *y_ptr,
-        int y_stride,
-        const unsigned char *blimit) {
-    y_ptr += y_stride * 4;
-    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
-    y_ptr += y_stride * 4;
-    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
-    y_ptr += y_stride * 4;
-    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
-    return;
-}
-
-void vp8_loop_filter_mbhs_neon(
-        unsigned char *y_ptr,
-        int y_stride,
-        const unsigned char *blimit) {
-    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
-    return;
-}
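
The "simple" filter touches only p0/q0 and reduces the mask to 2*|p0-q0| + |p1-q1|/2 <= blimit, which is what the vqaddq_u8 / vshrq_n_u8 / vcgeq_u8 chain above computes. A per-pixel scalar model (illustrative; it ignores the u8 saturation of the vector sums):

    #include <stdint.h>
    #include <stdlib.h>

    static int8_t sclamp8(int v) { return (int8_t)(v < -128 ? -128 : v > 127 ? 127 : v); }

    static void simple_filter_scalar(uint8_t blimit,
                                     uint8_t *p1, uint8_t *p0,
                                     uint8_t *q0, uint8_t *q1) {
        int8_t ps1 = (int8_t)(*p1 ^ 0x80), ps0 = (int8_t)(*p0 ^ 0x80);
        int8_t qs0 = (int8_t)(*q0 ^ 0x80), qs1 = (int8_t)(*q1 ^ 0x80);
        int8_t f, f1, f2;

        if (2 * abs(*p0 - *q0) + (abs(*p1 - *q1) >> 1) > blimit)
            return;  /* edge too strong: leave the pixels alone */

        f = sclamp8(ps1 - qs1);
        f = sclamp8(f + 3 * (qs0 - ps0));
        f1 = (int8_t)(sclamp8(f + 4) >> 3);
        f2 = (int8_t)(sclamp8(f + 3) >> 3);
        *q0 = (uint8_t)(sclamp8(qs0 - f1) ^ 0x80);
        *p0 = (uint8_t)(sclamp8(ps0 + f2) ^ 0x80);
    }
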
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include "./vpx_config.h"
-
-#if (__GNUC__ == 4 && (__GNUC_MINOR__ >= 7))
-static INLINE void write_2x8(unsigned char *dst, int pitch,
-                             const uint8x8x2_t result,
-                             const uint8x8x2_t result2) {
-  vst2_lane_u8(dst, result, 0);
-  dst += pitch;
-  vst2_lane_u8(dst, result, 1);
-  dst += pitch;
-  vst2_lane_u8(dst, result, 2);
-  dst += pitch;
-  vst2_lane_u8(dst, result, 3);
-  dst += pitch;
-  vst2_lane_u8(dst, result, 4);
-  dst += pitch;
-  vst2_lane_u8(dst, result, 5);
-  dst += pitch;
-  vst2_lane_u8(dst, result, 6);
-  dst += pitch;
-  vst2_lane_u8(dst, result, 7);
-  dst += pitch;
-
-  vst2_lane_u8(dst, result2, 0);
-  dst += pitch;
-  vst2_lane_u8(dst, result2, 1);
-  dst += pitch;
-  vst2_lane_u8(dst, result2, 2);
-  dst += pitch;
-  vst2_lane_u8(dst, result2, 3);
-  dst += pitch;
-  vst2_lane_u8(dst, result2, 4);
-  dst += pitch;
-  vst2_lane_u8(dst, result2, 5);
-  dst += pitch;
-  vst2_lane_u8(dst, result2, 6);
-  dst += pitch;
-  vst2_lane_u8(dst, result2, 7);
-}
-#else
-static INLINE void write_2x4(unsigned char *dst, int pitch,
-                             const uint8x8x2_t result) {
-    /*
-     * uint8x8x2_t result
-    00 01 02 03 | 04 05 06 07
-    10 11 12 13 | 14 15 16 17
-    ---
-    * after vtrn_u8
-    00 10 02 12 | 04 14 06 16
-    01 11 03 13 | 05 15 07 17
-    */
-    const uint8x8x2_t r01_u8 = vtrn_u8(result.val[0],
-                                       result.val[1]);
-    const uint16x4_t x_0_4 = vreinterpret_u16_u8(r01_u8.val[0]);
-    const uint16x4_t x_1_5 = vreinterpret_u16_u8(r01_u8.val[1]);
-    vst1_lane_u16((uint16_t *)dst, x_0_4, 0);
-    dst += pitch;
-    vst1_lane_u16((uint16_t *)dst, x_1_5, 0);
-    dst += pitch;
-    vst1_lane_u16((uint16_t *)dst, x_0_4, 1);
-    dst += pitch;
-    vst1_lane_u16((uint16_t *)dst, x_1_5, 1);
-    dst += pitch;
-    vst1_lane_u16((uint16_t *)dst, x_0_4, 2);
-    dst += pitch;
-    vst1_lane_u16((uint16_t *)dst, x_1_5, 2);
-    dst += pitch;
-    vst1_lane_u16((uint16_t *)dst, x_0_4, 3);
-    dst += pitch;
-    vst1_lane_u16((uint16_t *)dst, x_1_5, 3);
-}
-
-static INLINE void write_2x8(unsigned char *dst, int pitch,
-                             const uint8x8x2_t result,
-                             const uint8x8x2_t result2) {
-  write_2x4(dst, pitch, result);
-  dst += pitch * 8;
-  write_2x4(dst, pitch, result2);
-}
-#endif
-
-
-#if (__GNUC__ == 4 && (__GNUC_MINOR__ >= 7))
-static INLINE
-uint8x8x4_t read_4x8(unsigned char *src, int pitch, uint8x8x4_t x) {
-    x = vld4_lane_u8(src, x, 0);
-    src += pitch;
-    x = vld4_lane_u8(src, x, 1);
-    src += pitch;
-    x = vld4_lane_u8(src, x, 2);
-    src += pitch;
-    x = vld4_lane_u8(src, x, 3);
-    src += pitch;
-    x = vld4_lane_u8(src, x, 4);
-    src += pitch;
-    x = vld4_lane_u8(src, x, 5);
-    src += pitch;
-    x = vld4_lane_u8(src, x, 6);
-    src += pitch;
-    x = vld4_lane_u8(src, x, 7);
-    return x;
-}
-#else
-static INLINE
-uint8x8x4_t read_4x8(unsigned char *src, int pitch, uint8x8x4_t x) {
-    const uint8x8_t a = vld1_u8(src);
-    const uint8x8_t b = vld1_u8(src + pitch * 1);
-    const uint8x8_t c = vld1_u8(src + pitch * 2);
-    const uint8x8_t d = vld1_u8(src + pitch * 3);
-    const uint8x8_t e = vld1_u8(src + pitch * 4);
-    const uint8x8_t f = vld1_u8(src + pitch * 5);
-    const uint8x8_t g = vld1_u8(src + pitch * 6);
-    const uint8x8_t h = vld1_u8(src + pitch * 7);
-    const uint32x2x2_t r04_u32 = vtrn_u32(vreinterpret_u32_u8(a),
-                                          vreinterpret_u32_u8(e));
-    const uint32x2x2_t r15_u32 = vtrn_u32(vreinterpret_u32_u8(b),
-                                          vreinterpret_u32_u8(f));
-    const uint32x2x2_t r26_u32 = vtrn_u32(vreinterpret_u32_u8(c),
-                                          vreinterpret_u32_u8(g));
-    const uint32x2x2_t r37_u32 = vtrn_u32(vreinterpret_u32_u8(d),
-                                          vreinterpret_u32_u8(h));
-    const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u32(r04_u32.val[0]),
-                                          vreinterpret_u16_u32(r26_u32.val[0]));
-    const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u32(r15_u32.val[0]),
-                                          vreinterpret_u16_u32(r37_u32.val[0]));
-    const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
-                                       vreinterpret_u8_u16(r13_u16.val[0]));
-    const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
-                                       vreinterpret_u8_u16(r13_u16.val[1]));
-    /*
-     * after vtrn_u32
-    00 01 02 03 | 40 41 42 43
-    10 11 12 13 | 50 51 52 53
-    20 21 22 23 | 60 61 62 63
-    30 31 32 33 | 70 71 72 73
-    ---
-    * after vtrn_u16
-    00 01 20 21 | 40 41 60 61
-    02 03 22 23 | 42 43 62 63
-    10 11 30 31 | 50 51 70 71
-    12 13 32 33 | 52 53 72 73
-
-    00 01 20 21 | 40 41 60 61
-    10 11 30 31 | 50 51 70 71
-    02 03 22 23 | 42 43 62 63
-    12 13 32 33 | 52 53 72 73
-    ---
-    * after vtrn_u8
-    00 10 20 30 | 40 50 60 70
-    01 11 21 31 | 41 51 61 71
-    02 12 22 32 | 42 52 62 72
-    03 13 23 33 | 43 53 63 73
-    */
-    x.val[0] = r01_u8.val[0];
-    x.val[1] = r01_u8.val[1];
-    x.val[2] = r23_u8.val[0];
-    x.val[3] = r23_u8.val[1];
-
-    return x;
-}
-#endif
-
-static INLINE void vp8_loop_filter_simple_vertical_edge_neon(
-        unsigned char *s,
-        int p,
-        const unsigned char *blimit) {
-    unsigned char *src1;
-    uint8x16_t qblimit, q0u8;
-    uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q11u8, q12u8, q14u8, q15u8;
-    int16x8_t q2s16, q13s16, q11s16;
-    int8x8_t d28s8, d29s8;
-    int8x16_t q2s8, q3s8, q10s8, q11s8, q14s8;
-    uint8x8x4_t d0u8x4;  // d6, d7, d8, d9
-    uint8x8x4_t d1u8x4;  // d10, d11, d12, d13
-    uint8x8x2_t d2u8x2;  // d12, d13
-    uint8x8x2_t d3u8x2;  // d14, d15
-
-    qblimit = vdupq_n_u8(*blimit);
-
-    src1 = s - 2;
-    d0u8x4 = read_4x8(src1, p, d0u8x4);
-    src1 += p * 8;
-    d1u8x4 = read_4x8(src1, p, d1u8x4);
-
-    q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]);  // d6 d10
-    q4u8 = vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]);  // d8 d12
-    q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]);  // d7 d11
-    q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]);  // d9 d13
-
-    q15u8 = vabdq_u8(q5u8, q4u8);
-    q14u8 = vabdq_u8(q3u8, q6u8);
-
-    q15u8 = vqaddq_u8(q15u8, q15u8);
-    q14u8 = vshrq_n_u8(q14u8, 1);
-    q0u8 = vdupq_n_u8(0x80);
-    q11s16 = vdupq_n_s16(3);
-    q15u8 = vqaddq_u8(q15u8, q14u8);
-
-    q3u8 = veorq_u8(q3u8, q0u8);
-    q4u8 = veorq_u8(q4u8, q0u8);
-    q5u8 = veorq_u8(q5u8, q0u8);
-    q6u8 = veorq_u8(q6u8, q0u8);
-
-    q15u8 = vcgeq_u8(qblimit, q15u8);
-
-    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q4u8)),
-                     vget_low_s8(vreinterpretq_s8_u8(q5u8)));
-    q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q4u8)),
-                      vget_high_s8(vreinterpretq_s8_u8(q5u8)));
-
-    q14s8 = vqsubq_s8(vreinterpretq_s8_u8(q3u8),
-                      vreinterpretq_s8_u8(q6u8));
-
-    q2s16 = vmulq_s16(q2s16, q11s16);
-    q13s16 = vmulq_s16(q13s16, q11s16);
-
-    q11u8 = vdupq_n_u8(3);
-    q12u8 = vdupq_n_u8(4);
-
-    q2s16 = vaddw_s8(q2s16, vget_low_s8(q14s8));
-    q13s16 = vaddw_s8(q13s16, vget_high_s8(q14s8));
-
-    d28s8 = vqmovn_s16(q2s16);
-    d29s8 = vqmovn_s16(q13s16);
-    q14s8 = vcombine_s8(d28s8, d29s8);
-
-    q14s8 = vandq_s8(q14s8, vreinterpretq_s8_u8(q15u8));
-
-    q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q11u8));
-    q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q12u8));
-    q2s8 = vshrq_n_s8(q2s8, 3);
-    q14s8 = vshrq_n_s8(q3s8, 3);
-
-    q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q5u8), q2s8);
-    q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q4u8), q14s8);
-
-    q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
-    q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
-
-    d2u8x2.val[0] = vget_low_u8(q6u8);   // d12
-    d2u8x2.val[1] = vget_low_u8(q7u8);   // d14
-    d3u8x2.val[0] = vget_high_u8(q6u8);  // d13
-    d3u8x2.val[1] = vget_high_u8(q7u8);  // d15
-
-    src1 = s - 1;
-    write_2x8(src1, p, d2u8x2, d3u8x2);
-}
-
-void vp8_loop_filter_bvs_neon(
-        unsigned char *y_ptr,
-        int y_stride,
-        const unsigned char *blimit) {
-    y_ptr += 4;
-    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
-    y_ptr += 4;
-    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
-    y_ptr += 4;
-    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
-    return;
-}
-
-void vp8_loop_filter_mbvs_neon(
-        unsigned char *y_ptr,
-        int y_stride,
-        const unsigned char *blimit) {
-    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
-    return;
-}
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/mbloopfilter_neon.c
+++ /dev/null
@@ -1,625 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include "./vpx_config.h"
-
-static INLINE void vp8_mbloop_filter_neon(
-        uint8x16_t qblimit,  // mblimit
-        uint8x16_t qlimit,   // limit
-        uint8x16_t qthresh,  // thresh
-        uint8x16_t q3,       // p3
-        uint8x16_t q4,       // p2
-        uint8x16_t q5,       // p1
-        uint8x16_t q6,       // p0
-        uint8x16_t q7,       // q0
-        uint8x16_t q8,       // q1
-        uint8x16_t q9,       // q2
-        uint8x16_t q10,      // q3
-        uint8x16_t *q4r,     // p2
-        uint8x16_t *q5r,     // p1
-        uint8x16_t *q6r,     // p0
-        uint8x16_t *q7r,     // q0
-        uint8x16_t *q8r,     // q1
-        uint8x16_t *q9r) {   // q2
-    uint8x16_t q0u8, q1u8, q11u8, q12u8, q13u8, q14u8, q15u8;
-    int16x8_t q0s16, q2s16, q11s16, q12s16, q13s16, q14s16, q15s16;
-    int8x16_t q1s8, q6s8, q7s8, q2s8, q11s8, q13s8;
-    uint16x8_t q0u16, q11u16, q12u16, q13u16, q14u16, q15u16;
-    int8x16_t q0s8, q12s8, q14s8, q15s8;
-    int8x8_t d0, d1, d2, d3, d4, d5, d24, d25, d28, d29;
-
-    q11u8 = vabdq_u8(q3, q4);
-    q12u8 = vabdq_u8(q4, q5);
-    q13u8 = vabdq_u8(q5, q6);
-    q14u8 = vabdq_u8(q8, q7);
-    q1u8  = vabdq_u8(q9, q8);
-    q0u8  = vabdq_u8(q10, q9);
-
-    q11u8 = vmaxq_u8(q11u8, q12u8);
-    q12u8 = vmaxq_u8(q13u8, q14u8);
-    q1u8  = vmaxq_u8(q1u8, q0u8);
-    q15u8 = vmaxq_u8(q11u8, q12u8);
-
-    q12u8 = vabdq_u8(q6, q7);
-
-    // vp8_hevmask
-    q13u8 = vcgtq_u8(q13u8, qthresh);
-    q14u8 = vcgtq_u8(q14u8, qthresh);
-    q15u8 = vmaxq_u8(q15u8, q1u8);
-
-    q15u8 = vcgeq_u8(qlimit, q15u8);
-
-    q1u8 = vabdq_u8(q5, q8);
-    q12u8 = vqaddq_u8(q12u8, q12u8);
-
-    // vp8_filter() function
-    // convert to signed
-    q0u8 = vdupq_n_u8(0x80);
-    q9 = veorq_u8(q9, q0u8);
-    q8 = veorq_u8(q8, q0u8);
-    q7 = veorq_u8(q7, q0u8);
-    q6 = veorq_u8(q6, q0u8);
-    q5 = veorq_u8(q5, q0u8);
-    q4 = veorq_u8(q4, q0u8);
-
-    q1u8 = vshrq_n_u8(q1u8, 1);
-    q12u8 = vqaddq_u8(q12u8, q1u8);
-
-    q14u8 = vorrq_u8(q13u8, q14u8);
-    q12u8 = vcgeq_u8(qblimit, q12u8);
-
-    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
-                     vget_low_s8(vreinterpretq_s8_u8(q6)));
-    q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
-                      vget_high_s8(vreinterpretq_s8_u8(q6)));
-
-    q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5),
-                     vreinterpretq_s8_u8(q8));
-
-    q11s16 = vdupq_n_s16(3);
-    q2s16  = vmulq_s16(q2s16, q11s16);
-    q13s16 = vmulq_s16(q13s16, q11s16);
-
-    q15u8 = vandq_u8(q15u8, q12u8);
-
-    q2s16  = vaddw_s8(q2s16, vget_low_s8(q1s8));
-    q13s16 = vaddw_s8(q13s16, vget_high_s8(q1s8));
-
-    q12u8 = vdupq_n_u8(3);
-    q11u8 = vdupq_n_u8(4);
-    // vp8_filter = clamp(vp8_filter + 3 * (qs0 - ps0))
-    d2 = vqmovn_s16(q2s16);
-    d3 = vqmovn_s16(q13s16);
-    q1s8 = vcombine_s8(d2, d3);
-    q1s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q15u8));
-    q13s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
-
-    q2s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q11u8));
-    q13s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q12u8));
-    q2s8 = vshrq_n_s8(q2s8, 3);
-    q13s8 = vshrq_n_s8(q13s8, 3);
-
-    q7s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q2s8);
-    q6s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q13s8);
-
-    q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
-
-    q0u16 = q11u16 = q12u16 = q13u16 = q14u16 = q15u16 = vdupq_n_u16(63);
-    d5 = vdup_n_s8(9);
-    d4 = vdup_n_s8(18);
-
-    q0s16  = vmlal_s8(vreinterpretq_s16_u16(q0u16),  vget_low_s8(q1s8),  d5);
-    q11s16 = vmlal_s8(vreinterpretq_s16_u16(q11u16), vget_high_s8(q1s8), d5);
-    d5 = vdup_n_s8(27);
-    q12s16 = vmlal_s8(vreinterpretq_s16_u16(q12u16), vget_low_s8(q1s8),  d4);
-    q13s16 = vmlal_s8(vreinterpretq_s16_u16(q13u16), vget_high_s8(q1s8), d4);
-    q14s16 = vmlal_s8(vreinterpretq_s16_u16(q14u16), vget_low_s8(q1s8),  d5);
-    q15s16 = vmlal_s8(vreinterpretq_s16_u16(q15u16), vget_high_s8(q1s8), d5);
-
-    d0  = vqshrn_n_s16(q0s16 , 7);
-    d1  = vqshrn_n_s16(q11s16, 7);
-    d24 = vqshrn_n_s16(q12s16, 7);
-    d25 = vqshrn_n_s16(q13s16, 7);
-    d28 = vqshrn_n_s16(q14s16, 7);
-    d29 = vqshrn_n_s16(q15s16, 7);
-
-    q0s8  = vcombine_s8(d0, d1);
-    q12s8 = vcombine_s8(d24, d25);
-    q14s8 = vcombine_s8(d28, d29);
-
-    q11s8 = vqsubq_s8(vreinterpretq_s8_u8(q9), q0s8);
-    q0s8  = vqaddq_s8(vreinterpretq_s8_u8(q4), q0s8);
-    q13s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q12s8);
-    q12s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q12s8);
-    q15s8 = vqsubq_s8(q7s8, q14s8);
-    q14s8 = vqaddq_s8(q6s8, q14s8);
-
-    q1u8 = vdupq_n_u8(0x80);
-    *q9r = veorq_u8(vreinterpretq_u8_s8(q11s8), q1u8);
-    *q8r = veorq_u8(vreinterpretq_u8_s8(q13s8), q1u8);
-    *q7r = veorq_u8(vreinterpretq_u8_s8(q15s8), q1u8);
-    *q6r = veorq_u8(vreinterpretq_u8_s8(q14s8), q1u8);
-    *q5r = veorq_u8(vreinterpretq_u8_s8(q12s8), q1u8);
-    *q4r = veorq_u8(vreinterpretq_u8_s8(q0s8), q1u8);
-    return;
-}
-
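
The wide ("macroblock") variant sends high-edge-variance pixels through the narrow Filter1/Filter2 update and all other pixels through a 27/18/9 taper, roughly 3/7, 2/7 and 1/7 of the difference across the edge; the +63 bias and >>7 correspond to the vmlal_s8 / vqshrn_n_s16 pairs above. A per-pixel scalar model, illustrative only, using the same 0x80-biased pixels and 0/-1 masks as the sketch after loopfilter_neon.c:

    #include <stdint.h>

    static int8_t sclamp8(int v) { return (int8_t)(v < -128 ? -128 : v > 127 ? 127 : v); }

    static void mbloop_filter_scalar(int8_t mask, int8_t hev,
                                     int8_t *ps2, int8_t *ps1, int8_t *ps0,
                                     int8_t *qs0, int8_t *qs1, int8_t *qs2) {
        int8_t f, f1, f2, u;

        f = sclamp8(*ps1 - *qs1);
        f = sclamp8(f + 3 * (*qs0 - *ps0));
        f &= mask;

        f2 = f & hev;                        /* narrow filter where hev */
        f1 = (int8_t)(sclamp8(f2 + 4) >> 3);
        f2 = (int8_t)(sclamp8(f2 + 3) >> 3);
        *qs0 = sclamp8(*qs0 - f1);
        *ps0 = sclamp8(*ps0 + f2);

        f &= (int8_t)~hev;                   /* wide taper elsewhere */
        u = sclamp8((63 + f * 27) >> 7);     /* ~3/7 of f */
        *qs0 = sclamp8(*qs0 - u);
        *ps0 = sclamp8(*ps0 + u);
        u = sclamp8((63 + f * 18) >> 7);     /* ~2/7 of f */
        *qs1 = sclamp8(*qs1 - u);
        *ps1 = sclamp8(*ps1 + u);
        u = sclamp8((63 + f * 9) >> 7);      /* ~1/7 of f */
        *qs2 = sclamp8(*qs2 - u);
        *ps2 = sclamp8(*ps2 + u);
    }
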
-void vp8_mbloop_filter_horizontal_edge_y_neon(
-        unsigned char *src,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh) {
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    src -= (pitch << 2);
-
-    q3 = vld1q_u8(src);
-    src += pitch;
-    q4 = vld1q_u8(src);
-    src += pitch;
-    q5 = vld1q_u8(src);
-    src += pitch;
-    q6 = vld1q_u8(src);
-    src += pitch;
-    q7 = vld1q_u8(src);
-    src += pitch;
-    q8 = vld1q_u8(src);
-    src += pitch;
-    q9 = vld1q_u8(src);
-    src += pitch;
-    q10 = vld1q_u8(src);
-
-    vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q4, &q5, &q6, &q7, &q8, &q9);
-
-    src -= (pitch * 6);
-    vst1q_u8(src, q4);
-    src += pitch;
-    vst1q_u8(src, q5);
-    src += pitch;
-    vst1q_u8(src, q6);
-    src += pitch;
-    vst1q_u8(src, q7);
-    src += pitch;
-    vst1q_u8(src, q8);
-    src += pitch;
-    vst1q_u8(src, q9);
-    return;
-}
-
-void vp8_mbloop_filter_horizontal_edge_uv_neon(
-        unsigned char *u,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh,
-        unsigned char *v) {
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    u -= (pitch << 2);
-    v -= (pitch << 2);
-
-    d6 = vld1_u8(u);
-    u += pitch;
-    d7 = vld1_u8(v);
-    v += pitch;
-    d8 = vld1_u8(u);
-    u += pitch;
-    d9 = vld1_u8(v);
-    v += pitch;
-    d10 = vld1_u8(u);
-    u += pitch;
-    d11 = vld1_u8(v);
-    v += pitch;
-    d12 = vld1_u8(u);
-    u += pitch;
-    d13 = vld1_u8(v);
-    v += pitch;
-    d14 = vld1_u8(u);
-    u += pitch;
-    d15 = vld1_u8(v);
-    v += pitch;
-    d16 = vld1_u8(u);
-    u += pitch;
-    d17 = vld1_u8(v);
-    v += pitch;
-    d18 = vld1_u8(u);
-    u += pitch;
-    d19 = vld1_u8(v);
-    v += pitch;
-    d20 = vld1_u8(u);
-    d21 = vld1_u8(v);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q4, &q5, &q6, &q7, &q8, &q9);
-
-    u -= (pitch * 6);
-    v -= (pitch * 6);
-    vst1_u8(u, vget_low_u8(q4));
-    u += pitch;
-    vst1_u8(v, vget_high_u8(q4));
-    v += pitch;
-    vst1_u8(u, vget_low_u8(q5));
-    u += pitch;
-    vst1_u8(v, vget_high_u8(q5));
-    v += pitch;
-    vst1_u8(u, vget_low_u8(q6));
-    u += pitch;
-    vst1_u8(v, vget_high_u8(q6));
-    v += pitch;
-    vst1_u8(u, vget_low_u8(q7));
-    u += pitch;
-    vst1_u8(v, vget_high_u8(q7));
-    v += pitch;
-    vst1_u8(u, vget_low_u8(q8));
-    u += pitch;
-    vst1_u8(v, vget_high_u8(q8));
-    v += pitch;
-    vst1_u8(u, vget_low_u8(q9));
-    vst1_u8(v, vget_high_u8(q9));
-    return;
-}
-
-void vp8_mbloop_filter_vertical_edge_y_neon(
-        unsigned char *src,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh) {
-    unsigned char *s1, *s2;
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
-    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
-    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    s1 = src - 4;
-    s2 = s1 + 8 * pitch;
-    d6  = vld1_u8(s1);
-    s1 += pitch;
-    d7  = vld1_u8(s2);
-    s2 += pitch;
-    d8  = vld1_u8(s1);
-    s1 += pitch;
-    d9  = vld1_u8(s2);
-    s2 += pitch;
-    d10 = vld1_u8(s1);
-    s1 += pitch;
-    d11 = vld1_u8(s2);
-    s2 += pitch;
-    d12 = vld1_u8(s1);
-    s1 += pitch;
-    d13 = vld1_u8(s2);
-    s2 += pitch;
-    d14 = vld1_u8(s1);
-    s1 += pitch;
-    d15 = vld1_u8(s2);
-    s2 += pitch;
-    d16 = vld1_u8(s1);
-    s1 += pitch;
-    d17 = vld1_u8(s2);
-    s2 += pitch;
-    d18 = vld1_u8(s1);
-    s1 += pitch;
-    d19 = vld1_u8(s2);
-    s2 += pitch;
-    d20 = vld1_u8(s1);
-    d21 = vld1_u8(s2);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
-    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
-    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
-    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
-
-    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
-                       vreinterpretq_u16_u32(q2tmp2.val[0]));
-    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
-                       vreinterpretq_u16_u32(q2tmp3.val[0]));
-    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
-                       vreinterpretq_u16_u32(q2tmp2.val[1]));
-    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
-                       vreinterpretq_u16_u32(q2tmp3.val[1]));
-
-    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
-                       vreinterpretq_u8_u16(q2tmp5.val[0]));
-    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
-                       vreinterpretq_u8_u16(q2tmp5.val[1]));
-    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
-                       vreinterpretq_u8_u16(q2tmp7.val[0]));
-    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
-                       vreinterpretq_u8_u16(q2tmp7.val[1]));
-
-    q3 = q2tmp8.val[0];
-    q4 = q2tmp8.val[1];
-    q5 = q2tmp9.val[0];
-    q6 = q2tmp9.val[1];
-    q7 = q2tmp10.val[0];
-    q8 = q2tmp10.val[1];
-    q9 = q2tmp11.val[0];
-    q10 = q2tmp11.val[1];
-
-    vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q4, &q5, &q6, &q7, &q8, &q9);
-
-    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
-    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
-    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
-    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
-
-    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
-                       vreinterpretq_u16_u32(q2tmp2.val[0]));
-    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
-                       vreinterpretq_u16_u32(q2tmp3.val[0]));
-    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
-                       vreinterpretq_u16_u32(q2tmp2.val[1]));
-    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
-                       vreinterpretq_u16_u32(q2tmp3.val[1]));
-
-    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
-                       vreinterpretq_u8_u16(q2tmp5.val[0]));
-    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
-                       vreinterpretq_u8_u16(q2tmp5.val[1]));
-    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
-                       vreinterpretq_u8_u16(q2tmp7.val[0]));
-    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
-                       vreinterpretq_u8_u16(q2tmp7.val[1]));
-
-    q3 = q2tmp8.val[0];
-    q4 = q2tmp8.val[1];
-    q5 = q2tmp9.val[0];
-    q6 = q2tmp9.val[1];
-    q7 = q2tmp10.val[0];
-    q8 = q2tmp10.val[1];
-    q9 = q2tmp11.val[0];
-    q10 = q2tmp11.val[1];
-
-    s1 -= 7 * pitch;
-    s2 -= 7 * pitch;
-
-    vst1_u8(s1, vget_low_u8(q3));
-    s1 += pitch;
-    vst1_u8(s2, vget_high_u8(q3));
-    s2 += pitch;
-    vst1_u8(s1, vget_low_u8(q4));
-    s1 += pitch;
-    vst1_u8(s2, vget_high_u8(q4));
-    s2 += pitch;
-    vst1_u8(s1, vget_low_u8(q5));
-    s1 += pitch;
-    vst1_u8(s2, vget_high_u8(q5));
-    s2 += pitch;
-    vst1_u8(s1, vget_low_u8(q6));
-    s1 += pitch;
-    vst1_u8(s2, vget_high_u8(q6));
-    s2 += pitch;
-    vst1_u8(s1, vget_low_u8(q7));
-    s1 += pitch;
-    vst1_u8(s2, vget_high_u8(q7));
-    s2 += pitch;
-    vst1_u8(s1, vget_low_u8(q8));
-    s1 += pitch;
-    vst1_u8(s2, vget_high_u8(q8));
-    s2 += pitch;
-    vst1_u8(s1, vget_low_u8(q9));
-    s1 += pitch;
-    vst1_u8(s2, vget_high_u8(q9));
-    s2 += pitch;
-    vst1_u8(s1, vget_low_u8(q10));
-    vst1_u8(s2, vget_high_u8(q10));
-    return;
-}
-
-void vp8_mbloop_filter_vertical_edge_uv_neon(
-        unsigned char *u,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh,
-        unsigned char *v) {
-    unsigned char *us, *ud;
-    unsigned char *vs, *vd;
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
-    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
-    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    us = u - 4;
-    vs = v - 4;
-    d6 = vld1_u8(us);
-    us += pitch;
-    d7 = vld1_u8(vs);
-    vs += pitch;
-    d8 = vld1_u8(us);
-    us += pitch;
-    d9 = vld1_u8(vs);
-    vs += pitch;
-    d10 = vld1_u8(us);
-    us += pitch;
-    d11 = vld1_u8(vs);
-    vs += pitch;
-    d12 = vld1_u8(us);
-    us += pitch;
-    d13 = vld1_u8(vs);
-    vs += pitch;
-    d14 = vld1_u8(us);
-    us += pitch;
-    d15 = vld1_u8(vs);
-    vs += pitch;
-    d16 = vld1_u8(us);
-    us += pitch;
-    d17 = vld1_u8(vs);
-    vs += pitch;
-    d18 = vld1_u8(us);
-    us += pitch;
-    d19 = vld1_u8(vs);
-    vs += pitch;
-    d20 = vld1_u8(us);
-    d21 = vld1_u8(vs);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
-    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
-    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
-    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
-
-    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
-                       vreinterpretq_u16_u32(q2tmp2.val[0]));
-    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
-                       vreinterpretq_u16_u32(q2tmp3.val[0]));
-    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
-                       vreinterpretq_u16_u32(q2tmp2.val[1]));
-    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
-                       vreinterpretq_u16_u32(q2tmp3.val[1]));
-
-    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
-                       vreinterpretq_u8_u16(q2tmp5.val[0]));
-    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
-                       vreinterpretq_u8_u16(q2tmp5.val[1]));
-    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
-                       vreinterpretq_u8_u16(q2tmp7.val[0]));
-    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
-                       vreinterpretq_u8_u16(q2tmp7.val[1]));
-
-    q3 = q2tmp8.val[0];
-    q4 = q2tmp8.val[1];
-    q5 = q2tmp9.val[0];
-    q6 = q2tmp9.val[1];
-    q7 = q2tmp10.val[0];
-    q8 = q2tmp10.val[1];
-    q9 = q2tmp11.val[0];
-    q10 = q2tmp11.val[1];
-
-    vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q4, &q5, &q6, &q7, &q8, &q9);
-
-    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
-    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
-    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
-    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
-
-    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
-                       vreinterpretq_u16_u32(q2tmp2.val[0]));
-    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
-                       vreinterpretq_u16_u32(q2tmp3.val[0]));
-    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
-                       vreinterpretq_u16_u32(q2tmp2.val[1]));
-    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
-                       vreinterpretq_u16_u32(q2tmp3.val[1]));
-
-    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
-                       vreinterpretq_u8_u16(q2tmp5.val[0]));
-    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
-                       vreinterpretq_u8_u16(q2tmp5.val[1]));
-    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
-                       vreinterpretq_u8_u16(q2tmp7.val[0]));
-    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
-                       vreinterpretq_u8_u16(q2tmp7.val[1]));
-
-    q3 = q2tmp8.val[0];
-    q4 = q2tmp8.val[1];
-    q5 = q2tmp9.val[0];
-    q6 = q2tmp9.val[1];
-    q7 = q2tmp10.val[0];
-    q8 = q2tmp10.val[1];
-    q9 = q2tmp11.val[0];
-    q10 = q2tmp11.val[1];
-
-    ud = u - 4;
-    vst1_u8(ud, vget_low_u8(q3));
-    ud += pitch;
-    vst1_u8(ud, vget_low_u8(q4));
-    ud += pitch;
-    vst1_u8(ud, vget_low_u8(q5));
-    ud += pitch;
-    vst1_u8(ud, vget_low_u8(q6));
-    ud += pitch;
-    vst1_u8(ud, vget_low_u8(q7));
-    ud += pitch;
-    vst1_u8(ud, vget_low_u8(q8));
-    ud += pitch;
-    vst1_u8(ud, vget_low_u8(q9));
-    ud += pitch;
-    vst1_u8(ud, vget_low_u8(q10));
-
-    vd = v - 4;
-    vst1_u8(vd, vget_high_u8(q3));
-    vd += pitch;
-    vst1_u8(vd, vget_high_u8(q4));
-    vd += pitch;
-    vst1_u8(vd, vget_high_u8(q5));
-    vd += pitch;
-    vst1_u8(vd, vget_high_u8(q6));
-    vd += pitch;
-    vst1_u8(vd, vget_high_u8(q7));
-    vd += pitch;
-    vst1_u8(vd, vget_high_u8(q8));
-    vd += pitch;
-    vst1_u8(vd, vget_high_u8(q9));
-    vd += pitch;
-    vst1_u8(vd, vget_high_u8(q10));
-    return;
-}
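Each vertical loop filter above runs the same three-level transpose ladder twice: vtrnq_u32, then vtrnq_u16, then vtrnq_u8 turn the rows loaded from memory into pixel columns for filtering, and a second ladder turns the filtered columns back into rows for the stores. A minimal sketch of the same idiom on a single 8x8 byte block follows; the helper name and in-place array interface are ours, not part of the removed file.

    #include <arm_neon.h>

    /* Sketch: transpose one 8x8 byte block in place with the same vtrn
     * ladder as the removed filters (32-, 16-, then 8-bit transposes).
     * On return, r[j] holds column j of the input block. */
    static void transpose_8x8_u8(uint8x8_t r[8]) {
        /* Swap 32-bit halves between rows four apart. */
        const uint32x2x2_t t0 = vtrn_u32(vreinterpret_u32_u8(r[0]),
                                         vreinterpret_u32_u8(r[4]));
        const uint32x2x2_t t1 = vtrn_u32(vreinterpret_u32_u8(r[1]),
                                         vreinterpret_u32_u8(r[5]));
        const uint32x2x2_t t2 = vtrn_u32(vreinterpret_u32_u8(r[2]),
                                         vreinterpret_u32_u8(r[6]));
        const uint32x2x2_t t3 = vtrn_u32(vreinterpret_u32_u8(r[3]),
                                         vreinterpret_u32_u8(r[7]));
        /* Swap 16-bit lanes between rows two apart. */
        const uint16x4x2_t u0 = vtrn_u16(vreinterpret_u16_u32(t0.val[0]),
                                         vreinterpret_u16_u32(t2.val[0]));
        const uint16x4x2_t u1 = vtrn_u16(vreinterpret_u16_u32(t1.val[0]),
                                         vreinterpret_u16_u32(t3.val[0]));
        const uint16x4x2_t u2 = vtrn_u16(vreinterpret_u16_u32(t0.val[1]),
                                         vreinterpret_u16_u32(t2.val[1]));
        const uint16x4x2_t u3 = vtrn_u16(vreinterpret_u16_u32(t1.val[1]),
                                         vreinterpret_u16_u32(t3.val[1]));
        /* Swap bytes between adjacent rows. */
        const uint8x8x2_t v0 = vtrn_u8(vreinterpret_u8_u16(u0.val[0]),
                                       vreinterpret_u8_u16(u1.val[0]));
        const uint8x8x2_t v1 = vtrn_u8(vreinterpret_u8_u16(u0.val[1]),
                                       vreinterpret_u8_u16(u1.val[1]));
        const uint8x8x2_t v2 = vtrn_u8(vreinterpret_u8_u16(u2.val[0]),
                                       vreinterpret_u8_u16(u3.val[0]));
        const uint8x8x2_t v3 = vtrn_u8(vreinterpret_u8_u16(u2.val[1]),
                                       vreinterpret_u8_u16(u3.val[1]));
        r[0] = v0.val[0]; r[1] = v0.val[1];
        r[2] = v1.val[0]; r[3] = v1.val[1];
        r[4] = v2.val[0]; r[5] = v2.val[1];
        r[6] = v3.val[0]; r[7] = v3.val[1];
    }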
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/reconintra_neon.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-#include "vp8/common/blockd.h"
-
-void vp8_build_intra_predictors_mby_s_neon(MACROBLOCKD *x,
-                                           unsigned char * yabove_row,
-                                           unsigned char * yleft,
-                                           int left_stride,
-                                           unsigned char * ypred_ptr,
-                                           int y_stride) {
-  const int mode = x->mode_info_context->mbmi.mode;
-  int i;
-
-  switch (mode) {
-    case DC_PRED:
-    {
-      int shift = x->up_available + x->left_available;
-      uint8x16_t v_expected_dc = vdupq_n_u8(128);
-
-      if (shift) {
-        unsigned int average = 0;
-        int expected_dc;
-        if (x->up_available) {
-          const uint8x16_t v_above = vld1q_u8(yabove_row);
-          const uint16x8_t a = vpaddlq_u8(v_above);
-          const uint32x4_t b = vpaddlq_u16(a);
-          const uint64x2_t c = vpaddlq_u32(b);
-          const uint32x2_t d = vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)),
-                                        vreinterpret_u32_u64(vget_high_u64(c)));
-          average = vget_lane_u32(d, 0);
-        }
-        if (x->left_available) {
-          for (i = 0; i < 16; ++i) {
-              average += yleft[0];
-              yleft += left_stride;
-          }
-        }
-        shift += 3;
-        expected_dc = (average + (1 << (shift - 1))) >> shift;
-        v_expected_dc = vmovq_n_u8((uint8_t)expected_dc);
-      }
-      for (i = 0; i < 16; ++i) {
-        vst1q_u8(ypred_ptr, v_expected_dc);
-        ypred_ptr += y_stride;
-      }
-    }
-    break;
-    case V_PRED:
-    {
-      const uint8x16_t v_above = vld1q_u8(yabove_row);
-      for (i = 0; i < 16; ++i) {
-        vst1q_u8(ypred_ptr, v_above);
-        ypred_ptr += y_stride;
-      }
-    }
-    break;
-    case H_PRED:
-    {
-      for (i = 0; i < 16; ++i) {
-        const uint8x16_t v_yleft = vmovq_n_u8((uint8_t)yleft[0]);
-        yleft += left_stride;
-        vst1q_u8(ypred_ptr, v_yleft);
-        ypred_ptr += y_stride;
-      }
-    }
-    break;
-    case TM_PRED:
-    {
-      const uint16x8_t v_ytop_left = vmovq_n_u16((int16_t)yabove_row[-1]);
-      const uint8x16_t v_above = vld1q_u8(yabove_row);
-      for (i = 0; i < 16; ++i) {
-        const uint8x8_t v_yleft = vmov_n_u8((int8_t)yleft[0]);
-        const uint16x8_t a_lo = vaddl_u8(vget_low_u8(v_above), v_yleft);
-        const uint16x8_t a_hi = vaddl_u8(vget_high_u8(v_above), v_yleft);
-        const int16x8_t b_lo = vsubq_s16(vreinterpretq_s16_u16(a_lo),
-                                         vreinterpretq_s16_u16(v_ytop_left));
-        const int16x8_t b_hi = vsubq_s16(vreinterpretq_s16_u16(a_hi),
-                                         vreinterpretq_s16_u16(v_ytop_left));
-        const uint8x8_t pred_lo = vqmovun_s16(b_lo);
-        const uint8x8_t pred_hi = vqmovun_s16(b_hi);
-
-        vst1q_u8(ypred_ptr, vcombine_u8(pred_lo, pred_hi));
-        ypred_ptr += y_stride;
-        yleft += left_stride;
-      }
-    }
-    break;
-  }
-}
-
-void vp8_build_intra_predictors_mbuv_s_neon(MACROBLOCKD *x,
-                                            unsigned char * uabove_row,
-                                            unsigned char * vabove_row,
-                                            unsigned char * uleft,
-                                            unsigned char * vleft,
-                                            int left_stride,
-                                            unsigned char * upred_ptr,
-                                            unsigned char * vpred_ptr,
-                                            int pred_stride) {
-  const int mode = x->mode_info_context->mbmi.uv_mode;
-  int i;
-
-  switch (mode) {
-    case DC_PRED:
-    {
-      int shift = x->up_available + x->left_available;
-      uint8x8_t v_expected_udc = vdup_n_u8(128);
-      uint8x8_t v_expected_vdc = vdup_n_u8(128);
-
-      if (shift) {
-        unsigned int average_u = 0;
-        unsigned int average_v = 0;
-        int expected_udc;
-        int expected_vdc;
-        if (x->up_available) {
-          const uint8x8_t v_uabove = vld1_u8(uabove_row);
-          const uint8x8_t v_vabove = vld1_u8(vabove_row);
-          const uint16x8_t a = vpaddlq_u8(vcombine_u8(v_uabove, v_vabove));
-          const uint32x4_t b = vpaddlq_u16(a);
-          const uint64x2_t c = vpaddlq_u32(b);
-          average_u = vgetq_lane_u32(vreinterpretq_u32_u64((c)), 0);
-          average_v = vgetq_lane_u32(vreinterpretq_u32_u64((c)), 2);
-        }
-        if (x->left_available) {
-          for (i = 0; i < 8; ++i) {
-              average_u += uleft[0];
-              uleft += left_stride;
-              average_v += vleft[0];
-              vleft += left_stride;
-          }
-        }
-        shift += 2;
-        expected_udc = (average_u + (1 << (shift - 1))) >> shift;
-        expected_vdc = (average_v + (1 << (shift - 1))) >> shift;
-        v_expected_udc = vmov_n_u8((uint8_t)expected_udc);
-        v_expected_vdc = vmov_n_u8((uint8_t)expected_vdc);
-      }
-      for (i = 0; i < 8; ++i) {
-        vst1_u8(upred_ptr, v_expected_udc);
-        upred_ptr += pred_stride;
-        vst1_u8(vpred_ptr, v_expected_vdc);
-        vpred_ptr += pred_stride;
-      }
-    }
-    break;
-    case V_PRED:
-    {
-      const uint8x8_t v_uabove = vld1_u8(uabove_row);
-      const uint8x8_t v_vabove = vld1_u8(vabove_row);
-      for (i = 0; i < 8; ++i) {
-        vst1_u8(upred_ptr, v_uabove);
-        upred_ptr += pred_stride;
-        vst1_u8(vpred_ptr, v_vabove);
-        vpred_ptr += pred_stride;
-      }
-    }
-    break;
-    case H_PRED:
-    {
-      for (i = 0; i < 8; ++i) {
-        const uint8x8_t v_uleft = vmov_n_u8((uint8_t)uleft[0]);
-        const uint8x8_t v_vleft = vmov_n_u8((uint8_t)vleft[0]);
-        uleft += left_stride;
-        vleft += left_stride;
-        vst1_u8(upred_ptr, v_uleft);
-        upred_ptr += pred_stride;
-        vst1_u8(vpred_ptr, v_vleft);
-        vpred_ptr += pred_stride;
-      }
-    }
-    break;
-    case TM_PRED:
-    {
-      const uint16x8_t v_utop_left = vmovq_n_u16((int16_t)uabove_row[-1]);
-      const uint16x8_t v_vtop_left = vmovq_n_u16((int16_t)vabove_row[-1]);
-      const uint8x8_t v_uabove = vld1_u8(uabove_row);
-      const uint8x8_t v_vabove = vld1_u8(vabove_row);
-      for (i = 0; i < 8; ++i) {
-        const uint8x8_t v_uleft = vmov_n_u8((int8_t)uleft[0]);
-        const uint8x8_t v_vleft = vmov_n_u8((int8_t)vleft[0]);
-        const uint16x8_t a_u = vaddl_u8(v_uabove, v_uleft);
-        const uint16x8_t a_v = vaddl_u8(v_vabove, v_vleft);
-        const int16x8_t b_u = vsubq_s16(vreinterpretq_s16_u16(a_u),
-                                        vreinterpretq_s16_u16(v_utop_left));
-        const int16x8_t b_v = vsubq_s16(vreinterpretq_s16_u16(a_v),
-                                        vreinterpretq_s16_u16(v_vtop_left));
-        const uint8x8_t pred_u = vqmovun_s16(b_u);
-        const uint8x8_t pred_v = vqmovun_s16(b_v);
-
-        vst1_u8(upred_ptr, pred_u);
-        vst1_u8(vpred_ptr, pred_v);
-        upred_ptr += pred_stride;
-        vpred_ptr += pred_stride;
-        uleft += left_stride;
-        vleft += left_stride;
-      }
-    }
-    break;
-  }
-}
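The DC_PRED branches above sum whichever borders are available (16 pixels each for luma, 8 each for chroma) and divide by the pixel count with round-to-nearest: shift counts the available borders, the extra += 3 (luma) or += 2 (chroma) supplies the log2 of the border length, and (average + (1 << (shift - 1))) >> shift is rounded division by 2^shift. TM_PRED likewise computes left + above - top_left per pixel, which the vqmovun_s16 narrowing saturates to 8 bits. A scalar sketch of the luma DC case, under our own helper name and interface:

    /* Sketch: rounded DC average for the 16x16 luma predictor. */
    static unsigned char dc_predict_16x16(const unsigned char *above,
                                          const unsigned char *left,
                                          int left_stride,
                                          int up_available,
                                          int left_available) {
        int shift = up_available + left_available;
        unsigned int average = 0;
        int i;

        if (!shift)            /* no neighbors: predict mid-gray */
            return 128;
        if (up_available)
            for (i = 0; i < 16; ++i)
                average += above[i];
        if (left_available)
            for (i = 0; i < 16; ++i)
                average += left[i * left_stride];
        shift += 3;            /* divide by 16 or 32 pixels */
        return (unsigned char)((average + (1u << (shift - 1))) >> shift);
    }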
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/sad_neon.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-unsigned int vp8_sad8x8_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x8_t d0, d8;
-    uint16x8_t q12;
-    uint32x4_t q1;
-    uint64x2_t q3;
-    uint32x2_t d5;
-    int i;
-
-    d0 = vld1_u8(src_ptr);
-    src_ptr += src_stride;
-    d8 = vld1_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(d0, d8);
-
-    for (i = 0; i < 7; i++) {
-        d0 = vld1_u8(src_ptr);
-        src_ptr += src_stride;
-        d8 = vld1_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, d0, d8);
-    }
-
-    q1 = vpaddlq_u16(q12);
-    q3 = vpaddlq_u32(q1);
-    d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
-                  vreinterpret_u32_u64(vget_high_u64(q3)));
-
-    return vget_lane_u32(d5, 0);
-}
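The tail of each SAD kernel in this file folds a vector of per-lane sums into a scalar with the same widening pairwise-add chain: vpaddlq_u16 to 32-bit, vpaddlq_u32 to 64-bit, then an add of the two 64-bit halves read as 32-bit lanes. Factored out as a sketch (the helper name is ours):

    #include <arm_neon.h>

    /* Sketch: horizontal add of a uint16x8_t, as done at the end of
     * each SAD kernel in this file. */
    static unsigned int horizontal_add_u16x8(const uint16x8_t v) {
        const uint32x4_t a = vpaddlq_u16(v);
        const uint64x2_t b = vpaddlq_u32(a);
        const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
                                      vreinterpret_u32_u64(vget_high_u64(b)));
        return vget_lane_u32(c, 0);
    }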
-
-unsigned int vp8_sad8x16_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x8_t d0, d8;
-    uint16x8_t q12;
-    uint32x4_t q1;
-    uint64x2_t q3;
-    uint32x2_t d5;
-    int i;
-
-    d0 = vld1_u8(src_ptr);
-    src_ptr += src_stride;
-    d8 = vld1_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(d0, d8);
-
-    for (i = 0; i < 15; i++) {
-        d0 = vld1_u8(src_ptr);
-        src_ptr += src_stride;
-        d8 = vld1_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, d0, d8);
-    }
-
-    q1 = vpaddlq_u16(q12);
-    q3 = vpaddlq_u32(q1);
-    d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
-                  vreinterpret_u32_u64(vget_high_u64(q3)));
-
-    return vget_lane_u32(d5, 0);
-}
-
-unsigned int vp8_sad4x4_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x8_t d0, d8;
-    uint16x8_t q12;
-    uint32x2_t d1;
-    uint64x1_t d3;
-    int i;
-
-    d0 = vld1_u8(src_ptr);
-    src_ptr += src_stride;
-    d8 = vld1_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(d0, d8);
-
-    for (i = 0; i < 3; i++) {
-        d0 = vld1_u8(src_ptr);
-        src_ptr += src_stride;
-        d8 = vld1_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, d0, d8);
-    }
-
-    d1 = vpaddl_u16(vget_low_u16(q12));
-    d3 = vpaddl_u32(d1);
-
-    return vget_lane_u32(vreinterpret_u32_u64(d3), 0);
-}
-
-unsigned int vp8_sad16x16_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x16_t q0, q4;
-    uint16x8_t q12, q13;
-    uint32x4_t q1;
-    uint64x2_t q3;
-    uint32x2_t d5;
-    int i;
-
-    q0 = vld1q_u8(src_ptr);
-    src_ptr += src_stride;
-    q4 = vld1q_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(vget_low_u8(q0), vget_low_u8(q4));
-    q13 = vabdl_u8(vget_high_u8(q0), vget_high_u8(q4));
-
-    for (i = 0; i < 15; i++) {
-        q0 = vld1q_u8(src_ptr);
-        src_ptr += src_stride;
-        q4 = vld1q_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, vget_low_u8(q0), vget_low_u8(q4));
-        q13 = vabal_u8(q13, vget_high_u8(q0), vget_high_u8(q4));
-    }
-
-    q12 = vaddq_u16(q12, q13);
-    q1 = vpaddlq_u16(q12);
-    q3 = vpaddlq_u32(q1);
-    d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
-                  vreinterpret_u32_u64(vget_high_u64(q3)));
-
-    return vget_lane_u32(d5, 0);
-}
-
-unsigned int vp8_sad16x8_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x16_t q0, q4;
-    uint16x8_t q12, q13;
-    uint32x4_t q1;
-    uint64x2_t q3;
-    uint32x2_t d5;
-    int i;
-
-    q0 = vld1q_u8(src_ptr);
-    src_ptr += src_stride;
-    q4 = vld1q_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(vget_low_u8(q0), vget_low_u8(q4));
-    q13 = vabdl_u8(vget_high_u8(q0), vget_high_u8(q4));
-
-    for (i = 0; i < 7; i++) {
-        q0 = vld1q_u8(src_ptr);
-        src_ptr += src_stride;
-        q4 = vld1q_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, vget_low_u8(q0), vget_low_u8(q4));
-        q13 = vabal_u8(q13, vget_high_u8(q0), vget_high_u8(q4));
-    }
-
-    q12 = vaddq_u16(q12, q13);
-    q1 = vpaddlq_u16(q12);
-    q3 = vpaddlq_u32(q1);
-    d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
-                  vreinterpret_u32_u64(vget_high_u64(q3)));
-
-    return vget_lane_u32(d5, 0);
-}
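All five kernels above vectorize the same scalar definition: the sum of absolute differences between a source block and a reference block, with vabdl_u8 starting the accumulator and vabal_u8 accumulating the remaining rows. For comparison, a plain-C sketch with generic block dimensions (the width/height parameters are ours; the removed file fixes them per function):

    #include <stdlib.h>

    /* Sketch: scalar sum of absolute differences over a width x height
     * block, the reference behavior of the NEON kernels above. */
    static unsigned int sad_c(const unsigned char *src_ptr, int src_stride,
                              const unsigned char *ref_ptr, int ref_stride,
                              int width, int height) {
        unsigned int sad = 0;
        int r, c;

        for (r = 0; r < height; ++r) {
            for (c = 0; c < width; ++c)
                sad += abs(src_ptr[c] - ref_ptr[c]);
            src_ptr += src_stride;
            ref_ptr += ref_stride;
        }
        return sad;
    }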
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-static const int16_t cospi8sqrt2minus1 = 20091;
-static const int16_t sinpi8sqrt2       = 35468;
-
-void vp8_short_idct4x4llm_neon(
-        int16_t *input,
-        unsigned char *pred_ptr,
-        int pred_stride,
-        unsigned char *dst_ptr,
-        int dst_stride) {
-    int i;
-    uint32x2_t d6u32 = vdup_n_u32(0);
-    uint8x8_t d1u8;
-    int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
-    uint16x8_t q1u16;
-    int16x8_t q1s16, q2s16, q3s16, q4s16;
-    int32x2x2_t v2tmp0, v2tmp1;
-    int16x4x2_t v2tmp2, v2tmp3;
-
-    d2 = vld1_s16(input);
-    d3 = vld1_s16(input + 4);
-    d4 = vld1_s16(input + 8);
-    d5 = vld1_s16(input + 12);
-
-    // 1st for loop
-    q1s16 = vcombine_s16(d2, d4);  // Swap d3 d4 here
-    q2s16 = vcombine_s16(d3, d5);
-
-    q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
-    q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);
-
-    d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // a1
-    d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // b1
-
-    q3s16 = vshrq_n_s16(q3s16, 1);
-    q4s16 = vshrq_n_s16(q4s16, 1);
-
-    q3s16 = vqaddq_s16(q3s16, q2s16);
-    q4s16 = vqaddq_s16(q4s16, q2s16);
-
-    d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16));  // c1
-    d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16));  // d1
-
-    d2 = vqadd_s16(d12, d11);
-    d3 = vqadd_s16(d13, d10);
-    d4 = vqsub_s16(d13, d10);
-    d5 = vqsub_s16(d12, d11);
-
-    v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
-    v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
-    v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
-                      vreinterpret_s16_s32(v2tmp1.val[0]));
-    v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
-                      vreinterpret_s16_s32(v2tmp1.val[1]));
-
-    // 2nd for loop
-    q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp3.val[0]);
-    q2s16 = vcombine_s16(v2tmp2.val[1], v2tmp3.val[1]);
-
-    q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
-    q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);
-
-    d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // a1
-    d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // b1
-
-    q3s16 = vshrq_n_s16(q3s16, 1);
-    q4s16 = vshrq_n_s16(q4s16, 1);
-
-    q3s16 = vqaddq_s16(q3s16, q2s16);
-    q4s16 = vqaddq_s16(q4s16, q2s16);
-
-    d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16));  // c1
-    d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16));  // d1
-
-    d2 = vqadd_s16(d12, d11);
-    d3 = vqadd_s16(d13, d10);
-    d4 = vqsub_s16(d13, d10);
-    d5 = vqsub_s16(d12, d11);
-
-    d2 = vrshr_n_s16(d2, 3);
-    d3 = vrshr_n_s16(d3, 3);
-    d4 = vrshr_n_s16(d4, 3);
-    d5 = vrshr_n_s16(d5, 3);
-
-    v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
-    v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
-    v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
-                      vreinterpret_s16_s32(v2tmp1.val[0]));
-    v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
-                      vreinterpret_s16_s32(v2tmp1.val[1]));
-
-    q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp2.val[1]);
-    q2s16 = vcombine_s16(v2tmp3.val[0], v2tmp3.val[1]);
-
-    // dc_only_idct_add
-    for (i = 0; i < 2; i++, q1s16 = q2s16) {
-        d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 0);
-        pred_ptr += pred_stride;
-        d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 1);
-        pred_ptr += pred_stride;
-
-        q1u16 = vaddw_u8(vreinterpretq_u16_s16(q1s16),
-                         vreinterpret_u8_u32(d6u32));
-        d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
-
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 0);
-        dst_ptr += dst_stride;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 1);
-        dst_ptr += dst_stride;
-    }
-    return;
-}
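The two constants above are Q16 fixed-point forms of the VP8 inverse-transform multipliers: 20091 is approximately (sqrt(2)*cos(pi/8) - 1) * 65536 and 35468 is approximately sqrt(2)*sin(pi/8) * 65536. The second value does not fit in int16_t and wraps to -30068; the vshrq_n_s16-by-1 followed by vqaddq_s16 of the input in the code above compensates, since vqdmulh computes (2ab) >> 16, so ((a * (35468 - 65536)) >> 16) + a = (a * 35468) >> 16. A small standalone check of the constants, assuming only standard C and the math library:

    #include <assert.h>
    #include <math.h>

    int main(void) {
        const double pi = acos(-1.0);
        /* Q16 encodings of the VP8 IDCT multipliers. */
        assert((int)floor((sqrt(2.0) * cos(pi / 8.0) - 1.0) * 65536.0 + 0.5)
               == 20091);
        assert((int)floor(sqrt(2.0) * sin(pi / 8.0) * 65536.0 + 0.5)
               == 35468);
        return 0;
    }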
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/sixtappredict_neon.c
+++ /dev/null
@@ -1,1754 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include "vpx_ports/mem.h"
-
-static const int8_t vp8_sub_pel_filters[8][8] = {
-    {0,  0,  128,   0,   0, 0, 0, 0},  /* note that 1/8 pel positions are */
-    {0, -6,  123,  12,  -1, 0, 0, 0},  /*    just as per alpha -0.5 bicubic */
-    {2, -11, 108,  36,  -8, 1, 0, 0},  /* New 1/4 pel 6 tap filter */
-    {0, -9,   93,  50,  -6, 0, 0, 0},
-    {3, -16,  77,  77, -16, 3, 0, 0},  /* New 1/2 pel 6 tap filter */
-    {0, -6,   50,  93,  -9, 0, 0, 0},
-    {1, -8,   36, 108, -11, 2, 0, 0},  /* New 1/4 pel 6 tap filter */
-    {0, -1,   12, 123,  -6, 0, 0, 0},
-};
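Each row above holds the six taps of one eighth-pel filter, padded to eight entries; every filter's taps sum to 128, so results are normalized with a rounded shift by 7 and saturated to 8 bits, which is what vqrshrun_n_s16(sum, 7) does in the functions below (the NEON code stores |tap| and uses vmlsl_u8 for the negative taps). A scalar sketch of one filter applied at a single pixel, under our own helper name:

    #include <stdint.h>

    /* Sketch: apply one 6-tap filter from the table above at a single
     * pixel. src points at the pixel; the window spans src[-2]..src[3]
     * (stride 1 horizontally, the row pitch vertically). */
    static unsigned char sixtap_pixel(const unsigned char *src, int stride,
                                      const int8_t filter[6]) {
        int k, sum = 0;

        for (k = 0; k < 6; ++k)
            sum += filter[k] * src[(k - 2) * stride];
        sum = (sum + 64) >> 7;        /* taps sum to 128: round, divide */
        if (sum < 0) sum = 0;         /* saturate to [0, 255] */
        if (sum > 255) sum = 255;
        return (unsigned char)sum;
    }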
-
-void vp8_sixtap_predict4x4_neon(
-        unsigned char *src_ptr,
-        int src_pixels_per_line,
-        int xoffset,
-        int yoffset,
-        unsigned char *dst_ptr,
-        int dst_pitch) {
-    unsigned char *src;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d18u8, d19u8, d20u8, d21u8;
-    uint8x8_t d23u8, d24u8, d25u8, d26u8, d27u8, d28u8, d29u8, d30u8, d31u8;
-    int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8;
-    uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16;
-    uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16;
-    int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16;
-    int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16;
-    uint8x16_t q3u8, q4u8, q5u8, q6u8, q11u8;
-    uint64x2_t q3u64, q4u64, q5u64, q6u64, q9u64, q10u64;
-    uint32x2x2_t d0u32x2, d1u32x2;
-
-    if (xoffset == 0) {  // secondpass_filter4x4_only
-        uint32x2_t d27u32 = vdup_n_u32(0);
-        uint32x2_t d28u32 = vdup_n_u32(0);
-        uint32x2_t d29u32 = vdup_n_u32(0);
-        uint32x2_t d30u32 = vdup_n_u32(0);
-        uint32x2_t d31u32 = vdup_n_u32(0);
-
-        // load second_pass filter
-        dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
-        d0s8 = vdup_lane_s8(dtmps8, 0);
-        d1s8 = vdup_lane_s8(dtmps8, 1);
-        d2s8 = vdup_lane_s8(dtmps8, 2);
-        d3s8 = vdup_lane_s8(dtmps8, 3);
-        d4s8 = vdup_lane_s8(dtmps8, 4);
-        d5s8 = vdup_lane_s8(dtmps8, 5);
-        d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
-        d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
-        d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
-        d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
-        d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
-        d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
-        // load src data
-        src = src_ptr - src_pixels_per_line * 2;
-        d27u32 = vld1_lane_u32((const uint32_t *)src, d27u32, 0);
-        src += src_pixels_per_line;
-        d27u32 = vld1_lane_u32((const uint32_t *)src, d27u32, 1);
-        src += src_pixels_per_line;
-        d28u32 = vld1_lane_u32((const uint32_t *)src, d28u32, 0);
-        src += src_pixels_per_line;
-        d28u32 = vld1_lane_u32((const uint32_t *)src, d28u32, 1);
-        src += src_pixels_per_line;
-        d29u32 = vld1_lane_u32((const uint32_t *)src, d29u32, 0);
-        src += src_pixels_per_line;
-        d29u32 = vld1_lane_u32((const uint32_t *)src, d29u32, 1);
-        src += src_pixels_per_line;
-        d30u32 = vld1_lane_u32((const uint32_t *)src, d30u32, 0);
-        src += src_pixels_per_line;
-        d30u32 = vld1_lane_u32((const uint32_t *)src, d30u32, 1);
-        src += src_pixels_per_line;
-        d31u32 = vld1_lane_u32((const uint32_t *)src, d31u32, 0);
-
-        d27u8 = vreinterpret_u8_u32(d27u32);
-        d28u8 = vreinterpret_u8_u32(d28u32);
-        d29u8 = vreinterpret_u8_u32(d29u32);
-        d30u8 = vreinterpret_u8_u32(d30u32);
-        d31u8 = vreinterpret_u8_u32(d31u32);
-
-        d23u8 = vext_u8(d27u8, d28u8, 4);
-        d24u8 = vext_u8(d28u8, d29u8, 4);
-        d25u8 = vext_u8(d29u8, d30u8, 4);
-        d26u8 = vext_u8(d30u8, d31u8, 4);
-
-        q3u16 = vmull_u8(d27u8, d0u8);
-        q4u16 = vmull_u8(d28u8, d0u8);
-        q5u16 = vmull_u8(d25u8, d5u8);
-        q6u16 = vmull_u8(d26u8, d5u8);
-
-        q3u16 = vmlsl_u8(q3u16, d29u8, d4u8);
-        q4u16 = vmlsl_u8(q4u16, d30u8, d4u8);
-        q5u16 = vmlsl_u8(q5u16, d23u8, d1u8);
-        q6u16 = vmlsl_u8(q6u16, d24u8, d1u8);
-
-        q3u16 = vmlal_u8(q3u16, d28u8, d2u8);
-        q4u16 = vmlal_u8(q4u16, d29u8, d2u8);
-        q5u16 = vmlal_u8(q5u16, d24u8, d3u8);
-        q6u16 = vmlal_u8(q6u16, d25u8, d3u8);
-
-        q3s16 = vreinterpretq_s16_u16(q3u16);
-        q4s16 = vreinterpretq_s16_u16(q4u16);
-        q5s16 = vreinterpretq_s16_u16(q5u16);
-        q6s16 = vreinterpretq_s16_u16(q6u16);
-
-        q5s16 = vqaddq_s16(q5s16, q3s16);
-        q6s16 = vqaddq_s16(q6s16, q4s16);
-
-        d3u8 = vqrshrun_n_s16(q5s16, 7);
-        d4u8 = vqrshrun_n_s16(q6s16, 7);
-
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 0);
-        dst_ptr += dst_pitch;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 1);
-        dst_ptr += dst_pitch;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d4u8), 0);
-        dst_ptr += dst_pitch;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d4u8), 1);
-        return;
-    }
-
-    // load first_pass filter
-    dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]);
-    d0s8 = vdup_lane_s8(dtmps8, 0);
-    d1s8 = vdup_lane_s8(dtmps8, 1);
-    d2s8 = vdup_lane_s8(dtmps8, 2);
-    d3s8 = vdup_lane_s8(dtmps8, 3);
-    d4s8 = vdup_lane_s8(dtmps8, 4);
-    d5s8 = vdup_lane_s8(dtmps8, 5);
-    d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
-    d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
-    d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
-    d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
-    d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
-    d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
-    // First pass: output_height lines x output_width columns (9x4)
-
-    if (yoffset == 0)  // firstpass_filter4x4_only
-        src = src_ptr - 2;
-    else
-        src = src_ptr - 2 - (src_pixels_per_line * 2);
-
-    q3u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q4u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q5u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q6u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-
-    d18u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
-    d19u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
-    d20u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
-    d21u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
-
-    // vswp here
-    q3u8 = vcombine_u8(vget_low_u8(q3u8), vget_low_u8(q4u8));
-    q5u8 = vcombine_u8(vget_low_u8(q5u8), vget_low_u8(q6u8));
-
-    d0u32x2 = vzip_u32(vreinterpret_u32_u8(d18u8),  // d18 d19
-                       vreinterpret_u32_u8(d19u8));
-    d1u32x2 = vzip_u32(vreinterpret_u32_u8(d20u8),  // d20 d21
-                       vreinterpret_u32_u8(d21u8));
-    q7u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d5u8);
-    q8u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d5u8);
-
-    // keep original src data in q4 q6
-    q4u64 = vreinterpretq_u64_u8(q3u8);
-    q6u64 = vreinterpretq_u64_u8(q5u8);
-
-    d0u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q3u8)),  // d6 d7
-                       vreinterpret_u32_u8(vget_high_u8(q3u8)));
-    d1u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q5u8)),  // d10 d11
-                       vreinterpret_u32_u8(vget_high_u8(q5u8)));
-    q9u64 = vshrq_n_u64(q4u64, 8);
-    q10u64 = vshrq_n_u64(q6u64, 8);
-    q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d0u8);
-    q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d0u8);
-
-    d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q9u64)),   // d18 d19
-                       vreinterpret_u32_u64(vget_high_u64(q9u64)));
-    d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q10u64)),  // d20 d21
-                       vreinterpret_u32_u64(vget_high_u64(q10u64)));
-    q3u64 = vshrq_n_u64(q4u64, 32);
-    q5u64 = vshrq_n_u64(q6u64, 32);
-    q7u16 = vmlsl_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d1u8);
-    q8u16 = vmlsl_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d1u8);
-
-    d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q3u64)),  // d6 d7
-                       vreinterpret_u32_u64(vget_high_u64(q3u64)));
-    d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)),  // d10 d11
-                       vreinterpret_u32_u64(vget_high_u64(q5u64)));
-    q9u64 = vshrq_n_u64(q4u64, 16);
-    q10u64 = vshrq_n_u64(q6u64, 16);
-    q7u16 = vmlsl_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d4u8);
-    q8u16 = vmlsl_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d4u8);
-
-    d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q9u64)),   // d18 d19
-                       vreinterpret_u32_u64(vget_high_u64(q9u64)));
-    d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q10u64)),  // d20 d21
-                       vreinterpret_u32_u64(vget_high_u64(q10u64)));
-    q3u64 = vshrq_n_u64(q4u64, 24);
-    q5u64 = vshrq_n_u64(q6u64, 24);
-    q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d2u8);
-    q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d2u8);
-
-    d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q3u64)),  // d6 d7
-                       vreinterpret_u32_u64(vget_high_u64(q3u64)));
-    d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)),  // d10 d11
-                       vreinterpret_u32_u64(vget_high_u64(q5u64)));
-    q9u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d3u8);
-    q10u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d3u8);
-
-    q7s16 = vreinterpretq_s16_u16(q7u16);
-    q8s16 = vreinterpretq_s16_u16(q8u16);
-    q9s16 = vreinterpretq_s16_u16(q9u16);
-    q10s16 = vreinterpretq_s16_u16(q10u16);
-    q7s16 = vqaddq_s16(q7s16, q9s16);
-    q8s16 = vqaddq_s16(q8s16, q10s16);
-
-    d27u8 = vqrshrun_n_s16(q7s16, 7);
-    d28u8 = vqrshrun_n_s16(q8s16, 7);
-
-    if (yoffset == 0) {  // firstpass_filter4x4_only
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d27u8), 0);
-        dst_ptr += dst_pitch;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d27u8), 1);
-        dst_ptr += dst_pitch;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d28u8), 0);
-        dst_ptr += dst_pitch;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d28u8), 1);
-        return;
-    }
-
-    // First pass on the remaining 5 rows of data
-    q3u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q4u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q5u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q6u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q11u8 = vld1q_u8(src);
-
-    d18u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
-    d19u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
-    d20u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
-    d21u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
-
-    // vswp here
-    q3u8 = vcombine_u8(vget_low_u8(q3u8), vget_low_u8(q4u8));
-    q5u8 = vcombine_u8(vget_low_u8(q5u8), vget_low_u8(q6u8));
-
-    d0u32x2 = vzip_u32(vreinterpret_u32_u8(d18u8),  // d18 d19
-                       vreinterpret_u32_u8(d19u8));
-    d1u32x2 = vzip_u32(vreinterpret_u32_u8(d20u8),  // d20 d21
-                       vreinterpret_u32_u8(d21u8));
-    d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 5);
-    q7u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d5u8);
-    q8u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d5u8);
-    q12u16 = vmull_u8(d31u8, d5u8);
-
-    q4u64 = vreinterpretq_u64_u8(q3u8);
-    q6u64 = vreinterpretq_u64_u8(q5u8);
-
-    d0u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q3u8)),  // d6 d7
-                       vreinterpret_u32_u8(vget_high_u8(q3u8)));
-    d1u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q5u8)),  // d10 d11
-                       vreinterpret_u32_u8(vget_high_u8(q5u8)));
-    q9u64 = vshrq_n_u64(q4u64, 8);
-    q10u64 = vshrq_n_u64(q6u64, 8);
-    q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d0u8);
-    q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d0u8);
-    q12u16 = vmlal_u8(q12u16, vget_low_u8(q11u8), d0u8);
-
-    d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q9u64)),   // d18 d19
-                       vreinterpret_u32_u64(vget_high_u64(q9u64)));
-    d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q10u64)),  // d20 d21
-                       vreinterpret_u32_u64(vget_high_u64(q10u64)));
-    q3u64 = vshrq_n_u64(q4u64, 32);
-    q5u64 = vshrq_n_u64(q6u64, 32);
-    d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 1);
-    q7u16 = vmlsl_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d1u8);
-    q8u16 = vmlsl_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d1u8);
-    q12u16 = vmlsl_u8(q12u16, d31u8, d1u8);
-
-    d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q3u64)),  // d6 d7
-                       vreinterpret_u32_u64(vget_high_u64(q3u64)));
-    d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)),  // d10 d11
-                       vreinterpret_u32_u64(vget_high_u64(q5u64)));
-    q9u64 = vshrq_n_u64(q4u64, 16);
-    q10u64 = vshrq_n_u64(q6u64, 16);
-    d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 4);
-    q7u16 = vmlsl_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d4u8);
-    q8u16 = vmlsl_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d4u8);
-    q12u16 = vmlsl_u8(q12u16, d31u8, d4u8);
-
-    d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q9u64)),   // d18 d19
-                       vreinterpret_u32_u64(vget_high_u64(q9u64)));
-    d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q10u64)),  // d20 d21
-                       vreinterpret_u32_u64(vget_high_u64(q10u64)));
-    q3u64 = vshrq_n_u64(q4u64, 24);
-    q5u64 = vshrq_n_u64(q6u64, 24);
-    d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 2);
-    q7u16 = vmlal_u8(q7u16, vreinterpret_u8_u32(d0u32x2.val[0]), d2u8);
-    q8u16 = vmlal_u8(q8u16, vreinterpret_u8_u32(d1u32x2.val[0]), d2u8);
-    q12u16 = vmlal_u8(q12u16, d31u8, d2u8);
-
-    d0u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q3u64)),  // d6 d7
-                       vreinterpret_u32_u64(vget_high_u64(q3u64)));
-    d1u32x2 = vzip_u32(vreinterpret_u32_u64(vget_low_u64(q5u64)),  // d10 d11
-                       vreinterpret_u32_u64(vget_high_u64(q5u64)));
-    d31u8 = vext_u8(vget_low_u8(q11u8), vget_high_u8(q11u8), 3);
-    q9u16 = vmull_u8(vreinterpret_u8_u32(d0u32x2.val[0]), d3u8);
-    q10u16 = vmull_u8(vreinterpret_u8_u32(d1u32x2.val[0]), d3u8);
-    q11u16 = vmull_u8(d31u8, d3u8);
-
-    q7s16 = vreinterpretq_s16_u16(q7u16);
-    q8s16 = vreinterpretq_s16_u16(q8u16);
-    q9s16 = vreinterpretq_s16_u16(q9u16);
-    q10s16 = vreinterpretq_s16_u16(q10u16);
-    q11s16 = vreinterpretq_s16_u16(q11u16);
-    q12s16 = vreinterpretq_s16_u16(q12u16);
-    q7s16 = vqaddq_s16(q7s16, q9s16);
-    q8s16 = vqaddq_s16(q8s16, q10s16);
-    q12s16 = vqaddq_s16(q12s16, q11s16);
-
-    d29u8 = vqrshrun_n_s16(q7s16, 7);
-    d30u8 = vqrshrun_n_s16(q8s16, 7);
-    d31u8 = vqrshrun_n_s16(q12s16, 7);
-
-    // Second pass: 4x4
-    dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
-    d0s8 = vdup_lane_s8(dtmps8, 0);
-    d1s8 = vdup_lane_s8(dtmps8, 1);
-    d2s8 = vdup_lane_s8(dtmps8, 2);
-    d3s8 = vdup_lane_s8(dtmps8, 3);
-    d4s8 = vdup_lane_s8(dtmps8, 4);
-    d5s8 = vdup_lane_s8(dtmps8, 5);
-    d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
-    d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
-    d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
-    d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
-    d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
-    d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
-    d23u8 = vext_u8(d27u8, d28u8, 4);
-    d24u8 = vext_u8(d28u8, d29u8, 4);
-    d25u8 = vext_u8(d29u8, d30u8, 4);
-    d26u8 = vext_u8(d30u8, d31u8, 4);
-
-    q3u16 = vmull_u8(d27u8, d0u8);
-    q4u16 = vmull_u8(d28u8, d0u8);
-    q5u16 = vmull_u8(d25u8, d5u8);
-    q6u16 = vmull_u8(d26u8, d5u8);
-
-    q3u16 = vmlsl_u8(q3u16, d29u8, d4u8);
-    q4u16 = vmlsl_u8(q4u16, d30u8, d4u8);
-    q5u16 = vmlsl_u8(q5u16, d23u8, d1u8);
-    q6u16 = vmlsl_u8(q6u16, d24u8, d1u8);
-
-    q3u16 = vmlal_u8(q3u16, d28u8, d2u8);
-    q4u16 = vmlal_u8(q4u16, d29u8, d2u8);
-    q5u16 = vmlal_u8(q5u16, d24u8, d3u8);
-    q6u16 = vmlal_u8(q6u16, d25u8, d3u8);
-
-    q3s16 = vreinterpretq_s16_u16(q3u16);
-    q4s16 = vreinterpretq_s16_u16(q4u16);
-    q5s16 = vreinterpretq_s16_u16(q5u16);
-    q6s16 = vreinterpretq_s16_u16(q6u16);
-
-    q5s16 = vqaddq_s16(q5s16, q3s16);
-    q6s16 = vqaddq_s16(q6s16, q4s16);
-
-    d3u8 = vqrshrun_n_s16(q5s16, 7);
-    d4u8 = vqrshrun_n_s16(q6s16, 7);
-
-    vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 0);
-    dst_ptr += dst_pitch;
-    vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d3u8), 1);
-    dst_ptr += dst_pitch;
-    vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d4u8), 0);
-    dst_ptr += dst_pitch;
-    vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d4u8), 1);
-    return;
-}
-
-void vp8_sixtap_predict8x4_neon(
-        unsigned char *src_ptr,
-        int src_pixels_per_line,
-        int xoffset,
-        int yoffset,
-        unsigned char *dst_ptr,
-        int dst_pitch) {
-    unsigned char *src;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
-    uint8x8_t d22u8, d23u8, d24u8, d25u8, d26u8;
-    uint8x8_t d27u8, d28u8, d29u8, d30u8, d31u8;
-    int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8;
-    uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16;
-    uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16;
-    int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16;
-    int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16;
-    uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8;
-
-    if (xoffset == 0) {  // secondpass_filter8x4_only
-        // load second_pass filter
-        dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
-        d0s8 = vdup_lane_s8(dtmps8, 0);
-        d1s8 = vdup_lane_s8(dtmps8, 1);
-        d2s8 = vdup_lane_s8(dtmps8, 2);
-        d3s8 = vdup_lane_s8(dtmps8, 3);
-        d4s8 = vdup_lane_s8(dtmps8, 4);
-        d5s8 = vdup_lane_s8(dtmps8, 5);
-        d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
-        d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
-        d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
-        d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
-        d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
-        d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
-        // load src data
-        src = src_ptr - src_pixels_per_line * 2;
-        d22u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d23u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d24u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d25u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d26u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d27u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d28u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d29u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d30u8 = vld1_u8(src);
-
-        q3u16 = vmull_u8(d22u8, d0u8);
-        q4u16 = vmull_u8(d23u8, d0u8);
-        q5u16 = vmull_u8(d24u8, d0u8);
-        q6u16 = vmull_u8(d25u8, d0u8);
-
-        q3u16 = vmlsl_u8(q3u16, d23u8, d1u8);
-        q4u16 = vmlsl_u8(q4u16, d24u8, d1u8);
-        q5u16 = vmlsl_u8(q5u16, d25u8, d1u8);
-        q6u16 = vmlsl_u8(q6u16, d26u8, d1u8);
-
-        q3u16 = vmlsl_u8(q3u16, d26u8, d4u8);
-        q4u16 = vmlsl_u8(q4u16, d27u8, d4u8);
-        q5u16 = vmlsl_u8(q5u16, d28u8, d4u8);
-        q6u16 = vmlsl_u8(q6u16, d29u8, d4u8);
-
-        q3u16 = vmlal_u8(q3u16, d24u8, d2u8);
-        q4u16 = vmlal_u8(q4u16, d25u8, d2u8);
-        q5u16 = vmlal_u8(q5u16, d26u8, d2u8);
-        q6u16 = vmlal_u8(q6u16, d27u8, d2u8);
-
-        q3u16 = vmlal_u8(q3u16, d27u8, d5u8);
-        q4u16 = vmlal_u8(q4u16, d28u8, d5u8);
-        q5u16 = vmlal_u8(q5u16, d29u8, d5u8);
-        q6u16 = vmlal_u8(q6u16, d30u8, d5u8);
-
-        q7u16 = vmull_u8(d25u8, d3u8);
-        q8u16 = vmull_u8(d26u8, d3u8);
-        q9u16 = vmull_u8(d27u8, d3u8);
-        q10u16 = vmull_u8(d28u8, d3u8);
-
-        q3s16 = vreinterpretq_s16_u16(q3u16);
-        q4s16 = vreinterpretq_s16_u16(q4u16);
-        q5s16 = vreinterpretq_s16_u16(q5u16);
-        q6s16 = vreinterpretq_s16_u16(q6u16);
-        q7s16 = vreinterpretq_s16_u16(q7u16);
-        q8s16 = vreinterpretq_s16_u16(q8u16);
-        q9s16 = vreinterpretq_s16_u16(q9u16);
-        q10s16 = vreinterpretq_s16_u16(q10u16);
-
-        q7s16 = vqaddq_s16(q7s16, q3s16);
-        q8s16 = vqaddq_s16(q8s16, q4s16);
-        q9s16 = vqaddq_s16(q9s16, q5s16);
-        q10s16 = vqaddq_s16(q10s16, q6s16);
-
-        d6u8 = vqrshrun_n_s16(q7s16, 7);
-        d7u8 = vqrshrun_n_s16(q8s16, 7);
-        d8u8 = vqrshrun_n_s16(q9s16, 7);
-        d9u8 = vqrshrun_n_s16(q10s16, 7);
-
-        vst1_u8(dst_ptr, d6u8);
-        dst_ptr += dst_pitch;
-        vst1_u8(dst_ptr, d7u8);
-        dst_ptr += dst_pitch;
-        vst1_u8(dst_ptr, d8u8);
-        dst_ptr += dst_pitch;
-        vst1_u8(dst_ptr, d9u8);
-        return;
-    }
-
-    // load first_pass filter
-    dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]);
-    d0s8 = vdup_lane_s8(dtmps8, 0);
-    d1s8 = vdup_lane_s8(dtmps8, 1);
-    d2s8 = vdup_lane_s8(dtmps8, 2);
-    d3s8 = vdup_lane_s8(dtmps8, 3);
-    d4s8 = vdup_lane_s8(dtmps8, 4);
-    d5s8 = vdup_lane_s8(dtmps8, 5);
-    d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
-    d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
-    d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
-    d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
-    d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
-    d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
-    // First pass: output_height lines x output_width columns (9x8)
-    if (yoffset == 0)  // firstpass_filter8x4_only
-        src = src_ptr - 2;
-    else
-        src = src_ptr - 2 - (src_pixels_per_line * 2);
-    q3u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q4u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q5u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q6u8 = vld1q_u8(src);
-
-    q7u16  = vmull_u8(vget_low_u8(q3u8), d0u8);
-    q8u16  = vmull_u8(vget_low_u8(q4u8), d0u8);
-    q9u16  = vmull_u8(vget_low_u8(q5u8), d0u8);
-    q10u16 = vmull_u8(vget_low_u8(q6u8), d0u8);
-
-    d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
-    d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
-    d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
-    d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1);
-
-    q7u16  = vmlsl_u8(q7u16, d28u8, d1u8);
-    q8u16  = vmlsl_u8(q8u16, d29u8, d1u8);
-    q9u16  = vmlsl_u8(q9u16, d30u8, d1u8);
-    q10u16 = vmlsl_u8(q10u16, d31u8, d1u8);
-
-    d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4);
-    d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4);
-    d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4);
-    d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4);
-
-    q7u16  = vmlsl_u8(q7u16, d28u8, d4u8);
-    q8u16  = vmlsl_u8(q8u16, d29u8, d4u8);
-    q9u16  = vmlsl_u8(q9u16, d30u8, d4u8);
-    q10u16 = vmlsl_u8(q10u16, d31u8, d4u8);
-
-    d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2);
-    d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2);
-    d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2);
-    d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2);
-
-    q7u16  = vmlal_u8(q7u16, d28u8, d2u8);
-    q8u16  = vmlal_u8(q8u16, d29u8, d2u8);
-    q9u16  = vmlal_u8(q9u16, d30u8, d2u8);
-    q10u16 = vmlal_u8(q10u16, d31u8, d2u8);
-
-    d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
-    d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
-    d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
-    d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
-
-    q7u16 = vmlal_u8(q7u16, d28u8, d5u8);
-    q8u16 = vmlal_u8(q8u16, d29u8, d5u8);
-    q9u16 = vmlal_u8(q9u16, d30u8, d5u8);
-    q10u16 = vmlal_u8(q10u16, d31u8, d5u8);
-
-    d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3);
-    d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3);
-    d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3);
-    d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3);
-
-    q3u16 = vmull_u8(d28u8, d3u8);
-    q4u16 = vmull_u8(d29u8, d3u8);
-    q5u16 = vmull_u8(d30u8, d3u8);
-    q6u16 = vmull_u8(d31u8, d3u8);
-
-    q3s16 = vreinterpretq_s16_u16(q3u16);
-    q4s16 = vreinterpretq_s16_u16(q4u16);
-    q5s16 = vreinterpretq_s16_u16(q5u16);
-    q6s16 = vreinterpretq_s16_u16(q6u16);
-    q7s16 = vreinterpretq_s16_u16(q7u16);
-    q8s16 = vreinterpretq_s16_u16(q8u16);
-    q9s16 = vreinterpretq_s16_u16(q9u16);
-    q10s16 = vreinterpretq_s16_u16(q10u16);
-
-    q7s16 = vqaddq_s16(q7s16, q3s16);
-    q8s16 = vqaddq_s16(q8s16, q4s16);
-    q9s16 = vqaddq_s16(q9s16, q5s16);
-    q10s16 = vqaddq_s16(q10s16, q6s16);
-
-    d22u8 = vqrshrun_n_s16(q7s16, 7);
-    d23u8 = vqrshrun_n_s16(q8s16, 7);
-    d24u8 = vqrshrun_n_s16(q9s16, 7);
-    d25u8 = vqrshrun_n_s16(q10s16, 7);
-
-    if (yoffset == 0) {  // firstpass_filter8x4_only
-        vst1_u8(dst_ptr, d22u8);
-        dst_ptr += dst_pitch;
-        vst1_u8(dst_ptr, d23u8);
-        dst_ptr += dst_pitch;
-        vst1_u8(dst_ptr, d24u8);
-        dst_ptr += dst_pitch;
-        vst1_u8(dst_ptr, d25u8);
-        return;
-    }
-
-    // First pass on the remaining 5 rows of data
-    src += src_pixels_per_line;
-    q3u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q4u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q5u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q6u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q7u8 = vld1q_u8(src);
-
-    q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
-    q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
-    q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
-    q11u16 = vmull_u8(vget_low_u8(q6u8), d0u8);
-    q12u16 = vmull_u8(vget_low_u8(q7u8), d0u8);
-
-    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
-    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
-    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
-    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1);
-    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 1);
-
-    q8u16  = vmlsl_u8(q8u16, d27u8, d1u8);
-    q9u16  = vmlsl_u8(q9u16, d28u8, d1u8);
-    q10u16 = vmlsl_u8(q10u16, d29u8, d1u8);
-    q11u16 = vmlsl_u8(q11u16, d30u8, d1u8);
-    q12u16 = vmlsl_u8(q12u16, d31u8, d1u8);
-
-    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4);
-    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4);
-    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4);
-    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4);
-    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 4);
-
-    q8u16  = vmlsl_u8(q8u16, d27u8, d4u8);
-    q9u16  = vmlsl_u8(q9u16, d28u8, d4u8);
-    q10u16 = vmlsl_u8(q10u16, d29u8, d4u8);
-    q11u16 = vmlsl_u8(q11u16, d30u8, d4u8);
-    q12u16 = vmlsl_u8(q12u16, d31u8, d4u8);
-
-    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2);
-    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2);
-    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2);
-    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2);
-    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 2);
-
-    q8u16  = vmlal_u8(q8u16, d27u8, d2u8);
-    q9u16  = vmlal_u8(q9u16, d28u8, d2u8);
-    q10u16 = vmlal_u8(q10u16, d29u8, d2u8);
-    q11u16 = vmlal_u8(q11u16, d30u8, d2u8);
-    q12u16 = vmlal_u8(q12u16, d31u8, d2u8);
-
-    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
-    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
-    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
-    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
-    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 5);
-
-    q8u16  = vmlal_u8(q8u16, d27u8, d5u8);
-    q9u16  = vmlal_u8(q9u16, d28u8, d5u8);
-    q10u16 = vmlal_u8(q10u16, d29u8, d5u8);
-    q11u16 = vmlal_u8(q11u16, d30u8, d5u8);
-    q12u16 = vmlal_u8(q12u16, d31u8, d5u8);
-
-    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3);
-    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3);
-    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3);
-    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3);
-    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 3);
-
-    q3u16 = vmull_u8(d27u8, d3u8);
-    q4u16 = vmull_u8(d28u8, d3u8);
-    q5u16 = vmull_u8(d29u8, d3u8);
-    q6u16 = vmull_u8(d30u8, d3u8);
-    q7u16 = vmull_u8(d31u8, d3u8);
-
-    q3s16 = vreinterpretq_s16_u16(q3u16);
-    q4s16 = vreinterpretq_s16_u16(q4u16);
-    q5s16 = vreinterpretq_s16_u16(q5u16);
-    q6s16 = vreinterpretq_s16_u16(q6u16);
-    q7s16 = vreinterpretq_s16_u16(q7u16);
-    q8s16 = vreinterpretq_s16_u16(q8u16);
-    q9s16 = vreinterpretq_s16_u16(q9u16);
-    q10s16 = vreinterpretq_s16_u16(q10u16);
-    q11s16 = vreinterpretq_s16_u16(q11u16);
-    q12s16 = vreinterpretq_s16_u16(q12u16);
-
-    q8s16 = vqaddq_s16(q8s16, q3s16);
-    q9s16 = vqaddq_s16(q9s16, q4s16);
-    q10s16 = vqaddq_s16(q10s16, q5s16);
-    q11s16 = vqaddq_s16(q11s16, q6s16);
-    q12s16 = vqaddq_s16(q12s16, q7s16);
-
-    d26u8 = vqrshrun_n_s16(q8s16, 7);
-    d27u8 = vqrshrun_n_s16(q9s16, 7);
-    d28u8 = vqrshrun_n_s16(q10s16, 7);
-    d29u8 = vqrshrun_n_s16(q11s16, 7);
-    d30u8 = vqrshrun_n_s16(q12s16, 7);
-
-    // Second pass: 8x4
-    dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
-    d0s8 = vdup_lane_s8(dtmps8, 0);
-    d1s8 = vdup_lane_s8(dtmps8, 1);
-    d2s8 = vdup_lane_s8(dtmps8, 2);
-    d3s8 = vdup_lane_s8(dtmps8, 3);
-    d4s8 = vdup_lane_s8(dtmps8, 4);
-    d5s8 = vdup_lane_s8(dtmps8, 5);
-    d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
-    d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
-    d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
-    d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
-    d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
-    d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
-    q3u16 = vmull_u8(d22u8, d0u8);
-    q4u16 = vmull_u8(d23u8, d0u8);
-    q5u16 = vmull_u8(d24u8, d0u8);
-    q6u16 = vmull_u8(d25u8, d0u8);
-
-    q3u16 = vmlsl_u8(q3u16, d23u8, d1u8);
-    q4u16 = vmlsl_u8(q4u16, d24u8, d1u8);
-    q5u16 = vmlsl_u8(q5u16, d25u8, d1u8);
-    q6u16 = vmlsl_u8(q6u16, d26u8, d1u8);
-
-    q3u16 = vmlsl_u8(q3u16, d26u8, d4u8);
-    q4u16 = vmlsl_u8(q4u16, d27u8, d4u8);
-    q5u16 = vmlsl_u8(q5u16, d28u8, d4u8);
-    q6u16 = vmlsl_u8(q6u16, d29u8, d4u8);
-
-    q3u16 = vmlal_u8(q3u16, d24u8, d2u8);
-    q4u16 = vmlal_u8(q4u16, d25u8, d2u8);
-    q5u16 = vmlal_u8(q5u16, d26u8, d2u8);
-    q6u16 = vmlal_u8(q6u16, d27u8, d2u8);
-
-    q3u16 = vmlal_u8(q3u16, d27u8, d5u8);
-    q4u16 = vmlal_u8(q4u16, d28u8, d5u8);
-    q5u16 = vmlal_u8(q5u16, d29u8, d5u8);
-    q6u16 = vmlal_u8(q6u16, d30u8, d5u8);
-
-    q7u16 = vmull_u8(d25u8, d3u8);
-    q8u16 = vmull_u8(d26u8, d3u8);
-    q9u16 = vmull_u8(d27u8, d3u8);
-    q10u16 = vmull_u8(d28u8, d3u8);
-
-    q3s16 = vreinterpretq_s16_u16(q3u16);
-    q4s16 = vreinterpretq_s16_u16(q4u16);
-    q5s16 = vreinterpretq_s16_u16(q5u16);
-    q6s16 = vreinterpretq_s16_u16(q6u16);
-    q7s16 = vreinterpretq_s16_u16(q7u16);
-    q8s16 = vreinterpretq_s16_u16(q8u16);
-    q9s16 = vreinterpretq_s16_u16(q9u16);
-    q10s16 = vreinterpretq_s16_u16(q10u16);
-
-    q7s16 = vqaddq_s16(q7s16, q3s16);
-    q8s16 = vqaddq_s16(q8s16, q4s16);
-    q9s16 = vqaddq_s16(q9s16, q5s16);
-    q10s16 = vqaddq_s16(q10s16, q6s16);
-
-    d6u8 = vqrshrun_n_s16(q7s16, 7);
-    d7u8 = vqrshrun_n_s16(q8s16, 7);
-    d8u8 = vqrshrun_n_s16(q9s16, 7);
-    d9u8 = vqrshrun_n_s16(q10s16, 7);
-
-    vst1_u8(dst_ptr, d6u8);
-    dst_ptr += dst_pitch;
-    vst1_u8(dst_ptr, d7u8);
-    dst_ptr += dst_pitch;
-    vst1_u8(dst_ptr, d8u8);
-    dst_ptr += dst_pitch;
-    vst1_u8(dst_ptr, d9u8);
-    return;
-}
-
-void vp8_sixtap_predict8x8_neon(
-        unsigned char *src_ptr,
-        int src_pixels_per_line,
-        int xoffset,
-        int yoffset,
-        unsigned char *dst_ptr,
-        int dst_pitch) {
-    unsigned char *src, *tmpp;
-    unsigned char tmp[64];
-    int i;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
-    uint8x8_t d18u8, d19u8, d20u8, d21u8, d22u8, d23u8, d24u8, d25u8;
-    uint8x8_t d26u8, d27u8, d28u8, d29u8, d30u8, d31u8;
-    int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8;
-    uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16;
-    uint16x8_t q8u16, q9u16, q10u16, q11u16, q12u16;
-    int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16;
-    int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16;
-    uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q9u8, q10u8, q11u8, q12u8;
-
-    if (xoffset == 0) {  // secondpass_filter8x8_only
-        // load second_pass filter
-        dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
-        d0s8 = vdup_lane_s8(dtmps8, 0);
-        d1s8 = vdup_lane_s8(dtmps8, 1);
-        d2s8 = vdup_lane_s8(dtmps8, 2);
-        d3s8 = vdup_lane_s8(dtmps8, 3);
-        d4s8 = vdup_lane_s8(dtmps8, 4);
-        d5s8 = vdup_lane_s8(dtmps8, 5);
-        d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
-        d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
-        d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
-        d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
-        d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
-        d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
-        // load src data
-        src = src_ptr - src_pixels_per_line * 2;
-        d18u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d19u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d20u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d21u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d22u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d23u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d24u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d25u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d26u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d27u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d28u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d29u8 = vld1_u8(src);
-        src += src_pixels_per_line;
-        d30u8 = vld1_u8(src);
-
-        for (i = 2; i > 0; i--) {
-            q3u16 = vmull_u8(d18u8, d0u8);
-            q4u16 = vmull_u8(d19u8, d0u8);
-            q5u16 = vmull_u8(d20u8, d0u8);
-            q6u16 = vmull_u8(d21u8, d0u8);
-
-            q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
-            q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
-            q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
-            q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);
-
-            q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
-            q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
-            q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
-            q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);
-
-            q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
-            q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
-            q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
-            q6u16 = vmlal_u8(q6u16, d23u8, d2u8);
-
-            q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
-            q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
-            q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
-            q6u16 = vmlal_u8(q6u16, d26u8, d5u8);
-
-            q7u16 = vmull_u8(d21u8, d3u8);
-            q8u16 = vmull_u8(d22u8, d3u8);
-            q9u16 = vmull_u8(d23u8, d3u8);
-            q10u16 = vmull_u8(d24u8, d3u8);
-
-            q3s16 = vreinterpretq_s16_u16(q3u16);
-            q4s16 = vreinterpretq_s16_u16(q4u16);
-            q5s16 = vreinterpretq_s16_u16(q5u16);
-            q6s16 = vreinterpretq_s16_u16(q6u16);
-            q7s16 = vreinterpretq_s16_u16(q7u16);
-            q8s16 = vreinterpretq_s16_u16(q8u16);
-            q9s16 = vreinterpretq_s16_u16(q9u16);
-            q10s16 = vreinterpretq_s16_u16(q10u16);
-
-            q7s16 = vqaddq_s16(q7s16, q3s16);
-            q8s16 = vqaddq_s16(q8s16, q4s16);
-            q9s16 = vqaddq_s16(q9s16, q5s16);
-            q10s16 = vqaddq_s16(q10s16, q6s16);
-
-            d6u8 = vqrshrun_n_s16(q7s16, 7);
-            d7u8 = vqrshrun_n_s16(q8s16, 7);
-            d8u8 = vqrshrun_n_s16(q9s16, 7);
-            d9u8 = vqrshrun_n_s16(q10s16, 7);
-
-            d18u8 = d22u8;
-            d19u8 = d23u8;
-            d20u8 = d24u8;
-            d21u8 = d25u8;
-            d22u8 = d26u8;
-            d23u8 = d27u8;
-            d24u8 = d28u8;
-            d25u8 = d29u8;
-            d26u8 = d30u8;
-
-            vst1_u8(dst_ptr, d6u8);
-            dst_ptr += dst_pitch;
-            vst1_u8(dst_ptr, d7u8);
-            dst_ptr += dst_pitch;
-            vst1_u8(dst_ptr, d8u8);
-            dst_ptr += dst_pitch;
-            vst1_u8(dst_ptr, d9u8);
-            dst_ptr += dst_pitch;
-        }
-        return;
-    }
-
-    // load first_pass filter
-    dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]);
-    d0s8 = vdup_lane_s8(dtmps8, 0);
-    d1s8 = vdup_lane_s8(dtmps8, 1);
-    d2s8 = vdup_lane_s8(dtmps8, 2);
-    d3s8 = vdup_lane_s8(dtmps8, 3);
-    d4s8 = vdup_lane_s8(dtmps8, 4);
-    d5s8 = vdup_lane_s8(dtmps8, 5);
-    d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
-    d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
-    d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
-    d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
-    d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
-    d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
-    // First pass: output_height lines x output_width columns (13x8)
-    if (yoffset == 0)  // firstpass_filter8x8_only
-        src = src_ptr - 2;
-    else
-        src = src_ptr - 2 - (src_pixels_per_line * 2);
-
-    tmpp = tmp;
-    for (i = 2; i > 0; i--) {
-        q3u8 = vld1q_u8(src);
-        src += src_pixels_per_line;
-        q4u8 = vld1q_u8(src);
-        src += src_pixels_per_line;
-        q5u8 = vld1q_u8(src);
-        src += src_pixels_per_line;
-        q6u8 = vld1q_u8(src);
-        src += src_pixels_per_line;
-
-        __builtin_prefetch(src);
-        __builtin_prefetch(src + src_pixels_per_line);
-        __builtin_prefetch(src + src_pixels_per_line * 2);
-
-        q7u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
-        q8u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
-        q9u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
-        q10u16 = vmull_u8(vget_low_u8(q6u8), d0u8);
-
-        d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
-        d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
-        d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
-        d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1);
-
-        q7u16 = vmlsl_u8(q7u16, d28u8, d1u8);
-        q8u16 = vmlsl_u8(q8u16, d29u8, d1u8);
-        q9u16 = vmlsl_u8(q9u16, d30u8, d1u8);
-        q10u16 = vmlsl_u8(q10u16, d31u8, d1u8);
-
-        d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4);
-        d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4);
-        d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4);
-        d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4);
-
-        q7u16 = vmlsl_u8(q7u16, d28u8, d4u8);
-        q8u16 = vmlsl_u8(q8u16, d29u8, d4u8);
-        q9u16 = vmlsl_u8(q9u16, d30u8, d4u8);
-        q10u16 = vmlsl_u8(q10u16, d31u8, d4u8);
-
-        d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2);
-        d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2);
-        d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2);
-        d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2);
-
-        q7u16 = vmlal_u8(q7u16, d28u8, d2u8);
-        q8u16 = vmlal_u8(q8u16, d29u8, d2u8);
-        q9u16 = vmlal_u8(q9u16, d30u8, d2u8);
-        q10u16 = vmlal_u8(q10u16, d31u8, d2u8);
-
-        d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
-        d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
-        d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
-        d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
-
-        q7u16 = vmlal_u8(q7u16, d28u8, d5u8);
-        q8u16 = vmlal_u8(q8u16, d29u8, d5u8);
-        q9u16 = vmlal_u8(q9u16, d30u8, d5u8);
-        q10u16 = vmlal_u8(q10u16, d31u8, d5u8);
-
-        d28u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3);
-        d29u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3);
-        d30u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3);
-        d31u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3);
-
-        q3u16 = vmull_u8(d28u8, d3u8);
-        q4u16 = vmull_u8(d29u8, d3u8);
-        q5u16 = vmull_u8(d30u8, d3u8);
-        q6u16 = vmull_u8(d31u8, d3u8);
-
-        q3s16 = vreinterpretq_s16_u16(q3u16);
-        q4s16 = vreinterpretq_s16_u16(q4u16);
-        q5s16 = vreinterpretq_s16_u16(q5u16);
-        q6s16 = vreinterpretq_s16_u16(q6u16);
-        q7s16 = vreinterpretq_s16_u16(q7u16);
-        q8s16 = vreinterpretq_s16_u16(q8u16);
-        q9s16 = vreinterpretq_s16_u16(q9u16);
-        q10s16 = vreinterpretq_s16_u16(q10u16);
-
-        q7s16 = vqaddq_s16(q7s16, q3s16);
-        q8s16 = vqaddq_s16(q8s16, q4s16);
-        q9s16 = vqaddq_s16(q9s16, q5s16);
-        q10s16 = vqaddq_s16(q10s16, q6s16);
-
-        d22u8 = vqrshrun_n_s16(q7s16, 7);
-        d23u8 = vqrshrun_n_s16(q8s16, 7);
-        d24u8 = vqrshrun_n_s16(q9s16, 7);
-        d25u8 = vqrshrun_n_s16(q10s16, 7);
-
-        if (yoffset == 0) {  // firstpass_filter8x8_only: 4 rows per iteration
-            vst1_u8(dst_ptr, d22u8);
-            dst_ptr += dst_pitch;
-            vst1_u8(dst_ptr, d23u8);
-            dst_ptr += dst_pitch;
-            vst1_u8(dst_ptr, d24u8);
-            dst_ptr += dst_pitch;
-            vst1_u8(dst_ptr, d25u8);
-            dst_ptr += dst_pitch;
-        } else {
-            vst1_u8(tmpp, d22u8);
-            tmpp += 8;
-            vst1_u8(tmpp, d23u8);
-            tmpp += 8;
-            vst1_u8(tmpp, d24u8);
-            tmpp += 8;
-            vst1_u8(tmpp, d25u8);
-            tmpp += 8;
-        }
-    }
-    if (yoffset == 0)
-        return;
-
-    // First pass on the remaining 5 lines of data
-    q3u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q4u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q5u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q6u8 = vld1q_u8(src);
-    src += src_pixels_per_line;
-    q7u8 = vld1q_u8(src);
-
-    q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
-    q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
-    q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
-    q11u16 = vmull_u8(vget_low_u8(q6u8), d0u8);
-    q12u16 = vmull_u8(vget_low_u8(q7u8), d0u8);
-
-    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 1);
-    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 1);
-    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 1);
-    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 1);
-    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 1);
-
-    q8u16 = vmlsl_u8(q8u16, d27u8, d1u8);
-    q9u16 = vmlsl_u8(q9u16, d28u8, d1u8);
-    q10u16 = vmlsl_u8(q10u16, d29u8, d1u8);
-    q11u16 = vmlsl_u8(q11u16, d30u8, d1u8);
-    q12u16 = vmlsl_u8(q12u16, d31u8, d1u8);
-
-    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 4);
-    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 4);
-    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 4);
-    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 4);
-    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 4);
-
-    q8u16 = vmlsl_u8(q8u16, d27u8, d4u8);
-    q9u16 = vmlsl_u8(q9u16, d28u8, d4u8);
-    q10u16 = vmlsl_u8(q10u16, d29u8, d4u8);
-    q11u16 = vmlsl_u8(q11u16, d30u8, d4u8);
-    q12u16 = vmlsl_u8(q12u16, d31u8, d4u8);
-
-    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 2);
-    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 2);
-    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 2);
-    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 2);
-    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 2);
-
-    q8u16 = vmlal_u8(q8u16, d27u8, d2u8);
-    q9u16 = vmlal_u8(q9u16, d28u8, d2u8);
-    q10u16 = vmlal_u8(q10u16, d29u8, d2u8);
-    q11u16 = vmlal_u8(q11u16, d30u8, d2u8);
-    q12u16 = vmlal_u8(q12u16, d31u8, d2u8);
-
-    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 5);
-    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 5);
-    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 5);
-    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 5);
-    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 5);
-
-    q8u16 = vmlal_u8(q8u16, d27u8, d5u8);
-    q9u16 = vmlal_u8(q9u16, d28u8, d5u8);
-    q10u16 = vmlal_u8(q10u16, d29u8, d5u8);
-    q11u16 = vmlal_u8(q11u16, d30u8, d5u8);
-    q12u16 = vmlal_u8(q12u16, d31u8, d5u8);
-
-    d27u8 = vext_u8(vget_low_u8(q3u8), vget_high_u8(q3u8), 3);
-    d28u8 = vext_u8(vget_low_u8(q4u8), vget_high_u8(q4u8), 3);
-    d29u8 = vext_u8(vget_low_u8(q5u8), vget_high_u8(q5u8), 3);
-    d30u8 = vext_u8(vget_low_u8(q6u8), vget_high_u8(q6u8), 3);
-    d31u8 = vext_u8(vget_low_u8(q7u8), vget_high_u8(q7u8), 3);
-
-    q3u16 = vmull_u8(d27u8, d3u8);
-    q4u16 = vmull_u8(d28u8, d3u8);
-    q5u16 = vmull_u8(d29u8, d3u8);
-    q6u16 = vmull_u8(d30u8, d3u8);
-    q7u16 = vmull_u8(d31u8, d3u8);
-
-    q3s16 = vreinterpretq_s16_u16(q3u16);
-    q4s16 = vreinterpretq_s16_u16(q4u16);
-    q5s16 = vreinterpretq_s16_u16(q5u16);
-    q6s16 = vreinterpretq_s16_u16(q6u16);
-    q7s16 = vreinterpretq_s16_u16(q7u16);
-    q8s16 = vreinterpretq_s16_u16(q8u16);
-    q9s16 = vreinterpretq_s16_u16(q9u16);
-    q10s16 = vreinterpretq_s16_u16(q10u16);
-    q11s16 = vreinterpretq_s16_u16(q11u16);
-    q12s16 = vreinterpretq_s16_u16(q12u16);
-
-    q8s16 = vqaddq_s16(q8s16, q3s16);
-    q9s16 = vqaddq_s16(q9s16, q4s16);
-    q10s16 = vqaddq_s16(q10s16, q5s16);
-    q11s16 = vqaddq_s16(q11s16, q6s16);
-    q12s16 = vqaddq_s16(q12s16, q7s16);
-
-    d26u8 = vqrshrun_n_s16(q8s16, 7);
-    d27u8 = vqrshrun_n_s16(q9s16, 7);
-    d28u8 = vqrshrun_n_s16(q10s16, 7);
-    d29u8 = vqrshrun_n_s16(q11s16, 7);
-    d30u8 = vqrshrun_n_s16(q12s16, 7);
-
-    // Second pass: 8x8
-    dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
-    d0s8 = vdup_lane_s8(dtmps8, 0);
-    d1s8 = vdup_lane_s8(dtmps8, 1);
-    d2s8 = vdup_lane_s8(dtmps8, 2);
-    d3s8 = vdup_lane_s8(dtmps8, 3);
-    d4s8 = vdup_lane_s8(dtmps8, 4);
-    d5s8 = vdup_lane_s8(dtmps8, 5);
-    d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
-    d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
-    d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
-    d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
-    d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
-    d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
-    tmpp = tmp;
-    q9u8 = vld1q_u8(tmpp);
-    tmpp += 16;
-    q10u8 = vld1q_u8(tmpp);
-    tmpp += 16;
-    q11u8 = vld1q_u8(tmpp);
-    tmpp += 16;
-    q12u8 = vld1q_u8(tmpp);
-
-    d18u8 = vget_low_u8(q9u8);
-    d19u8 = vget_high_u8(q9u8);
-    d20u8 = vget_low_u8(q10u8);
-    d21u8 = vget_high_u8(q10u8);
-    d22u8 = vget_low_u8(q11u8);
-    d23u8 = vget_high_u8(q11u8);
-    d24u8 = vget_low_u8(q12u8);
-    d25u8 = vget_high_u8(q12u8);
-
-    for (i = 2; i > 0; i--) {
-        q3u16 = vmull_u8(d18u8, d0u8);
-        q4u16 = vmull_u8(d19u8, d0u8);
-        q5u16 = vmull_u8(d20u8, d0u8);
-        q6u16 = vmull_u8(d21u8, d0u8);
-
-        q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
-        q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
-        q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
-        q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);
-
-        q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
-        q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
-        q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
-        q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);
-
-        q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
-        q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
-        q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
-        q6u16 = vmlal_u8(q6u16, d23u8, d2u8);
-
-        q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
-        q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
-        q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
-        q6u16 = vmlal_u8(q6u16, d26u8, d5u8);
-
-        q7u16 = vmull_u8(d21u8, d3u8);
-        q8u16 = vmull_u8(d22u8, d3u8);
-        q9u16 = vmull_u8(d23u8, d3u8);
-        q10u16 = vmull_u8(d24u8, d3u8);
-
-        q3s16 = vreinterpretq_s16_u16(q3u16);
-        q4s16 = vreinterpretq_s16_u16(q4u16);
-        q5s16 = vreinterpretq_s16_u16(q5u16);
-        q6s16 = vreinterpretq_s16_u16(q6u16);
-        q7s16 = vreinterpretq_s16_u16(q7u16);
-        q8s16 = vreinterpretq_s16_u16(q8u16);
-        q9s16 = vreinterpretq_s16_u16(q9u16);
-        q10s16 = vreinterpretq_s16_u16(q10u16);
-
-        q7s16 = vqaddq_s16(q7s16, q3s16);
-        q8s16 = vqaddq_s16(q8s16, q4s16);
-        q9s16 = vqaddq_s16(q9s16, q5s16);
-        q10s16 = vqaddq_s16(q10s16, q6s16);
-
-        d6u8 = vqrshrun_n_s16(q7s16, 7);
-        d7u8 = vqrshrun_n_s16(q8s16, 7);
-        d8u8 = vqrshrun_n_s16(q9s16, 7);
-        d9u8 = vqrshrun_n_s16(q10s16, 7);
-
-        d18u8 = d22u8;
-        d19u8 = d23u8;
-        d20u8 = d24u8;
-        d21u8 = d25u8;
-        d22u8 = d26u8;
-        d23u8 = d27u8;
-        d24u8 = d28u8;
-        d25u8 = d29u8;
-        d26u8 = d30u8;
-
-        vst1_u8(dst_ptr, d6u8);
-        dst_ptr += dst_pitch;
-        vst1_u8(dst_ptr, d7u8);
-        dst_ptr += dst_pitch;
-        vst1_u8(dst_ptr, d8u8);
-        dst_ptr += dst_pitch;
-        vst1_u8(dst_ptr, d9u8);
-        dst_ptr += dst_pitch;
-    }
-    return;
-}
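
/*
 * A scalar model of the full two-pass path in vp8_sixtap_predict8x8_neon
 * above, built on the hypothetical sixtap_scalar sketch earlier: the
 * first pass filters 13 rows horizontally (8 output rows plus 5 rows of
 * vertical context), the second pass filters that buffer vertically.
 */
static void sixtap_8x8_scalar(const unsigned char *src, int src_stride,
                              const signed char *hfilter,
                              const signed char *vfilter,
                              unsigned char *dst, int dst_pitch) {
    unsigned char tmp[13 * 8];
    int r, c;
    for (r = 0; r < 13; r++)      /* first pass: source rows -2 .. +10 */
        for (c = 0; c < 8; c++)
            tmp[r * 8 + c] =
                sixtap_scalar(src + (r - 2) * src_stride + c, 1, hfilter);
    for (r = 0; r < 8; r++)       /* second pass: vertical taps over tmp */
        for (c = 0; c < 8; c++)
            dst[r * dst_pitch + c] =
                sixtap_scalar(tmp + (r + 2) * 8 + c, 8, vfilter);
}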
-
-void vp8_sixtap_predict16x16_neon(
-        unsigned char *src_ptr,
-        int src_pixels_per_line,
-        int xoffset,
-        int yoffset,
-        unsigned char *dst_ptr,
-        int dst_pitch) {
-    unsigned char *src, *src_tmp, *dst, *tmpp;
-    unsigned char tmp[336];
-    int i, j;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
-    uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d18u8, d19u8;
-    uint8x8_t d20u8, d21u8, d22u8, d23u8, d24u8, d25u8, d26u8, d27u8;
-    uint8x8_t d28u8, d29u8, d30u8, d31u8;
-    int8x8_t dtmps8, d0s8, d1s8, d2s8, d3s8, d4s8, d5s8;
-    uint8x16_t q3u8, q4u8;
-    uint16x8_t q3u16, q4u16, q5u16, q6u16, q7u16, q8u16, q9u16, q10u16;
-    uint16x8_t q11u16, q12u16, q13u16, q15u16;
-    int16x8_t q3s16, q4s16, q5s16, q6s16, q7s16, q8s16, q9s16, q10s16;
-    int16x8_t q11s16, q12s16, q13s16, q15s16;
-
-    if (xoffset == 0) {  // secondpass_filter16x16_only
-        // load second_pass filter
-        dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
-        d0s8 = vdup_lane_s8(dtmps8, 0);
-        d1s8 = vdup_lane_s8(dtmps8, 1);
-        d2s8 = vdup_lane_s8(dtmps8, 2);
-        d3s8 = vdup_lane_s8(dtmps8, 3);
-        d4s8 = vdup_lane_s8(dtmps8, 4);
-        d5s8 = vdup_lane_s8(dtmps8, 5);
-        d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
-        d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
-        d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
-        d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
-        d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
-        d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
-        // load src data
-        src_tmp = src_ptr - src_pixels_per_line * 2;
-        for (i = 0; i < 2; i++) {
-            src = src_tmp + i * 8;
-            dst = dst_ptr + i * 8;
-            d18u8 = vld1_u8(src);
-            src += src_pixels_per_line;
-            d19u8 = vld1_u8(src);
-            src += src_pixels_per_line;
-            d20u8 = vld1_u8(src);
-            src += src_pixels_per_line;
-            d21u8 = vld1_u8(src);
-            src += src_pixels_per_line;
-            d22u8 = vld1_u8(src);
-            src += src_pixels_per_line;
-            for (j = 0; j < 4; j++) {
-                d23u8 = vld1_u8(src);
-                src += src_pixels_per_line;
-                d24u8 = vld1_u8(src);
-                src += src_pixels_per_line;
-                d25u8 = vld1_u8(src);
-                src += src_pixels_per_line;
-                d26u8 = vld1_u8(src);
-                src += src_pixels_per_line;
-
-                q3u16 = vmull_u8(d18u8, d0u8);
-                q4u16 = vmull_u8(d19u8, d0u8);
-                q5u16 = vmull_u8(d20u8, d0u8);
-                q6u16 = vmull_u8(d21u8, d0u8);
-
-                q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
-                q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
-                q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
-                q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);
-
-                q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
-                q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
-                q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
-                q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);
-
-                q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
-                q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
-                q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
-                q6u16 = vmlal_u8(q6u16, d23u8, d2u8);
-
-                q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
-                q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
-                q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
-                q6u16 = vmlal_u8(q6u16, d26u8, d5u8);
-
-                q7u16 = vmull_u8(d21u8, d3u8);
-                q8u16 = vmull_u8(d22u8, d3u8);
-                q9u16 = vmull_u8(d23u8, d3u8);
-                q10u16 = vmull_u8(d24u8, d3u8);
-
-                q3s16 = vreinterpretq_s16_u16(q3u16);
-                q4s16 = vreinterpretq_s16_u16(q4u16);
-                q5s16 = vreinterpretq_s16_u16(q5u16);
-                q6s16 = vreinterpretq_s16_u16(q6u16);
-                q7s16 = vreinterpretq_s16_u16(q7u16);
-                q8s16 = vreinterpretq_s16_u16(q8u16);
-                q9s16 = vreinterpretq_s16_u16(q9u16);
-                q10s16 = vreinterpretq_s16_u16(q10u16);
-
-                q7s16 = vqaddq_s16(q7s16, q3s16);
-                q8s16 = vqaddq_s16(q8s16, q4s16);
-                q9s16 = vqaddq_s16(q9s16, q5s16);
-                q10s16 = vqaddq_s16(q10s16, q6s16);
-
-                d6u8 = vqrshrun_n_s16(q7s16, 7);
-                d7u8 = vqrshrun_n_s16(q8s16, 7);
-                d8u8 = vqrshrun_n_s16(q9s16, 7);
-                d9u8 = vqrshrun_n_s16(q10s16, 7);
-
-                d18u8 = d22u8;
-                d19u8 = d23u8;
-                d20u8 = d24u8;
-                d21u8 = d25u8;
-                d22u8 = d26u8;
-
-                vst1_u8(dst, d6u8);
-                dst += dst_pitch;
-                vst1_u8(dst, d7u8);
-                dst += dst_pitch;
-                vst1_u8(dst, d8u8);
-                dst += dst_pitch;
-                vst1_u8(dst, d9u8);
-                dst += dst_pitch;
-            }
-        }
-        return;
-    }
-
-    // load first_pass filter
-    dtmps8 = vld1_s8(vp8_sub_pel_filters[xoffset]);
-    d0s8 = vdup_lane_s8(dtmps8, 0);
-    d1s8 = vdup_lane_s8(dtmps8, 1);
-    d2s8 = vdup_lane_s8(dtmps8, 2);
-    d3s8 = vdup_lane_s8(dtmps8, 3);
-    d4s8 = vdup_lane_s8(dtmps8, 4);
-    d5s8 = vdup_lane_s8(dtmps8, 5);
-    d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
-    d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
-    d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
-    d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
-    d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
-    d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
-    // First pass: output_height lines x output_width columns (21x16)
-    if (yoffset == 0) {  // firstpass_filter16x16_only
-        src = src_ptr - 2;
-        dst = dst_ptr;
-        for (i = 0; i < 8; i++) {
-            d6u8 = vld1_u8(src);
-            d7u8 = vld1_u8(src + 8);
-            d8u8 = vld1_u8(src + 16);
-            src += src_pixels_per_line;
-            d9u8 = vld1_u8(src);
-            d10u8 = vld1_u8(src + 8);
-            d11u8 = vld1_u8(src + 16);
-            src += src_pixels_per_line;
-
-            __builtin_prefetch(src);
-            __builtin_prefetch(src + src_pixels_per_line);
-
-            q6u16 = vmull_u8(d6u8, d0u8);
-            q7u16 = vmull_u8(d7u8, d0u8);
-            q8u16 = vmull_u8(d9u8, d0u8);
-            q9u16 = vmull_u8(d10u8, d0u8);
-
-            d20u8 = vext_u8(d6u8, d7u8, 1);
-            d21u8 = vext_u8(d9u8, d10u8, 1);
-            d22u8 = vext_u8(d7u8, d8u8, 1);
-            d23u8 = vext_u8(d10u8, d11u8, 1);
-            d24u8 = vext_u8(d6u8, d7u8, 4);
-            d25u8 = vext_u8(d9u8, d10u8, 4);
-            d26u8 = vext_u8(d7u8, d8u8, 4);
-            d27u8 = vext_u8(d10u8, d11u8, 4);
-            d28u8 = vext_u8(d6u8, d7u8, 5);
-            d29u8 = vext_u8(d9u8, d10u8, 5);
-
-            q6u16 = vmlsl_u8(q6u16, d20u8, d1u8);
-            q8u16 = vmlsl_u8(q8u16, d21u8, d1u8);
-            q7u16 = vmlsl_u8(q7u16, d22u8, d1u8);
-            q9u16 = vmlsl_u8(q9u16, d23u8, d1u8);
-            q6u16 = vmlsl_u8(q6u16, d24u8, d4u8);
-            q8u16 = vmlsl_u8(q8u16, d25u8, d4u8);
-            q7u16 = vmlsl_u8(q7u16, d26u8, d4u8);
-            q9u16 = vmlsl_u8(q9u16, d27u8, d4u8);
-            q6u16 = vmlal_u8(q6u16, d28u8, d5u8);
-            q8u16 = vmlal_u8(q8u16, d29u8, d5u8);
-
-            d20u8 = vext_u8(d7u8, d8u8, 5);
-            d21u8 = vext_u8(d10u8, d11u8, 5);
-            d22u8 = vext_u8(d6u8, d7u8, 2);
-            d23u8 = vext_u8(d9u8, d10u8, 2);
-            d24u8 = vext_u8(d7u8, d8u8, 2);
-            d25u8 = vext_u8(d10u8, d11u8, 2);
-            d26u8 = vext_u8(d6u8, d7u8, 3);
-            d27u8 = vext_u8(d9u8, d10u8, 3);
-            d28u8 = vext_u8(d7u8, d8u8, 3);
-            d29u8 = vext_u8(d10u8, d11u8, 3);
-
-            q7u16 = vmlal_u8(q7u16, d20u8, d5u8);
-            q9u16 = vmlal_u8(q9u16, d21u8, d5u8);
-            q6u16 = vmlal_u8(q6u16, d22u8, d2u8);
-            q8u16 = vmlal_u8(q8u16, d23u8, d2u8);
-            q7u16 = vmlal_u8(q7u16, d24u8, d2u8);
-            q9u16 = vmlal_u8(q9u16, d25u8, d2u8);
-
-            q10u16 = vmull_u8(d26u8, d3u8);
-            q11u16 = vmull_u8(d27u8, d3u8);
-            q12u16 = vmull_u8(d28u8, d3u8);
-            q15u16 = vmull_u8(d29u8, d3u8);
-
-            q6s16 = vreinterpretq_s16_u16(q6u16);
-            q7s16 = vreinterpretq_s16_u16(q7u16);
-            q8s16 = vreinterpretq_s16_u16(q8u16);
-            q9s16 = vreinterpretq_s16_u16(q9u16);
-            q10s16 = vreinterpretq_s16_u16(q10u16);
-            q11s16 = vreinterpretq_s16_u16(q11u16);
-            q12s16 = vreinterpretq_s16_u16(q12u16);
-            q15s16 = vreinterpretq_s16_u16(q15u16);
-
-            q6s16 = vqaddq_s16(q6s16, q10s16);
-            q8s16 = vqaddq_s16(q8s16, q11s16);
-            q7s16 = vqaddq_s16(q7s16, q12s16);
-            q9s16 = vqaddq_s16(q9s16, q15s16);
-
-            d6u8 = vqrshrun_n_s16(q6s16, 7);
-            d7u8 = vqrshrun_n_s16(q7s16, 7);
-            d8u8 = vqrshrun_n_s16(q8s16, 7);
-            d9u8 = vqrshrun_n_s16(q9s16, 7);
-
-            q3u8 = vcombine_u8(d6u8, d7u8);
-            q4u8 = vcombine_u8(d8u8, d9u8);
-            vst1q_u8(dst, q3u8);
-            dst += dst_pitch;
-            vst1q_u8(dst, q4u8);
-            dst += dst_pitch;
-        }
-        return;
-    }
-
-    src = src_ptr - 2 - src_pixels_per_line * 2;
-    tmpp = tmp;
-    for (i = 0; i < 7; i++) {
-        d6u8 = vld1_u8(src);
-        d7u8 = vld1_u8(src + 8);
-        d8u8 = vld1_u8(src + 16);
-        src += src_pixels_per_line;
-        d9u8 = vld1_u8(src);
-        d10u8 = vld1_u8(src + 8);
-        d11u8 = vld1_u8(src + 16);
-        src += src_pixels_per_line;
-        d12u8 = vld1_u8(src);
-        d13u8 = vld1_u8(src + 8);
-        d14u8 = vld1_u8(src + 16);
-        src += src_pixels_per_line;
-
-        __builtin_prefetch(src);
-        __builtin_prefetch(src + src_pixels_per_line);
-        __builtin_prefetch(src + src_pixels_per_line * 2);
-
-        q8u16 = vmull_u8(d6u8, d0u8);
-        q9u16 = vmull_u8(d7u8, d0u8);
-        q10u16 = vmull_u8(d9u8, d0u8);
-        q11u16 = vmull_u8(d10u8, d0u8);
-        q12u16 = vmull_u8(d12u8, d0u8);
-        q13u16 = vmull_u8(d13u8, d0u8);
-
-        d28u8 = vext_u8(d6u8, d7u8, 1);
-        d29u8 = vext_u8(d9u8, d10u8, 1);
-        d30u8 = vext_u8(d12u8, d13u8, 1);
-        q8u16 = vmlsl_u8(q8u16, d28u8, d1u8);
-        q10u16 = vmlsl_u8(q10u16, d29u8, d1u8);
-        q12u16 = vmlsl_u8(q12u16, d30u8, d1u8);
-        d28u8 = vext_u8(d7u8, d8u8, 1);
-        d29u8 = vext_u8(d10u8, d11u8, 1);
-        d30u8 = vext_u8(d13u8, d14u8, 1);
-        q9u16  = vmlsl_u8(q9u16, d28u8, d1u8);
-        q11u16 = vmlsl_u8(q11u16, d29u8, d1u8);
-        q13u16 = vmlsl_u8(q13u16, d30u8, d1u8);
-
-        d28u8 = vext_u8(d6u8, d7u8, 4);
-        d29u8 = vext_u8(d9u8, d10u8, 4);
-        d30u8 = vext_u8(d12u8, d13u8, 4);
-        q8u16 = vmlsl_u8(q8u16, d28u8, d4u8);
-        q10u16 = vmlsl_u8(q10u16, d29u8, d4u8);
-        q12u16 = vmlsl_u8(q12u16, d30u8, d4u8);
-        d28u8 = vext_u8(d7u8, d8u8, 4);
-        d29u8 = vext_u8(d10u8, d11u8, 4);
-        d30u8 = vext_u8(d13u8, d14u8, 4);
-        q9u16 = vmlsl_u8(q9u16, d28u8, d4u8);
-        q11u16 = vmlsl_u8(q11u16, d29u8, d4u8);
-        q13u16 = vmlsl_u8(q13u16, d30u8, d4u8);
-
-        d28u8 = vext_u8(d6u8, d7u8, 5);
-        d29u8 = vext_u8(d9u8, d10u8, 5);
-        d30u8 = vext_u8(d12u8, d13u8, 5);
-        q8u16 = vmlal_u8(q8u16, d28u8, d5u8);
-        q10u16 = vmlal_u8(q10u16, d29u8, d5u8);
-        q12u16 = vmlal_u8(q12u16, d30u8, d5u8);
-        d28u8 = vext_u8(d7u8, d8u8, 5);
-        d29u8 = vext_u8(d10u8, d11u8, 5);
-        d30u8 = vext_u8(d13u8, d14u8, 5);
-        q9u16 = vmlal_u8(q9u16, d28u8, d5u8);
-        q11u16 = vmlal_u8(q11u16, d29u8, d5u8);
-        q13u16 = vmlal_u8(q13u16, d30u8, d5u8);
-
-        d28u8 = vext_u8(d6u8, d7u8, 2);
-        d29u8 = vext_u8(d9u8, d10u8, 2);
-        d30u8 = vext_u8(d12u8, d13u8, 2);
-        q8u16 = vmlal_u8(q8u16, d28u8, d2u8);
-        q10u16 = vmlal_u8(q10u16, d29u8, d2u8);
-        q12u16 = vmlal_u8(q12u16, d30u8, d2u8);
-        d28u8 = vext_u8(d7u8, d8u8, 2);
-        d29u8 = vext_u8(d10u8, d11u8, 2);
-        d30u8 = vext_u8(d13u8, d14u8, 2);
-        q9u16 = vmlal_u8(q9u16, d28u8, d2u8);
-        q11u16 = vmlal_u8(q11u16, d29u8, d2u8);
-        q13u16 = vmlal_u8(q13u16, d30u8, d2u8);
-
-        d28u8 = vext_u8(d6u8, d7u8, 3);
-        d29u8 = vext_u8(d9u8, d10u8, 3);
-        d30u8 = vext_u8(d12u8, d13u8, 3);
-        d15u8 = vext_u8(d7u8, d8u8, 3);
-        d31u8 = vext_u8(d10u8, d11u8, 3);
-        d6u8  = vext_u8(d13u8, d14u8, 3);
-        q4u16 = vmull_u8(d28u8, d3u8);
-        q5u16 = vmull_u8(d29u8, d3u8);
-        q6u16 = vmull_u8(d30u8, d3u8);
-        q4s16 = vreinterpretq_s16_u16(q4u16);
-        q5s16 = vreinterpretq_s16_u16(q5u16);
-        q6s16 = vreinterpretq_s16_u16(q6u16);
-        q8s16 = vreinterpretq_s16_u16(q8u16);
-        q10s16 = vreinterpretq_s16_u16(q10u16);
-        q12s16 = vreinterpretq_s16_u16(q12u16);
-        q8s16 = vqaddq_s16(q8s16, q4s16);
-        q10s16 = vqaddq_s16(q10s16, q5s16);
-        q12s16 = vqaddq_s16(q12s16, q6s16);
-
-        q6u16 = vmull_u8(d15u8, d3u8);
-        q7u16 = vmull_u8(d31u8, d3u8);
-        q3u16 = vmull_u8(d6u8, d3u8);
-        q3s16 = vreinterpretq_s16_u16(q3u16);
-        q6s16 = vreinterpretq_s16_u16(q6u16);
-        q7s16 = vreinterpretq_s16_u16(q7u16);
-        q9s16 = vreinterpretq_s16_u16(q9u16);
-        q11s16 = vreinterpretq_s16_u16(q11u16);
-        q13s16 = vreinterpretq_s16_u16(q13u16);
-        q9s16 = vqaddq_s16(q9s16, q6s16);
-        q11s16 = vqaddq_s16(q11s16, q7s16);
-        q13s16 = vqaddq_s16(q13s16, q3s16);
-
-        d6u8 = vqrshrun_n_s16(q8s16, 7);
-        d7u8 = vqrshrun_n_s16(q9s16, 7);
-        d8u8 = vqrshrun_n_s16(q10s16, 7);
-        d9u8 = vqrshrun_n_s16(q11s16, 7);
-        d10u8 = vqrshrun_n_s16(q12s16, 7);
-        d11u8 = vqrshrun_n_s16(q13s16, 7);
-
-        vst1_u8(tmpp, d6u8);
-        tmpp += 8;
-        vst1_u8(tmpp, d7u8);
-        tmpp += 8;
-        vst1_u8(tmpp, d8u8);
-        tmpp += 8;
-        vst1_u8(tmpp, d9u8);
-        tmpp += 8;
-        vst1_u8(tmpp, d10u8);
-        tmpp += 8;
-        vst1_u8(tmpp, d11u8);
-        tmpp += 8;
-    }
-
-    // Second pass: 16x16
-    dtmps8 = vld1_s8(vp8_sub_pel_filters[yoffset]);
-    d0s8 = vdup_lane_s8(dtmps8, 0);
-    d1s8 = vdup_lane_s8(dtmps8, 1);
-    d2s8 = vdup_lane_s8(dtmps8, 2);
-    d3s8 = vdup_lane_s8(dtmps8, 3);
-    d4s8 = vdup_lane_s8(dtmps8, 4);
-    d5s8 = vdup_lane_s8(dtmps8, 5);
-    d0u8 = vreinterpret_u8_s8(vabs_s8(d0s8));
-    d1u8 = vreinterpret_u8_s8(vabs_s8(d1s8));
-    d2u8 = vreinterpret_u8_s8(vabs_s8(d2s8));
-    d3u8 = vreinterpret_u8_s8(vabs_s8(d3s8));
-    d4u8 = vreinterpret_u8_s8(vabs_s8(d4s8));
-    d5u8 = vreinterpret_u8_s8(vabs_s8(d5s8));
-
-    for (i = 0; i < 2; i++) {
-        dst = dst_ptr + 8 * i;
-        tmpp = tmp + 8 * i;
-        d18u8 = vld1_u8(tmpp);
-        tmpp += 16;
-        d19u8 = vld1_u8(tmpp);
-        tmpp += 16;
-        d20u8 = vld1_u8(tmpp);
-        tmpp += 16;
-        d21u8 = vld1_u8(tmpp);
-        tmpp += 16;
-        d22u8 = vld1_u8(tmpp);
-        tmpp += 16;
-        for (j = 0; j < 4; j++) {
-            d23u8 = vld1_u8(tmpp);
-            tmpp += 16;
-            d24u8 = vld1_u8(tmpp);
-            tmpp += 16;
-            d25u8 = vld1_u8(tmpp);
-            tmpp += 16;
-            d26u8 = vld1_u8(tmpp);
-            tmpp += 16;
-
-            q3u16 = vmull_u8(d18u8, d0u8);
-            q4u16 = vmull_u8(d19u8, d0u8);
-            q5u16 = vmull_u8(d20u8, d0u8);
-            q6u16 = vmull_u8(d21u8, d0u8);
-
-            q3u16 = vmlsl_u8(q3u16, d19u8, d1u8);
-            q4u16 = vmlsl_u8(q4u16, d20u8, d1u8);
-            q5u16 = vmlsl_u8(q5u16, d21u8, d1u8);
-            q6u16 = vmlsl_u8(q6u16, d22u8, d1u8);
-
-            q3u16 = vmlsl_u8(q3u16, d22u8, d4u8);
-            q4u16 = vmlsl_u8(q4u16, d23u8, d4u8);
-            q5u16 = vmlsl_u8(q5u16, d24u8, d4u8);
-            q6u16 = vmlsl_u8(q6u16, d25u8, d4u8);
-
-            q3u16 = vmlal_u8(q3u16, d20u8, d2u8);
-            q4u16 = vmlal_u8(q4u16, d21u8, d2u8);
-            q5u16 = vmlal_u8(q5u16, d22u8, d2u8);
-            q6u16 = vmlal_u8(q6u16, d23u8, d2u8);
-
-            q3u16 = vmlal_u8(q3u16, d23u8, d5u8);
-            q4u16 = vmlal_u8(q4u16, d24u8, d5u8);
-            q5u16 = vmlal_u8(q5u16, d25u8, d5u8);
-            q6u16 = vmlal_u8(q6u16, d26u8, d5u8);
-
-            q7u16 = vmull_u8(d21u8, d3u8);
-            q8u16 = vmull_u8(d22u8, d3u8);
-            q9u16 = vmull_u8(d23u8, d3u8);
-            q10u16 = vmull_u8(d24u8, d3u8);
-
-            q3s16 = vreinterpretq_s16_u16(q3u16);
-            q4s16 = vreinterpretq_s16_u16(q4u16);
-            q5s16 = vreinterpretq_s16_u16(q5u16);
-            q6s16 = vreinterpretq_s16_u16(q6u16);
-            q7s16 = vreinterpretq_s16_u16(q7u16);
-            q8s16 = vreinterpretq_s16_u16(q8u16);
-            q9s16 = vreinterpretq_s16_u16(q9u16);
-            q10s16 = vreinterpretq_s16_u16(q10u16);
-
-            q7s16 = vqaddq_s16(q7s16, q3s16);
-            q8s16 = vqaddq_s16(q8s16, q4s16);
-            q9s16 = vqaddq_s16(q9s16, q5s16);
-            q10s16 = vqaddq_s16(q10s16, q6s16);
-
-            d6u8 = vqrshrun_n_s16(q7s16, 7);
-            d7u8 = vqrshrun_n_s16(q8s16, 7);
-            d8u8 = vqrshrun_n_s16(q9s16, 7);
-            d9u8 = vqrshrun_n_s16(q10s16, 7);
-
-            d18u8 = d22u8;
-            d19u8 = d23u8;
-            d20u8 = d24u8;
-            d21u8 = d25u8;
-            d22u8 = d26u8;
-
-            vst1_u8(dst, d6u8);
-            dst += dst_pitch;
-            vst1_u8(dst, d7u8);
-            dst += dst_pitch;
-            vst1_u8(dst, d8u8);
-            dst += dst_pitch;
-            vst1_u8(dst, d9u8);
-            dst += dst_pitch;
-        }
-    }
-    return;
-}
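
/*
 * Scratch-buffer sizing used by the two-pass paths above (a sketch; the
 * names are illustrative): the vertical six-tap filter needs 5 extra
 * context rows, so the 16x16 first pass stores (16 + 5) rows of 16 bytes,
 * matching tmp[336].  The 8x8 path keeps its 5 extra rows in registers
 * (d26u8..d30u8) and stores only 8 rows of 8 bytes, matching tmp[64].
 */
enum {
    SIXTAP_CONTEXT_ROWS = 5,
    SIXTAP_TMP_16x16 = (16 + SIXTAP_CONTEXT_ROWS) * 16  /* 336 bytes */
};
typedef char assert_sixtap_tmp[SIXTAP_TMP_16x16 == 336 ? 1 : -1];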
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/variance_neon.c
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include "vpx_ports/mem.h"
-
-unsigned int vp8_variance16x16_neon(
-        const unsigned char *src_ptr,
-        int source_stride,
-        const unsigned char *ref_ptr,
-        int recon_stride,
-        unsigned int *sse) {
-    int i;
-    int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
-    uint32x2_t d0u32, d10u32;
-    int64x1_t d0s64, d1s64;
-    uint8x16_t q0u8, q1u8, q2u8, q3u8;
-    uint16x8_t q11u16, q12u16, q13u16, q14u16;
-    int32x4_t q8s32, q9s32, q10s32;
-    int64x2_t q0s64, q1s64, q5s64;
-
-    q8s32 = vdupq_n_s32(0);
-    q9s32 = vdupq_n_s32(0);
-    q10s32 = vdupq_n_s32(0);
-
-    for (i = 0; i < 8; i++) {
-        q0u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        q1u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        __builtin_prefetch(src_ptr);
-
-        q2u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q3u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        __builtin_prefetch(ref_ptr);
-
-        q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
-        q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
-        q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8));
-        q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8));
-
-        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
-        q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
-        q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
-
-        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
-        q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
-        q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
-
-        d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
-        d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16));
-        q9s32 = vmlal_s16(q9s32, d26s16, d26s16);
-        q10s32 = vmlal_s16(q10s32, d27s16, d27s16);
-
-        d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
-        d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16));
-        q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
-        q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
-    }
-
-    q10s32 = vaddq_s32(q10s32, q9s32);
-    q0s64 = vpaddlq_s32(q8s32);
-    q1s64 = vpaddlq_s32(q10s32);
-
-    d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
-    d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
-
-    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
-                      vreinterpret_s32_s64(d0s64));
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
-
-    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 8);
-    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
-
-    return vget_lane_u32(d0u32, 0);
-}
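
/*
 * Scalar model of the reduction above (a sketch, not a function from this
 * file): accumulate the sum and the sum of squares of the pixel
 * differences, store the SSE, and return sse - sum*sum / (w*h).  The NEON
 * kernels fold the division into a shift; vshr_n_u32(..., 8) here is the
 * /256 of a 16x16 block.
 */
static unsigned int variance_scalar(const unsigned char *src, int src_stride,
                                    const unsigned char *ref, int ref_stride,
                                    int w, int h, unsigned int *sse) {
    long long sum = 0, ssq = 0;
    int r, c;
    for (r = 0; r < h; r++) {
        for (c = 0; c < w; c++) {
            const int d = src[c] - ref[c];
            sum += d;
            ssq += (long long)d * d;
        }
        src += src_stride;
        ref += ref_stride;
    }
    *sse = (unsigned int)ssq;
    return (unsigned int)(ssq - (sum * sum) / (w * h));
}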
-
-unsigned int vp8_variance16x8_neon(
-        const unsigned char *src_ptr,
-        int source_stride,
-        const unsigned char *ref_ptr,
-        int recon_stride,
-        unsigned int *sse) {
-    int i;
-    int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
-    uint32x2_t d0u32, d10u32;
-    int64x1_t d0s64, d1s64;
-    uint8x16_t q0u8, q1u8, q2u8, q3u8;
-    uint16x8_t q11u16, q12u16, q13u16, q14u16;
-    int32x4_t q8s32, q9s32, q10s32;
-    int64x2_t q0s64, q1s64, q5s64;
-
-    q8s32 = vdupq_n_s32(0);
-    q9s32 = vdupq_n_s32(0);
-    q10s32 = vdupq_n_s32(0);
-
-    for (i = 0; i < 4; i++) {  // variance16x8_neon_loop
-        q0u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        q1u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        __builtin_prefetch(src_ptr);
-
-        q2u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q3u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        __builtin_prefetch(ref_ptr);
-
-        q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
-        q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
-        q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8));
-        q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8));
-
-        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
-        q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
-        q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
-
-        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
-        q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
-        q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
-
-        d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
-        d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16));
-        q9s32 = vmlal_s16(q9s32, d26s16, d26s16);
-        q10s32 = vmlal_s16(q10s32, d27s16, d27s16);
-
-        d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
-        d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16));
-        q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
-        q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
-    }
-
-    q10s32 = vaddq_s32(q10s32, q9s32);
-    q0s64 = vpaddlq_s32(q8s32);
-    q1s64 = vpaddlq_s32(q10s32);
-
-    d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
-    d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
-
-    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
-                      vreinterpret_s32_s64(d0s64));
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
-
-    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7);
-    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
-
-    return vget_lane_u32(d0u32, 0);
-}
-
-unsigned int vp8_variance8x16_neon(
-        const unsigned char *src_ptr,
-        int source_stride,
-        const unsigned char *ref_ptr,
-        int recon_stride,
-        unsigned int *sse) {
-    int i;
-    uint8x8_t d0u8, d2u8, d4u8, d6u8;
-    int16x4_t d22s16, d23s16, d24s16, d25s16;
-    uint32x2_t d0u32, d10u32;
-    int64x1_t d0s64, d1s64;
-    uint16x8_t q11u16, q12u16;
-    int32x4_t q8s32, q9s32, q10s32;
-    int64x2_t q0s64, q1s64, q5s64;
-
-    q8s32 = vdupq_n_s32(0);
-    q9s32 = vdupq_n_s32(0);
-    q10s32 = vdupq_n_s32(0);
-
-    for (i = 0; i < 8; i++) {  // variance8x16_neon_loop
-        d0u8 = vld1_u8(src_ptr);
-        src_ptr += source_stride;
-        d2u8 = vld1_u8(src_ptr);
-        src_ptr += source_stride;
-        __builtin_prefetch(src_ptr);
-
-        d4u8 = vld1_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        d6u8 = vld1_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        __builtin_prefetch(ref_ptr);
-
-        q11u16 = vsubl_u8(d0u8, d4u8);
-        q12u16 = vsubl_u8(d2u8, d6u8);
-
-        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
-        q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
-        q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
-
-        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
-        q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
-        q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
-    }
-
-    q10s32 = vaddq_s32(q10s32, q9s32);
-    q0s64 = vpaddlq_s32(q8s32);
-    q1s64 = vpaddlq_s32(q10s32);
-
-    d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
-    d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
-
-    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
-                      vreinterpret_s32_s64(d0s64));
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
-
-    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7);
-    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
-
-    return vget_lane_u32(d0u32, 0);
-}
-
-unsigned int vp8_variance8x8_neon(
-        const unsigned char *src_ptr,
-        int source_stride,
-        const unsigned char *ref_ptr,
-        int recon_stride,
-        unsigned int *sse) {
-    int i;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
-    int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
-    uint32x2_t d0u32, d10u32;
-    int64x1_t d0s64, d1s64;
-    uint16x8_t q11u16, q12u16, q13u16, q14u16;
-    int32x4_t q8s32, q9s32, q10s32;
-    int64x2_t q0s64, q1s64, q5s64;
-
-    q8s32 = vdupq_n_s32(0);
-    q9s32 = vdupq_n_s32(0);
-    q10s32 = vdupq_n_s32(0);
-
-    for (i = 0; i < 2; i++) {  // variance8x8_neon_loop
-        d0u8 = vld1_u8(src_ptr);
-        src_ptr += source_stride;
-        d1u8 = vld1_u8(src_ptr);
-        src_ptr += source_stride;
-        d2u8 = vld1_u8(src_ptr);
-        src_ptr += source_stride;
-        d3u8 = vld1_u8(src_ptr);
-        src_ptr += source_stride;
-
-        d4u8 = vld1_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        d5u8 = vld1_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        d6u8 = vld1_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        d7u8 = vld1_u8(ref_ptr);
-        ref_ptr += recon_stride;
-
-        q11u16 = vsubl_u8(d0u8, d4u8);
-        q12u16 = vsubl_u8(d1u8, d5u8);
-        q13u16 = vsubl_u8(d2u8, d6u8);
-        q14u16 = vsubl_u8(d3u8, d7u8);
-
-        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
-        q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
-        q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
-
-        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
-        q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
-        q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
-
-        d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
-        d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16));
-        q9s32 = vmlal_s16(q9s32, d26s16, d26s16);
-        q10s32 = vmlal_s16(q10s32, d27s16, d27s16);
-
-        d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
-        d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16));
-        q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
-        q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
-    }
-
-    q10s32 = vaddq_s32(q10s32, q9s32);
-    q0s64 = vpaddlq_s32(q8s32);
-    q1s64 = vpaddlq_s32(q10s32);
-
-    d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
-    d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
-
-    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
-                      vreinterpret_s32_s64(d0s64));
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
-
-    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 6);
-    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
-
-    return vget_lane_u32(d0u32, 0);
-}
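
/*
 * The four kernels above differ only in block size and in the final
 * normalising shift, which is log2 of the pixel count: 16x16 -> 8,
 * 16x8 and 8x16 -> 7, 8x8 -> 6.  A sketch of that relation (the helper
 * name is illustrative):
 */
static int variance_shift(int w, int h) {
    int n = w * h, shift = 0;
    while (n > 1) {
        n >>= 1;
        shift++;
    }
    return shift;
}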
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance_neon.c
+++ /dev/null
@@ -1,1024 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include "vpx_ports/mem.h"
-#include "vpx/vpx_integer.h"
-
-static const uint16_t bilinear_taps_coeff[8][2] = {
-    {128,   0},
-    {112,  16},
-    { 96,  32},
-    { 80,  48},
-    { 64,  64},
-    { 48,  80},
-    { 32,  96},
-    { 16, 112}
-};
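
/*
 * Each row of bilinear_taps_coeff sums to 128 and is indexed by the
 * 1/8-pel offset.  A scalar sketch of the filtering step (hypothetical
 * helper); vqrshrn_n_u16(..., 7) in the code below is the same rounded
 * (x + 64) >> 7.
 */
static unsigned char bilinear_scalar(unsigned char a, unsigned char b,
                                     const uint16_t *taps) {
    return (unsigned char)((a * taps[0] + b * taps[1] + 64) >> 7);
}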
-
-unsigned int vp8_sub_pixel_variance16x16_neon_func(
-        const unsigned char *src_ptr,
-        int src_pixels_per_line,
-        int xoffset,
-        int yoffset,
-        const unsigned char *dst_ptr,
-        int dst_pixels_per_line,
-        unsigned int *sse) {
-    int i;
-    DECLARE_ALIGNED_ARRAY(16, unsigned char, tmp, 528);
-    unsigned char *tmpp;
-    unsigned char *tmpp2;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
-    uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d16u8, d17u8, d18u8;
-    uint8x8_t d19u8, d20u8, d21u8;
-    int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
-    uint32x2_t d0u32, d10u32;
-    int64x1_t d0s64, d1s64, d2s64, d3s64;
-    uint8x16_t q0u8, q1u8, q2u8, q3u8, q4u8, q5u8, q6u8, q7u8, q8u8, q9u8;
-    uint8x16_t q10u8, q11u8, q12u8, q13u8, q14u8, q15u8;
-    uint16x8_t q1u16, q2u16, q3u16, q4u16, q5u16, q6u16, q7u16, q8u16;
-    uint16x8_t q9u16, q10u16, q11u16, q12u16, q13u16, q14u16;
-    int32x4_t q8s32, q9s32, q10s32;
-    int64x2_t q0s64, q1s64, q5s64;
-
-    tmpp2 = tmp + 272;
-    tmpp = tmp;
-    if (xoffset == 0) {  // secondpass_bfilter16x16_only
-        d0u8 = vdup_n_u8(bilinear_taps_coeff[yoffset][0]);
-        d1u8 = vdup_n_u8(bilinear_taps_coeff[yoffset][1]);
-
-        q11u8 = vld1q_u8(src_ptr);
-        src_ptr += src_pixels_per_line;
-        for (i = 4; i > 0; i--) {
-            q12u8 = vld1q_u8(src_ptr);
-            src_ptr += src_pixels_per_line;
-            q13u8 = vld1q_u8(src_ptr);
-            src_ptr += src_pixels_per_line;
-            q14u8 = vld1q_u8(src_ptr);
-            src_ptr += src_pixels_per_line;
-            q15u8 = vld1q_u8(src_ptr);
-            src_ptr += src_pixels_per_line;
-
-            __builtin_prefetch(src_ptr);
-            __builtin_prefetch(src_ptr + src_pixels_per_line);
-            __builtin_prefetch(src_ptr + src_pixels_per_line * 2);
-
-            q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8);
-            q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8);
-            q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8);
-            q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8);
-            q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8);
-            q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8);
-            q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8);
-            q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8);
-
-            q1u16 = vmlal_u8(q1u16, vget_low_u8(q12u8), d1u8);
-            q2u16 = vmlal_u8(q2u16, vget_high_u8(q12u8), d1u8);
-            q3u16 = vmlal_u8(q3u16, vget_low_u8(q13u8), d1u8);
-            q4u16 = vmlal_u8(q4u16, vget_high_u8(q13u8), d1u8);
-            q5u16 = vmlal_u8(q5u16, vget_low_u8(q14u8), d1u8);
-            q6u16 = vmlal_u8(q6u16, vget_high_u8(q14u8), d1u8);
-            q7u16 = vmlal_u8(q7u16, vget_low_u8(q15u8), d1u8);
-            q8u16 = vmlal_u8(q8u16, vget_high_u8(q15u8), d1u8);
-
-            d2u8 = vqrshrn_n_u16(q1u16, 7);
-            d3u8 = vqrshrn_n_u16(q2u16, 7);
-            d4u8 = vqrshrn_n_u16(q3u16, 7);
-            d5u8 = vqrshrn_n_u16(q4u16, 7);
-            d6u8 = vqrshrn_n_u16(q5u16, 7);
-            d7u8 = vqrshrn_n_u16(q6u16, 7);
-            d8u8 = vqrshrn_n_u16(q7u16, 7);
-            d9u8 = vqrshrn_n_u16(q8u16, 7);
-
-            q1u8 = vcombine_u8(d2u8, d3u8);
-            q2u8 = vcombine_u8(d4u8, d5u8);
-            q3u8 = vcombine_u8(d6u8, d7u8);
-            q4u8 = vcombine_u8(d8u8, d9u8);
-
-            q11u8 = q15u8;
-
-            vst1q_u8((uint8_t *)tmpp2, q1u8);
-            tmpp2 += 16;
-            vst1q_u8((uint8_t *)tmpp2, q2u8);
-            tmpp2 += 16;
-            vst1q_u8((uint8_t *)tmpp2, q3u8);
-            tmpp2 += 16;
-            vst1q_u8((uint8_t *)tmpp2, q4u8);
-            tmpp2 += 16;
-        }
-    } else if (yoffset == 0) {  // firstpass_bfilter16x16_only
-        d0u8 = vdup_n_u8(bilinear_taps_coeff[xoffset][0]);
-        d1u8 = vdup_n_u8(bilinear_taps_coeff[xoffset][1]);
-
-        for (i = 4; i > 0 ; i--) {
-            d2u8 = vld1_u8(src_ptr);
-            d3u8 = vld1_u8(src_ptr + 8);
-            d4u8 = vld1_u8(src_ptr + 16);
-            src_ptr += src_pixels_per_line;
-            d5u8 = vld1_u8(src_ptr);
-            d6u8 = vld1_u8(src_ptr + 8);
-            d7u8 = vld1_u8(src_ptr + 16);
-            src_ptr += src_pixels_per_line;
-            d8u8 = vld1_u8(src_ptr);
-            d9u8 = vld1_u8(src_ptr + 8);
-            d10u8 = vld1_u8(src_ptr + 16);
-            src_ptr += src_pixels_per_line;
-            d11u8 = vld1_u8(src_ptr);
-            d12u8 = vld1_u8(src_ptr + 8);
-            d13u8 = vld1_u8(src_ptr + 16);
-            src_ptr += src_pixels_per_line;
-
-            __builtin_prefetch(src_ptr);
-            __builtin_prefetch(src_ptr + src_pixels_per_line);
-            __builtin_prefetch(src_ptr + src_pixels_per_line * 2);
-
-            q7u16  = vmull_u8(d2u8, d0u8);
-            q8u16  = vmull_u8(d3u8, d0u8);
-            q9u16  = vmull_u8(d5u8, d0u8);
-            q10u16 = vmull_u8(d6u8, d0u8);
-            q11u16 = vmull_u8(d8u8, d0u8);
-            q12u16 = vmull_u8(d9u8, d0u8);
-            q13u16 = vmull_u8(d11u8, d0u8);
-            q14u16 = vmull_u8(d12u8, d0u8);
-
-            d2u8  = vext_u8(d2u8, d3u8, 1);
-            d5u8  = vext_u8(d5u8, d6u8, 1);
-            d8u8  = vext_u8(d8u8, d9u8, 1);
-            d11u8 = vext_u8(d11u8, d12u8, 1);
-
-            q7u16  = vmlal_u8(q7u16, d2u8, d1u8);
-            q9u16  = vmlal_u8(q9u16, d5u8, d1u8);
-            q11u16 = vmlal_u8(q11u16, d8u8, d1u8);
-            q13u16 = vmlal_u8(q13u16, d11u8, d1u8);
-
-            d3u8  = vext_u8(d3u8, d4u8, 1);
-            d6u8  = vext_u8(d6u8, d7u8, 1);
-            d9u8  = vext_u8(d9u8, d10u8, 1);
-            d12u8 = vext_u8(d12u8, d13u8, 1);
-
-            q8u16  = vmlal_u8(q8u16,  d3u8, d1u8);
-            q10u16 = vmlal_u8(q10u16, d6u8, d1u8);
-            q12u16 = vmlal_u8(q12u16, d9u8, d1u8);
-            q14u16 = vmlal_u8(q14u16, d12u8, d1u8);
-
-            d14u8 = vqrshrn_n_u16(q7u16, 7);
-            d15u8 = vqrshrn_n_u16(q8u16, 7);
-            d16u8 = vqrshrn_n_u16(q9u16, 7);
-            d17u8 = vqrshrn_n_u16(q10u16, 7);
-            d18u8 = vqrshrn_n_u16(q11u16, 7);
-            d19u8 = vqrshrn_n_u16(q12u16, 7);
-            d20u8 = vqrshrn_n_u16(q13u16, 7);
-            d21u8 = vqrshrn_n_u16(q14u16, 7);
-
-            q7u8  = vcombine_u8(d14u8, d15u8);
-            q8u8  = vcombine_u8(d16u8, d17u8);
-            q9u8  = vcombine_u8(d18u8, d19u8);
-            q10u8 = vcombine_u8(d20u8, d21u8);
-
-            vst1q_u8((uint8_t *)tmpp2, q7u8);
-            tmpp2 += 16;
-            vst1q_u8((uint8_t *)tmpp2, q8u8);
-            tmpp2 += 16;
-            vst1q_u8((uint8_t *)tmpp2, q9u8);
-            tmpp2 += 16;
-            vst1q_u8((uint8_t *)tmpp2, q10u8);
-            tmpp2 += 16;
-        }
-    } else {
-        d0u8 = vdup_n_u8(bilinear_taps_coeff[xoffset][0]);
-        d1u8 = vdup_n_u8(bilinear_taps_coeff[xoffset][1]);
-
-        d2u8 = vld1_u8(src_ptr);
-        d3u8 = vld1_u8(src_ptr + 8);
-        d4u8 = vld1_u8(src_ptr + 16);
-        src_ptr += src_pixels_per_line;
-        d5u8 = vld1_u8(src_ptr);
-        d6u8 = vld1_u8(src_ptr + 8);
-        d7u8 = vld1_u8(src_ptr + 16);
-        src_ptr += src_pixels_per_line;
-        d8u8 = vld1_u8(src_ptr);
-        d9u8 = vld1_u8(src_ptr + 8);
-        d10u8 = vld1_u8(src_ptr + 16);
-        src_ptr += src_pixels_per_line;
-        d11u8 = vld1_u8(src_ptr);
-        d12u8 = vld1_u8(src_ptr + 8);
-        d13u8 = vld1_u8(src_ptr + 16);
-        src_ptr += src_pixels_per_line;
-
-        // First pass: output_height lines x output_width columns (17x16)
-        for (i = 3; i > 0; i--) {
-            q7u16  = vmull_u8(d2u8, d0u8);
-            q8u16  = vmull_u8(d3u8, d0u8);
-            q9u16  = vmull_u8(d5u8, d0u8);
-            q10u16 = vmull_u8(d6u8, d0u8);
-            q11u16 = vmull_u8(d8u8, d0u8);
-            q12u16 = vmull_u8(d9u8, d0u8);
-            q13u16 = vmull_u8(d11u8, d0u8);
-            q14u16 = vmull_u8(d12u8, d0u8);
-
-            d2u8  = vext_u8(d2u8, d3u8, 1);
-            d5u8  = vext_u8(d5u8, d6u8, 1);
-            d8u8  = vext_u8(d8u8, d9u8, 1);
-            d11u8 = vext_u8(d11u8, d12u8, 1);
-
-            q7u16  = vmlal_u8(q7u16, d2u8, d1u8);
-            q9u16  = vmlal_u8(q9u16, d5u8, d1u8);
-            q11u16 = vmlal_u8(q11u16, d8u8, d1u8);
-            q13u16 = vmlal_u8(q13u16, d11u8, d1u8);
-
-            d3u8  = vext_u8(d3u8, d4u8, 1);
-            d6u8  = vext_u8(d6u8, d7u8, 1);
-            d9u8  = vext_u8(d9u8, d10u8, 1);
-            d12u8 = vext_u8(d12u8, d13u8, 1);
-
-            q8u16  = vmlal_u8(q8u16,  d3u8, d1u8);
-            q10u16 = vmlal_u8(q10u16, d6u8, d1u8);
-            q12u16 = vmlal_u8(q12u16, d9u8, d1u8);
-            q14u16 = vmlal_u8(q14u16, d12u8, d1u8);
-
-            d14u8 = vqrshrn_n_u16(q7u16, 7);
-            d15u8 = vqrshrn_n_u16(q8u16, 7);
-            d16u8 = vqrshrn_n_u16(q9u16, 7);
-            d17u8 = vqrshrn_n_u16(q10u16, 7);
-            d18u8 = vqrshrn_n_u16(q11u16, 7);
-            d19u8 = vqrshrn_n_u16(q12u16, 7);
-            d20u8 = vqrshrn_n_u16(q13u16, 7);
-            d21u8 = vqrshrn_n_u16(q14u16, 7);
-
-            d2u8 = vld1_u8(src_ptr);
-            d3u8 = vld1_u8(src_ptr + 8);
-            d4u8 = vld1_u8(src_ptr + 16);
-            src_ptr += src_pixels_per_line;
-            d5u8 = vld1_u8(src_ptr);
-            d6u8 = vld1_u8(src_ptr + 8);
-            d7u8 = vld1_u8(src_ptr + 16);
-            src_ptr += src_pixels_per_line;
-            d8u8 = vld1_u8(src_ptr);
-            d9u8 = vld1_u8(src_ptr + 8);
-            d10u8 = vld1_u8(src_ptr + 16);
-            src_ptr += src_pixels_per_line;
-            d11u8 = vld1_u8(src_ptr);
-            d12u8 = vld1_u8(src_ptr + 8);
-            d13u8 = vld1_u8(src_ptr + 16);
-            src_ptr += src_pixels_per_line;
-
-            q7u8 = vcombine_u8(d14u8, d15u8);
-            q8u8 = vcombine_u8(d16u8, d17u8);
-            q9u8 = vcombine_u8(d18u8, d19u8);
-            q10u8 = vcombine_u8(d20u8, d21u8);
-
-            vst1q_u8((uint8_t *)tmpp, q7u8);
-            tmpp += 16;
-            vst1q_u8((uint8_t *)tmpp, q8u8);
-            tmpp += 16;
-            vst1q_u8((uint8_t *)tmpp, q9u8);
-            tmpp += 16;
-            vst1q_u8((uint8_t *)tmpp, q10u8);
-            tmpp += 16;
-        }
-
-        // First-pass filtering for the remaining 5 lines
-        d14u8 = vld1_u8(src_ptr);
-        d15u8 = vld1_u8(src_ptr + 8);
-        d16u8 = vld1_u8(src_ptr + 16);
-        src_ptr += src_pixels_per_line;
-
-        q9u16  = vmull_u8(d2u8, d0u8);
-        q10u16 = vmull_u8(d3u8, d0u8);
-        q11u16 = vmull_u8(d5u8, d0u8);
-        q12u16 = vmull_u8(d6u8, d0u8);
-        q13u16 = vmull_u8(d8u8, d0u8);
-        q14u16 = vmull_u8(d9u8, d0u8);
-
-        d2u8  = vext_u8(d2u8, d3u8, 1);
-        d5u8  = vext_u8(d5u8, d6u8, 1);
-        d8u8  = vext_u8(d8u8, d9u8, 1);
-
-        q9u16  = vmlal_u8(q9u16, d2u8, d1u8);
-        q11u16 = vmlal_u8(q11u16, d5u8, d1u8);
-        q13u16 = vmlal_u8(q13u16, d8u8, d1u8);
-
-        d3u8  = vext_u8(d3u8, d4u8, 1);
-        d6u8  = vext_u8(d6u8, d7u8, 1);
-        d9u8  = vext_u8(d9u8, d10u8, 1);
-
-        q10u16 = vmlal_u8(q10u16, d3u8, d1u8);
-        q12u16 = vmlal_u8(q12u16, d6u8, d1u8);
-        q14u16 = vmlal_u8(q14u16, d9u8, d1u8);
-
-        q1u16 = vmull_u8(d11u8, d0u8);
-        q2u16 = vmull_u8(d12u8, d0u8);
-        q3u16 = vmull_u8(d14u8, d0u8);
-        q4u16 = vmull_u8(d15u8, d0u8);
-
-        d11u8 = vext_u8(d11u8, d12u8, 1);
-        d14u8 = vext_u8(d14u8, d15u8, 1);
-
-        q1u16 = vmlal_u8(q1u16, d11u8, d1u8);
-        q3u16 = vmlal_u8(q3u16, d14u8, d1u8);
-
-        d12u8 = vext_u8(d12u8, d13u8, 1);
-        d15u8 = vext_u8(d15u8, d16u8, 1);
-
-        q2u16 = vmlal_u8(q2u16, d12u8, d1u8);
-        q4u16 = vmlal_u8(q4u16, d15u8, d1u8);
-
-        d10u8 = vqrshrn_n_u16(q9u16, 7);
-        d11u8 = vqrshrn_n_u16(q10u16, 7);
-        d12u8 = vqrshrn_n_u16(q11u16, 7);
-        d13u8 = vqrshrn_n_u16(q12u16, 7);
-        d14u8 = vqrshrn_n_u16(q13u16, 7);
-        d15u8 = vqrshrn_n_u16(q14u16, 7);
-        d16u8 = vqrshrn_n_u16(q1u16, 7);
-        d17u8 = vqrshrn_n_u16(q2u16, 7);
-        d18u8 = vqrshrn_n_u16(q3u16, 7);
-        d19u8 = vqrshrn_n_u16(q4u16, 7);
-
-        q5u8 = vcombine_u8(d10u8, d11u8);
-        q6u8 = vcombine_u8(d12u8, d13u8);
-        q7u8 = vcombine_u8(d14u8, d15u8);
-        q8u8 = vcombine_u8(d16u8, d17u8);
-        q9u8 = vcombine_u8(d18u8, d19u8);
-
-        vst1q_u8((uint8_t *)tmpp, q5u8);
-        tmpp += 16;
-        vst1q_u8((uint8_t *)tmpp, q6u8);
-        tmpp += 16;
-        vst1q_u8((uint8_t *)tmpp, q7u8);
-        tmpp += 16;
-        vst1q_u8((uint8_t *)tmpp, q8u8);
-        tmpp += 16;
-        vst1q_u8((uint8_t *)tmpp, q9u8);
-
-        // secondpass_filter: filter vertically with the yoffset taps
-        d0u8 = vdup_n_u8(bilinear_taps_coeff[yoffset][0]);
-        d1u8 = vdup_n_u8(bilinear_taps_coeff[yoffset][1]);
-
-        tmpp = tmp;
-        tmpp2 = tmpp + 272;
-        q11u8 = vld1q_u8(tmpp);
-        tmpp += 16;
-        for (i = 4; i > 0; i--) {
-            q12u8 = vld1q_u8(tmpp);
-            tmpp += 16;
-            q13u8 = vld1q_u8(tmpp);
-            tmpp += 16;
-            q14u8 = vld1q_u8(tmpp);
-            tmpp += 16;
-            q15u8 = vld1q_u8(tmpp);
-            tmpp += 16;
-
-            q1u16 = vmull_u8(vget_low_u8(q11u8), d0u8);
-            q2u16 = vmull_u8(vget_high_u8(q11u8), d0u8);
-            q3u16 = vmull_u8(vget_low_u8(q12u8), d0u8);
-            q4u16 = vmull_u8(vget_high_u8(q12u8), d0u8);
-            q5u16 = vmull_u8(vget_low_u8(q13u8), d0u8);
-            q6u16 = vmull_u8(vget_high_u8(q13u8), d0u8);
-            q7u16 = vmull_u8(vget_low_u8(q14u8), d0u8);
-            q8u16 = vmull_u8(vget_high_u8(q14u8), d0u8);
-
-            q1u16 = vmlal_u8(q1u16, vget_low_u8(q12u8), d1u8);
-            q2u16 = vmlal_u8(q2u16, vget_high_u8(q12u8), d1u8);
-            q3u16 = vmlal_u8(q3u16, vget_low_u8(q13u8), d1u8);
-            q4u16 = vmlal_u8(q4u16, vget_high_u8(q13u8), d1u8);
-            q5u16 = vmlal_u8(q5u16, vget_low_u8(q14u8), d1u8);
-            q6u16 = vmlal_u8(q6u16, vget_high_u8(q14u8), d1u8);
-            q7u16 = vmlal_u8(q7u16, vget_low_u8(q15u8), d1u8);
-            q8u16 = vmlal_u8(q8u16, vget_high_u8(q15u8), d1u8);
-
-            d2u8 = vqrshrn_n_u16(q1u16, 7);
-            d3u8 = vqrshrn_n_u16(q2u16, 7);
-            d4u8 = vqrshrn_n_u16(q3u16, 7);
-            d5u8 = vqrshrn_n_u16(q4u16, 7);
-            d6u8 = vqrshrn_n_u16(q5u16, 7);
-            d7u8 = vqrshrn_n_u16(q6u16, 7);
-            d8u8 = vqrshrn_n_u16(q7u16, 7);
-            d9u8 = vqrshrn_n_u16(q8u16, 7);
-
-            q1u8 = vcombine_u8(d2u8, d3u8);
-            q2u8 = vcombine_u8(d4u8, d5u8);
-            q3u8 = vcombine_u8(d6u8, d7u8);
-            q4u8 = vcombine_u8(d8u8, d9u8);
-
-            q11u8 = q15u8;
-
-            vst1q_u8((uint8_t *)tmpp2, q1u8);
-            tmpp2 += 16;
-            vst1q_u8((uint8_t *)tmpp2, q2u8);
-            tmpp2 += 16;
-            vst1q_u8((uint8_t *)tmpp2, q3u8);
-            tmpp2 += 16;
-            vst1q_u8((uint8_t *)tmpp2, q4u8);
-            tmpp2 += 16;
-        }
-    }
-
-    // sub_pixel_variance16x16_neon
-    q8s32 = vdupq_n_s32(0);
-    q9s32 = vdupq_n_s32(0);
-    q10s32 = vdupq_n_s32(0);
-
-    tmpp = tmp + 272;
-    for (i = 0; i < 8; i++) {  // sub_pixel_variance16x16_neon_loop
-        q0u8 = vld1q_u8(tmpp);
-        tmpp += 16;
-        q1u8 = vld1q_u8(tmpp);
-        tmpp += 16;
-        q2u8 = vld1q_u8(dst_ptr);
-        dst_ptr += dst_pixels_per_line;
-        q3u8 = vld1q_u8(dst_ptr);
-        dst_ptr += dst_pixels_per_line;
-
-        d0u8 = vget_low_u8(q0u8);
-        d1u8 = vget_high_u8(q0u8);
-        d2u8 = vget_low_u8(q1u8);
-        d3u8 = vget_high_u8(q1u8);
-
-        q11u16 = vsubl_u8(d0u8, vget_low_u8(q2u8));
-        q12u16 = vsubl_u8(d1u8, vget_high_u8(q2u8));
-        q13u16 = vsubl_u8(d2u8, vget_low_u8(q3u8));
-        q14u16 = vsubl_u8(d3u8, vget_high_u8(q3u8));
-
-        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
-        q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
-        q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
-
-        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
-        q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
-        q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
-
-        d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
-        d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16));
-        q9s32 = vmlal_s16(q9s32, d26s16, d26s16);
-        q10s32 = vmlal_s16(q10s32, d27s16, d27s16);
-
-        d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
-        d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16));
-        q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
-        q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
-    }
-
-    q10s32 = vaddq_s32(q10s32, q9s32);
-    q0s64 = vpaddlq_s32(q8s32);
-    q1s64 = vpaddlq_s32(q10s32);
-
-    d0s64 = vget_low_s64(q0s64);
-    d1s64 = vget_high_s64(q0s64);
-    d2s64 = vget_low_s64(q1s64);
-    d3s64 = vget_high_s64(q1s64);
-    d0s64 = vadd_s64(d0s64, d1s64);
-    d1s64 = vadd_s64(d2s64, d3s64);
-
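-    // d0s64 now holds the sum of differences and d1s64 the sum of squared
-    // differences (SSE). variance = SSE - sum^2 / 256; dividing by the
-    // 16x16 pixel count is the right shift by 8 below.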
-    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
-                      vreinterpret_s32_s64(d0s64));
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
-
-    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 8);
-    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
-
-    return vget_lane_u32(d0u32, 0);
-}
-
-unsigned int vp8_variance_halfpixvar16x16_h_neon(
-        const unsigned char *src_ptr,
-        int  source_stride,
-        const unsigned char *ref_ptr,
-        int  recon_stride,
-        unsigned int *sse) {
-    int i;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
-    int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
-    int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16;
-    uint32x2_t d0u32, d10u32;
-    int64x1_t d0s64, d1s64, d2s64, d3s64;
-    uint8x16_t q0u8, q1u8, q2u8, q3u8, q4u8, q5u8, q6u8;
-    uint8x16_t q7u8, q11u8, q12u8, q13u8, q14u8;
-    uint16x8_t q0u16, q1u16, q2u16, q3u16, q4u16, q5u16, q6u16, q7u16;
-    int32x4_t q8s32, q9s32, q10s32;
-    int64x2_t q0s64, q1s64, q5s64;
-
-    q8s32 = vdupq_n_s32(0);
-    q9s32 = vdupq_n_s32(0);
-    q10s32 = vdupq_n_s32(0);
-
-    for (i = 0; i < 4; i++) {  // vp8_filt_fpo16x16s_4_0_loop_neon
-        q0u8 = vld1q_u8(src_ptr);
-        q1u8 = vld1q_u8(src_ptr + 16);
-        src_ptr += source_stride;
-        q2u8 = vld1q_u8(src_ptr);
-        q3u8 = vld1q_u8(src_ptr + 16);
-        src_ptr += source_stride;
-        q4u8 = vld1q_u8(src_ptr);
-        q5u8 = vld1q_u8(src_ptr + 16);
-        src_ptr += source_stride;
-        q6u8 = vld1q_u8(src_ptr);
-        q7u8 = vld1q_u8(src_ptr + 16);
-        src_ptr += source_stride;
-
-        q11u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q12u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q13u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q14u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-
-        q1u8 = vextq_u8(q0u8, q1u8, 1);
-        q3u8 = vextq_u8(q2u8, q3u8, 1);
-        q5u8 = vextq_u8(q4u8, q5u8, 1);
-        q7u8 = vextq_u8(q6u8, q7u8, 1);
-
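-        // vrhaddq_u8 is a rounding halving add, (a + b + 1) >> 1: each row
-        // is averaged with its one-pixel-right neighbour to interpolate the
-        // horizontal half-pixel position.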
-        q0u8 = vrhaddq_u8(q0u8, q1u8);
-        q1u8 = vrhaddq_u8(q2u8, q3u8);
-        q2u8 = vrhaddq_u8(q4u8, q5u8);
-        q3u8 = vrhaddq_u8(q6u8, q7u8);
-
-        d0u8 = vget_low_u8(q0u8);
-        d1u8 = vget_high_u8(q0u8);
-        d2u8 = vget_low_u8(q1u8);
-        d3u8 = vget_high_u8(q1u8);
-        d4u8 = vget_low_u8(q2u8);
-        d5u8 = vget_high_u8(q2u8);
-        d6u8 = vget_low_u8(q3u8);
-        d7u8 = vget_high_u8(q3u8);
-
-        q4u16 = vsubl_u8(d0u8, vget_low_u8(q11u8));
-        q5u16 = vsubl_u8(d1u8, vget_high_u8(q11u8));
-        q6u16 = vsubl_u8(d2u8, vget_low_u8(q12u8));
-        q7u16 = vsubl_u8(d3u8, vget_high_u8(q12u8));
-        q0u16 = vsubl_u8(d4u8, vget_low_u8(q13u8));
-        q1u16 = vsubl_u8(d5u8, vget_high_u8(q13u8));
-        q2u16 = vsubl_u8(d6u8, vget_low_u8(q14u8));
-        q3u16 = vsubl_u8(d7u8, vget_high_u8(q14u8));
-
-        d8s16 = vreinterpret_s16_u16(vget_low_u16(q4u16));
-        d9s16 = vreinterpret_s16_u16(vget_high_u16(q4u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q4u16));
-        q9s32 = vmlal_s16(q9s32, d8s16, d8s16);
-        q10s32 = vmlal_s16(q10s32, d9s16, d9s16);
-        d10s16 = vreinterpret_s16_u16(vget_low_u16(q5u16));
-        d11s16 = vreinterpret_s16_u16(vget_high_u16(q5u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q5u16));
-        q9s32 = vmlal_s16(q9s32, d10s16, d10s16);
-        q10s32 = vmlal_s16(q10s32, d11s16, d11s16);
-        d12s16 = vreinterpret_s16_u16(vget_low_u16(q6u16));
-        d13s16 = vreinterpret_s16_u16(vget_high_u16(q6u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q6u16));
-        q9s32 = vmlal_s16(q9s32, d12s16, d12s16);
-        q10s32 = vmlal_s16(q10s32, d13s16, d13s16);
-        d14s16 = vreinterpret_s16_u16(vget_low_u16(q7u16));
-        d15s16 = vreinterpret_s16_u16(vget_high_u16(q7u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q7u16));
-        q9s32 = vmlal_s16(q9s32, d14s16, d14s16);
-        q10s32 = vmlal_s16(q10s32, d15s16, d15s16);
-        d0s16 = vreinterpret_s16_u16(vget_low_u16(q0u16));
-        d1s16 = vreinterpret_s16_u16(vget_high_u16(q0u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q0u16));
-        q9s32 = vmlal_s16(q9s32, d0s16, d0s16);
-        q10s32 = vmlal_s16(q10s32, d1s16, d1s16);
-        d2s16 = vreinterpret_s16_u16(vget_low_u16(q1u16));
-        d3s16 = vreinterpret_s16_u16(vget_high_u16(q1u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q1u16));
-        q9s32 = vmlal_s16(q9s32, d2s16, d2s16);
-        q10s32 = vmlal_s16(q10s32, d3s16, d3s16);
-        d4s16 = vreinterpret_s16_u16(vget_low_u16(q2u16));
-        d5s16 = vreinterpret_s16_u16(vget_high_u16(q2u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q2u16));
-        q9s32 = vmlal_s16(q9s32, d4s16, d4s16);
-        q10s32 = vmlal_s16(q10s32, d5s16, d5s16);
-        d6s16 = vreinterpret_s16_u16(vget_low_u16(q3u16));
-        d7s16 = vreinterpret_s16_u16(vget_high_u16(q3u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q3u16));
-        q9s32 = vmlal_s16(q9s32, d6s16, d6s16);
-        q10s32 = vmlal_s16(q10s32, d7s16, d7s16);
-    }
-
-    q10s32 = vaddq_s32(q10s32, q9s32);
-    q0s64 = vpaddlq_s32(q8s32);
-    q1s64 = vpaddlq_s32(q10s32);
-
-    d0s64 = vget_low_s64(q0s64);
-    d1s64 = vget_high_s64(q0s64);
-    d2s64 = vget_low_s64(q1s64);
-    d3s64 = vget_high_s64(q1s64);
-    d0s64 = vadd_s64(d0s64, d1s64);
-    d1s64 = vadd_s64(d2s64, d3s64);
-
-    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
-                      vreinterpret_s32_s64(d0s64));
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
-
-    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 8);
-    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
-
-    return vget_lane_u32(d0u32, 0);
-}
-
-unsigned int vp8_variance_halfpixvar16x16_v_neon(
-        const unsigned char *src_ptr,
-        int  source_stride,
-        const unsigned char *ref_ptr,
-        int  recon_stride,
-        unsigned int *sse) {
-    int i;
-    uint8x8_t d0u8, d1u8, d4u8, d5u8, d8u8, d9u8, d12u8, d13u8;
-    int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
-    int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
-    uint32x2_t d0u32, d10u32;
-    int64x1_t d0s64, d1s64, d2s64, d3s64;
-    uint8x16_t q0u8, q1u8, q2u8, q3u8, q4u8, q5u8, q6u8, q7u8, q15u8;
-    uint16x8_t q0u16, q1u16, q2u16, q3u16, q11u16, q12u16, q13u16, q14u16;
-    int32x4_t q8s32, q9s32, q10s32;
-    int64x2_t q0s64, q1s64, q5s64;
-
-    q8s32 = vdupq_n_s32(0);
-    q9s32 = vdupq_n_s32(0);
-    q10s32 = vdupq_n_s32(0);
-
-    q0u8 = vld1q_u8(src_ptr);
-    src_ptr += source_stride;
-    for (i = 0; i < 4; i++) {  // vp8_filt_fpo16x16s_4_0_loop_neon
-        q2u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        q4u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        q6u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        q15u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-
-        q1u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q3u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q5u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q7u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-
-        q0u8 = vrhaddq_u8(q0u8, q2u8);
-        q2u8 = vrhaddq_u8(q2u8, q4u8);
-        q4u8 = vrhaddq_u8(q4u8, q6u8);
-        q6u8 = vrhaddq_u8(q6u8, q15u8);
-
-        d0u8  = vget_low_u8(q0u8);
-        d1u8  = vget_high_u8(q0u8);
-        d4u8  = vget_low_u8(q2u8);
-        d5u8  = vget_high_u8(q2u8);
-        d8u8  = vget_low_u8(q4u8);
-        d9u8  = vget_high_u8(q4u8);
-        d12u8 = vget_low_u8(q6u8);
-        d13u8 = vget_high_u8(q6u8);
-
-        q11u16 = vsubl_u8(d0u8, vget_low_u8(q1u8));
-        q12u16 = vsubl_u8(d1u8, vget_high_u8(q1u8));
-        q13u16 = vsubl_u8(d4u8, vget_low_u8(q3u8));
-        q14u16 = vsubl_u8(d5u8, vget_high_u8(q3u8));
-        q0u16  = vsubl_u8(d8u8, vget_low_u8(q5u8));
-        q1u16  = vsubl_u8(d9u8, vget_high_u8(q5u8));
-        q2u16  = vsubl_u8(d12u8, vget_low_u8(q7u8));
-        q3u16  = vsubl_u8(d13u8, vget_high_u8(q7u8));
-
-        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
-        q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
-        q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
-        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
-        q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
-        q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
-        d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
-        d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16));
-        q9s32 = vmlal_s16(q9s32, d26s16, d26s16);
-        q10s32 = vmlal_s16(q10s32, d27s16, d27s16);
-        d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
-        d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16));
-        q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
-        q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
-        d0s16 = vreinterpret_s16_u16(vget_low_u16(q0u16));
-        d1s16 = vreinterpret_s16_u16(vget_high_u16(q0u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q0u16));
-        q9s32 = vmlal_s16(q9s32, d0s16, d0s16);
-        q10s32 = vmlal_s16(q10s32, d1s16, d1s16);
-        d2s16 = vreinterpret_s16_u16(vget_low_u16(q1u16));
-        d3s16 = vreinterpret_s16_u16(vget_high_u16(q1u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q1u16));
-        q9s32 = vmlal_s16(q9s32, d2s16, d2s16);
-        q10s32 = vmlal_s16(q10s32, d3s16, d3s16);
-        d4s16 = vreinterpret_s16_u16(vget_low_u16(q2u16));
-        d5s16 = vreinterpret_s16_u16(vget_high_u16(q2u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q2u16));
-        q9s32 = vmlal_s16(q9s32, d4s16, d4s16);
-        q10s32 = vmlal_s16(q10s32, d5s16, d5s16);
-        d6s16 = vreinterpret_s16_u16(vget_low_u16(q3u16));
-        d7s16 = vreinterpret_s16_u16(vget_high_u16(q3u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q3u16));
-        q9s32 = vmlal_s16(q9s32, d6s16, d6s16);
-        q10s32 = vmlal_s16(q10s32, d7s16, d7s16);
-
-        q0u8 = q15u8;
-    }
-
-    q10s32 = vaddq_s32(q10s32, q9s32);
-    q0s64 = vpaddlq_s32(q8s32);
-    q1s64 = vpaddlq_s32(q10s32);
-
-    d0s64 = vget_low_s64(q0s64);
-    d1s64 = vget_high_s64(q0s64);
-    d2s64 = vget_low_s64(q1s64);
-    d3s64 = vget_high_s64(q1s64);
-    d0s64 = vadd_s64(d0s64, d1s64);
-    d1s64 = vadd_s64(d2s64, d3s64);
-
-    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
-                      vreinterpret_s32_s64(d0s64));
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
-
-    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 8);
-    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
-
-    return vget_lane_u32(d0u32, 0);
-}
-
-unsigned int vp8_variance_halfpixvar16x16_hv_neon(
-        const unsigned char *src_ptr,
-        int  source_stride,
-        const unsigned char *ref_ptr,
-        int  recon_stride,
-        unsigned int *sse) {
-    int i;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
-    int16x4_t d0s16, d1s16, d2s16, d3s16, d10s16, d11s16, d12s16, d13s16;
-    int16x4_t d18s16, d19s16, d20s16, d21s16, d22s16, d23s16, d24s16, d25s16;
-    uint32x2_t d0u32, d10u32;
-    int64x1_t d0s64, d1s64, d2s64, d3s64;
-    uint8x16_t q0u8, q1u8, q2u8, q3u8, q4u8, q5u8, q6u8, q7u8, q8u8, q9u8;
-    uint16x8_t q0u16, q1u16, q5u16, q6u16, q9u16, q10u16, q11u16, q12u16;
-    int32x4_t q13s32, q14s32, q15s32;
-    int64x2_t q0s64, q1s64, q5s64;
-
-    q13s32 = vdupq_n_s32(0);
-    q14s32 = vdupq_n_s32(0);
-    q15s32 = vdupq_n_s32(0);
-
-    q0u8 = vld1q_u8(src_ptr);
-    q1u8 = vld1q_u8(src_ptr + 16);
-    src_ptr += source_stride;
-    q1u8 = vextq_u8(q0u8, q1u8, 1);
-    q0u8 = vrhaddq_u8(q0u8, q1u8);
-    for (i = 0; i < 4; i++) {  // vp8_filt_fpo16x16s_4_0_loop_neon
-        q2u8 = vld1q_u8(src_ptr);
-        q3u8 = vld1q_u8(src_ptr + 16);
-        src_ptr += source_stride;
-        q4u8 = vld1q_u8(src_ptr);
-        q5u8 = vld1q_u8(src_ptr + 16);
-        src_ptr += source_stride;
-        q6u8 = vld1q_u8(src_ptr);
-        q7u8 = vld1q_u8(src_ptr + 16);
-        src_ptr += source_stride;
-        q8u8 = vld1q_u8(src_ptr);
-        q9u8 = vld1q_u8(src_ptr + 16);
-        src_ptr += source_stride;
-
-        q3u8 = vextq_u8(q2u8, q3u8, 1);
-        q5u8 = vextq_u8(q4u8, q5u8, 1);
-        q7u8 = vextq_u8(q6u8, q7u8, 1);
-        q9u8 = vextq_u8(q8u8, q9u8, 1);
-
-        q1u8 = vrhaddq_u8(q2u8, q3u8);
-        q2u8 = vrhaddq_u8(q4u8, q5u8);
-        q3u8 = vrhaddq_u8(q6u8, q7u8);
-        q4u8 = vrhaddq_u8(q8u8, q9u8);
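-        // Blend each row's horizontal average with the previous row's
-        // (q0u8 carries it between iterations) to complete the combined
-        // horizontal + vertical half-pixel filter.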
-        q0u8 = vrhaddq_u8(q0u8, q1u8);
-        q1u8 = vrhaddq_u8(q1u8, q2u8);
-        q2u8 = vrhaddq_u8(q2u8, q3u8);
-        q3u8 = vrhaddq_u8(q3u8, q4u8);
-
-        q5u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q6u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q7u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q8u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-
-        d0u8 = vget_low_u8(q0u8);
-        d1u8 = vget_high_u8(q0u8);
-        d2u8 = vget_low_u8(q1u8);
-        d3u8 = vget_high_u8(q1u8);
-        d4u8 = vget_low_u8(q2u8);
-        d5u8 = vget_high_u8(q2u8);
-        d6u8 = vget_low_u8(q3u8);
-        d7u8 = vget_high_u8(q3u8);
-
-        q9u16  = vsubl_u8(d0u8, vget_low_u8(q5u8));
-        q10u16 = vsubl_u8(d1u8, vget_high_u8(q5u8));
-        q11u16 = vsubl_u8(d2u8, vget_low_u8(q6u8));
-        q12u16 = vsubl_u8(d3u8, vget_high_u8(q6u8));
-        q0u16  = vsubl_u8(d4u8, vget_low_u8(q7u8));
-        q1u16  = vsubl_u8(d5u8, vget_high_u8(q7u8));
-        q5u16  = vsubl_u8(d6u8, vget_low_u8(q8u8));
-        q6u16  = vsubl_u8(d7u8, vget_high_u8(q8u8));
-
-        d18s16 = vreinterpret_s16_u16(vget_low_u16(q9u16));
-        d19s16 = vreinterpret_s16_u16(vget_high_u16(q9u16));
-        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q9u16));
-        q14s32 = vmlal_s16(q14s32, d18s16, d18s16);
-        q15s32 = vmlal_s16(q15s32, d19s16, d19s16);
-
-        d20s16 = vreinterpret_s16_u16(vget_low_u16(q10u16));
-        d21s16 = vreinterpret_s16_u16(vget_high_u16(q10u16));
-        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q10u16));
-        q14s32 = vmlal_s16(q14s32, d20s16, d20s16);
-        q15s32 = vmlal_s16(q15s32, d21s16, d21s16);
-
-        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q11u16));
-        q14s32 = vmlal_s16(q14s32, d22s16, d22s16);
-        q15s32 = vmlal_s16(q15s32, d23s16, d23s16);
-
-        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q12u16));
-        q14s32 = vmlal_s16(q14s32, d24s16, d24s16);
-        q15s32 = vmlal_s16(q15s32, d25s16, d25s16);
-
-        d0s16 = vreinterpret_s16_u16(vget_low_u16(q0u16));
-        d1s16 = vreinterpret_s16_u16(vget_high_u16(q0u16));
-        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q0u16));
-        q14s32 = vmlal_s16(q14s32, d0s16, d0s16);
-        q15s32 = vmlal_s16(q15s32, d1s16, d1s16);
-
-        d2s16 = vreinterpret_s16_u16(vget_low_u16(q1u16));
-        d3s16 = vreinterpret_s16_u16(vget_high_u16(q1u16));
-        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q1u16));
-        q14s32 = vmlal_s16(q14s32, d2s16, d2s16);
-        q15s32 = vmlal_s16(q15s32, d3s16, d3s16);
-
-        d10s16 = vreinterpret_s16_u16(vget_low_u16(q5u16));
-        d11s16 = vreinterpret_s16_u16(vget_high_u16(q5u16));
-        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q5u16));
-        q14s32 = vmlal_s16(q14s32, d10s16, d10s16);
-        q15s32 = vmlal_s16(q15s32, d11s16, d11s16);
-
-        d12s16 = vreinterpret_s16_u16(vget_low_u16(q6u16));
-        d13s16 = vreinterpret_s16_u16(vget_high_u16(q6u16));
-        q13s32 = vpadalq_s16(q13s32, vreinterpretq_s16_u16(q6u16));
-        q14s32 = vmlal_s16(q14s32, d12s16, d12s16);
-        q15s32 = vmlal_s16(q15s32, d13s16, d13s16);
-
-        q0u8 = q4u8;
-    }
-
-    q15s32 = vaddq_s32(q14s32, q15s32);
-    q0s64 = vpaddlq_s32(q13s32);
-    q1s64 = vpaddlq_s32(q15s32);
-
-    d0s64 = vget_low_s64(q0s64);
-    d1s64 = vget_high_s64(q0s64);
-    d2s64 = vget_low_s64(q1s64);
-    d3s64 = vget_high_s64(q1s64);
-    d0s64 = vadd_s64(d0s64, d1s64);
-    d1s64 = vadd_s64(d2s64, d3s64);
-
-    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
-                      vreinterpret_s32_s64(d0s64));
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
-
-    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 8);
-    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
-
-    return vget_lane_u32(d0u32, 0);
-}
-
-enum { kWidth8 = 8 };
-enum { kHeight8 = 8 };
-enum { kHeight8PlusOne = 9 };
-enum { kPixelStepOne = 1 };
-enum { kAlign16 = 16 };
-
-#define FILTER_BITS 7
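-// The bilinear taps are 7-bit fixed point: each coefficient pair in
-// bilinear_taps_coeff sums to 1 << FILTER_BITS (128).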
-
-static INLINE int horizontal_add_s16x8(const int16x8_t v_16x8) {
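-  // Pairwise widening adds fold s16x8 -> s32x4 -> s64x2; the two 64-bit
-  // halves are then summed as s32 lanes and lane 0 extracted.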
-  const int32x4_t a = vpaddlq_s16(v_16x8);
-  const int64x2_t b = vpaddlq_s32(a);
-  const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
-                               vreinterpret_s32_s64(vget_high_s64(b)));
-  return vget_lane_s32(c, 0);
-}
-
-static INLINE int horizontal_add_s32x4(const int32x4_t v_32x4) {
-  const int64x2_t b = vpaddlq_s32(v_32x4);
-  const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
-                               vreinterpret_s32_s64(vget_high_s64(b)));
-  return vget_lane_s32(c, 0);
-}
-
-static void variance_neon_w8(const uint8_t *a, int a_stride,
-                             const uint8_t *b, int b_stride,
-                             int w, int h, unsigned int *sse, int *sum) {
-  int i, j;
-  int16x8_t v_sum = vdupq_n_s16(0);
-  int32x4_t v_sse_lo = vdupq_n_s32(0);
-  int32x4_t v_sse_hi = vdupq_n_s32(0);
-
-  for (i = 0; i < h; ++i) {
-    for (j = 0; j < w; j += 8) {
-      const uint8x8_t v_a = vld1_u8(&a[j]);
-      const uint8x8_t v_b = vld1_u8(&b[j]);
-      const uint16x8_t v_diff = vsubl_u8(v_a, v_b);
-      const int16x8_t sv_diff = vreinterpretq_s16_u16(v_diff);
-      v_sum = vaddq_s16(v_sum, sv_diff);
-      v_sse_lo = vmlal_s16(v_sse_lo,
-                           vget_low_s16(sv_diff),
-                           vget_low_s16(sv_diff));
-      v_sse_hi = vmlal_s16(v_sse_hi,
-                           vget_high_s16(sv_diff),
-                           vget_high_s16(sv_diff));
-    }
-    a += a_stride;
-    b += b_stride;
-  }
-
-  *sum = horizontal_add_s16x8(v_sum);
-  *sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(v_sse_lo, v_sse_hi));
-}
-
-static unsigned int variance8x8_neon(const uint8_t *a, int a_stride,
-                                     const uint8_t *b, int b_stride,
-                                     unsigned int *sse) {
-  int sum;
-  variance_neon_w8(a, a_stride, b, b_stride, kWidth8, kHeight8, sse, &sum);
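-  // SSE minus the squared-sum correction: sse - sum^2 / (8 * 8)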
-  return *sse - (((int64_t)sum * sum) / (kWidth8 * kHeight8));
-}
-
-static void var_filter_block2d_bil_w8(const uint8_t *src_ptr,
-                                      uint8_t *output_ptr,
-                                      unsigned int src_pixels_per_line,
-                                      int pixel_step,
-                                      unsigned int output_height,
-                                      unsigned int output_width,
-                                      const uint16_t *vpx_filter) {
-  const uint8x8_t f0 = vmov_n_u8((uint8_t)vpx_filter[0]);
-  const uint8x8_t f1 = vmov_n_u8((uint8_t)vpx_filter[1]);
-  unsigned int i;
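-  // Each output pixel is the rounded bilinear blend
-  // (f0 * src[x] + f1 * src[x + pixel_step] + 64) >> FILTER_BITS.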
-  for (i = 0; i < output_height; ++i) {
-    const uint8x8_t src_0 = vld1_u8(&src_ptr[0]);
-    const uint8x8_t src_1 = vld1_u8(&src_ptr[pixel_step]);
-    const uint16x8_t a = vmull_u8(src_0, f0);
-    const uint16x8_t b = vmlal_u8(a, src_1, f1);
-    const uint8x8_t out = vrshrn_n_u16(b, FILTER_BITS);
-    vst1_u8(&output_ptr[0], out);
-    // Next row...
-    src_ptr += src_pixels_per_line;
-    output_ptr += output_width;
-  }
-}
-
-unsigned int vp8_sub_pixel_variance8x8_neon(
-        const unsigned char *src,
-        int src_stride,
-        int xoffset,
-        int yoffset,
-        const unsigned char *dst,
-        int dst_stride,
-        unsigned int *sse) {
-  DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, temp2, kHeight8 * kWidth8);
-  DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, fdata3, kHeight8PlusOne * kWidth8);
-  if (xoffset == 0) {
-    var_filter_block2d_bil_w8(src, temp2, src_stride, kWidth8, kHeight8,
-                              kWidth8, bilinear_taps_coeff[yoffset]);
-  } else if (yoffset == 0) {
-    var_filter_block2d_bil_w8(src, temp2, src_stride, kPixelStepOne,
-                              kHeight8PlusOne, kWidth8,
-                              bilinear_taps_coeff[xoffset]);
-  } else {
-    var_filter_block2d_bil_w8(src, fdata3, src_stride, kPixelStepOne,
-                              kHeight8PlusOne, kWidth8,
-                              bilinear_taps_coeff[xoffset]);
-    var_filter_block2d_bil_w8(fdata3, temp2, kWidth8, kWidth8, kHeight8,
-                              kWidth8, bilinear_taps_coeff[yoffset]);
-  }
-  return variance8x8_neon(temp2, kWidth8, dst, dst_stride, sse);
-}
-
deleted file mode 100644
--- a/media/libvpx/vp8/common/x86/loopfilter_block_sse2_x86_64.asm
+++ /dev/null
@@ -1,815 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-%include "vpx_ports/x86_abi_support.asm"
-
-%macro LF_ABS 2
-        ; %1 value not preserved
-        ; %2 value preserved
-        ; output in %1
-        movdqa      scratch1, %2            ; v2
-
-        psubusb     scratch1, %1            ; v2 - v1
-        psubusb     %1, %2                  ; v1 - v2
-        por         %1, scratch1            ; abs(v2 - v1)
-%endmacro
-
-%macro LF_FILTER_HEV_MASK 8-9
-
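-        ; %1-%8: p3 p2 p1 p0 q0 q1 q2 q3
-        ; %9 (optional): abs(p1 - p0) carried over from the previous call
-        ; on exit: %1 = filter mask, %5 = ~hev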
-        LF_ABS      %1, %2                  ; abs(p3 - p2)
-        LF_ABS      %2, %3                  ; abs(p2 - p1)
-        pmaxub      %1, %2                  ; accumulate mask
-%if %0 == 8
-        movdqa      scratch2, %3            ; save p1
-        LF_ABS      scratch2, %4            ; abs(p1 - p0)
-%endif
-        LF_ABS      %4, %5                  ; abs(p0 - q0)
-        LF_ABS      %5, %6                  ; abs(q0 - q1)
-%if %0 == 8
-        pmaxub      %5, scratch2            ; accumulate hev
-%else
-        pmaxub      %5, %9
-%endif
-        pmaxub      %1, %5                  ; accumulate mask
-
-        LF_ABS      %3, %6                  ; abs(p1 - q1)
-        LF_ABS      %6, %7                  ; abs(q1 - q2)
-        pmaxub      %1, %6                  ; accumulate mask
-        LF_ABS      %7, %8                  ; abs(q2 - q3)
-        pmaxub      %1, %7                  ; accumulate mask
-
-        paddusb     %4, %4                  ; 2 * abs(p0 - q0)
-        pand        %3, [GLOBAL(tfe)]
-        psrlw       %3, 1                   ; abs(p1 - q1) / 2
-        paddusb     %4, %3                  ; abs(p0 - q0) * 2 + abs(p1 - q1) / 2
-
-        psubusb     %1, [limit]
-        psubusb     %4, [blimit]
-        por         %1, %4
-        pcmpeqb     %1, zero                ; mask
-
-        psubusb     %5, [thresh]
-        pcmpeqb     %5, zero                ; ~hev
-%endmacro
-
-%macro LF_FILTER 6
-        ; %1-%4: p1-q1
-        ; %5: mask
-        ; %6: hev
-
-        movdqa      scratch2, %6            ; save hev
-
-        pxor        %1, [GLOBAL(t80)]       ; ps1
-        pxor        %4, [GLOBAL(t80)]       ; qs1
-        movdqa      scratch1, %1
-        psubsb      scratch1, %4            ; signed_char_clamp(ps1 - qs1)
-        pandn       scratch2, scratch1      ; vp8_filter &= hev
-
-        pxor        %2, [GLOBAL(t80)]       ; ps0
-        pxor        %3, [GLOBAL(t80)]       ; qs0
-        movdqa      scratch1, %3
-        psubsb      scratch1, %2            ; qs0 - ps0
-        paddsb      scratch2, scratch1      ; vp8_filter += (qs0 - ps0)
-        paddsb      scratch2, scratch1      ; vp8_filter += (qs0 - ps0)
-        paddsb      scratch2, scratch1      ; vp8_filter += (qs0 - ps0)
-        pand        %5, scratch2            ; &= mask
-
-        movdqa      scratch2, %5
-        paddsb      %5, [GLOBAL(t4)]        ; Filter1
-        paddsb      scratch2, [GLOBAL(t3)]  ; Filter2
-
-        ; Filter1 >> 3
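-        ; (psrlw shifts 16-bit lanes, so a signed per-byte shift is
-        ;  emulated: keep the low 5 bits of each byte (t1f) and OR the
-        ;  sign-extension bits (te0) back in for negative bytes)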
-        movdqa      scratch1, zero
-        pcmpgtb     scratch1, %5
-        psrlw       %5, 3
-        pand        scratch1, [GLOBAL(te0)]
-        pand        %5, [GLOBAL(t1f)]
-        por         %5, scratch1
-
-        psubsb      %3, %5                  ; qs0 - Filter1
-        pxor        %3, [GLOBAL(t80)]
-
-        ; Filter2 >> 3
-        movdqa      scratch1, zero
-        pcmpgtb     scratch1, scratch2
-        psrlw       scratch2, 3
-        pand        scratch1, [GLOBAL(te0)]
-        pand        scratch2, [GLOBAL(t1f)]
-        por         scratch2, scratch1
-
-        paddsb      %2, scratch2            ; ps0 + Filter2
-        pxor        %2, [GLOBAL(t80)]
-
-        ; outer tap adjustments
-        paddsb      %5, [GLOBAL(t1)]
-        movdqa      scratch1, zero
-        pcmpgtb     scratch1, %5
-        psrlw       %5, 1
-        pand        scratch1, [GLOBAL(t80)]
-        pand        %5, [GLOBAL(t7f)]
-        por         %5, scratch1
-        pand        %5, %6                  ; vp8_filter &= ~hev
-
-        psubsb      %4, %5                  ; qs1 - vp8_filter
-        pxor        %4, [GLOBAL(t80)]
-
-        paddsb      %1, %5                  ; ps1 + vp8_filter
-        pxor        %1, [GLOBAL(t80)]
-%endmacro
-
-;void vp8_loop_filter_bh_y_sse2
-;(
-;    unsigned char *src_ptr,
-;    int            src_pixel_step,
-;    const char    *blimit,
-;    const char    *limit,
-;    const char    *thresh
-;)
-global sym(vp8_loop_filter_bh_y_sse2) PRIVATE
-sym(vp8_loop_filter_bh_y_sse2):
-
-%if LIBVPX_YASM_WIN64
-    %define src      rcx ; src_ptr
-    %define stride   rdx ; src_pixel_step
-    %define blimit   r8
-    %define limit    r9
-    %define thresh   r10
-
-    %define spp      rax
-    %define stride3  r11
-    %define stride5  r12
-    %define stride7  r13
-
-    push    rbp
-    mov     rbp, rsp
-    SAVE_XMM 11
-    push    r12
-    push    r13
-    mov     thresh, arg(4)
-%else
-    %define src      rdi ; src_ptr
-    %define stride   rsi ; src_pixel_step
-    %define blimit   rdx
-    %define limit    rcx
-    %define thresh   r8
-
-    %define spp      rax
-    %define stride3  r9
-    %define stride5  r10
-    %define stride7  r11
-%endif
-
-    %define scratch1 xmm5
-    %define scratch2 xmm6
-    %define zero     xmm7
-
-    %define i0       [src]
-    %define i1       [spp]
-    %define i2       [src + 2 * stride]
-    %define i3       [spp + 2 * stride]
-    %define i4       [src + 4 * stride]
-    %define i5       [spp + 4 * stride]
-    %define i6       [src + 2 * stride3]
-    %define i7       [spp + 2 * stride3]
-    %define i8       [src + 8 * stride]
-    %define i9       [spp + 8 * stride]
-    %define i10      [src + 2 * stride5]
-    %define i11      [spp + 2 * stride5]
-    %define i12      [src + 4 * stride3]
-    %define i13      [spp + 4 * stride3]
-    %define i14      [src + 2 * stride7]
-    %define i15      [spp + 2 * stride7]
-
-    ; prep work
-    lea         spp, [src + stride]
-    lea         stride3, [stride + 2 * stride]
-    lea         stride5, [stride3 + 2 * stride]
-    lea         stride7, [stride3 + 4 * stride]
-    pxor        zero, zero
-
-        ; load the first set into registers
-        movdqa       xmm0, i0
-        movdqa       xmm1, i1
-        movdqa       xmm2, i2
-        movdqa       xmm3, i3
-        movdqa       xmm4, i4
-        movdqa       xmm8, i5
-        movdqa       xmm9, i6   ; q2, will contain abs(p1-p0)
-        movdqa       xmm10, i7
-LF_FILTER_HEV_MASK xmm0, xmm1, xmm2, xmm3, xmm4, xmm8, xmm9, xmm10
-
-        movdqa       xmm1, i2
-        movdqa       xmm2, i3
-        movdqa       xmm3, i4
-        movdqa       xmm8, i5
-LF_FILTER xmm1, xmm2, xmm3, xmm8, xmm0, xmm4
-        movdqa       i2, xmm1
-        movdqa       i3, xmm2
-
-; second set
-        movdqa       i4, xmm3
-        movdqa       i5, xmm8
-
-        movdqa       xmm0, i6
-        movdqa       xmm1, i7
-        movdqa       xmm2, i8
-        movdqa       xmm4, i9
-        movdqa       xmm10, i10   ; q2, will contain abs(p1-p0)
-        movdqa       xmm11, i11
-LF_FILTER_HEV_MASK xmm3, xmm8, xmm0, xmm1, xmm2, xmm4, xmm10, xmm11, xmm9
-
-        movdqa       xmm0, i6
-        movdqa       xmm1, i7
-        movdqa       xmm4, i8
-        movdqa       xmm8, i9
-LF_FILTER xmm0, xmm1, xmm4, xmm8, xmm3, xmm2
-        movdqa       i6, xmm0
-        movdqa       i7, xmm1
-
-; last set
-        movdqa       i8, xmm4
-        movdqa       i9, xmm8
-
-        movdqa       xmm0, i10
-        movdqa       xmm1, i11
-        movdqa       xmm2, i12
-        movdqa       xmm3, i13
-        movdqa       xmm9, i14   ; q2, will contain abs(p1-p0)
-        movdqa       xmm11, i15
-LF_FILTER_HEV_MASK xmm4, xmm8, xmm0, xmm1, xmm2, xmm3, xmm9, xmm11, xmm10
-
-        movdqa       xmm0, i10
-        movdqa       xmm1, i11
-        movdqa       xmm3, i12
-        movdqa       xmm8, i13
-LF_FILTER xmm0, xmm1, xmm3, xmm8, xmm4, xmm2
-        movdqa       i10, xmm0
-        movdqa       i11, xmm1
-        movdqa       i12, xmm3
-        movdqa       i13, xmm8
-
-%if LIBVPX_YASM_WIN64
-    pop    r13
-    pop    r12
-    RESTORE_XMM
-    pop    rbp
-%endif
-
-    ret
-
-
-;void vp8_loop_filter_bv_y_sse2
-;(
-;    unsigned char *src_ptr,
-;    int            src_pixel_step,
-;    const char    *blimit,
-;    const char    *limit,
-;    const char    *thresh
-;)
-
-global sym(vp8_loop_filter_bv_y_sse2) PRIVATE
-sym(vp8_loop_filter_bv_y_sse2):
-
-%if LIBVPX_YASM_WIN64
-    %define src      rcx ; src_ptr
-    %define stride   rdx ; src_pixel_step
-    %define blimit   r8
-    %define limit    r9
-    %define thresh   r10
-
-    %define spp      rax
-    %define stride3  r11
-    %define stride5  r12
-    %define stride7  r13
-
-    push    rbp
-    mov     rbp, rsp
-    SAVE_XMM 15
-    push    r12
-    push    r13
-    mov     thresh, arg(4)
-%else
-    %define src      rdi
-    %define stride   rsi
-    %define blimit   rdx
-    %define limit    rcx
-    %define thresh   r8
-
-    %define spp      rax
-    %define stride3  r9
-    %define stride5  r10
-    %define stride7  r11
-%endif
-
-    %define scratch1 xmm5
-    %define scratch2 xmm6
-    %define zero     xmm7
-
-    %define s0       [src]
-    %define s1       [spp]
-    %define s2       [src + 2 * stride]
-    %define s3       [spp + 2 * stride]
-    %define s4       [src + 4 * stride]
-    %define s5       [spp + 4 * stride]
-    %define s6       [src + 2 * stride3]
-    %define s7       [spp + 2 * stride3]
-    %define s8       [src + 8 * stride]
-    %define s9       [spp + 8 * stride]
-    %define s10      [src + 2 * stride5]
-    %define s11      [spp + 2 * stride5]
-    %define s12      [src + 4 * stride3]
-    %define s13      [spp + 4 * stride3]
-    %define s14      [src + 2 * stride7]
-    %define s15      [spp + 2 * stride7]
-
-    %define i0       [rsp]
-    %define i1       [rsp + 16]
-    %define i2       [rsp + 32]
-    %define i3       [rsp + 48]
-    %define i4       [rsp + 64]
-    %define i5       [rsp + 80]
-    %define i6       [rsp + 96]
-    %define i7       [rsp + 112]
-    %define i8       [rsp + 128]
-    %define i9       [rsp + 144]
-    %define i10      [rsp + 160]
-    %define i11      [rsp + 176]
-    %define i12      [rsp + 192]
-    %define i13      [rsp + 208]
-    %define i14      [rsp + 224]
-    %define i15      [rsp + 240]
-
-    ALIGN_STACK 16, rax
-
-    ; reserve stack space
-    %define      temp_storage  0 ; size is 256 (16*16)
-    %define      stack_size 256
-    sub          rsp, stack_size
-
-    ; prep work
-    lea         spp, [src + stride]
-    lea         stride3, [stride + 2 * stride]
-    lea         stride5, [stride3 + 2 * stride]
-    lea         stride7, [stride3 + 4 * stride]
-
-        ; 8-f
-        movdqa      xmm0, s8
-        movdqa      xmm1, xmm0
-        punpcklbw   xmm0, s9                ; 80 90
-        punpckhbw   xmm1, s9                ; 88 98
-
-        movdqa      xmm2, s10
-        movdqa      xmm3, xmm2
-        punpcklbw   xmm2, s11 ; a0 b0
-        punpckhbw   xmm3, s11 ; a8 b8
-
-        movdqa      xmm4, xmm0
-        punpcklwd   xmm0, xmm2              ; 80 90 a0 b0
-        punpckhwd   xmm4, xmm2              ; 84 94 a4 b4
-
-        movdqa      xmm2, xmm1
-        punpcklwd   xmm1, xmm3              ; 88 98 a8 b8
-        punpckhwd   xmm2, xmm3              ; 8c 9c ac bc
-
-        ; using xmm[0124]
-        ; work on next 4 rows
-
-        movdqa      xmm3, s12
-        movdqa      xmm5, xmm3
-        punpcklbw   xmm3, s13 ; c0 d0
-        punpckhbw   xmm5, s13 ; c8 d8
-
-        movdqa      xmm6, s14
-        movdqa      xmm7, xmm6
-        punpcklbw   xmm6, s15 ; e0 f0
-        punpckhbw   xmm7, s15 ; e8 f8
-
-        movdqa      xmm8, xmm3
-        punpcklwd   xmm3, xmm6              ; c0 d0 e0 f0
-        punpckhwd   xmm8, xmm6              ; c4 d4 e4 f4
-
-        movdqa      xmm6, xmm5
-        punpcklwd   xmm5, xmm7              ; c8 d8 e8 f8
-        punpckhwd   xmm6, xmm7              ; cc dc ec fc
-
-        ; pull the third and fourth sets together
-
-        movdqa      xmm7, xmm0
-        punpckldq   xmm0, xmm3              ; 80 90 a0 b0 c0 d0 e0 f0
-        punpckhdq   xmm7, xmm3              ; 82 92 a2 b2 c2 d2 e2 f2
-
-        movdqa      xmm3, xmm4
-        punpckldq   xmm4, xmm8              ; 84 94 a4 b4 c4 d4 e4 f4
-        punpckhdq   xmm3, xmm8              ; 86 96 a6 b6 c6 d6 e6 f6
-
-        movdqa      xmm8, xmm1
-        punpckldq   xmm1, xmm5              ; 88 98 a8 b8 c8 d8 e8 f8
-        punpckhdq   xmm8, xmm5              ; 8a 9a aa ba ca da ea fa
-
-        movdqa      xmm5, xmm2
-        punpckldq   xmm2, xmm6              ; 8c 9c ac bc cc dc ec fc
-        punpckhdq   xmm5, xmm6              ; 8e 9e ae be ce de ee fe
-
-        ; save the calculations. we only have 15 registers ...
-        movdqa      i0, xmm0
-        movdqa      i1, xmm7
-        movdqa      i2, xmm4
-        movdqa      i3, xmm3
-        movdqa      i4, xmm1
-        movdqa      i5, xmm8
-        movdqa      i6, xmm2
-        movdqa      i7, xmm5
-
-        ; 0-7
-        movdqa      xmm0, s0
-        movdqa      xmm1, xmm0
-        punpcklbw   xmm0, s1 ; 00 10
-        punpckhbw   xmm1, s1 ; 08 18
-
-        movdqa      xmm2, s2
-        movdqa      xmm3, xmm2
-        punpcklbw   xmm2, s3 ; 20 30
-        punpckhbw   xmm3, s3 ; 28 38
-
-        movdqa      xmm4, xmm0
-        punpcklwd   xmm0, xmm2              ; 00 10 20 30
-        punpckhwd   xmm4, xmm2              ; 04 14 24 34
-
-        movdqa      xmm2, xmm1
-        punpcklwd   xmm1, xmm3              ; 08 18 28 38
-        punpckhwd   xmm2, xmm3              ; 0c 1c 2c 3c
-
-        ; using xmm[0124]
-        ; work on next 4 rows
-
-        movdqa      xmm3, s4
-        movdqa      xmm5, xmm3
-        punpcklbw   xmm3, s5 ; 40 50
-        punpckhbw   xmm5, s5 ; 48 58
-
-        movdqa      xmm6, s6
-        movdqa      xmm7, xmm6
-        punpcklbw   xmm6, s7   ; 60 70
-        punpckhbw   xmm7, s7   ; 68 78
-
-        movdqa      xmm8, xmm3
-        punpcklwd   xmm3, xmm6              ; 40 50 60 70
-        punpckhwd   xmm8, xmm6              ; 44 54 64 74
-
-        movdqa      xmm6, xmm5
-        punpcklwd   xmm5, xmm7              ; 48 58 68 78
-        punpckhwd   xmm6, xmm7              ; 4c 5c 6c 7c
-
-        ; pull the first two sets together
-
-        movdqa      xmm7, xmm0
-        punpckldq   xmm0, xmm3              ; 00 10 20 30 40 50 60 70
-        punpckhdq   xmm7, xmm3              ; 02 12 22 32 42 52 62 72
-
-        movdqa      xmm3, xmm4
-        punpckldq   xmm4, xmm8              ; 04 14 24 34 44 54 64 74
-        punpckhdq   xmm3, xmm8              ; 06 16 26 36 46 56 66 76
-
-        movdqa      xmm8, xmm1
-        punpckldq   xmm1, xmm5              ; 08 18 28 38 48 58 68 78
-        punpckhdq   xmm8, xmm5              ; 0a 1a 2a 3a 4a 5a 6a 7a
-
-        movdqa      xmm5, xmm2
-        punpckldq   xmm2, xmm6              ; 0c 1c 2c 3c 4c 5c 6c 7c
-        punpckhdq   xmm5, xmm6              ; 0e 1e 2e 3e 4e 5e 6e 7e
-        ; final combination
-
-        movdqa      xmm6, xmm0
-        punpcklqdq  xmm0, i0
-        punpckhqdq  xmm6, i0
-
-        movdqa      xmm9, xmm7
-        punpcklqdq  xmm7, i1
-        punpckhqdq  xmm9, i1
-
-        movdqa      xmm10, xmm4
-        punpcklqdq  xmm4, i2
-        punpckhqdq  xmm10, i2
-
-        movdqa      xmm11, xmm3
-        punpcklqdq  xmm3, i3
-        punpckhqdq  xmm11, i3
-
-        movdqa      xmm12, xmm1
-        punpcklqdq  xmm1, i4
-        punpckhqdq  xmm12, i4
-
-        movdqa      xmm13, xmm8
-        punpcklqdq  xmm8, i5
-        punpckhqdq  xmm13, i5
-
-        movdqa      xmm14, xmm2
-        punpcklqdq  xmm2, i6
-        punpckhqdq  xmm14, i6
-
-        movdqa      xmm15, xmm5
-        punpcklqdq  xmm5, i7
-        punpckhqdq  xmm15, i7
-
-        movdqa      i0, xmm0
-        movdqa      i1, xmm6
-        movdqa      i2, xmm7
-        movdqa      i3, xmm9
-        movdqa      i4, xmm4
-        movdqa      i5, xmm10
-        movdqa      i6, xmm3
-        movdqa      i7, xmm11
-        movdqa      i8, xmm1
-        movdqa      i9, xmm12
-        movdqa      i10, xmm8
-        movdqa      i11, xmm13
-        movdqa      i12, xmm2
-        movdqa      i13, xmm14
-        movdqa      i14, xmm5
-        movdqa      i15, xmm15
-
-; TRANSPOSED DATA AVAILABLE ON THE STACK
-
-        movdqa      xmm12, xmm6
-        movdqa      xmm13, xmm7
-
-        pxor        zero, zero
-
-LF_FILTER_HEV_MASK xmm0, xmm12, xmm13, xmm9, xmm4, xmm10, xmm3, xmm11
-
-        movdqa       xmm1, i2
-        movdqa       xmm2, i3
-        movdqa       xmm8, i4
-        movdqa       xmm9, i5
-LF_FILTER xmm1, xmm2, xmm8, xmm9, xmm0, xmm4
-        movdqa       i2, xmm1
-        movdqa       i3, xmm2
-
-; second set
-        movdqa       i4, xmm8
-        movdqa       i5, xmm9
-
-        movdqa       xmm0, i6
-        movdqa       xmm1, i7
-        movdqa       xmm2, i8
-        movdqa       xmm4, i9
-        movdqa       xmm10, i10   ; q2, will contain abs(p1-p0)
-        movdqa       xmm11, i11
-LF_FILTER_HEV_MASK xmm8, xmm9, xmm0, xmm1, xmm2, xmm4, xmm10, xmm11, xmm3
-
-        movdqa       xmm0, i6
-        movdqa       xmm1, i7
-        movdqa       xmm3, i8
-        movdqa       xmm4, i9
-LF_FILTER xmm0, xmm1, xmm3, xmm4, xmm8, xmm2
-        movdqa       i6, xmm0
-        movdqa       i7, xmm1
-
-; last set
-        movdqa       i8, xmm3
-        movdqa       i9, xmm4
-
-        movdqa       xmm0, i10
-        movdqa       xmm1, i11
-        movdqa       xmm2, i12
-        movdqa       xmm8, i13
-        movdqa       xmm9, i14   ; q2, will contain abs(p1-p0)
-        movdqa       xmm11, i15
-LF_FILTER_HEV_MASK xmm3, xmm4, xmm0, xmm1, xmm2, xmm8, xmm9, xmm11, xmm10
-
-        movdqa       xmm0, i10
-        movdqa       xmm1, i11
-        movdqa       xmm4, i12
-        movdqa       xmm8, i13
-LF_FILTER xmm0, xmm1, xmm4, xmm8, xmm3, xmm2
-        movdqa       i10, xmm0
-        movdqa       i11, xmm1
-        movdqa       i12, xmm4
-        movdqa       i13, xmm8
-
-
-; RESHUFFLE AND WRITE OUT
-        ; 8-f
-        movdqa      xmm0, i8
-        movdqa      xmm1, xmm0
-        punpcklbw   xmm0, i9                ; 80 90
-        punpckhbw   xmm1, i9                ; 88 98
-
-        movdqa      xmm2, i10
-        movdqa      xmm3, xmm2
-        punpcklbw   xmm2, i11               ; a0 b0
-        punpckhbw   xmm3, i11               ; a8 b8
-
-        movdqa      xmm4, xmm0
-        punpcklwd   xmm0, xmm2              ; 80 90 a0 b0
-        punpckhwd   xmm4, xmm2              ; 84 94 a4 b4
-
-        movdqa      xmm2, xmm1
-        punpcklwd   xmm1, xmm3              ; 88 98 a8 b8
-        punpckhwd   xmm2, xmm3              ; 8c 9c ac bc
-
-        ; using xmm[0124]
-        ; work on next 4 rows
-
-        movdqa      xmm3, i12
-        movdqa      xmm5, xmm3
-        punpcklbw   xmm3, i13               ; c0 d0
-        punpckhbw   xmm5, i13               ; c8 d8
-
-        movdqa      xmm6, i14
-        movdqa      xmm7, xmm6
-        punpcklbw   xmm6, i15               ; e0 f0
-        punpckhbw   xmm7, i15               ; e8 f8
-
-        movdqa      xmm8, xmm3
-        punpcklwd   xmm3, xmm6              ; c0 d0 e0 f0
-        punpckhwd   xmm8, xmm6              ; c4 d4 e4 f4
-
-        movdqa      xmm6, xmm5
-        punpcklwd   xmm5, xmm7              ; c8 d8 e8 f8
-        punpckhwd   xmm6, xmm7              ; cc dc ec fc
-
-        ; pull the third and fourth sets together
-
-        movdqa      xmm7, xmm0
-        punpckldq   xmm0, xmm3              ; 80 90 a0 b0 c0 d0 e0 f0
-        punpckhdq   xmm7, xmm3              ; 82 92 a2 b2 c2 d2 e2 f2
-
-        movdqa      xmm3, xmm4
-        punpckldq   xmm4, xmm8              ; 84 94 a4 b4 c4 d4 e4 f4
-        punpckhdq   xmm3, xmm8              ; 86 96 a6 b6 c6 d6 e6 f6
-
-        movdqa      xmm8, xmm1
-        punpckldq   xmm1, xmm5              ; 88 98 a8 b8 c8 d8 e8 f8
-        punpckhdq   xmm8, xmm5              ; 8a 9a aa ba ca da ea fa
-
-        movdqa      xmm5, xmm2
-        punpckldq   xmm2, xmm6              ; 8c 9c ac bc cc dc ec fc
-        punpckhdq   xmm5, xmm6              ; 8e 9e ae be ce de ee fe
-
-        ; save the calculations. we only have 15 registers ...
-        movdqa      i8, xmm0
-        movdqa      i9, xmm7
-        movdqa      i10, xmm4
-        movdqa      i11, xmm3
-        movdqa      i12, xmm1
-        movdqa      i13, xmm8
-        movdqa      i14, xmm2
-        movdqa      i15, xmm5
-
-        ; 0-7
-        movdqa      xmm0, i0
-        movdqa      xmm1, xmm0
-        punpcklbw   xmm0, i1                ; 00 10
-        punpckhbw   xmm1, i1                ; 08 18
-
-        movdqa      xmm2, i2
-        movdqa      xmm3, xmm2
-        punpcklbw   xmm2, i3                ; 20 30
-        punpckhbw   xmm3, i3                ; 28 38
-
-        movdqa      xmm4, xmm0
-        punpcklwd   xmm0, xmm2              ; 00 10 20 30
-        punpckhwd   xmm4, xmm2              ; 04 14 24 34
-
-        movdqa      xmm2, xmm1
-        punpcklwd   xmm1, xmm3              ; 08 18 28 38
-        punpckhwd   xmm2, xmm3              ; 0c 1c 2c 3c
-
-        ; using xmm[0124]
-        ; work on next 4 rows
-
-        movdqa      xmm3, i4
-        movdqa      xmm5, xmm3
-        punpcklbw   xmm3, i5                ; 40 50
-        punpckhbw   xmm5, i5                ; 48 58
-
-        movdqa      xmm6, i6
-        movdqa      xmm7, xmm6
-        punpcklbw   xmm6, i7                ; 60 70
-        punpckhbw   xmm7, i7                ; 68 78
-
-        movdqa      xmm8, xmm3
-        punpcklwd   xmm3, xmm6              ; 40 50 60 70
-        punpckhwd   xmm8, xmm6              ; 44 54 64 74
-
-        movdqa      xmm6, xmm5
-        punpcklwd   xmm5, xmm7              ; 48 58 68 78
-        punpckhwd   xmm6, xmm7              ; 4c 5c 6c 7c
-
-        ; pull the first two sets together
-
-        movdqa      xmm7, xmm0
-        punpckldq   xmm0, xmm3              ; 00 10 20 30 40 50 60 70
-        punpckhdq   xmm7, xmm3              ; 02 12 22 32 42 52 62 72
-
-        movdqa      xmm3, xmm4
-        punpckldq   xmm4, xmm8              ; 04 14 24 34 44 54 64 74
-        punpckhdq   xmm3, xmm8              ; 06 16 26 36 46 56 66 76
-
-        movdqa      xmm8, xmm1
-        punpckldq   xmm1, xmm5              ; 08 18 28 38 48 58 68 78
-        punpckhdq   xmm8, xmm5              ; 0a 1a 2a 3a 4a 5a 6a 7a
-
-        movdqa      xmm5, xmm2
-        punpckldq   xmm2, xmm6              ; 0c 1c 2c 3c 4c 5c 6c 7c
-        punpckhdq   xmm5, xmm6              ; 0e 1e 2e 3e 4e 5e 6e 7e
-        ; final combination
-
-        movdqa      xmm6, xmm0
-        punpcklqdq  xmm0, i8
-        punpckhqdq  xmm6, i8
-
-        movdqa      xmm9, xmm7
-        punpcklqdq  xmm7, i9
-        punpckhqdq  xmm9, i9
-
-        movdqa      xmm10, xmm4
-        punpcklqdq  xmm4, i10
-        punpckhqdq  xmm10, i10
-
-        movdqa      xmm11, xmm3
-        punpcklqdq  xmm3, i11
-        punpckhqdq  xmm11, i11
-
-        movdqa      xmm12, xmm1
-        punpcklqdq  xmm1, i12
-        punpckhqdq  xmm12, i12
-
-        movdqa      xmm13, xmm8
-        punpcklqdq  xmm8, i13
-        punpckhqdq  xmm13, i13
-
-        movdqa      xmm14, xmm2
-        punpcklqdq  xmm2, i14
-        punpckhqdq  xmm14, i14
-
-        movdqa      xmm15, xmm5
-        punpcklqdq  xmm5, i15
-        punpckhqdq  xmm15, i15
-
-        movdqa      s0, xmm0
-        movdqa      s1, xmm6
-        movdqa      s2, xmm7
-        movdqa      s3, xmm9
-        movdqa      s4, xmm4
-        movdqa      s5, xmm10
-        movdqa      s6, xmm3
-        movdqa      s7, xmm11
-        movdqa      s8, xmm1
-        movdqa      s9, xmm12
-        movdqa      s10, xmm8
-        movdqa      s11, xmm13
-        movdqa      s12, xmm2
-        movdqa      s13, xmm14
-        movdqa      s14, xmm5
-        movdqa      s15, xmm15
-
-    ; free stack space
-    add          rsp, stack_size
-
-    ; un-ALIGN_STACK
-    pop          rsp
-
-%if LIBVPX_YASM_WIN64
-    pop    r13
-    pop    r12
-    RESTORE_XMM
-    pop    rbp
-%endif
-
-    ret
-
-SECTION_RODATA
-align 16
-te0:
-    times 16 db 0xe0
-align 16
-t7f:
-    times 16 db 0x7f
-align 16
-tfe:
-    times 16 db 0xfe
-align 16
-t1f:
-    times 16 db 0x1f
-align 16
-t80:
-    times 16 db 0x80
-align 16
-t1:
-    times 16 db 0x01
-align 16
-t3:
-    times 16 db 0x03
-align 16
-t4:
-    times 16 db 0x04
deleted file mode 100644
--- a/media/libvpx/vp8/decoder/decodeframe.c
+++ /dev/null
@@ -1,1397 +0,0 @@
-/*
- *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#include "vpx_config.h"
-#include "vp8_rtcd.h"
-#include "./vpx_scale_rtcd.h"
-#include "onyxd_int.h"
-#include "vp8/common/header.h"
-#include "vp8/common/reconintra4x4.h"
-#include "vp8/common/reconinter.h"
-#include "detokenize.h"
-#include "vp8/common/common.h"
-#include "vp8/common/invtrans.h"
-#include "vp8/common/alloccommon.h"
-#include "vp8/common/entropymode.h"
-#include "vp8/common/quant_common.h"
-#include "vpx_scale/vpx_scale.h"
-#include "vp8/common/setupintrarecon.h"
-
-#include "decodemv.h"
-#include "vp8/common/extend.h"
-#if CONFIG_ERROR_CONCEALMENT
-#include "error_concealment.h"
-#endif
-#include "vpx_mem/vpx_mem.h"
-#include "vp8/common/threading.h"
-#include "decoderthreading.h"
-#include "dboolhuff.h"
-
-#include <assert.h>
-#include <stdio.h>
-
-void vp8cx_init_de_quantizer(VP8D_COMP *pbi)
-{
-    int Q;
-    VP8_COMMON *const pc = & pbi->common;
-
-    for (Q = 0; Q < QINDEX_RANGE; Q++)
-    {
-        pc->Y1dequant[Q][0] = (short)vp8_dc_quant(Q, pc->y1dc_delta_q);
-        pc->Y2dequant[Q][0] = (short)vp8_dc2quant(Q, pc->y2dc_delta_q);
-        pc->UVdequant[Q][0] = (short)vp8_dc_uv_quant(Q, pc->uvdc_delta_q);
-
-        pc->Y1dequant[Q][1] = (short)vp8_ac_yquant(Q);
-        pc->Y2dequant[Q][1] = (short)vp8_ac2quant(Q, pc->y2ac_delta_q);
-        pc->UVdequant[Q][1] = (short)vp8_ac_uv_quant(Q, pc->uvac_delta_q);
-    }
-}
-
-void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd)
-{
-    int i;
-    int QIndex;
-    MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
-    VP8_COMMON *const pc = & pbi->common;
-
-    /* Decide whether to use the default or alternate baseline Q value. */
-    if (xd->segmentation_enabled)
-    {
-        /* Abs Value */
-        if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA)
-            QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id];
-
-        /* Delta Value */
-        else
-        {
-            QIndex = pc->base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id];
-            QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0;    /* Clamp to valid range */
-        }
-    }
-    else
-        QIndex = pc->base_qindex;
-
-    /* Set up the macroblock dequant constants */
-    xd->dequant_y1_dc[0] = 1;
-    xd->dequant_y1[0] = pc->Y1dequant[QIndex][0];
-    xd->dequant_y2[0] = pc->Y2dequant[QIndex][0];
-    xd->dequant_uv[0] = pc->UVdequant[QIndex][0];
-
-    for (i = 1; i < 16; i++)
-    {
-        xd->dequant_y1_dc[i] =
-        xd->dequant_y1[i] = pc->Y1dequant[QIndex][1];
-        xd->dequant_y2[i] = pc->Y2dequant[QIndex][1];
-        xd->dequant_uv[i] = pc->UVdequant[QIndex][1];
-    }
-}
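The tables built above hold one DC factor (index 0) and the shared AC factor replicated across indices 1..15, so dequantization itself is a plain element-wise multiply. A minimal sketch of a consumer, mirroring what vp8_dequantize_b does (not the shipped implementation):

/* Multiply quantized coefficients by the per-position factors built in
 * vp8_mb_init_dequantizer(): slot 0 is DC, slots 1-15 are AC. */
static void dequant_block_sketch(const short *q, const short *dq, short *out)
{
    int i;
    for (i = 0; i < 16; i++)
        out[i] = (short)(q[i] * dq[i]);
}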
-
-static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
-                              unsigned int mb_idx)
-{
-    MB_PREDICTION_MODE mode;
-    int i;
-#if CONFIG_ERROR_CONCEALMENT
-    int corruption_detected = 0;
-#endif
-
-    if (xd->mode_info_context->mbmi.mb_skip_coeff)
-    {
-        vp8_reset_mb_tokens_context(xd);
-    }
-    else if (!vp8dx_bool_error(xd->current_bc))
-    {
-        int eobtotal;
-        eobtotal = vp8_decode_mb_tokens(pbi, xd);
-
-        /* Special case:  Force the loopfilter to skip when eobtotal is zero */
-        xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal==0);
-    }
-
-    mode = xd->mode_info_context->mbmi.mode;
-
-    if (xd->segmentation_enabled)
-        vp8_mb_init_dequantizer(pbi, xd);
-
-
-#if CONFIG_ERROR_CONCEALMENT
-
-    if(pbi->ec_active)
-    {
-        int throw_residual;
-        /* When we have independent partitions we can apply the residual even
-         * though other partitions within the frame are corrupt.
-         */
-        throw_residual = (!pbi->independent_partitions &&
-                          pbi->frame_corrupt_residual);
-        throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));
-
-        if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual))
-        {
-            /* MB with corrupt residuals or corrupt mode/motion vectors.
-             * Better to use the predictor as reconstruction.
-             */
-            pbi->frame_corrupt_residual = 1;
-            vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
-            vp8_conceal_corrupt_mb(xd);
-
-
-            corruption_detected = 1;
-
-            /* force idct to be skipped for B_PRED and use the
-             * prediction only for reconstruction
-             */
-            vpx_memset(xd->eobs, 0, 25);
-        }
-    }
-#endif
-
-    /* do prediction */
-    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
-    {
-        vp8_build_intra_predictors_mbuv_s(xd,
-                                          xd->recon_above[1],
-                                          xd->recon_above[2],
-                                          xd->recon_left[1],
-                                          xd->recon_left[2],
-                                          xd->recon_left_stride[1],
-                                          xd->dst.u_buffer, xd->dst.v_buffer,
-                                          xd->dst.uv_stride);
-
-        if (mode != B_PRED)
-        {
-            vp8_build_intra_predictors_mby_s(xd,
-                                                 xd->recon_above[0],
-                                                 xd->recon_left[0],
-                                                 xd->recon_left_stride[0],
-                                                 xd->dst.y_buffer,
-                                                 xd->dst.y_stride);
-        }
-        else
-        {
-            short *DQC = xd->dequant_y1;
-            int dst_stride = xd->dst.y_stride;
-
-            /* clear out residual eob info */
-            if(xd->mode_info_context->mbmi.mb_skip_coeff)
-                vpx_memset(xd->eobs, 0, 25);
-
-            intra_prediction_down_copy(xd, xd->recon_above[0] + 16);
-
-            for (i = 0; i < 16; i++)
-            {
-                BLOCKD *b = &xd->block[i];
-                unsigned char *dst = xd->dst.y_buffer + b->offset;
-                B_PREDICTION_MODE b_mode =
-                    xd->mode_info_context->bmi[i].as_mode;
-                unsigned char *Above = dst - dst_stride;
-                unsigned char *yleft = dst - 1;
-                int left_stride = dst_stride;
-                unsigned char top_left = Above[-1];
-
-                vp8_intra4x4_predict(Above, yleft, left_stride, b_mode,
-                                     dst, dst_stride, top_left);
-
-                if (xd->eobs[i])
-                {
-                    if (xd->eobs[i] > 1)
-                    {
-                        vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride);
-                    }
-                    else
-                    {
-                        vp8_dc_only_idct_add
-                            (b->qcoeff[0] * DQC[0],
-                                dst, dst_stride,
-                                dst, dst_stride);
-                        vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
-                    }
-                }
-            }
-        }
-    }
-    else
-    {
-        vp8_build_inter_predictors_mb(xd);
-    }
-
-
-#if CONFIG_ERROR_CONCEALMENT
-    if (corruption_detected)
-    {
-        return;
-    }
-#endif
-
-    if(!xd->mode_info_context->mbmi.mb_skip_coeff)
-    {
-        /* dequantization and idct */
-        if (mode != B_PRED)
-        {
-            short *DQC = xd->dequant_y1;
-
-            if (mode != SPLITMV)
-            {
-                BLOCKD *b = &xd->block[24];
-
-                /* do 2nd order transform on the dc block */
-                if (xd->eobs[24] > 1)
-                {
-                    vp8_dequantize_b(b, xd->dequant_y2);
-
-                    vp8_short_inv_walsh4x4(&b->dqcoeff[0],
-                        xd->qcoeff);
-                    vpx_memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
-                }
-                else
-                {
-                    b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
-                    vp8_short_inv_walsh4x4_1(&b->dqcoeff[0],
-                        xd->qcoeff);
-                    vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
-                }
-
-                /* override the dc dequant constant in order to preserve the
-                 * dc components
-                 */
-                DQC = xd->dequant_y1_dc;
-            }
-
-            vp8_dequant_idct_add_y_block
-                            (xd->qcoeff, DQC,
-                             xd->dst.y_buffer,
-                             xd->dst.y_stride, xd->eobs);
-        }
-
-        vp8_dequant_idct_add_uv_block
-                        (xd->qcoeff+16*16, xd->dequant_uv,
-                         xd->dst.u_buffer, xd->dst.v_buffer,
-                         xd->dst.uv_stride, xd->eobs+16);
-    }
-}
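When a block's end-of-block count is 1, only the DC coefficient is present and the full inverse DCT collapses to adding one rounded constant to every predicted pixel, which is why decode_macroblock() takes the vp8_dc_only_idct_add() shortcut above. A sketch of that fast path, modeled on the C reference (vp8_dc_only_idct_add_c), not the dispatched SIMD version:

/* Add the rounded DC term to each pixel of the 4x4 prediction, clamping
 * to [0, 255]. */
static void dc_only_idct_add_sketch(short input_dc,
                                    unsigned char *pred, int pred_stride,
                                    unsigned char *dst, int dst_stride)
{
    const int a1 = (input_dc + 4) >> 3;
    int r, c;
    for (r = 0; r < 4; r++) {
        for (c = 0; c < 4; c++) {
            const int v = pred[c] + a1;
            dst[c] = (unsigned char)(v < 0 ? 0 : (v > 255 ? 255 : v));
        }
        pred += pred_stride;
        dst += dst_stride;
    }
}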
-
-static int get_delta_q(vp8_reader *bc, int prev, int *q_update)
-{
-    int ret_val = 0;
-
-    if (vp8_read_bit(bc))
-    {
-        ret_val = vp8_read_literal(bc, 4);
-
-        if (vp8_read_bit(bc))
-            ret_val = -ret_val;
-    }
-
-    /* Trigger a quantizer update if the delta-q value has changed */
-    if (ret_val != prev)
-        *q_update = 1;
-
-    return ret_val;
-}
-
-#ifdef PACKET_TESTING
-#include <stdio.h>
-FILE *vpxlog = 0;
-#endif
-
-static void yv12_extend_frame_top_c(YV12_BUFFER_CONFIG *ybf)
-{
-    int i;
-    unsigned char *src_ptr1;
-    unsigned char *dest_ptr1;
-
-    unsigned int Border;
-    int plane_stride;
-
-    /***********/
-    /* Y Plane */
-    /***********/
-    Border = ybf->border;
-    plane_stride = ybf->y_stride;
-    src_ptr1 = ybf->y_buffer - Border;
-    dest_ptr1 = src_ptr1 - (Border * plane_stride);
-
-    for (i = 0; i < (int)Border; i++)
-    {
-        vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
-        dest_ptr1 += plane_stride;
-    }
-
-
-    /***********/
-    /* U Plane */
-    /***********/
-    plane_stride = ybf->uv_stride;
-    Border /= 2;
-    src_ptr1 = ybf->u_buffer - Border;
-    dest_ptr1 = src_ptr1 - (Border * plane_stride);
-
-    for (i = 0; i < (int)(Border); i++)
-    {
-        vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
-        dest_ptr1 += plane_stride;
-    }
-
-    /***********/
-    /* V Plane */
-    /***********/
-
-    src_ptr1 = ybf->v_buffer - Border;
-    dest_ptr1 = src_ptr1 - (Border * plane_stride);
-
-    for (i = 0; i < (int)(Border); i++)
-    {
-        vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
-        dest_ptr1 += plane_stride;
-    }
-}
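The helper above replicates the top-most (already left/right-extended) row of each plane upward into the border, one full stride per copy. A self-contained sketch of that idea for a single plane (the helper name is illustrative, not a libvpx API):

#include <string.h>

/* Copy the first row of a plane upward `border` times so reads above the
 * frame see replicated edge pixels. `row0` points at the start of the
 * left-extended first row, i.e. y_buffer - border. */
static void extend_plane_top_sketch(unsigned char *row0, int stride, int border)
{
    unsigned char *dst = row0 - border * stride;
    int i;
    for (i = 0; i < border; i++) {
        memcpy(dst, row0, (size_t)stride);
        dst += stride;
    }
}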
-
-static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf)
-{
-    int i;
-    unsigned char *src_ptr1, *src_ptr2;
-    unsigned char *dest_ptr2;
-
-    unsigned int Border;
-    int plane_stride;
-    int plane_height;
-
-    /***********/
-    /* Y Plane */
-    /***********/
-    Border = ybf->border;
-    plane_stride = ybf->y_stride;
-    plane_height = ybf->y_height;
-
-    src_ptr1 = ybf->y_buffer - Border;
-    src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
-    dest_ptr2 = src_ptr2 + plane_stride;
-
-    for (i = 0; i < (int)Border; i++)
-    {
-        vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
-        dest_ptr2 += plane_stride;
-    }
-
-
-    /***********/
-    /* U Plane */
-    /***********/
-    plane_stride = ybf->uv_stride;
-    plane_height = ybf->uv_height;
-    Border /= 2;
-
-    src_ptr1 = ybf->u_buffer - Border;
-    src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
-    dest_ptr2 = src_ptr2 + plane_stride;
-
-    for (i = 0; i < (int)(Border); i++)
-    {
-        vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
-        dest_ptr2 += plane_stride;
-    }
-
-    /***********/
-    /* V Plane */
-    /***********/
-
-    src_ptr1 = ybf->v_buffer - Border;
-    src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
-    dest_ptr2 = src_ptr2 + plane_stride;
-
-    for (i = 0; i < (int)(Border); i++)
-    {
-        vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
-        dest_ptr2 += plane_stride;
-    }
-}
-
-static void yv12_extend_frame_left_right_c(YV12_BUFFER_CONFIG *ybf,
-                                           unsigned char *y_src,
-                                           unsigned char *u_src,
-                                           unsigned char *v_src)
-{
-    int i;
-    unsigned char *src_ptr1, *src_ptr2;
-    unsigned char *dest_ptr1, *dest_ptr2;
-
-    unsigned int Border;
-    int plane_stride;
-    int plane_height;
-    int plane_width;
-
-    /***********/
-    /* Y Plane */
-    /***********/
-    Border = ybf->border;
-    plane_stride = ybf->y_stride;
-    plane_height = 16;
-    plane_width = ybf->y_width;
-
-    /* copy the left and right most columns out */
-    src_ptr1 = y_src;
-    src_ptr2 = src_ptr1 + plane_width - 1;
-    dest_ptr1 = src_ptr1 - Border;
-    dest_ptr2 = src_ptr2 + 1;
-
-    for (i = 0; i < plane_height; i++)
-    {
-        vpx_memset(dest_ptr1, src_ptr1[0], Border);
-        vpx_memset(dest_ptr2, src_ptr2[0], Border);
-        src_ptr1  += plane_stride;
-        src_ptr2  += plane_stride;
-        dest_ptr1 += plane_stride;
-        dest_ptr2 += plane_stride;
-    }
-
-    /***********/
-    /* U Plane */
-    /***********/
-    plane_stride = ybf->uv_stride;
-    plane_height = 8;
-    plane_width = ybf->uv_width;
-    Border /= 2;
-
-    /* copy the left and right most columns out */
-    src_ptr1 = u_src;
-    src_ptr2 = src_ptr1 + plane_width - 1;
-    dest_ptr1 = src_ptr1 - Border;
-    dest_ptr2 = src_ptr2 + 1;
-
-    for (i = 0; i < plane_height; i++)
-    {
-        vpx_memset(dest_ptr1, src_ptr1[0], Border);
-        vpx_memset(dest_ptr2, src_ptr2[0], Border);
-        src_ptr1  += plane_stride;
-        src_ptr2  += plane_stride;
-        dest_ptr1 += plane_stride;
-        dest_ptr2 += plane_stride;
-    }
-
-    /***********/
-    /* V Plane */
-    /***********/
-
-    /* copy the left and right most columns out */
-    src_ptr1 = v_src;
-    src_ptr2 = src_ptr1 + plane_width - 1;
-    dest_ptr1 = src_ptr1 - Border;
-    dest_ptr2 = src_ptr2 + 1;
-
-    for (i = 0; i < plane_height; i++)
-    {
-        vpx_memset(dest_ptr1, src_ptr1[0], Border);
-        vpx_memset(dest_ptr2, src_ptr2[0], Border);
-        src_ptr1  += plane_stride;
-        src_ptr2  += plane_stride;
-        dest_ptr1 += plane_stride;
-        dest_ptr2 += plane_stride;
-    }
-}
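Left/right extension works per row: each border byte is a copy of the row's outermost pixel, so motion compensation can read past the frame edge without per-pixel clamping. A one-row sketch (hypothetical helper):

#include <string.h>

/* Replicate the first and last pixels of one row into the side borders. */
static void extend_row_sides_sketch(unsigned char *row, int width, int border)
{
    memset(row - border, row[0], (size_t)border);        /* left border  */
    memset(row + width, row[width - 1], (size_t)border); /* right border */
}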
-
-static void decode_mb_rows(VP8D_COMP *pbi)
-{
-    VP8_COMMON *const pc = & pbi->common;
-    MACROBLOCKD *const xd  = & pbi->mb;
-
-    MODE_INFO *lf_mic = xd->mode_info_context;
-
-    int ibc = 0;
-    int num_part = 1 << pc->multi_token_partition;
-
-    int recon_yoffset, recon_uvoffset;
-    int mb_row, mb_col;
-    int mb_idx = 0;
-
-    YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME];
-
-    int recon_y_stride = yv12_fb_new->y_stride;
-    int recon_uv_stride = yv12_fb_new->uv_stride;
-
-    unsigned char *ref_buffer[MAX_REF_FRAMES][3];
-    unsigned char *dst_buffer[3];
-    unsigned char *lf_dst[3];
-    unsigned char *eb_dst[3];
-    int i;
-    int ref_fb_corrupted[MAX_REF_FRAMES];
-
-    ref_fb_corrupted[INTRA_FRAME] = 0;
-
-    for(i = 1; i < MAX_REF_FRAMES; i++)
-    {
-        YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i];
-
-        ref_buffer[i][0] = this_fb->y_buffer;
-        ref_buffer[i][1] = this_fb->u_buffer;
-        ref_buffer[i][2] = this_fb->v_buffer;
-
-        ref_fb_corrupted[i] = this_fb->corrupted;
-    }
-
-    /* Set up the buffer pointers */
-    eb_dst[0] = lf_dst[0] = dst_buffer[0] = yv12_fb_new->y_buffer;
-    eb_dst[1] = lf_dst[1] = dst_buffer[1] = yv12_fb_new->u_buffer;
-    eb_dst[2] = lf_dst[2] = dst_buffer[2] = yv12_fb_new->v_buffer;
-
-    xd->up_available = 0;
-
-    /* Initialize the loop filter for this frame. */
-    if(pc->filter_level)
-        vp8_loop_filter_frame_init(pc, xd, pc->filter_level);
-
-    vp8_setup_intra_recon_top_line(yv12_fb_new);
-
-    /* Decode the individual macro block */
-    for (mb_row = 0; mb_row < pc->mb_rows; mb_row++)
-    {
-        if (num_part > 1)
-        {
-            xd->current_bc = & pbi->mbc[ibc];
-            ibc++;
-
-            if (ibc == num_part)
-                ibc = 0;
-        }
-
-        recon_yoffset = mb_row * recon_y_stride * 16;
-        recon_uvoffset = mb_row * recon_uv_stride * 8;
-
-        /* reset contexts */
-        xd->above_context = pc->above_context;
-        vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
-
-        xd->left_available = 0;
-
-        xd->mb_to_top_edge = -((mb_row * 16) << 3);
-        xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
-
-        xd->recon_above[0] = dst_buffer[0] + recon_yoffset;
-        xd->recon_above[1] = dst_buffer[1] + recon_uvoffset;
-        xd->recon_above[2] = dst_buffer[2] + recon_uvoffset;
-
-        xd->recon_left[0] = xd->recon_above[0] - 1;
-        xd->recon_left[1] = xd->recon_above[1] - 1;
-        xd->recon_left[2] = xd->recon_above[2] - 1;
-
-        xd->recon_above[0] -= xd->dst.y_stride;
-        xd->recon_above[1] -= xd->dst.uv_stride;
-        xd->recon_above[2] -= xd->dst.uv_stride;
-
-        /* TODO: move to outside row loop */
-        xd->recon_left_stride[0] = xd->dst.y_stride;
-        xd->recon_left_stride[1] = xd->dst.uv_stride;
-
-        setup_intra_recon_left(xd->recon_left[0], xd->recon_left[1],
-                               xd->recon_left[2], xd->dst.y_stride,
-                               xd->dst.uv_stride);
-
-        for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
-        {
-            /* Distance of the MB to the various image edges.
-             * These are specified in 1/8th-pel units because they are always
-             * compared against values in those units.
-             */
-            xd->mb_to_left_edge = -((mb_col * 16) << 3);
-            xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
-
-#if CONFIG_ERROR_CONCEALMENT
-            {
-                int corrupt_residual = (!pbi->independent_partitions &&
-                                       pbi->frame_corrupt_residual) ||
-                                       vp8dx_bool_error(xd->current_bc);
-                if (pbi->ec_active &&
-                    xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME &&
-                    corrupt_residual)
-                {
-                    /* We have an intra block with corrupt coefficients; it is
-                     * better to conceal it with an inter block. Interpolate MVs
-                     * from neighboring MBs.
-                     *
-                     * Note that for the first mb with corrupt residual in a frame,
-                     * we might not discover that before decoding the residual. That
-                     * happens after this check, and therefore no inter concealment
-                     * will be done.
-                     */
-                    vp8_interpolate_motion(xd,
-                                           mb_row, mb_col,
-                                           pc->mb_rows, pc->mb_cols,
-                                           pc->mode_info_stride);
-                }
-            }
-#endif
-
-            xd->dst.y_buffer = dst_buffer[0] + recon_yoffset;
-            xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset;
-            xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset;
-
-            if (xd->mode_info_context->mbmi.ref_frame >= LAST_FRAME) {
-              MV_REFERENCE_FRAME ref = xd->mode_info_context->mbmi.ref_frame;
-              xd->pre.y_buffer = ref_buffer[ref][0] + recon_yoffset;
-              xd->pre.u_buffer = ref_buffer[ref][1] + recon_uvoffset;
-              xd->pre.v_buffer = ref_buffer[ref][2] + recon_uvoffset;
-            } else {
-              // ref_frame is INTRA_FRAME, so the pre buffer should not be used.
-              xd->pre.y_buffer = 0;
-              xd->pre.u_buffer = 0;
-              xd->pre.v_buffer = 0;
-            }
-
-            /* propagate errors from reference frames */
-            xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame];
-
-            decode_macroblock(pbi, xd, mb_idx);
-
-            mb_idx++;
-            xd->left_available = 1;
-
-            /* check if the boolean decoder has suffered an error */
-            xd->corrupted |= vp8dx_bool_error(xd->current_bc);
-
-            xd->recon_above[0] += 16;
-            xd->recon_above[1] += 8;
-            xd->recon_above[2] += 8;
-            xd->recon_left[0] += 16;
-            xd->recon_left[1] += 8;
-            xd->recon_left[2] += 8;
-
-            recon_yoffset += 16;
-            recon_uvoffset += 8;
-
-            ++xd->mode_info_context;  /* next mb */
-
-            xd->above_context++;
-        }
-
-        /* adjust to the next row of mbs */
-        vp8_extend_mb_row(yv12_fb_new, xd->dst.y_buffer + 16,
-                          xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
-
-        ++xd->mode_info_context;      /* skip prediction column */
-        xd->up_available = 1;
-
-        if(pc->filter_level)
-        {
-            if(mb_row > 0)
-            {
-                if (pc->filter_type == NORMAL_LOOPFILTER)
-                    vp8_loop_filter_row_normal(pc, lf_mic, mb_row-1,
-                                               recon_y_stride, recon_uv_stride,
-                                               lf_dst[0], lf_dst[1], lf_dst[2]);
-                else
-                    vp8_loop_filter_row_simple(pc, lf_mic, mb_row-1,
-                                               recon_y_stride, recon_uv_stride,
-                                               lf_dst[0], lf_dst[1], lf_dst[2]);
-                if(mb_row > 1)
-                {
-                    yv12_extend_frame_left_right_c(yv12_fb_new,
-                                                   eb_dst[0],
-                                                   eb_dst[1],
-                                                   eb_dst[2]);
-
-                    eb_dst[0] += recon_y_stride  * 16;
-                    eb_dst[1] += recon_uv_stride *  8;
-                    eb_dst[2] += recon_uv_stride *  8;
-                }
-
-                lf_dst[0] += recon_y_stride  * 16;
-                lf_dst[1] += recon_uv_stride *  8;
-                lf_dst[2] += recon_uv_stride *  8;
-                lf_mic += pc->mb_cols;
-                lf_mic++;         /* Skip border mb */
-            }
-        }
-        else
-        {
-            if(mb_row > 0)
-            {
-                yv12_extend_frame_left_right_c(yv12_fb_new,
-                                               eb_dst[0],
-                                               eb_dst[1],
-                                               eb_dst[2]);
-                eb_dst[0] += recon_y_stride  * 16;
-                eb_dst[1] += recon_uv_stride *  8;
-                eb_dst[2] += recon_uv_stride *  8;
-            }
-        }
-    }
-
-    if(pc->filter_level)
-    {
-        if (pc->filter_type == NORMAL_LOOPFILTER)
-            vp8_loop_filter_row_normal(pc, lf_mic, mb_row-1, recon_y_stride,
-                                       recon_uv_stride, lf_dst[0], lf_dst[1],
-                                       lf_dst[2]);
-        else
-            vp8_loop_filter_row_simple(pc, lf_mic, mb_row-1, recon_y_stride,
-                                       recon_uv_stride, lf_dst[0], lf_dst[1],
-                                       lf_dst[2]);
-
-        yv12_extend_frame_left_right_c(yv12_fb_new,
-                                       eb_dst[0],
-                                       eb_dst[1],
-                                       eb_dst[2]);
-        eb_dst[0] += recon_y_stride  * 16;
-        eb_dst[1] += recon_uv_stride *  8;
-        eb_dst[2] += recon_uv_stride *  8;
-    }
-    yv12_extend_frame_left_right_c(yv12_fb_new,
-                                   eb_dst[0],
-                                   eb_dst[1],
-                                   eb_dst[2]);
-    yv12_extend_frame_top_c(yv12_fb_new);
-    yv12_extend_frame_bottom_c(yv12_fb_new);
-
-}
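decode_mb_rows() runs three stages with a deliberate lag: with the loop filter enabled, row r is decoded while row r-1 is loop-filtered and row r-2 is edge-extended, so each stage only reads pixels the previous stage has finished; the code after the loop drains the remaining rows. A minimal, self-contained sketch of that pipeline shape (the function-pointer helpers are hypothetical):

typedef void (*row_fn)(int row);

/* Run decode/filter/extend with the one-row lags used above, then drain. */
static void run_row_pipeline_sketch(int rows, row_fn decode,
                                    row_fn filter, row_fn extend)
{
    int r;
    for (r = 0; r < rows; r++) {
        decode(r);
        if (r > 0) filter(r - 1);
        if (r > 1) extend(r - 2);
    }
    if (rows > 0) filter(rows - 1);   /* last row still needs filtering */
    if (rows > 1) extend(rows - 2);   /* extension lags two rows behind */
    if (rows > 0) extend(rows - 1);
}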
-
-static unsigned int read_partition_size(VP8D_COMP *pbi,
-                                        const unsigned char *cx_size)
-{
-    unsigned char temp[3];
-    if (pbi->decrypt_cb)
-    {
-        pbi->decrypt_cb(pbi->decrypt_state, cx_size, temp, 3);
-        cx_size = temp;
-    }
-    return cx_size[0] + (cx_size[1] << 8) + (cx_size[2] << 16);
-}
-
-static int read_is_valid(const unsigned char *start,
-                         size_t               len,
-                         const unsigned char *end)
-{
-    return (start + len > start && start + len <= end);
-}
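read_is_valid() rejects zero-length reads and reads past `end`, but its `start + len > start` guard relies on pointer arithmetic wrapping, which is undefined behavior in C. A hedged, overflow-free equivalent, assuming start <= end on entry as the callers arrange:

#include <stddef.h>

/* Same predicate expressed as a length comparison: no pointer sum is
 * formed, so nothing can wrap. */
static int read_is_valid_sketch(const unsigned char *start, size_t len,
                                const unsigned char *end)
{
    return len > 0 && len <= (size_t)(end - start);
}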
-
-static unsigned int read_available_partition_size(
-                                       VP8D_COMP *pbi,
-                                       const unsigned char *token_part_sizes,
-                                       const unsigned char *fragment_start,
-                                       const unsigned char *first_fragment_end,
-                                       const unsigned char *fragment_end,
-                                       int i,
-                                       int num_part)
-{
-    VP8_COMMON* pc = &pbi->common;
-    const unsigned char *partition_size_ptr = token_part_sizes + i * 3;
-    unsigned int partition_size = 0;
-    ptrdiff_t bytes_left = fragment_end - fragment_start;
-    /* Calculate the length of this partition. The last partition
-     * size is implicit. If the partition size can't be read, then
-     * either use the remaining data in the buffer (for EC mode)
-     * or throw an error.
-     */
-    if (i < num_part - 1)
-    {
-        if (read_is_valid(partition_size_ptr, 3, first_fragment_end))
-            partition_size = read_partition_size(pbi, partition_size_ptr);
-        else if (pbi->ec_active)
-            partition_size = (unsigned int)bytes_left;
-        else
-            vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
-                               "Truncated partition size data");
-    }
-    else
-        partition_size = (unsigned int)bytes_left;
-
-    /* Validate the calculated partition length. If the buffer
-     * described by the partition can't be fully read, then restrict
-     * it to the portion that can be (for EC mode) or throw an error.
-     */
-    if (!read_is_valid(fragment_start, partition_size, fragment_end))
-    {
-        if (pbi->ec_active)
-            partition_size = (unsigned int)bytes_left;
-        else
-            vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
-                               "Truncated packet or corrupt partition "
-                               "%d length", i + 1);
-    }
-    return partition_size;
-}
-
-
-static void setup_token_decoder(VP8D_COMP *pbi,
-                                const unsigned char* token_part_sizes)
-{
-    vp8_reader *bool_decoder = &pbi->mbc[0];
-    unsigned int partition_idx;
-    unsigned int fragment_idx;
-    unsigned int num_token_partitions;
-    const unsigned char *first_fragment_end = pbi->fragments.ptrs[0] +
-                                          pbi->fragments.sizes[0];
-
-    TOKEN_PARTITION multi_token_partition =
-            (TOKEN_PARTITION)vp8_read_literal(&pbi->mbc[8], 2);
-    if (!vp8dx_bool_error(&pbi->mbc[8]))
-        pbi->common.multi_token_partition = multi_token_partition;
-    num_token_partitions = 1 << pbi->common.multi_token_partition;
-
-    /* Check for partitions within the fragments and unpack the fragments
-     * so that each fragment pointer points to its corresponding partition. */
-    for (fragment_idx = 0; fragment_idx < pbi->fragments.count; ++fragment_idx)
-    {
-        unsigned int fragment_size = pbi->fragments.sizes[fragment_idx];
-        const unsigned char *fragment_end = pbi->fragments.ptrs[fragment_idx] +
-                                            fragment_size;
-        /* Special case for handling the first partition since we have already
-         * read its size. */
-        if (fragment_idx == 0)
-        {
-            /* Size of first partition + token partition sizes element */
-            ptrdiff_t ext_first_part_size = token_part_sizes -
-                pbi->fragments.ptrs[0] + 3 * (num_token_partitions - 1);
-            fragment_size -= (unsigned int)ext_first_part_size;
-            if (fragment_size > 0)
-            {
-                pbi->fragments.sizes[0] = (unsigned int)ext_first_part_size;
-                /* The fragment contains an additional partition. Move to
-                 * next. */
-                fragment_idx++;
-                pbi->fragments.ptrs[fragment_idx] = pbi->fragments.ptrs[0] +
-                  pbi->fragments.sizes[0];
-            }
-        }
-        /* Split the chunk into partitions read from the bitstream */
-        while (fragment_size > 0)
-        {
-            ptrdiff_t partition_size = read_available_partition_size(
-                                                 pbi,
-                                                 token_part_sizes,
-                                                 pbi->fragments.ptrs[fragment_idx],
-                                                 first_fragment_end,
-                                                 fragment_end,
-                                                 fragment_idx - 1,
-                                                 num_token_partitions);
-            pbi->fragments.sizes[fragment_idx] = (unsigned int)partition_size;
-            fragment_size -= (unsigned int)partition_size;
-            assert(fragment_idx <= num_token_partitions);
-            if (fragment_size > 0)
-            {
-                /* The fragment contains an additional partition.
-                 * Move to next. */
-                fragment_idx++;
-                pbi->fragments.ptrs[fragment_idx] =
-                    pbi->fragments.ptrs[fragment_idx - 1] + partition_size;
-            }
-        }
-    }
-
-    pbi->fragments.count = num_token_partitions + 1;
-
-    for (partition_idx = 1; partition_idx < pbi->fragments.count; ++partition_idx)
-    {
-        if (vp8dx_start_decode(bool_decoder,
-                               pbi->fragments.ptrs[partition_idx],
-                               pbi->fragments.sizes[partition_idx],
-                               pbi->decrypt_cb, pbi->decrypt_state))
-            vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,
-                               "Failed to allocate bool decoder %d",
-                               partition_idx);
-
-        bool_decoder++;
-    }
-
-#if CONFIG_MULTITHREAD
-    /* Clamp number of decoder threads */
-    if (pbi->decoding_thread_count > num_token_partitions - 1)
-        pbi->decoding_thread_count = num_token_partitions - 1;
-#endif
-}
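The partition layout handled above is: the first partition (modes and motion vectors), then (num_token_partitions - 1) sizes stored as 3-byte little-endian values, then the token partitions themselves, with the last partition's size implied by the bytes that remain. A small sketch of reading the i-th stored size (names are illustrative):

/* Read the 3-byte little-endian size of token partition i from the size
 * table that follows the first partition. */
static unsigned int token_partition_size_sketch(const unsigned char *sizes,
                                                int i)
{
    const unsigned char *p = sizes + 3 * i;
    return (unsigned int)p[0] | ((unsigned int)p[1] << 8) |
           ((unsigned int)p[2] << 16);
}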
-
-
-static void init_frame(VP8D_COMP *pbi)
-{
-    VP8_COMMON *const pc = & pbi->common;
-    MACROBLOCKD *const xd  = & pbi->mb;
-
-    if (pc->frame_type == KEY_FRAME)
-    {
-        /* Various keyframe initializations */
-        vpx_memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
-
-        vp8_init_mbmode_probs(pc);
-
-        vp8_default_coef_probs(pc);
-
-        /* reset the segment feature data to 0 with delta coding (Default state). */
-        vpx_memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
-        xd->mb_segement_abs_delta = SEGMENT_DELTADATA;
-
-        /* reset the mode ref deltas for the loop filter */
-        vpx_memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
-        vpx_memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
-
-        /* All buffers are implicitly updated on key frames. */
-        pc->refresh_golden_frame = 1;
-        pc->refresh_alt_ref_frame = 1;
-        pc->copy_buffer_to_gf = 0;
-        pc->copy_buffer_to_arf = 0;
-
-        /* Note that Golden and Altref modes cannot be used on a key frame so
-         * ref_frame_sign_bias[] is undefined and meaningless
-         */
-        pc->ref_frame_sign_bias[GOLDEN_FRAME] = 0;
-        pc->ref_frame_sign_bias[ALTREF_FRAME] = 0;
-    }
-    else
-    {
-        /* To enable the choice of different interpolation filters */
-        if (!pc->use_bilinear_mc_filter)
-        {
-            xd->subpixel_predict        = vp8_sixtap_predict4x4;
-            xd->subpixel_predict8x4     = vp8_sixtap_predict8x4;
-            xd->subpixel_predict8x8     = vp8_sixtap_predict8x8;
-            xd->subpixel_predict16x16   = vp8_sixtap_predict16x16;
-        }
-        else
-        {
-            xd->subpixel_predict        = vp8_bilinear_predict4x4;
-            xd->subpixel_predict8x4     = vp8_bilinear_predict8x4;
-            xd->subpixel_predict8x8     = vp8_bilinear_predict8x8;
-            xd->subpixel_predict16x16   = vp8_bilinear_predict16x16;
-        }
-
-        if (pbi->decoded_key_frame && pbi->ec_enabled && !pbi->ec_active)
-            pbi->ec_active = 1;
-    }
-
-    xd->left_context = &pc->left_context;
-    xd->mode_info_context = pc->mi;
-    xd->frame_type = pc->frame_type;
-    xd->mode_info_context->mbmi.mode = DC_PRED;
-    xd->mode_info_stride = pc->mode_info_stride;
-    xd->corrupted = 0; /* init without corruption */
-
-    xd->fullpixel_mask = 0xffffffff;
-    if(pc->full_pixel)
-        xd->fullpixel_mask = 0xfffffff8;
-
-}
-
-int vp8_decode_frame(VP8D_COMP *pbi)
-{
-    vp8_reader *const bc = &pbi->mbc[8];
-    VP8_COMMON *const pc = &pbi->common;
-    MACROBLOCKD *const xd  = &pbi->mb;
-    const unsigned char *data = pbi->fragments.ptrs[0];
-    const unsigned char *data_end =  data + pbi->fragments.sizes[0];
-    ptrdiff_t first_partition_length_in_bytes;
-
-    int i, j, k, l;
-    const int *const mb_feature_data_bits = vp8_mb_feature_data_bits;
-    int corrupt_tokens = 0;
-    int prev_independent_partitions = pbi->independent_partitions;
-
-    YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME];
-
-    /* start with no corruption of current frame */
-    xd->corrupted = 0;
-    yv12_fb_new->corrupted = 0;
-
-    if (data_end - data < 3)
-    {
-        if (!pbi->ec_active)
-        {
-            vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
-                               "Truncated packet");
-        }
-
-        /* Declare the missing frame as an inter frame since it will
-           be handled as an inter frame when we have estimated its
-           motion vectors. */
-        pc->frame_type = INTER_FRAME;
-        pc->version = 0;
-        pc->show_frame = 1;
-        first_partition_length_in_bytes = 0;
-    }
-    else
-    {
-        unsigned char clear_buffer[10];
-        const unsigned char *clear = data;
-        if (pbi->decrypt_cb)
-        {
-            int n = (int)MIN(sizeof(clear_buffer), data_end - data);
-            pbi->decrypt_cb(pbi->decrypt_state, data, clear_buffer, n);
-            clear = clear_buffer;
-        }
-
-        pc->frame_type = (FRAME_TYPE)(clear[0] & 1);
-        pc->version = (clear[0] >> 1) & 7;
-        pc->show_frame = (clear[0] >> 4) & 1;
-        first_partition_length_in_bytes =
-            (clear[0] | (clear[1] << 8) | (clear[2] << 16)) >> 5;
-
-        if (!pbi->ec_active &&
-            (data + first_partition_length_in_bytes > data_end
-            || data + first_partition_length_in_bytes < data))
-            vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
-                               "Truncated packet or corrupt partition 0 length");
-
-        data += 3;
-        clear += 3;
-
-        vp8_setup_version(pc);
-
-
-        if (pc->frame_type == KEY_FRAME)
-        {
-            /* vet via sync code */
-            /* When error concealment is enabled we should only check the sync
-             * code if we have enough bits available
-             */
-            if (!pbi->ec_active || data + 3 < data_end)
-            {
-                if (clear[0] != 0x9d || clear[1] != 0x01 || clear[2] != 0x2a)
-                    vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM,
-                                   "Invalid frame sync code");
-            }
-
-            /* If error concealment is enabled we should only parse the new size
-             * if we have enough data. Otherwise we will end up with the wrong
-             * size.
-             */
-            if (!pbi->ec_active || data + 6 < data_end)
-            {
-                pc->Width = (clear[3] | (clear[4] << 8)) & 0x3fff;
-                pc->horiz_scale = clear[4] >> 6;
-                pc->Height = (clear[5] | (clear[6] << 8)) & 0x3fff;
-                pc->vert_scale = clear[6] >> 6;
-            }
-            data += 7;
-            clear += 7;
-        }
-        else
-        {
-          vpx_memcpy(&xd->pre, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
-          vpx_memcpy(&xd->dst, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
-        }
-    }
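For reference, the bytes parsed above form VP8's fixed-layout frame tag: a key-frame bit in the low bit (0 means key frame), a 3-bit version, a show_frame bit, and 19 bits of first-partition size; key frames then add the 3-byte sync code (0x9d 0x01 0x2a) and two 16-bit size words carrying 14-bit dimensions plus 2-bit upscale factors. A self-contained sketch of both decodes (struct and helper names are illustrative):

typedef struct {
    int key_frame;                 /* low bit 0 => key frame */
    int version;
    int show_frame;
    unsigned int first_part_size;  /* bits 5..23 */
} frame_tag_sketch;

static frame_tag_sketch parse_frame_tag_sketch(const unsigned char c[3])
{
    const unsigned int raw = (unsigned int)c[0] |
                             ((unsigned int)c[1] << 8) |
                             ((unsigned int)c[2] << 16);
    frame_tag_sketch t;
    t.key_frame = !(raw & 1);
    t.version = (int)((raw >> 1) & 7);
    t.show_frame = (int)((raw >> 4) & 1);
    t.first_part_size = raw >> 5;
    return t;
}

/* p points at the two size words that follow the key-frame sync code. */
static void parse_key_frame_size_sketch(const unsigned char *p, int *w,
                                        int *hscale, int *h, int *vscale)
{
    *w = (p[0] | (p[1] << 8)) & 0x3fff;
    *hscale = p[1] >> 6;
    *h = (p[2] | (p[3] << 8)) & 0x3fff;
    *vscale = p[3] >> 6;
}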
-    if ((!pbi->decoded_key_frame && pc->frame_type != KEY_FRAME))
-    {
-        return -1;
-    }
-
-    init_frame(pbi);
-
-    if (vp8dx_start_decode(bc, data, (unsigned int)(data_end - data),
-                           pbi->decrypt_cb, pbi->decrypt_state))
-        vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
-                           "Failed to allocate bool decoder 0");
-    if (pc->frame_type == KEY_FRAME) {
-        (void)vp8_read_bit(bc);  // colorspace
-        pc->clamp_type  = (CLAMP_TYPE)vp8_read_bit(bc);
-    }
-
-    /* Is segmentation enabled */
-    xd->segmentation_enabled = (unsigned char)vp8_read_bit(bc);
-
-    if (xd->segmentation_enabled)
-    {
-        /* Signal whether or not the segmentation map is being explicitly updated this frame. */
-        xd->update_mb_segmentation_map = (unsigned char)vp8_read_bit(bc);
-        xd->update_mb_segmentation_data = (unsigned char)vp8_read_bit(bc);
-
-        if (xd->update_mb_segmentation_data)
-        {
-            xd->mb_segement_abs_delta = (unsigned char)vp8_read_bit(bc);
-
-            vpx_memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
-
-            /* For each segmentation feature (Quant and loop filter level) */
-            for (i = 0; i < MB_LVL_MAX; i++)
-            {
-                for (j = 0; j < MAX_MB_SEGMENTS; j++)
-                {
-                    /* Frame level data */
-                    if (vp8_read_bit(bc))
-                    {
-                        xd->segment_feature_data[i][j] = (signed char)vp8_read_literal(bc, mb_feature_data_bits[i]);
-
-                        if (vp8_read_bit(bc))
-                            xd->segment_feature_data[i][j] = -xd->segment_feature_data[i][j];
-                    }
-                    else
-                        xd->segment_feature_data[i][j] = 0;
-                }
-            }
-        }
-
-        if (xd->update_mb_segmentation_map)
-        {
-            /* Which macro block level features are enabled */
-            vpx_memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
-
-            /* Read the probs used to decode the segment id for each macro block. */
-            for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
-            {
-                /* If not explicitly set, the value defaults to 255 via the memset above */
-                if (vp8_read_bit(bc))
-                    xd->mb_segment_tree_probs[i] = (vp8_prob)vp8_read_literal(bc, 8);
-            }
-        }
-    }
-    else
-    {
-        /* No segmentation updates on this frame */
-        xd->update_mb_segmentation_map = 0;
-        xd->update_mb_segmentation_data = 0;
-    }
-
-    /* Read the loop filter level and type */
-    pc->filter_type = (LOOPFILTERTYPE) vp8_read_bit(bc);
-    pc->filter_level = vp8_read_literal(bc, 6);
-    pc->sharpness_level = vp8_read_literal(bc, 3);
-
-    /* Read in loop filter deltas applied at the MB level based on mode or ref frame. */
-    xd->mode_ref_lf_delta_update = 0;
-    xd->mode_ref_lf_delta_enabled = (unsigned char)vp8_read_bit(bc);
-
-    if (xd->mode_ref_lf_delta_enabled)
-    {
-        /* Do the deltas need to be updated */
-        xd->mode_ref_lf_delta_update = (unsigned char)vp8_read_bit(bc);
-
-        if (xd->mode_ref_lf_delta_update)
-        {
-            /* Send update */
-            for (i = 0; i < MAX_REF_LF_DELTAS; i++)
-            {
-                if (vp8_read_bit(bc))
-                {
-                    /*sign = vp8_read_bit( bc );*/