Bug 1148639 - Update libvpx. r=kinetik
author Ralph Giles <giles@mozilla.com>
Fri, 27 Mar 2015 14:56:04 -0700
changeset 269557 7cfa6632f4cef96133012ca2c9746ec776a18d79
parent 269556 8f2814bc943b4a6d99bbaa3420dd86ac404dd43d
child 269558 7a16421424ad24c62ebda5a54a1f038597992648
push id 4830
push user jlund@mozilla.com
push date Mon, 29 Jun 2015 20:18:48 +0000
treeherder mozilla-beta@4c2175bb0420
reviewers kinetik
bugs 1148639
milestone 40.0a1
Bug 1148639 - Update libvpx. r=kinetik

From b40321d9379faaa62fa16d6d412098fca115042d Mon Sep 17 00:00:00 2001

Result of running ./update.py --ndk ~/android/android-ndk-r9 --commit afad1a84c15b9af8298a37c0fa449e0af40931fd
media/libvpx/PATENTS
media/libvpx/README_MOZILLA
media/libvpx/build/make/obj_int_extract.c
media/libvpx/build/make/thumb.pm
media/libvpx/sources.mozbuild
media/libvpx/third_party/x86inc/x86inc.asm
media/libvpx/vp8/common/arm/armv6/vp8_variance16x16_armv6.asm
media/libvpx/vp8/common/arm/armv6/vp8_variance8x8_armv6.asm
media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm
media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm
media/libvpx/vp8/common/arm/dequantize_arm.c
media/libvpx/vp8/common/arm/loopfilter_arm.c
media/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c
media/libvpx/vp8/common/arm/neon/buildintrapredictorsmby_neon.asm
media/libvpx/vp8/common/arm/neon/copymem16x16_neon.asm
media/libvpx/vp8/common/arm/neon/copymem8x4_neon.asm
media/libvpx/vp8/common/arm/neon/copymem8x8_neon.asm
media/libvpx/vp8/common/arm/neon/copymem_neon.c
media/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.asm
media/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c
media/libvpx/vp8/common/arm/neon/dequant_idct_neon.asm
media/libvpx/vp8/common/arm/neon/dequant_idct_neon.c
media/libvpx/vp8/common/arm/neon/dequantizeb_neon.asm
media/libvpx/vp8/common/arm/neon/dequantizeb_neon.c
media/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.asm
media/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.c
media/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.asm
media/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.c
media/libvpx/vp8/common/arm/neon/iwalsh_neon.asm
media/libvpx/vp8/common/arm/neon/iwalsh_neon.c
media/libvpx/vp8/common/arm/neon/loopfilter_neon.asm
media/libvpx/vp8/common/arm/neon/loopfilter_neon.c
media/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.asm
media/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c
media/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.asm
media/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c
media/libvpx/vp8/common/arm/neon/mbloopfilter_neon.asm
media/libvpx/vp8/common/arm/neon/mbloopfilter_neon.c
media/libvpx/vp8/common/arm/neon/reconintra_neon.c
media/libvpx/vp8/common/arm/neon/sad16_neon.asm
media/libvpx/vp8/common/arm/neon/sad8_neon.asm
media/libvpx/vp8/common/arm/neon/sad_neon.c
media/libvpx/vp8/common/arm/neon/save_reg_neon.asm
media/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.asm
media/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.c
media/libvpx/vp8/common/arm/neon/sixtappredict16x16_neon.asm
media/libvpx/vp8/common/arm/neon/sixtappredict4x4_neon.asm
media/libvpx/vp8/common/arm/neon/sixtappredict8x4_neon.asm
media/libvpx/vp8/common/arm/neon/sixtappredict8x8_neon.asm
media/libvpx/vp8/common/arm/neon/sixtappredict_neon.c
media/libvpx/vp8/common/arm/neon/variance_neon.asm
media/libvpx/vp8/common/arm/neon/variance_neon.c
media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16s_neon.asm
media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm
media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance_neon.c
media/libvpx/vp8/common/arm/reconintra_arm.c
media/libvpx/vp8/common/common.h
media/libvpx/vp8/common/loopfilter.c
media/libvpx/vp8/common/onyx.h
media/libvpx/vp8/common/postproc.c
media/libvpx/vp8/common/postproc.h
media/libvpx/vp8/common/pragmas.h
media/libvpx/vp8/common/x86/loopfilter_block_sse2.asm
media/libvpx/vp8/common/x86/loopfilter_block_sse2_x86_64.asm
media/libvpx/vp8/common/x86/loopfilter_mmx.asm
media/libvpx/vp8/common/x86/loopfilter_sse2.asm
media/libvpx/vp8/common/x86/postproc_mmx.asm
media/libvpx/vp8/common/x86/postproc_sse2.asm
media/libvpx/vp8/common/x86/postproc_x86.c
media/libvpx/vp8/common/x86/recon_sse2.asm
media/libvpx/vp8/common/x86/variance_impl_mmx.asm
media/libvpx/vp8/common/x86/variance_mmx.c
media/libvpx/vp8/common/x86/variance_sse2.c
media/libvpx/vp8/common/x86/variance_ssse3.c
media/libvpx/vp8/decoder/dboolhuff.c
media/libvpx/vp8/decoder/dboolhuff.h
media/libvpx/vp8/decoder/decodeframe.c
media/libvpx/vp8/decoder/error_concealment.c
media/libvpx/vp8/decoder/onyxd_if.c
media/libvpx/vp8/decoder/onyxd_int.h
media/libvpx/vp8/encoder/arm/neon/denoising_neon.c
media/libvpx/vp8/encoder/arm/neon/picklpf_arm.c
media/libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm
media/libvpx/vp8/encoder/arm/neon/shortfdct_neon.c
media/libvpx/vp8/encoder/arm/neon/subtract_neon.asm
media/libvpx/vp8/encoder/arm/neon/subtract_neon.c
media/libvpx/vp8/encoder/arm/neon/vp8_memcpy_neon.asm
media/libvpx/vp8/encoder/arm/neon/vp8_mse16x16_neon.asm
media/libvpx/vp8/encoder/arm/neon/vp8_mse16x16_neon.c
media/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.asm
media/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.c
media/libvpx/vp8/encoder/bitstream.c
media/libvpx/vp8/encoder/bitstream.h
media/libvpx/vp8/encoder/block.h
media/libvpx/vp8/encoder/boolhuff.h
media/libvpx/vp8/encoder/denoising.c
media/libvpx/vp8/encoder/denoising.h
media/libvpx/vp8/encoder/encodeframe.c
media/libvpx/vp8/encoder/encodemb.c
media/libvpx/vp8/encoder/ethreading.c
media/libvpx/vp8/encoder/firstpass.c
media/libvpx/vp8/encoder/mcomp.c
media/libvpx/vp8/encoder/mr_dissim.c
media/libvpx/vp8/encoder/onyx_if.c
media/libvpx/vp8/encoder/onyx_int.h
media/libvpx/vp8/encoder/pickinter.c
media/libvpx/vp8/encoder/picklpf.c
media/libvpx/vp8/encoder/psnr.c
media/libvpx/vp8/encoder/psnr.h
media/libvpx/vp8/encoder/quantize.c
media/libvpx/vp8/encoder/rdopt.c
media/libvpx/vp8/encoder/rdopt.h
media/libvpx/vp8/encoder/temporal_filter.c
media/libvpx/vp8/encoder/tokenize.c
media/libvpx/vp8/encoder/x86/denoising_sse2.c
media/libvpx/vp8/encoder/x86/quantize_sse2.c
media/libvpx/vp8/encoder/x86/quantize_sse4.asm
media/libvpx/vp8/encoder/x86/quantize_sse4.c
media/libvpx/vp8/encoder/x86/quantize_ssse3.asm
media/libvpx/vp8/encoder/x86/quantize_ssse3.c
media/libvpx/vp8/encoder/x86/ssim_opt_x86_64.asm
media/libvpx/vp8/vp8_cx_iface.c
media/libvpx/vp8/vp8_dx_iface.c
media/libvpx/vp8_rtcd_armv7-android-gcc.h
media/libvpx/vp8_rtcd_generic-gnu.h
media/libvpx/vp8_rtcd_x86-darwin9-gcc.h
media/libvpx/vp8_rtcd_x86-linux-gcc.h
media/libvpx/vp8_rtcd_x86-win32-gcc.h
media/libvpx/vp8_rtcd_x86-win32-vs12.h
media/libvpx/vp8_rtcd_x86_64-darwin9-gcc.h
media/libvpx/vp8_rtcd_x86_64-linux-gcc.h
media/libvpx/vp8_rtcd_x86_64-win64-gcc.h
media/libvpx/vp8_rtcd_x86_64-win64-vs12.h
media/libvpx/vp9/common/arm/neon/vp9_convolve_neon.c
media/libvpx/vp9/common/arm/neon/vp9_idct32x32_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct8x8_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_iht8x8_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_loopfilter_16_neon.c
media/libvpx/vp9/common/arm/neon/vp9_reconintra_neon.asm
media/libvpx/vp9/common/generic/vp9_systemdependent.c
media/libvpx/vp9/common/vp9_alloccommon.c
media/libvpx/vp9/common/vp9_alloccommon.h
media/libvpx/vp9/common/vp9_blockd.c
media/libvpx/vp9/common/vp9_blockd.h
media/libvpx/vp9/common/vp9_common.h
media/libvpx/vp9/common/vp9_common_data.c
media/libvpx/vp9/common/vp9_common_data.h
media/libvpx/vp9/common/vp9_convolve.c
media/libvpx/vp9/common/vp9_convolve.h
media/libvpx/vp9/common/vp9_debugmodes.c
media/libvpx/vp9/common/vp9_entropy.c
media/libvpx/vp9/common/vp9_entropy.h
media/libvpx/vp9/common/vp9_entropymode.c
media/libvpx/vp9/common/vp9_entropymode.h
media/libvpx/vp9/common/vp9_entropymv.c
media/libvpx/vp9/common/vp9_entropymv.h
media/libvpx/vp9/common/vp9_enums.h
media/libvpx/vp9/common/vp9_filter.c
media/libvpx/vp9/common/vp9_filter.h
media/libvpx/vp9/common/vp9_frame_buffers.c
media/libvpx/vp9/common/vp9_idct.c
media/libvpx/vp9/common/vp9_idct.h
media/libvpx/vp9/common/vp9_loopfilter.c
media/libvpx/vp9/common/vp9_loopfilter.h
media/libvpx/vp9/common/vp9_loopfilter_filters.c
media/libvpx/vp9/common/vp9_mv.h
media/libvpx/vp9/common/vp9_mvref_common.c
media/libvpx/vp9/common/vp9_mvref_common.h
media/libvpx/vp9/common/vp9_onyx.h
media/libvpx/vp9/common/vp9_onyxc_int.h
media/libvpx/vp9/common/vp9_postproc.c
media/libvpx/vp9/common/vp9_postproc.h
media/libvpx/vp9/common/vp9_ppflags.h
media/libvpx/vp9/common/vp9_pragmas.h
media/libvpx/vp9/common/vp9_pred_common.c
media/libvpx/vp9/common/vp9_pred_common.h
media/libvpx/vp9/common/vp9_prob.c
media/libvpx/vp9/common/vp9_prob.h
media/libvpx/vp9/common/vp9_quant_common.c
media/libvpx/vp9/common/vp9_quant_common.h
media/libvpx/vp9/common/vp9_reconinter.c
media/libvpx/vp9/common/vp9_reconinter.h
media/libvpx/vp9/common/vp9_reconintra.c
media/libvpx/vp9/common/vp9_reconintra.h
media/libvpx/vp9/common/vp9_scale.c
media/libvpx/vp9/common/vp9_scale.h
media/libvpx/vp9/common/vp9_systemdependent.h
media/libvpx/vp9/common/vp9_thread.c
media/libvpx/vp9/common/vp9_thread.h
media/libvpx/vp9/common/vp9_tile_common.c
media/libvpx/vp9/common/vp9_tile_common.h
media/libvpx/vp9/common/x86/vp9_asm_stubs.c
media/libvpx/vp9/common/x86/vp9_copy_sse2.asm
media/libvpx/vp9/common/x86/vp9_high_intrapred_sse2.asm
media/libvpx/vp9/common/x86/vp9_high_loopfilter_intrin_sse2.c
media/libvpx/vp9/common/x86/vp9_high_subpixel_8t_sse2.asm
media/libvpx/vp9/common/x86/vp9_high_subpixel_bilinear_sse2.asm
media/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c
media/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.h
media/libvpx/vp9/common/x86/vp9_idct_intrin_ssse3.c
media/libvpx/vp9/common/x86/vp9_idct_ssse3_x86_64.asm
media/libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c
media/libvpx/vp9/common/x86/vp9_loopfilter_mmx.asm
media/libvpx/vp9/common/x86/vp9_postproc_mmx.asm
media/libvpx/vp9/common/x86/vp9_postproc_sse2.asm
media/libvpx/vp9/common/x86/vp9_postproc_x86.h
media/libvpx/vp9/common/x86/vp9_subpixel_8t_intrin_avx2.c
media/libvpx/vp9/common/x86/vp9_subpixel_8t_intrin_ssse3.c
media/libvpx/vp9/common/x86/vp9_subpixel_8t_ssse3.asm
media/libvpx/vp9/decoder/vp9_decodeframe.c
media/libvpx/vp9/decoder/vp9_decodeframe.h
media/libvpx/vp9/decoder/vp9_decodemv.c
media/libvpx/vp9/decoder/vp9_decodemv.h
media/libvpx/vp9/decoder/vp9_decoder.c
media/libvpx/vp9/decoder/vp9_decoder.h
media/libvpx/vp9/decoder/vp9_detokenize.c
media/libvpx/vp9/decoder/vp9_detokenize.h
media/libvpx/vp9/decoder/vp9_dsubexp.c
media/libvpx/vp9/decoder/vp9_dthread.c
media/libvpx/vp9/decoder/vp9_dthread.h
media/libvpx/vp9/decoder/vp9_onyxd.h
media/libvpx/vp9/decoder/vp9_onyxd_if.c
media/libvpx/vp9/decoder/vp9_onyxd_int.h
media/libvpx/vp9/decoder/vp9_read_bit_buffer.c
media/libvpx/vp9/decoder/vp9_read_bit_buffer.h
media/libvpx/vp9/decoder/vp9_reader.c
media/libvpx/vp9/decoder/vp9_reader.h
media/libvpx/vp9/decoder/vp9_thread.c
media/libvpx/vp9/decoder/vp9_thread.h
media/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_sad_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_subtract_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_variance_neon.c
media/libvpx/vp9/encoder/vp9_aq_complexity.c
media/libvpx/vp9/encoder/vp9_aq_complexity.h
media/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.c
media/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.h
media/libvpx/vp9/encoder/vp9_aq_variance.c
media/libvpx/vp9/encoder/vp9_aq_variance.h
media/libvpx/vp9/encoder/vp9_bitstream.c
media/libvpx/vp9/encoder/vp9_bitstream.h
media/libvpx/vp9/encoder/vp9_block.h
media/libvpx/vp9/encoder/vp9_context_tree.c
media/libvpx/vp9/encoder/vp9_context_tree.h
media/libvpx/vp9/encoder/vp9_cost.c
media/libvpx/vp9/encoder/vp9_cost.h
media/libvpx/vp9/encoder/vp9_dct.c
media/libvpx/vp9/encoder/vp9_denoiser.c
media/libvpx/vp9/encoder/vp9_denoiser.h
media/libvpx/vp9/encoder/vp9_encodeframe.c
media/libvpx/vp9/encoder/vp9_encodeframe.h
media/libvpx/vp9/encoder/vp9_encodemb.c
media/libvpx/vp9/encoder/vp9_encodemb.h
media/libvpx/vp9/encoder/vp9_encodemv.c
media/libvpx/vp9/encoder/vp9_encodemv.h
media/libvpx/vp9/encoder/vp9_encoder.c
media/libvpx/vp9/encoder/vp9_encoder.h
media/libvpx/vp9/encoder/vp9_extend.c
media/libvpx/vp9/encoder/vp9_firstpass.c
media/libvpx/vp9/encoder/vp9_firstpass.h
media/libvpx/vp9/encoder/vp9_lookahead.c
media/libvpx/vp9/encoder/vp9_lookahead.h
media/libvpx/vp9/encoder/vp9_mbgraph.c
media/libvpx/vp9/encoder/vp9_mbgraph.h
media/libvpx/vp9/encoder/vp9_mcomp.c
media/libvpx/vp9/encoder/vp9_mcomp.h
media/libvpx/vp9/encoder/vp9_onyx_if.c
media/libvpx/vp9/encoder/vp9_onyx_int.h
media/libvpx/vp9/encoder/vp9_picklpf.c
media/libvpx/vp9/encoder/vp9_picklpf.h
media/libvpx/vp9/encoder/vp9_pickmode.c
media/libvpx/vp9/encoder/vp9_pickmode.h
media/libvpx/vp9/encoder/vp9_psnr.c
media/libvpx/vp9/encoder/vp9_psnr.h
media/libvpx/vp9/encoder/vp9_quantize.c
media/libvpx/vp9/encoder/vp9_quantize.h
media/libvpx/vp9/encoder/vp9_ratectrl.c
media/libvpx/vp9/encoder/vp9_ratectrl.h
media/libvpx/vp9/encoder/vp9_rd.c
media/libvpx/vp9/encoder/vp9_rd.h
media/libvpx/vp9/encoder/vp9_rdopt.c
media/libvpx/vp9/encoder/vp9_rdopt.h
media/libvpx/vp9/encoder/vp9_resize.c
media/libvpx/vp9/encoder/vp9_resize.h
media/libvpx/vp9/encoder/vp9_sad.c
media/libvpx/vp9/encoder/vp9_segmentation.c
media/libvpx/vp9/encoder/vp9_segmentation.h
media/libvpx/vp9/encoder/vp9_speed_features.c
media/libvpx/vp9/encoder/vp9_speed_features.h
media/libvpx/vp9/encoder/vp9_ssim.h
media/libvpx/vp9/encoder/vp9_subexp.c
media/libvpx/vp9/encoder/vp9_subexp.h
media/libvpx/vp9/encoder/vp9_svc_layercontext.c
media/libvpx/vp9/encoder/vp9_svc_layercontext.h
media/libvpx/vp9/encoder/vp9_temporal_filter.c
media/libvpx/vp9/encoder/vp9_temporal_filter.h
media/libvpx/vp9/encoder/vp9_tokenize.c
media/libvpx/vp9/encoder/vp9_tokenize.h
media/libvpx/vp9/encoder/vp9_treewriter.c
media/libvpx/vp9/encoder/vp9_treewriter.h
media/libvpx/vp9/encoder/vp9_vaq.c
media/libvpx/vp9/encoder/vp9_vaq.h
media/libvpx/vp9/encoder/vp9_variance.c
media/libvpx/vp9/encoder/vp9_variance.h
media/libvpx/vp9/encoder/vp9_write_bit_buffer.c
media/libvpx/vp9/encoder/vp9_write_bit_buffer.h
media/libvpx/vp9/encoder/vp9_writer.c
media/libvpx/vp9/encoder/vp9_writer.h
media/libvpx/vp9/encoder/x86/vp9_dct32x32_sse2.c
media/libvpx/vp9/encoder/x86/vp9_dct_avx2.c
media/libvpx/vp9/encoder/x86/vp9_dct_mmx.asm
media/libvpx/vp9/encoder/x86/vp9_dct_sse2.c
media/libvpx/vp9/encoder/x86/vp9_dct_ssse3_x86_64.asm
media/libvpx/vp9/encoder/x86/vp9_error_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_mcomp_x86.h
media/libvpx/vp9/encoder/x86/vp9_quantize_ssse3.asm
media/libvpx/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
media/libvpx/vp9/encoder/x86/vp9_sad4d_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_sad_mmx.asm
media/libvpx/vp9/encoder/x86/vp9_ssim_opt_x86_64.asm
media/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_temporal_filter_apply_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_variance_avx2.c
media/libvpx/vp9/encoder/x86/vp9_variance_impl_mmx.asm
media/libvpx/vp9/encoder/x86/vp9_variance_impl_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_variance_mmx.c
media/libvpx/vp9/encoder/x86/vp9_variance_sse2.c
media/libvpx/vp9/vp9_cx_iface.c
media/libvpx/vp9/vp9_dx_iface.c
media/libvpx/vp9/vp9_iface_common.h
media/libvpx/vp9_rtcd_armv7-android-gcc.h
media/libvpx/vp9_rtcd_generic-gnu.h
media/libvpx/vp9_rtcd_x86-darwin9-gcc.h
media/libvpx/vp9_rtcd_x86-linux-gcc.h
media/libvpx/vp9_rtcd_x86-win32-gcc.h
media/libvpx/vp9_rtcd_x86-win32-vs12.h
media/libvpx/vp9_rtcd_x86_64-darwin9-gcc.h
media/libvpx/vp9_rtcd_x86_64-linux-gcc.h
media/libvpx/vp9_rtcd_x86_64-win64-gcc.h
media/libvpx/vp9_rtcd_x86_64-win64-vs12.h
media/libvpx/vpx/internal/vpx_codec_internal.h
media/libvpx/vpx/internal/vpx_psnr.h
media/libvpx/vpx/src/svc_encodeframe.c
media/libvpx/vpx/src/vpx_codec.c
media/libvpx/vpx/src/vpx_decoder.c
media/libvpx/vpx/src/vpx_encoder.c
media/libvpx/vpx/src/vpx_image.c
media/libvpx/vpx/src/vpx_psnr.c
media/libvpx/vpx/svc_context.h
media/libvpx/vpx/vp8cx.h
media/libvpx/vpx/vp8dx.h
media/libvpx/vpx/vpx_codec.h
media/libvpx/vpx/vpx_decoder.h
media/libvpx/vpx/vpx_encoder.h
media/libvpx/vpx/vpx_frame_buffer.h
media/libvpx/vpx/vpx_image.h
media/libvpx/vpx/vpx_integer.h
media/libvpx/vpx_config_armv7-android-gcc.asm
media/libvpx/vpx_config_armv7-android-gcc.h
media/libvpx/vpx_config_generic-gnu.asm
media/libvpx/vpx_config_generic-gnu.h
media/libvpx/vpx_config_x86-darwin9-gcc.asm
media/libvpx/vpx_config_x86-darwin9-gcc.h
media/libvpx/vpx_config_x86-linux-gcc.asm
media/libvpx/vpx_config_x86-linux-gcc.h
media/libvpx/vpx_config_x86-win32-gcc.asm
media/libvpx/vpx_config_x86-win32-gcc.h
media/libvpx/vpx_config_x86-win32-vs12.asm
media/libvpx/vpx_config_x86-win32-vs12.h
media/libvpx/vpx_config_x86_64-darwin9-gcc.asm
media/libvpx/vpx_config_x86_64-darwin9-gcc.h
media/libvpx/vpx_config_x86_64-linux-gcc.asm
media/libvpx/vpx_config_x86_64-linux-gcc.h
media/libvpx/vpx_config_x86_64-win64-gcc.asm
media/libvpx/vpx_config_x86_64-win64-gcc.h
media/libvpx/vpx_config_x86_64-win64-vs12.asm
media/libvpx/vpx_config_x86_64-win64-vs12.h
media/libvpx/vpx_mem/vpx_mem.c
media/libvpx/vpx_mem/vpx_mem.h
media/libvpx/vpx_ports/arm.h
media/libvpx/vpx_ports/arm_cpudetect.c
media/libvpx/vpx_ports/mem.h
media/libvpx/vpx_ports/mem_ops.h
media/libvpx/vpx_ports/mem_ops_aligned.h
media/libvpx/vpx_ports/vpx_once.h
media/libvpx/vpx_ports/vpx_timer.h
media/libvpx/vpx_ports/x86.h
media/libvpx/vpx_ports/x86_abi_support.asm
media/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm
media/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm
media/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copysrcframe_func_neon.asm
media/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm
media/libvpx/vpx_scale/arm/neon/yv12extend_arm.c
media/libvpx/vpx_scale/generic/yv12config.c
media/libvpx/vpx_scale/generic/yv12extend.c
media/libvpx/vpx_scale/yv12config.h
media/libvpx/vpx_scale_rtcd_armv7-android-gcc.h
media/libvpx/vpx_scale_rtcd_generic-gnu.h
media/libvpx/vpx_scale_rtcd_x86-darwin9-gcc.h
media/libvpx/vpx_scale_rtcd_x86-linux-gcc.h
media/libvpx/vpx_scale_rtcd_x86-win32-gcc.h
media/libvpx/vpx_scale_rtcd_x86-win32-vs12.h
media/libvpx/vpx_scale_rtcd_x86_64-darwin9-gcc.h
media/libvpx/vpx_scale_rtcd_x86_64-linux-gcc.h
media/libvpx/vpx_scale_rtcd_x86_64-win64-gcc.h
media/libvpx/vpx_scale_rtcd_x86_64-win64-vs12.h
media/libvpx/vpx_version.h
--- a/media/libvpx/PATENTS
+++ b/media/libvpx/PATENTS
@@ -1,23 +1,22 @@
 Additional IP Rights Grant (Patents)
-------------------------------------
 
-"These implementations" means the copyrightable works that implement the WebM
-codecs distributed by Google as part of the WebM Project.
+"This implementation" means the copyrightable works distributed by
+Google as part of the WebM Project.
 
-Google hereby grants to you a perpetual, worldwide, non-exclusive, no-charge,
-royalty-free, irrevocable (except as stated in this section) patent license to
-make, have made, use, offer to sell, sell, import, transfer, and otherwise
-run, modify and propagate the contents of these implementations of WebM, where
-such license applies only to those patent claims, both currently owned by
-Google and acquired in the future, licensable by Google that are necessarily
-infringed by these implementations of WebM. This grant does not include claims
-that would be infringed only as a consequence of further modification of these
-implementations. If you or your agent or exclusive licensee institute or order
-or agree to the institution of patent litigation or any other patent
-enforcement activity against any entity (including a cross-claim or
-counterclaim in a lawsuit) alleging that any of these implementations of WebM
-or any code incorporated within any of these implementations of WebM
-constitutes direct or contributory patent infringement, or inducement of
-patent infringement, then any patent rights granted to you under this License
-for these implementations of WebM shall terminate as of the date such
-litigation is filed.
+Google hereby grants to you a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer, and otherwise run, modify and propagate the contents of this
+implementation of VP8, where such license applies only to those patent
+claims, both currently owned by Google and acquired in the future,
+licensable by Google that are necessarily infringed by this
+implementation of VP8. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of VP8 or any code incorporated within this
+implementation of VP8 constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of VP8
+shall terminate as of the date such litigation is filed.
--- a/media/libvpx/README_MOZILLA
+++ b/media/libvpx/README_MOZILLA
@@ -3,9 +3,9 @@ git repository using the update.py scrip
 made were those applied by update.py and the addition of
 moz.build and Makefile.in build files for the
 Mozilla build system.
 
 The libvpx git repository is:
 
     https://gerrit.chromium.org/gerrit/webm/libvpx
 
-The git commit ID used was 587ff646f
+The git commit ID used was afad1a84c15b9af8298a37c0fa449e0af40931fd
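
The README pins the upstream revision the import was generated from. As a
purely illustrative aside (this helper is hypothetical, not part of
update.py), the recorded ID can be checked against a local libvpx clone:

    import re, subprocess

    readme = open('media/libvpx/README_MOZILLA').read()
    recorded = re.search(r'git commit ID used was (\w+)', readme).group(1)
    head = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
                                   cwd='libvpx').decode().strip()
    assert head.startswith(recorded), (recorded, head)
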
--- a/media/libvpx/build/make/obj_int_extract.c
+++ b/media/libvpx/build/make/obj_int_extract.c
@@ -29,28 +29,16 @@ int log_msg(const char *fmt, ...) {
   va_list ap;
   va_start(ap, fmt);
   res = vfprintf(stderr, fmt, ap);
   va_end(ap);
   return res;
 }
 
 #if defined(__GNUC__) && __GNUC__
-
-#if defined(FORCE_PARSE_ELF)
-
-#if defined(__MACH__)
-#undef __MACH__
-#endif
-
-#if !defined(__ELF__)
-#define __ELF__
-#endif
-#endif
-
 #if defined(__MACH__)
 
 #include <mach-o/loader.h>
 #include <mach-o/nlist.h>
 
 int print_macho_equ(output_fmt_t mode, uint8_t* name, int val) {
   switch (mode) {
     case OUTPUT_FMT_RVDS:
--- a/media/libvpx/build/make/thumb.pm
+++ b/media/libvpx/build/make/thumb.pm
@@ -1,9 +1,9 @@
-#!/usr/bin/env perl
+#!/usr/bin/perl
 ##
 ##  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 ##
 ##  Use of this source code is governed by a BSD-style license
 ##  that can be found in the LICENSE file in the root of the source
 ##  tree. An additional intellectual property rights grant can be found
 ##  in the file PATENTS.  All contributing project authors may
 ##  be found in the AUTHORS file in the root of the source tree.
@@ -46,17 +46,17 @@ sub FixThumbInstructions($$)
     # "addne src, src, pstep, lsl #1". In a couple of cases where
     # this is used, it's used for two subsequent load instructions,
     # where a hand-written version of it could merge two subsequent
     # add and sub instructions.
     s/^(\s*)((ldr|str|pld)(ne)?)(\s+)(r\d+,\s*)?\[(\w+), -([^\]]+)\]/$1sub$4$5$7, $7, $8\n$1$2$5$6\[$7\]\n$1add$4$5$7, $7, $8/g;
 
     # Convert register post indexing to a separate add instruction.
     # This converts "ldrneb r9, [r0], r2" into "ldrneb r9, [r0]",
-    # "addne r0, r0, r2".
+    # "add r0, r2".
     s/^(\s*)((ldr|str)(ne)?[bhd]?)(\s+)(\w+),(\s*\w+,)?\s*\[(\w+)\],\s*(\w+)/$1$2$5$6,$7 [$8]\n$1add$4$5$8, $8, $9/g;
 
     # Convert a conditional addition to the pc register into a series of
     # instructions. This converts "addlt pc, pc, r3, lsl #2" into
     # "itttt lt", "movlt.n r12, pc", "addlt.w r12, #12",
     # "addlt.w r12, r12, r3, lsl #2", "movlt.n pc, r12".
     # This assumes that r12 is free at this point.
     s/^(\s*)addlt(\s+)pc,\s*pc,\s*(\w+),\s*lsl\s*#(\d+)/$1itttt$2lt\n$1movlt.n$2r12, pc\n$1addlt.w$2r12, #12\n$1addlt.w$2r12, r12, $3, lsl #($4-$branch_shift_offset)\n$1movlt.n$2pc, r12/g;
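
The comments above are easiest to read with a worked transform. The sketch
below (Python, an approximation of the Perl rule; it drops the optional
second destination register that the real pattern also accepts) splits
register post-indexing into a separate add, as described:

    import re

    POST_INDEX = re.compile(
        r'^(\s*)((ldr|str)(ne)?[bhd]?)(\s+)(\w+),\s*\[(\w+)\],\s*(\w+)')

    def split_post_index(line):
        def repl(m):
            indent, op, _base, cond, ws, dst, reg, offs = m.groups()
            cond = cond or ''
            # "ldrneb r9, [r0], r2" -> "ldrneb r9, [r0]" + "addne r0, r0, r2"
            return ('%s%s%s%s, [%s]\n%sadd%s%s%s, %s, %s' %
                    (indent, op, ws, dst, reg,
                     indent, cond, ws, reg, reg, offs))
        return POST_INDEX.sub(repl, line)

    print(split_post_index('    ldrneb  r9, [r0], r2'))
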
--- a/media/libvpx/sources.mozbuild
+++ b/media/libvpx/sources.mozbuild
@@ -20,52 +20,64 @@ files = {
              'vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm',
              'vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm',
              'vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm',
              'vp8/common/arm/bilinearfilter_arm.c',
              'vp8/common/arm/dequantize_arm.c',
              'vp8/common/arm/filter_arm.c',
              'vp8/common/arm/loopfilter_arm.c',
              'vp8/common/arm/neon/bilinearpredict_neon.c',
-             'vp8/common/arm/neon/copymem_neon.c',
-             'vp8/common/arm/neon/dc_only_idct_add_neon.c',
-             'vp8/common/arm/neon/dequant_idct_neon.c',
-             'vp8/common/arm/neon/dequantizeb_neon.c',
+             'vp8/common/arm/neon/buildintrapredictorsmby_neon.asm',
+             'vp8/common/arm/neon/copymem16x16_neon.asm',
+             'vp8/common/arm/neon/copymem8x4_neon.asm',
+             'vp8/common/arm/neon/copymem8x8_neon.asm',
+             'vp8/common/arm/neon/dc_only_idct_add_neon.asm',
+             'vp8/common/arm/neon/dequant_idct_neon.asm',
+             'vp8/common/arm/neon/dequantizeb_neon.asm',
              'vp8/common/arm/neon/idct_blk_neon.c',
-             'vp8/common/arm/neon/idct_dequant_0_2x_neon.c',
-             'vp8/common/arm/neon/idct_dequant_full_2x_neon.c',
-             'vp8/common/arm/neon/iwalsh_neon.c',
-             'vp8/common/arm/neon/loopfilter_neon.c',
-             'vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c',
-             'vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c',
-             'vp8/common/arm/neon/mbloopfilter_neon.c',
-             'vp8/common/arm/neon/reconintra_neon.c',
-             'vp8/common/arm/neon/sad_neon.c',
-             'vp8/common/arm/neon/shortidct4x4llm_neon.c',
-             'vp8/common/arm/neon/sixtappredict_neon.c',
-             'vp8/common/arm/neon/variance_neon.c',
-             'vp8/common/arm/neon/vp8_subpixelvariance_neon.c',
+             'vp8/common/arm/neon/idct_dequant_0_2x_neon.asm',
+             'vp8/common/arm/neon/idct_dequant_full_2x_neon.asm',
+             'vp8/common/arm/neon/iwalsh_neon.asm',
+             'vp8/common/arm/neon/loopfilter_neon.asm',
+             'vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.asm',
+             'vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.asm',
+             'vp8/common/arm/neon/mbloopfilter_neon.asm',
+             'vp8/common/arm/neon/sad16_neon.asm',
+             'vp8/common/arm/neon/sad8_neon.asm',
+             'vp8/common/arm/neon/save_reg_neon.asm',
+             'vp8/common/arm/neon/shortidct4x4llm_neon.asm',
+             'vp8/common/arm/neon/sixtappredict16x16_neon.asm',
+             'vp8/common/arm/neon/sixtappredict4x4_neon.asm',
+             'vp8/common/arm/neon/sixtappredict8x4_neon.asm',
+             'vp8/common/arm/neon/sixtappredict8x8_neon.asm',
+             'vp8/common/arm/neon/variance_neon.asm',
+             'vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm',
+             'vp8/common/arm/neon/vp8_subpixelvariance16x16s_neon.asm',
+             'vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm',
+             'vp8/common/arm/reconintra_arm.c',
              'vp8/common/arm/variance_arm.c',
              'vp8/encoder/arm/armv5te/boolhuff_armv5te.asm',
              'vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm',
              'vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm',
              'vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm',
              'vp8/encoder/arm/armv6/vp8_fast_quantize_b_armv6.asm',
              'vp8/encoder/arm/armv6/vp8_mse16x16_armv6.asm',
              'vp8/encoder/arm/armv6/vp8_short_fdct4x4_armv6.asm',
              'vp8/encoder/arm/armv6/vp8_subtract_armv6.asm',
              'vp8/encoder/arm/armv6/walsh_v6.asm',
              'vp8/encoder/arm/boolhuff_arm.c',
              'vp8/encoder/arm/dct_arm.c',
              'vp8/encoder/arm/neon/denoising_neon.c',
              'vp8/encoder/arm/neon/fastquantizeb_neon.asm',
-             'vp8/encoder/arm/neon/shortfdct_neon.c',
-             'vp8/encoder/arm/neon/subtract_neon.c',
-             'vp8/encoder/arm/neon/vp8_mse16x16_neon.c',
-             'vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.c',
+             'vp8/encoder/arm/neon/picklpf_arm.c',
+             'vp8/encoder/arm/neon/shortfdct_neon.asm',
+             'vp8/encoder/arm/neon/subtract_neon.asm',
+             'vp8/encoder/arm/neon/vp8_memcpy_neon.asm',
+             'vp8/encoder/arm/neon/vp8_mse16x16_neon.asm',
+             'vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.asm',
              'vp8/encoder/arm/quantize_arm.c',
              'vp9/common/arm/neon/vp9_avg_neon.asm',
              'vp9/common/arm/neon/vp9_convolve8_avg_neon.asm',
              'vp9/common/arm/neon/vp9_convolve8_neon.asm',
              'vp9/common/arm/neon/vp9_convolve_neon.c',
              'vp9/common/arm/neon/vp9_copy_neon.asm',
              'vp9/common/arm/neon/vp9_dc_only_idct_add_neon.asm',
              'vp9/common/arm/neon/vp9_idct16x16_1_add_neon.asm',
@@ -80,29 +92,26 @@ files = {
              'vp9/common/arm/neon/vp9_iht4x4_add_neon.asm',
              'vp9/common/arm/neon/vp9_iht8x8_add_neon.asm',
              'vp9/common/arm/neon/vp9_loopfilter_16_neon.asm',
              'vp9/common/arm/neon/vp9_loopfilter_16_neon.c',
              'vp9/common/arm/neon/vp9_loopfilter_neon.asm',
              'vp9/common/arm/neon/vp9_mb_lpf_neon.asm',
              'vp9/common/arm/neon/vp9_reconintra_neon.asm',
              'vp9/common/arm/neon/vp9_save_reg_neon.asm',
-             'vp9/encoder/arm/neon/vp9_dct_neon.c',
-             'vp9/encoder/arm/neon/vp9_quantize_neon.c',
-             'vp9/encoder/arm/neon/vp9_sad_neon.c',
-             'vp9/encoder/arm/neon/vp9_subtract_neon.c',
-             'vp9/encoder/arm/neon/vp9_variance_neon.c',
-             'vpx_ports/arm_cpudetect.c'],
+             'vpx_ports/arm_cpudetect.c',
+             'vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm',
+             'vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm',
+             'vpx_scale/arm/neon/vp8_vpxyv12_copysrcframe_func_neon.asm',
+             'vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm',
+             'vpx_scale/arm/neon/yv12extend_arm.c'],
  'AVX2': ['vp9/common/x86/vp9_loopfilter_intrin_avx2.c',
           'vp9/common/x86/vp9_subpixel_8t_intrin_avx2.c',
           'vp9/encoder/x86/vp9_dct32x32_avx2.c',
           'vp9/encoder/x86/vp9_dct_avx2.c',
-          'vp9/encoder/x86/vp9_error_intrin_avx2.c',
-          'vp9/encoder/x86/vp9_sad4d_intrin_avx2.c',
-          'vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c',
           'vp9/encoder/x86/vp9_variance_avx2.c',
           'vp9/encoder/x86/vp9_variance_impl_intrin_avx2.c'],
  'ERROR_CONCEALMENT': ['vp8/decoder/error_concealment.c'],
  'EXPORTS': ['vpx/vp8.h',
              'vpx/vp8cx.h',
              'vpx/vp8dx.h',
              'vpx/vpx_codec.h',
              'vpx/vpx_decoder.h',
@@ -118,16 +127,17 @@ files = {
              'vpx_ports/x86.h',
              'vpx_scale/vpx_scale.h',
              'vpx_scale/yv12config.h'],
  'SOURCES': ['vp8/common/rtcd.c',
              'vp8/common/sad_c.c',
              'vp8/encoder/bitstream.c',
              'vp8/encoder/onyx_if.c',
              'vp8/vp8_dx_iface.c',
+             'vp9/common/generic/vp9_systemdependent.c',
              'vp9/common/vp9_alloccommon.c',
              'vp9/common/vp9_blockd.c',
              'vp9/common/vp9_common_data.c',
              'vp9/common/vp9_convolve.c',
              'vp9/common/vp9_debugmodes.c',
              'vp9/common/vp9_entropy.c',
              'vp9/common/vp9_entropymode.c',
              'vp9/common/vp9_entropymv.c',
@@ -141,58 +151,49 @@ files = {
              'vp9/common/vp9_prob.c',
              'vp9/common/vp9_quant_common.c',
              'vp9/common/vp9_reconinter.c',
              'vp9/common/vp9_reconintra.c',
              'vp9/common/vp9_rtcd.c',
              'vp9/common/vp9_scale.c',
              'vp9/common/vp9_scan.c',
              'vp9/common/vp9_seg_common.c',
-             'vp9/common/vp9_thread.c',
              'vp9/common/vp9_tile_common.c',
              'vp9/decoder/vp9_decodeframe.c',
              'vp9/decoder/vp9_decodemv.c',
-             'vp9/decoder/vp9_decoder.c',
              'vp9/decoder/vp9_detokenize.c',
              'vp9/decoder/vp9_dsubexp.c',
              'vp9/decoder/vp9_dthread.c',
+             'vp9/decoder/vp9_onyxd_if.c',
              'vp9/decoder/vp9_reader.c',
-             'vp9/encoder/vp9_aq_complexity.c',
-             'vp9/encoder/vp9_aq_cyclicrefresh.c',
-             'vp9/encoder/vp9_aq_variance.c',
              'vp9/encoder/vp9_bitstream.c',
-             'vp9/encoder/vp9_context_tree.c',
-             'vp9/encoder/vp9_cost.c',
              'vp9/encoder/vp9_dct.c',
              'vp9/encoder/vp9_encodeframe.c',
              'vp9/encoder/vp9_encodemb.c',
              'vp9/encoder/vp9_encodemv.c',
-             'vp9/encoder/vp9_encoder.c',
              'vp9/encoder/vp9_extend.c',
              'vp9/encoder/vp9_firstpass.c',
              'vp9/encoder/vp9_lookahead.c',
              'vp9/encoder/vp9_mbgraph.c',
              'vp9/encoder/vp9_mcomp.c',
+             'vp9/encoder/vp9_onyx_if.c',
              'vp9/encoder/vp9_picklpf.c',
              'vp9/encoder/vp9_pickmode.c',
              'vp9/encoder/vp9_quantize.c',
              'vp9/encoder/vp9_ratectrl.c',
-             'vp9/encoder/vp9_rd.c',
              'vp9/encoder/vp9_rdopt.c',
              'vp9/encoder/vp9_resize.c',
              'vp9/encoder/vp9_sad.c',
              'vp9/encoder/vp9_segmentation.c',
-             'vp9/encoder/vp9_speed_features.c',
              'vp9/encoder/vp9_subexp.c',
-             'vp9/encoder/vp9_svc_layercontext.c',
              'vp9/encoder/vp9_temporal_filter.c',
              'vp9/encoder/vp9_tokenize.c',
              'vp9/encoder/vp9_treewriter.c',
+             'vp9/encoder/vp9_vaq.c',
              'vp9/encoder/vp9_variance.c',
-             'vp9/encoder/vp9_write_bit_buffer.c',
              'vp9/encoder/vp9_writer.c',
              'vp9/vp9_cx_iface.c',
              'vp9/vp9_dx_iface.c',
              'vpx/src/svc_encodeframe.c',
              'vpx/src/vpx_encoder.c',
              'vpx_mem/vpx_mem.c',
              'vpx_scale/generic/yv12config.c',
              'vpx_scale/generic/yv12extend.c',
@@ -237,52 +238,52 @@ files = {
                      'vp8/encoder/ethreading.c',
                      'vp8/encoder/firstpass.c',
                      'vp8/encoder/lookahead.c',
                      'vp8/encoder/mcomp.c',
                      'vp8/encoder/modecosts.c',
                      'vp8/encoder/mr_dissim.c',
                      'vp8/encoder/pickinter.c',
                      'vp8/encoder/picklpf.c',
+                     'vp8/encoder/psnr.c',
                      'vp8/encoder/quantize.c',
                      'vp8/encoder/ratectrl.c',
                      'vp8/encoder/rdopt.c',
                      'vp8/encoder/segmentation.c',
                      'vp8/encoder/temporal_filter.c',
                      'vp8/encoder/tokenize.c',
                      'vp8/encoder/treewriter.c',
                      'vp8/vp8_cx_iface.c',
-                     'vp9/decoder/vp9_read_bit_buffer.c',
+                     'vp9/decoder/vp9_thread.c',
+                     'vp9/encoder/vp9_psnr.c',
                      'vpx/src/vpx_codec.c',
                      'vpx/src/vpx_decoder.c',
                      'vpx/src/vpx_image.c',
-                     'vpx/src/vpx_psnr.c',
                      'vpx_scale/generic/gen_scalers.c',
                      'vpx_scale/generic/vpx_scale.c'],
  'VP8_POSTPROC': ['vp8/common/mfqe.c', 'vp8/common/postproc.c'],
  'VP9_POSTPROC': ['vp9/common/vp9_postproc.c'],
  'X86-64_ASM': ['third_party/x86inc/x86inc.asm',
-                'vp8/common/x86/loopfilter_block_sse2_x86_64.asm',
-                'vp8/encoder/x86/ssim_opt_x86_64.asm',
-                'vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm',
-                'vp9/encoder/x86/vp9_ssim_opt_x86_64.asm'],
+                'vp8/common/x86/loopfilter_block_sse2.asm',
+                'vp9/encoder/x86/vp9_quantize_ssse3.asm'],
  'X86_ASM': ['vp8/common/x86/dequantize_mmx.asm',
              'vp8/common/x86/filter_x86.c',
              'vp8/common/x86/idct_blk_mmx.c',
              'vp8/common/x86/idct_blk_sse2.c',
              'vp8/common/x86/idctllm_mmx.asm',
              'vp8/common/x86/idctllm_sse2.asm',
              'vp8/common/x86/iwalsh_mmx.asm',
              'vp8/common/x86/iwalsh_sse2.asm',
              'vp8/common/x86/loopfilter_mmx.asm',
              'vp8/common/x86/loopfilter_sse2.asm',
              'vp8/common/x86/loopfilter_x86.c',
              'vp8/common/x86/mfqe_sse2.asm',
              'vp8/common/x86/postproc_mmx.asm',
              'vp8/common/x86/postproc_sse2.asm',
+             'vp8/common/x86/postproc_x86.c',
              'vp8/common/x86/recon_mmx.asm',
              'vp8/common/x86/recon_sse2.asm',
              'vp8/common/x86/recon_wrapper_sse2.c',
              'vp8/common/x86/sad_mmx.asm',
              'vp8/common/x86/sad_sse2.asm',
              'vp8/common/x86/sad_sse3.asm',
              'vp8/common/x86/sad_sse4.asm',
              'vp8/common/x86/sad_ssse3.asm',
@@ -298,49 +299,45 @@ files = {
              'vp8/common/x86/vp8_asm_stubs.c',
              'vp8/encoder/x86/dct_mmx.asm',
              'vp8/encoder/x86/dct_sse2.asm',
              'vp8/encoder/x86/denoising_sse2.c',
              'vp8/encoder/x86/encodeopt.asm',
              'vp8/encoder/x86/fwalsh_sse2.asm',
              'vp8/encoder/x86/quantize_mmx.asm',
              'vp8/encoder/x86/quantize_sse2.c',
-             'vp8/encoder/x86/quantize_sse4.c',
-             'vp8/encoder/x86/quantize_ssse3.c',
+             'vp8/encoder/x86/quantize_sse4.asm',
+             'vp8/encoder/x86/quantize_ssse3.asm',
              'vp8/encoder/x86/subtract_mmx.asm',
              'vp8/encoder/x86/subtract_sse2.asm',
              'vp8/encoder/x86/temporal_filter_apply_sse2.asm',
              'vp8/encoder/x86/vp8_enc_stubs_mmx.c',
              'vp8/encoder/x86/vp8_enc_stubs_sse2.c',
              'vp9/common/x86/vp9_asm_stubs.c',
              'vp9/common/x86/vp9_copy_sse2.asm',
-             'vp9/common/x86/vp9_high_intrapred_sse2.asm',
-             'vp9/common/x86/vp9_high_loopfilter_intrin_sse2.c',
-             'vp9/common/x86/vp9_high_subpixel_8t_sse2.asm',
-             'vp9/common/x86/vp9_high_subpixel_bilinear_sse2.asm',
              'vp9/common/x86/vp9_idct_intrin_sse2.c',
-             'vp9/common/x86/vp9_idct_intrin_ssse3.c',
-             'vp9/common/x86/vp9_idct_ssse3_x86_64.asm',
              'vp9/common/x86/vp9_intrapred_sse2.asm',
              'vp9/common/x86/vp9_intrapred_ssse3.asm',
              'vp9/common/x86/vp9_loopfilter_intrin_sse2.c',
              'vp9/common/x86/vp9_loopfilter_mmx.asm',
-             'vp9/common/x86/vp9_subpixel_8t_intrin_ssse3.c',
              'vp9/common/x86/vp9_subpixel_8t_sse2.asm',
              'vp9/common/x86/vp9_subpixel_8t_ssse3.asm',
              'vp9/common/x86/vp9_subpixel_bilinear_sse2.asm',
              'vp9/common/x86/vp9_subpixel_bilinear_ssse3.asm',
              'vp9/encoder/x86/vp9_dct32x32_sse2.c',
-             'vp9/encoder/x86/vp9_dct_mmx.asm',
              'vp9/encoder/x86/vp9_dct_sse2.c',
-             'vp9/encoder/x86/vp9_dct_ssse3_x86_64.asm',
              'vp9/encoder/x86/vp9_error_sse2.asm',
              'vp9/encoder/x86/vp9_sad4d_sse2.asm',
+             'vp9/encoder/x86/vp9_sad_mmx.asm',
              'vp9/encoder/x86/vp9_sad_sse2.asm',
              'vp9/encoder/x86/vp9_sad_sse3.asm',
              'vp9/encoder/x86/vp9_sad_sse4.asm',
              'vp9/encoder/x86/vp9_sad_ssse3.asm',
              'vp9/encoder/x86/vp9_subpel_variance.asm',
+             'vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm',
              'vp9/encoder/x86/vp9_subtract_sse2.asm',
              'vp9/encoder/x86/vp9_temporal_filter_apply_sse2.asm',
+             'vp9/encoder/x86/vp9_variance_impl_mmx.asm',
+             'vp9/encoder/x86/vp9_variance_impl_sse2.asm',
+             'vp9/encoder/x86/vp9_variance_mmx.c',
              'vp9/encoder/x86/vp9_variance_sse2.c',
              'vpx_ports/emms.asm']
 }
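
As an aside, sources.mozbuild is itself a Python fragment that assigns the
files dict shown above, keyed by build condition ('NEON', 'X86_ASM',
'AVX2', ...). A minimal, illustrative loader (the real consumer is the
moz.build machinery, not this helper) can inspect it directly:

    # Execute the manifest in an empty scope and read back `files`.
    scope = {}
    with open('media/libvpx/sources.mozbuild') as f:
        exec(f.read(), scope)
    for condition, paths in sorted(scope['files'].items()):
        print(condition, len(paths), 'files')
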
--- a/media/libvpx/third_party/x86inc/x86inc.asm
+++ b/media/libvpx/third_party/x86inc/x86inc.asm
@@ -229,20 +229,20 @@ ALIGNMODE k7
     %define r%1d %3
     %define r%1w %4
     %define r%1b %5
     %if %0 == 5
         %define r%1m  %3
         %define r%1mp %2
     %elif ARCH_X86_64 ; memory
         %define r%1m [rsp + stack_offset + %6]
-        %define r%1mp qword r %+ %1 %+ m
+        %define r%1mp qword r %+ %1m
     %else
         %define r%1m [esp + stack_offset + %6]
-        %define r%1mp dword r %+ %1 %+ m
+        %define r%1mp dword r %+ %1m
     %endif
     %define r%1  %2
 %endmacro
 
 %macro DECLARE_REG_SIZE 2
     %define r%1q r%1
     %define e%1q r%1
     %define r%1d e%1
@@ -390,33 +390,16 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9
         CAT_XDEFINE arg_name, %%i, %1
         %assign %%i %%i+1
         %rotate 1
     %endrep
     %xdefine stack_offset %%stack_offset
     %assign n_arg_names %0
 %endmacro
 
-%if ARCH_X86_64
-%macro ALLOC_STACK 2  ; stack_size, num_regs
-  %assign %%stack_aligment ((mmsize + 15) & ~15)
-  %assign stack_size_padded %1
-
-  %assign %%reg_num (%2 - 1)
-  %xdefine rsp_tmp r %+ %%reg_num
-  mov  rsp_tmp, rsp
-  sub  rsp, stack_size_padded
-  and  rsp, ~(%%stack_aligment - 1)
-%endmacro
-
-%macro RESTORE_STACK 0  ; reset rsp register
-  mov  rsp, rsp_tmp
-%endmacro
-%endif
-
 %if WIN64 ; Windows x64 ;=================================================
 
 DECLARE_REG 0,  rcx, ecx,  cx,   cl
 DECLARE_REG 1,  rdx, edx,  dx,   dl
 DECLARE_REG 2,  R8,  R8D,  R8W,  R8B
 DECLARE_REG 3,  R9,  R9D,  R9W,  R9B
 DECLARE_REG 4,  R10, R10D, R10W, R10B, 40
 DECLARE_REG 5,  R11, R11D, R11W, R11B, 48
@@ -604,30 +587,26 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
 %endmacro
 %macro cglobal_internal 1-2+
     %ifndef cglobaled_%1
         %xdefine %1 mangle(program_name %+ _ %+ %1)
         %xdefine %1.skip_prologue %1 %+ .skip_prologue
         CAT_XDEFINE cglobaled_, %1, 1
     %endif
     %xdefine current_function %1
-    %ifdef CHROMIUM
-        %ifidn __OUTPUT_FORMAT__,elf
-            global %1:function hidden
-        %elifidn __OUTPUT_FORMAT__,elf32
-            global %1:function hidden
-        %elifidn __OUTPUT_FORMAT__,elf64
-            global %1:function hidden
-        %elifidn __OUTPUT_FORMAT__,macho32
-            global %1:private_extern
-        %elifidn __OUTPUT_FORMAT__,macho64
-            global %1:private_extern
-        %else
-            global %1
-        %endif
+    %ifidn __OUTPUT_FORMAT__,elf
+        global %1:function hidden
+    %elifidn __OUTPUT_FORMAT__,elf32
+        global %1:function hidden
+    %elifidn __OUTPUT_FORMAT__,elf64
+        global %1:function hidden
+    %elifidn __OUTPUT_FORMAT__,macho32
+        global %1:private_extern
+    %elifidn __OUTPUT_FORMAT__,macho64
+        global %1:private_extern
     %else
         global %1
     %endif
     align function_align
     %1:
     RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
     %assign stack_offset 0
     %if %0 > 1
--- a/media/libvpx/vp8/common/arm/armv6/vp8_variance16x16_armv6.asm
+++ b/media/libvpx/vp8/common/arm/armv6/vp8_variance16x16_armv6.asm
@@ -48,17 +48,17 @@ loop
     sel     r6, r9, lr          ; select bytes with negative difference
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
     ; calculate total sum
     adds    r8, r8, r4          ; add positive differences to sum
-    subs    r8, r8, r5          ; subtract negative differences from sum
+    subs    r8, r8, r5          ; substract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r10, r6, ror #8     ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 2nd 4 pixels
     ldr     r4, [r0, #4]        ; load 4 src pixels
@@ -72,17 +72,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; subtract negative differences from sum
+    sub     r8, r8, r5          ; substract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r10, r6, ror #8     ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 3rd 4 pixels
     ldr     r4, [r0, #8]        ; load 4 src pixels
@@ -96,17 +96,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; subtract negative differences from sum
+    sub     r8, r8, r5          ; substract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r10, r6, ror #8     ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 4th 4 pixels
     ldr     r4, [r0, #12]       ; load 4 src pixels
@@ -122,17 +122,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; subtract negative differences from sum
+    sub     r8, r8, r5          ; substract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r10, r6, ror #8     ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
     smlad   r11, r10, r10, r11  ; dual signed multiply, add and accumulate (2)
 
 
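
What this routine accumulates reduces to a short loop: a signed sum of
pixel differences and a sum of squared differences (sse). A Python model
of the arithmetic (not of the SIMD scheduling; the final step matches
vp8's 16x16 variance, which divides sum^2 by the 256 pixels via a shift):

    def variance_16x16(src, ref):      # src, ref: 256 pixel values each
        s = sse = 0
        for a, b in zip(src, ref):
            d = a - b
            s += d                     # positive diffs added, negative subtracted
            sse += d * d               # the smlad multiply-accumulate
        return sse - ((s * s) >> 8), sse   # (variance, sse)
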
--- a/media/libvpx/vp8/common/arm/armv6/vp8_variance8x8_armv6.asm
+++ b/media/libvpx/vp8/common/arm/armv6/vp8_variance8x8_armv6.asm
@@ -46,17 +46,17 @@ loop
     sel     r8, r9, lr          ; select bytes with negative difference
 
     ; calculate partial sums
     usad8   r6, r10, lr         ; calculate sum of positive differences
     usad8   r7, r8, lr          ; calculate sum of negative differences
     orr     r8, r8, r10         ; differences of all 4 pixels
     ; calculate total sum
     add    r4, r4, r6           ; add positive differences to sum
-    sub    r4, r4, r7           ; subtract negative differences from sum
+    sub    r4, r4, r7           ; substract negative differences from sum
 
     ; calculate sse
     uxtb16  r7, r8              ; byte (two pixels) to halfwords
     uxtb16  r10, r8, ror #8     ; another two pixels to halfwords
     smlad   r5, r7, r7, r5      ; dual signed multiply, add and accumulate (1)
 
     ; 2nd 4 pixels
     ldr     r6, [r0, #0x4]      ; load 4 src pixels
@@ -72,17 +72,17 @@ loop
 
     ; calculate partial sums
     usad8   r6, r10, lr         ; calculate sum of positive differences
     usad8   r7, r8, lr          ; calculate sum of negative differences
     orr     r8, r8, r10         ; differences of all 4 pixels
 
     ; calculate total sum
     add     r4, r4, r6          ; add positive differences to sum
-    sub     r4, r4, r7          ; subtract negative differences from sum
+    sub     r4, r4, r7          ; substract negative differences from sum
 
     ; calculate sse
     uxtb16  r7, r8              ; byte (two pixels) to halfwords
     uxtb16  r10, r8, ror #8     ; another two pixels to halfwords
     smlad   r5, r7, r7, r5      ; dual signed multiply, add and accumulate (1)
     subs    r12, r12, #1        ; next row
     smlad   r5, r10, r10, r5    ; dual signed multiply, add and accumulate (2)
 
--- a/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
+++ b/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
@@ -53,17 +53,17 @@ loop
     sel     r6, r6, lr          ; select bytes with negative difference
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
     ; calculate total sum
     adds    r8, r8, r4          ; add positive differences to sum
-    subs    r8, r8, r5          ; subtract negative differences from sum
+    subs    r8, r8, r5          ; substract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 2nd 4 pixels
     ldr     r4, [r0, #4]        ; load 4 src pixels
@@ -84,17 +84,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; subtract negative differences from sum
+    sub     r8, r8, r5          ; substract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 3rd 4 pixels
     ldr     r4, [r0, #8]        ; load 4 src pixels
@@ -115,17 +115,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; subtract negative differences from sum
+    sub     r8, r8, r5          ; substract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 4th 4 pixels
     ldr     r4, [r0, #12]       ; load 4 src pixels
@@ -148,17 +148,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; subtract negative differences from sum
+    sub     r8, r8, r5          ; substract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
     smlad   r11, r7, r7, r11    ; dual signed multiply, add and accumulate (2)
 
     subs    r12, r12, #1
--- a/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm
+++ b/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm
@@ -64,17 +64,17 @@ loop
     sel     r6, r6, lr          ; select bytes with negative difference
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
     ; calculate total sum
     adds    r8, r8, r4          ; add positive differences to sum
-    subs    r8, r8, r5          ; subtract negative differences from sum
+    subs    r8, r8, r5          ; substract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 2nd 4 pixels
     ldr     r4, [r0, #4]        ; load source pixels a, row N
@@ -106,17 +106,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; subtract negative differences from sum
+    sub     r8, r8, r5          ; substract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 3rd 4 pixels
     ldr     r4, [r0, #8]        ; load source pixels a, row N
@@ -148,17 +148,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; subtract negative differences from sum
+    sub     r8, r8, r5          ; substract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 4th 4 pixels
     ldr     r4, [r0, #12]       ; load source pixels a, row N
@@ -190,17 +190,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; subtract negative differences from sum
+    sub     r8, r8, r5          ; substract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
     subs    r12, r12, #1
     smlad   r11, r7, r7, r11    ; dual signed multiply, add and accumulate (2)
 
--- a/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm
+++ b/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm
@@ -54,17 +54,17 @@ loop
     sel     r6, r6, lr          ; select bytes with negative difference
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
     ; calculate total sum
     adds    r8, r8, r4          ; add positive differences to sum
-    subs    r8, r8, r5          ; subtract negative differences from sum
+    subs    r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 2nd 4 pixels
     ldr     r4, [r0, #4]        ; load 4 src pixels
@@ -85,17 +85,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; subtract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 3rd 4 pixels
     ldr     r4, [r0, #8]        ; load 4 src pixels
@@ -116,17 +116,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; subtract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 4th 4 pixels
     ldr     r4, [r0, #12]       ; load 4 src pixels
@@ -149,17 +149,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; subtract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
     smlad   r11, r7, r7, r11    ; dual signed multiply, add and accumulate (2)
 
 
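The armv6 variance loops above all follow one pattern: usad8 accumulates the positive and negative byte differences into the running sum in r8, while uxtb16/smlad squares the same differences into the running sum of squares in r11. As a sketch only (scalar C, hypothetical helper name, not part of the patch), the per-pixel accumulation is:

/* Scalar model of the armv6 accumulation: sum mirrors r8, sse mirrors r11. */
static void variance_accumulate(const unsigned char *src,
                                const unsigned char *ref,
                                int n, int *sum, unsigned int *sse)
{
    int i;
    for (i = 0; i < n; i++) {
        int diff = src[i] - ref[i];
        *sum += diff;                        /* add positive, subtract negative */
        *sse += (unsigned int)(diff * diff); /* dual multiply-accumulate        */
    }
}

The caller then derives the 16x16 variance as sse - sum*sum/256.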
--- a/media/libvpx/vp8/common/arm/dequantize_arm.c
+++ b/media/libvpx/vp8/common/arm/dequantize_arm.c
@@ -7,19 +7,36 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 
 #include "vpx_config.h"
 #include "vp8/common/blockd.h"
 
+#if HAVE_NEON
+extern void vp8_dequantize_b_loop_neon(short *Q, short *DQC, short *DQ);
+#endif
+
 #if HAVE_MEDIA
 extern void vp8_dequantize_b_loop_v6(short *Q, short *DQC, short *DQ);
+#endif
 
+#if HAVE_NEON
+
+void vp8_dequantize_b_neon(BLOCKD *d, short *DQC)
+{
+    short *DQ  = d->dqcoeff;
+    short *Q   = d->qcoeff;
+
+    vp8_dequantize_b_loop_neon(Q, DQC, DQ);
+}
+#endif
+
+#if HAVE_MEDIA
 void vp8_dequantize_b_v6(BLOCKD *d, short *DQC)
 {
     short *DQ  = d->dqcoeff;
     short *Q   = d->qcoeff;
 
     vp8_dequantize_b_loop_v6(Q, DQC, DQ);
 }
 #endif
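Both wrappers delegate to an assembly loop that scales each of the block's 16 quantized coefficients by its dequantization factor. A plain C model of what vp8_dequantize_b_loop_neon and vp8_dequantize_b_loop_v6 compute (the shipped loops are the assembly routines declared above):

/* C model of vp8_dequantize_b_loop_*: DQ[i] = Q[i] * DQC[i]. */
static void dequantize_b_loop(const short *Q, const short *DQC, short *DQ)
{
    int i;
    for (i = 0; i < 16; i++)
        DQ[i] = Q[i] * DQC[i];
}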
--- a/media/libvpx/vp8/common/arm/loopfilter_arm.c
+++ b/media/libvpx/vp8/common/arm/loopfilter_arm.c
@@ -29,21 +29,21 @@ extern prototype_loopfilter(vp8_mbloop_f
 typedef void loopfilter_y_neon(unsigned char *src, int pitch,
         unsigned char blimit, unsigned char limit, unsigned char thresh);
 typedef void loopfilter_uv_neon(unsigned char *u, int pitch,
         unsigned char blimit, unsigned char limit, unsigned char thresh,
         unsigned char *v);
 
 extern loopfilter_y_neon vp8_loop_filter_horizontal_edge_y_neon;
 extern loopfilter_y_neon vp8_loop_filter_vertical_edge_y_neon;
+extern loopfilter_y_neon vp8_mbloop_filter_horizontal_edge_y_neon;
+extern loopfilter_y_neon vp8_mbloop_filter_vertical_edge_y_neon;
+
 extern loopfilter_uv_neon vp8_loop_filter_horizontal_edge_uv_neon;
 extern loopfilter_uv_neon vp8_loop_filter_vertical_edge_uv_neon;
-
-extern loopfilter_y_neon vp8_mbloop_filter_horizontal_edge_y_neon;
-extern loopfilter_y_neon vp8_mbloop_filter_vertical_edge_y_neon;
 extern loopfilter_uv_neon vp8_mbloop_filter_horizontal_edge_uv_neon;
 extern loopfilter_uv_neon vp8_mbloop_filter_vertical_edge_uv_neon;
 #endif
 
 #if HAVE_MEDIA
 /* ARMV6/MEDIA loopfilter functions*/
 /* Horizontal MB filtering */
 void vp8_loop_filter_mbh_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
--- a/media/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c
+++ b/media/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c
@@ -5,17 +5,17 @@
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include <arm_neon.h>
 
-static const uint8_t bifilter4_coeff[8][2] = {
+static const uint16_t bifilter4_coeff[8][2] = {
     {128,   0},
     {112,  16},
     { 96,  32},
     { 80,  48},
     { 64,  64},
     { 48,  80},
     { 32,  96},
     { 16, 112}
@@ -25,28 +25,25 @@ void vp8_bilinear_predict4x4_neon(
         unsigned char *src_ptr,
         int src_pixels_per_line,
         int xoffset,
         int yoffset,
         unsigned char *dst_ptr,
         int dst_pitch) {
     uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8;
     uint8x8_t d26u8, d27u8, d28u8, d29u8, d30u8;
+    uint32x2_t d28u32, d29u32, d30u32;
     uint8x16_t q1u8, q2u8;
     uint16x8_t q1u16, q2u16;
     uint16x8_t q7u16, q8u16, q9u16;
     uint64x2_t q4u64, q5u64;
     uint64x1_t d12u64;
     uint32x2x2_t d0u32x2, d1u32x2, d2u32x2, d3u32x2;
 
     if (xoffset == 0) {  // skip_1stpass_filter
-        uint32x2_t d28u32 = vdup_n_u32(0);
-        uint32x2_t d29u32 = vdup_n_u32(0);
-        uint32x2_t d30u32 = vdup_n_u32(0);
-
         d28u32 = vld1_lane_u32((const uint32_t *)src_ptr, d28u32, 0);
         src_ptr += src_pixels_per_line;
         d28u32 = vld1_lane_u32((const uint32_t *)src_ptr, d28u32, 1);
         src_ptr += src_pixels_per_line;
         d29u32 = vld1_lane_u32((const uint32_t *)src_ptr, d29u32, 0);
         src_ptr += src_pixels_per_line;
         d29u32 = vld1_lane_u32((const uint32_t *)src_ptr, d29u32, 1);
         src_ptr += src_pixels_per_line;
@@ -59,18 +56,18 @@ void vp8_bilinear_predict4x4_neon(
         d3u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
         d4u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
         d5u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
         d6u8 = vld1_u8(src_ptr);
 
         q1u8 = vcombine_u8(d2u8, d3u8);
         q2u8 = vcombine_u8(d4u8, d5u8);
 
-        d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
-        d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
+        d0u8 = vdup_n_u8((uint8_t)bifilter4_coeff[xoffset][0]);
+        d1u8 = vdup_n_u8((uint8_t)bifilter4_coeff[xoffset][1]);
 
         q4u64  = vshrq_n_u64(vreinterpretq_u64_u8(q1u8), 8);
         q5u64  = vshrq_n_u64(vreinterpretq_u64_u8(q2u8), 8);
         d12u64 = vshr_n_u64(vreinterpret_u64_u8(d6u8), 8);
 
         d0u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q1u8)),
                            vreinterpret_u32_u8(vget_high_u8(q1u8)));
         d1u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q2u8)),
@@ -150,18 +147,18 @@ void vp8_bilinear_predict8x4_neon(
         d26u8 = vld1_u8(src_ptr);
     } else {
         q1u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
         q2u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
         q3u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
         q4u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
         q5u8 = vld1q_u8(src_ptr);
 
-        d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
-        d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
+        d0u8 = vdup_n_u8((uint8_t)bifilter4_coeff[xoffset][0]);
+        d1u8 = vdup_n_u8((uint8_t)bifilter4_coeff[xoffset][1]);
 
         q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
         q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
         q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
         q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
         q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
 
         d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);
@@ -240,18 +237,18 @@ void vp8_bilinear_predict8x8_neon(
         d29u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
         d30u8 = vld1_u8(src_ptr);
     } else {
         q1u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
         q2u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
         q3u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
         q4u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
 
-        d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
-        d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
+        d0u8 = vdup_n_u8((uint8_t)bifilter4_coeff[xoffset][0]);
+        d1u8 = vdup_n_u8((uint8_t)bifilter4_coeff[xoffset][1]);
 
         q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
         q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
         q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
         q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
 
         d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);
         d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1);
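Each row of bifilter4_coeff is a two-tap filter whose taps sum to 128, so every output pixel is a weighted blend of two neighbors renormalized with a rounded 7-bit shift. As a scalar sketch for orientation (hypothetical helper, assuming the usual VP8 rounding; the functions above do the same math 8 or 16 lanes at a time):

#include <stdint.h>

/* One bilinear tap: blend two pixels with a coefficient row that
 * sums to 128, then round and renormalize (>> 7). */
static unsigned char bilinear_tap(unsigned char a, unsigned char b,
                                  const uint16_t coeff[2])
{
    return (unsigned char)((a * coeff[0] + b * coeff[1] + 64) >> 7);
}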
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/buildintrapredictorsmby_neon.asm
@@ -0,0 +1,584 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_build_intra_predictors_mby_neon_func|
+    EXPORT  |vp8_build_intra_predictors_mby_s_neon_func|
+
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+; r0    unsigned char *y_buffer
+; r1    unsigned char *ypred_ptr
+; r2    int y_stride
+; r3    int mode
+; stack int Up
+; stack int Left
+
+|vp8_build_intra_predictors_mby_neon_func| PROC
+    push            {r4-r8, lr}
+
+    cmp             r3, #0
+    beq             case_dc_pred
+    cmp             r3, #1
+    beq             case_v_pred
+    cmp             r3, #2
+    beq             case_h_pred
+    cmp             r3, #3
+    beq             case_tm_pred
+
+case_dc_pred
+    ldr             r4, [sp, #24]       ; Up
+    ldr             r5, [sp, #28]       ; Left
+
+    ; Default the DC average to 128
+    mov             r12, #128
+    vdup.u8         q0, r12
+
+    ; Zero out running sum
+    mov             r12, #0
+
+    ; compute shift and jump
+    adds            r7, r4, r5
+    beq             skip_dc_pred_up_left
+
+    ; Load above row, if it exists
+    cmp             r4, #0
+    beq             skip_dc_pred_up
+
+    sub             r6, r0, r2
+    vld1.8          {q1}, [r6]
+    vpaddl.u8       q2, q1
+    vpaddl.u16      q3, q2
+    vpaddl.u32      q4, q3
+
+    vmov.32         r4, d8[0]
+    vmov.32         r6, d9[0]
+
+    add             r12, r4, r6
+
+    ; Move back to integer registers
+
+skip_dc_pred_up
+
+    cmp             r5, #0
+    beq             skip_dc_pred_left
+
+    sub             r0, r0, #1
+
+    ; Load left row, if it exists
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0]
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+skip_dc_pred_left
+    add             r7, r7, #3          ; Shift
+    sub             r4, r7, #1
+    mov             r5, #1
+    add             r12, r12, r5, lsl r4
+    mov             r5, r12, lsr r7     ; expected_dc
+
+    vdup.u8         q0, r5
+
+skip_dc_pred_up_left
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+
+    pop             {r4-r8,pc}
+case_v_pred
+    ; Copy down above row
+    sub             r6, r0, r2
+    vld1.8          {q0}, [r6]
+
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    pop             {r4-r8,pc}
+
+case_h_pred
+    ; Load 4x yleft_col
+    sub             r0, r0, #1
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u8         q0, r3
+    vdup.u8         q1, r4
+    vdup.u8         q2, r5
+    vdup.u8         q3, r6
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q1}, [r1]!
+    vst1.u8         {q2}, [r1]!
+    vst1.u8         {q3}, [r1]!
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u8         q0, r3
+    vdup.u8         q1, r4
+    vdup.u8         q2, r5
+    vdup.u8         q3, r6
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q1}, [r1]!
+    vst1.u8         {q2}, [r1]!
+    vst1.u8         {q3}, [r1]!
+
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u8         q0, r3
+    vdup.u8         q1, r4
+    vdup.u8         q2, r5
+    vdup.u8         q3, r6
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q1}, [r1]!
+    vst1.u8         {q2}, [r1]!
+    vst1.u8         {q3}, [r1]!
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u8         q0, r3
+    vdup.u8         q1, r4
+    vdup.u8         q2, r5
+    vdup.u8         q3, r6
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q1}, [r1]!
+    vst1.u8         {q2}, [r1]!
+    vst1.u8         {q3}, [r1]!
+
+    pop             {r4-r8,pc}
+
+case_tm_pred
+    ; Load yabove_row
+    sub             r3, r0, r2
+    vld1.8          {q8}, [r3]
+
+    ; Load ytop_left
+    sub             r3, r3, #1
+    ldrb            r7, [r3]
+
+    vdup.u16        q7, r7
+
+    ; Compute yabove_row - ytop_left
+    mov             r3, #1
+    vdup.u8         q0, r3
+
+    vmull.u8        q4, d16, d0
+    vmull.u8        q5, d17, d0
+
+    vsub.s16        q4, q4, q7
+    vsub.s16        q5, q5, q7
+
+    ; Load 4x yleft_col
+    sub             r0, r0, #1
+    mov             r12, #4
+
+case_tm_pred_loop
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u16        q0, r3
+    vdup.u16        q1, r4
+    vdup.u16        q2, r5
+    vdup.u16        q3, r6
+
+    vqadd.s16       q8, q0, q4
+    vqadd.s16       q9, q0, q5
+
+    vqadd.s16       q10, q1, q4
+    vqadd.s16       q11, q1, q5
+
+    vqadd.s16       q12, q2, q4
+    vqadd.s16       q13, q2, q5
+
+    vqadd.s16       q14, q3, q4
+    vqadd.s16       q15, q3, q5
+
+    vqshrun.s16     d0, q8, #0
+    vqshrun.s16     d1, q9, #0
+
+    vqshrun.s16     d2, q10, #0
+    vqshrun.s16     d3, q11, #0
+
+    vqshrun.s16     d4, q12, #0
+    vqshrun.s16     d5, q13, #0
+
+    vqshrun.s16     d6, q14, #0
+    vqshrun.s16     d7, q15, #0
+
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q1}, [r1]!
+    vst1.u8         {q2}, [r1]!
+    vst1.u8         {q3}, [r1]!
+
+    subs            r12, r12, #1
+    bne             case_tm_pred_loop
+
+    pop             {r4-r8,pc}
+
+    ENDP
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; r0    unsigned char *y_buffer
+; r1    unsigned char *ypred_ptr
+; r2    int y_stride
+; r3    int mode
+; stack int Up
+; stack int Left
+
+|vp8_build_intra_predictors_mby_s_neon_func| PROC
+    push            {r4-r8, lr}
+
+    mov             r1, r0      ;   unsigned char *ypred_ptr = x->dst.y_buffer; //x->Predictor;
+
+    cmp             r3, #0
+    beq             case_dc_pred_s
+    cmp             r3, #1
+    beq             case_v_pred_s
+    cmp             r3, #2
+    beq             case_h_pred_s
+    cmp             r3, #3
+    beq             case_tm_pred_s
+
+case_dc_pred_s
+    ldr             r4, [sp, #24]       ; Up
+    ldr             r5, [sp, #28]       ; Left
+
+    ; Default the DC average to 128
+    mov             r12, #128
+    vdup.u8         q0, r12
+
+    ; Zero out running sum
+    mov             r12, #0
+
+    ; compute shift and jump
+    adds            r7, r4, r5
+    beq             skip_dc_pred_up_left_s
+
+    ; Load above row, if it exists
+    cmp             r4, #0
+    beq             skip_dc_pred_up_s
+
+    sub             r6, r0, r2
+    vld1.8          {q1}, [r6]
+    vpaddl.u8       q2, q1
+    vpaddl.u16      q3, q2
+    vpaddl.u32      q4, q3
+
+    vmov.32         r4, d8[0]
+    vmov.32         r6, d9[0]
+
+    add             r12, r4, r6
+
+    ; Move back to integer registers
+
+skip_dc_pred_up_s
+
+    cmp             r5, #0
+    beq             skip_dc_pred_left_s
+
+    sub             r0, r0, #1
+
+    ; Load left row, if it exists
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0]
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+skip_dc_pred_left_s
+    add             r7, r7, #3          ; Shift
+    sub             r4, r7, #1
+    mov             r5, #1
+    add             r12, r12, r5, lsl r4
+    mov             r5, r12, lsr r7     ; expected_dc
+
+    vdup.u8         q0, r5
+
+skip_dc_pred_up_left_s
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+
+    pop             {r4-r8,pc}
+case_v_pred_s
+    ; Copy down above row
+    sub             r6, r0, r2
+    vld1.8          {q0}, [r6]
+
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    pop             {r4-r8,pc}
+
+case_h_pred_s
+    ; Load 4x yleft_col
+    sub             r0, r0, #1
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u8         q0, r3
+    vdup.u8         q1, r4
+    vdup.u8         q2, r5
+    vdup.u8         q3, r6
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q1}, [r1], r2
+    vst1.u8         {q2}, [r1], r2
+    vst1.u8         {q3}, [r1], r2
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u8         q0, r3
+    vdup.u8         q1, r4
+    vdup.u8         q2, r5
+    vdup.u8         q3, r6
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q1}, [r1], r2
+    vst1.u8         {q2}, [r1], r2
+    vst1.u8         {q3}, [r1], r2
+
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u8         q0, r3
+    vdup.u8         q1, r4
+    vdup.u8         q2, r5
+    vdup.u8         q3, r6
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q1}, [r1], r2
+    vst1.u8         {q2}, [r1], r2
+    vst1.u8         {q3}, [r1], r2
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u8         q0, r3
+    vdup.u8         q1, r4
+    vdup.u8         q2, r5
+    vdup.u8         q3, r6
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q1}, [r1], r2
+    vst1.u8         {q2}, [r1], r2
+    vst1.u8         {q3}, [r1], r2
+
+    pop             {r4-r8,pc}
+
+case_tm_pred_s
+    ; Load yabove_row
+    sub             r3, r0, r2
+    vld1.8          {q8}, [r3]
+
+    ; Load ytop_left
+    sub             r3, r3, #1
+    ldrb            r7, [r3]
+
+    vdup.u16        q7, r7
+
+    ; Compute yabove_row - ytop_left
+    mov             r3, #1
+    vdup.u8         q0, r3
+
+    vmull.u8        q4, d16, d0
+    vmull.u8        q5, d17, d0
+
+    vsub.s16        q4, q4, q7
+    vsub.s16        q5, q5, q7
+
+    ; Load 4x yleft_col
+    sub             r0, r0, #1
+    mov             r12, #4
+
+case_tm_pred_loop_s
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u16        q0, r3
+    vdup.u16        q1, r4
+    vdup.u16        q2, r5
+    vdup.u16        q3, r6
+
+    vqadd.s16       q8, q0, q4
+    vqadd.s16       q9, q0, q5
+
+    vqadd.s16       q10, q1, q4
+    vqadd.s16       q11, q1, q5
+
+    vqadd.s16       q12, q2, q4
+    vqadd.s16       q13, q2, q5
+
+    vqadd.s16       q14, q3, q4
+    vqadd.s16       q15, q3, q5
+
+    vqshrun.s16     d0, q8, #0
+    vqshrun.s16     d1, q9, #0
+
+    vqshrun.s16     d2, q10, #0
+    vqshrun.s16     d3, q11, #0
+
+    vqshrun.s16     d4, q12, #0
+    vqshrun.s16     d5, q13, #0
+
+    vqshrun.s16     d6, q14, #0
+    vqshrun.s16     d7, q15, #0
+
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q1}, [r1], r2
+    vst1.u8         {q2}, [r1], r2
+    vst1.u8         {q3}, [r1], r2
+
+    subs            r12, r12, #1
+    bne             case_tm_pred_loop_s
+
+    pop             {r4-r8,pc}
+
+    ENDP
+
+
+    END
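case_dc_pred sums whichever neighbors are available (the 16 pixels above via the vpaddl reduction, the 16 to the left via the ldrb chain) and rounds before dividing; with no neighbors it keeps the 128 default. A C model of the arithmetic around skip_dc_pred_left, assuming Up and Left are 0/1 availability flags as in the callers:

/* Rounded DC average: shift is 4 (one edge, 16 samples) or
 * 5 (both edges, 32 samples); with no edges the default is 128. */
static int dc_average(int sum, int Up, int Left)
{
    int shift;
    if (!Up && !Left)
        return 128;
    shift = 3 + Up + Left;
    return (sum + (1 << (shift - 1))) >> shift;
}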
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/copymem16x16_neon.asm
@@ -0,0 +1,59 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_copy_mem16x16_neon|
+    ; ARM
+    ; REQUIRE8
+    ; PRESERVE8
+
+    AREA    Block, CODE, READONLY ; name this block of code
+;void copy_mem16x16_neon( unsigned char *src, int src_stride, unsigned char *dst, int dst_stride)
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+|vp8_copy_mem16x16_neon| PROC
+
+    vld1.u8     {q0}, [r0], r1
+    vld1.u8     {q1}, [r0], r1
+    vld1.u8     {q2}, [r0], r1
+    vst1.u8     {q0}, [r2], r3
+    vld1.u8     {q3}, [r0], r1
+    vst1.u8     {q1}, [r2], r3
+    vld1.u8     {q4}, [r0], r1
+    vst1.u8     {q2}, [r2], r3
+    vld1.u8     {q5}, [r0], r1
+    vst1.u8     {q3}, [r2], r3
+    vld1.u8     {q6}, [r0], r1
+    vst1.u8     {q4}, [r2], r3
+    vld1.u8     {q7}, [r0], r1
+    vst1.u8     {q5}, [r2], r3
+    vld1.u8     {q8}, [r0], r1
+    vst1.u8     {q6}, [r2], r3
+    vld1.u8     {q9}, [r0], r1
+    vst1.u8     {q7}, [r2], r3
+    vld1.u8     {q10}, [r0], r1
+    vst1.u8     {q8}, [r2], r3
+    vld1.u8     {q11}, [r0], r1
+    vst1.u8     {q9}, [r2], r3
+    vld1.u8     {q12}, [r0], r1
+    vst1.u8     {q10}, [r2], r3
+    vld1.u8     {q13}, [r0], r1
+    vst1.u8     {q11}, [r2], r3
+    vld1.u8     {q14}, [r0], r1
+    vst1.u8     {q12}, [r2], r3
+    vld1.u8     {q15}, [r0], r1
+    vst1.u8     {q13}, [r2], r3
+    vst1.u8     {q14}, [r2], r3
+    vst1.u8     {q15}, [r2], r3
+
+    mov     pc, lr
+
+    ENDP  ; |vp8_copy_mem16x16_neon|
+
+    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/copymem8x4_neon.asm
@@ -0,0 +1,34 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_copy_mem8x4_neon|
+    ; ARM
+    ; REQUIRE8
+    ; PRESERVE8
+
+    AREA    Block, CODE, READONLY ; name this block of code
+;void copy_mem8x4_neon( unsigned char *src, int src_stride, unsigned char *dst, int dst_stride)
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+|vp8_copy_mem8x4_neon| PROC
+    vld1.u8     {d0}, [r0], r1
+    vld1.u8     {d1}, [r0], r1
+    vst1.u8     {d0}, [r2], r3
+    vld1.u8     {d2}, [r0], r1
+    vst1.u8     {d1}, [r2], r3
+    vld1.u8     {d3}, [r0], r1
+    vst1.u8     {d2}, [r2], r3
+    vst1.u8     {d3}, [r2], r3
+
+    mov     pc, lr
+
+    ENDP  ; |vp8_copy_mem8x4_neon|
+
+    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/copymem8x8_neon.asm
@@ -0,0 +1,43 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_copy_mem8x8_neon|
+    ; ARM
+    ; REQUIRE8
+    ; PRESERVE8
+
+    AREA    Block, CODE, READONLY ; name this block of code
+;void copy_mem8x8_neon( unsigned char *src, int src_stride, unsigned char *dst, int dst_stride)
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+|vp8_copy_mem8x8_neon| PROC
+
+    vld1.u8     {d0}, [r0], r1
+    vld1.u8     {d1}, [r0], r1
+    vst1.u8     {d0}, [r2], r3
+    vld1.u8     {d2}, [r0], r1
+    vst1.u8     {d1}, [r2], r3
+    vld1.u8     {d3}, [r0], r1
+    vst1.u8     {d2}, [r2], r3
+    vld1.u8     {d4}, [r0], r1
+    vst1.u8     {d3}, [r2], r3
+    vld1.u8     {d5}, [r0], r1
+    vst1.u8     {d4}, [r2], r3
+    vld1.u8     {d6}, [r0], r1
+    vst1.u8     {d5}, [r2], r3
+    vld1.u8     {d7}, [r0], r1
+    vst1.u8     {d6}, [r2], r3
+    vst1.u8     {d7}, [r2], r3
+
+    mov     pc, lr
+
+    ENDP  ; |vp8_copy_mem8x8_neon|
+
+    END
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/copymem_neon.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-void vp8_copy_mem8x4_neon(
-        unsigned char *src,
-        int src_stride,
-        unsigned char *dst,
-        int dst_stride) {
-    uint8x8_t vtmp;
-    int r;
-
-    for (r = 0; r < 4; r++) {
-        vtmp = vld1_u8(src);
-        vst1_u8(dst, vtmp);
-        src += src_stride;
-        dst += dst_stride;
-    }
-}
-
-void vp8_copy_mem8x8_neon(
-        unsigned char *src,
-        int src_stride,
-        unsigned char *dst,
-        int dst_stride) {
-    uint8x8_t vtmp;
-    int r;
-
-    for (r = 0; r < 8; r++) {
-        vtmp = vld1_u8(src);
-        vst1_u8(dst, vtmp);
-        src += src_stride;
-        dst += dst_stride;
-    }
-}
-
-void vp8_copy_mem16x16_neon(
-        unsigned char *src,
-        int src_stride,
-        unsigned char *dst,
-        int dst_stride) {
-    int r;
-    uint8x16_t qtmp;
-
-    for (r = 0; r < 16; r++) {
-        qtmp = vld1q_u8(src);
-        vst1q_u8(dst, qtmp);
-        src += src_stride;
-        dst += dst_stride;
-    }
-}
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.asm
@@ -0,0 +1,54 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license and patent
+;  grant that can be found in the LICENSE file in the root of the source
+;  tree. All contributing project authors may be found in the AUTHORS
+;  file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_dc_only_idct_add_neon|
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+
+;void vp8_dc_only_idct_add_c(short input_dc, unsigned char *pred_ptr,
+;                            int pred_stride, unsigned char *dst_ptr,
+;                            int dst_stride)
+
+; r0  input_dc
+; r1  pred_ptr
+; r2  pred_stride
+; r3  dst_ptr
+; sp  dst_stride
+
+|vp8_dc_only_idct_add_neon| PROC
+    add             r0, r0, #4
+    asr             r0, r0, #3
+    ldr             r12, [sp]
+    vdup.16         q0, r0
+
+    vld1.32         {d2[0]}, [r1], r2
+    vld1.32         {d2[1]}, [r1], r2
+    vld1.32         {d4[0]}, [r1], r2
+    vld1.32         {d4[1]}, [r1]
+
+    vaddw.u8        q1, q0, d2
+    vaddw.u8        q2, q0, d4
+
+    vqmovun.s16     d2, q1
+    vqmovun.s16     d4, q2
+
+    vst1.32         {d2[0]}, [r3], r12
+    vst1.32         {d2[1]}, [r3], r12
+    vst1.32         {d4[0]}, [r3], r12
+    vst1.32         {d4[1]}, [r3]
+
+    bx              lr
+
+    ENDP
+
+    END
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-void vp8_dc_only_idct_add_neon(
-        int16_t input_dc,
-        unsigned char *pred_ptr,
-        int pred_stride,
-        unsigned char *dst_ptr,
-        int dst_stride) {
-    int i;
-    uint16_t a1 = ((input_dc + 4) >> 3);
-    uint32x2_t d2u32 = vdup_n_u32(0);
-    uint8x8_t d2u8;
-    uint16x8_t q1u16;
-    uint16x8_t qAdd;
-
-    qAdd = vdupq_n_u16(a1);
-
-    for (i = 0; i < 2; i++) {
-        d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 0);
-        pred_ptr += pred_stride;
-        d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 1);
-        pred_ptr += pred_stride;
-
-        q1u16 = vaddw_u8(qAdd, vreinterpret_u8_u32(d2u32));
-        d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
-
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 0);
-        dst_ptr += dst_stride;
-        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 1);
-        dst_ptr += dst_stride;
-    }
-}
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/dequant_idct_neon.asm
@@ -0,0 +1,131 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_dequant_idct_add_neon|
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+;void vp8_dequant_idct_add_neon(short *input, short *dq,
+;                           unsigned char *dest, int stride)
+; r0    short *input,
+; r1    short *dq,
+; r2    unsigned char *dest
+; r3    int stride
+
+|vp8_dequant_idct_add_neon| PROC
+    vld1.16         {q3, q4}, [r0]
+    vld1.16         {q5, q6}, [r1]
+
+    add             r1, r2, r3              ; r1 = dest + stride
+    lsl             r3, #1                  ; 2x stride
+
+    vld1.32         {d14[0]}, [r2], r3
+    vld1.32         {d14[1]}, [r1], r3
+    vld1.32         {d15[0]}, [r2]
+    vld1.32         {d15[1]}, [r1]
+
+    adr             r12, cospi8sqrt2minus1  ; pointer to the first constant
+
+    vmul.i16        q1, q3, q5              ;input for short_idct4x4llm_neon
+    vmul.i16        q2, q4, q6
+
+;|short_idct4x4llm_neon| PROC
+    vld1.16         {d0}, [r12]
+    vswp            d3, d4                  ;q2(vp[4] vp[12])
+
+    vqdmulh.s16     q3, q2, d0[2]
+    vqdmulh.s16     q4, q2, d0[0]
+
+    vqadd.s16       d12, d2, d3             ;a1
+    vqsub.s16       d13, d2, d3             ;b1
+
+    vshr.s16        q3, q3, #1
+    vshr.s16        q4, q4, #1
+
+    vqadd.s16       q3, q3, q2
+    vqadd.s16       q4, q4, q2
+
+    vqsub.s16       d10, d6, d9             ;c1
+    vqadd.s16       d11, d7, d8             ;d1
+
+    vqadd.s16       d2, d12, d11
+    vqadd.s16       d3, d13, d10
+    vqsub.s16       d4, d13, d10
+    vqsub.s16       d5, d12, d11
+
+    vtrn.32         d2, d4
+    vtrn.32         d3, d5
+    vtrn.16         d2, d3
+    vtrn.16         d4, d5
+
+; memset(input, 0, 32) -- 32 bytes
+    vmov.i16        q14, #0
+
+    vswp            d3, d4
+    vqdmulh.s16     q3, q2, d0[2]
+    vqdmulh.s16     q4, q2, d0[0]
+
+    vqadd.s16       d12, d2, d3             ;a1
+    vqsub.s16       d13, d2, d3             ;b1
+
+    vmov            q15, q14
+
+    vshr.s16        q3, q3, #1
+    vshr.s16        q4, q4, #1
+
+    vqadd.s16       q3, q3, q2
+    vqadd.s16       q4, q4, q2
+
+    vqsub.s16       d10, d6, d9             ;c1
+    vqadd.s16       d11, d7, d8             ;d1
+
+    vqadd.s16       d2, d12, d11
+    vqadd.s16       d3, d13, d10
+    vqsub.s16       d4, d13, d10
+    vqsub.s16       d5, d12, d11
+
+    vst1.16         {q14, q15}, [r0]
+
+    vrshr.s16       d2, d2, #3
+    vrshr.s16       d3, d3, #3
+    vrshr.s16       d4, d4, #3
+    vrshr.s16       d5, d5, #3
+
+    vtrn.32         d2, d4
+    vtrn.32         d3, d5
+    vtrn.16         d2, d3
+    vtrn.16         d4, d5
+
+    vaddw.u8        q1, q1, d14
+    vaddw.u8        q2, q2, d15
+
+    sub             r2, r2, r3
+    sub             r1, r1, r3
+
+    vqmovun.s16     d0, q1
+    vqmovun.s16     d1, q2
+
+    vst1.32         {d0[0]}, [r2], r3
+    vst1.32         {d0[1]}, [r1], r3
+    vst1.32         {d1[0]}, [r2]
+    vst1.32         {d1[1]}, [r1]
+
+    bx             lr
+
+    ENDP           ; |vp8_dequant_idct_add_neon|
+
+; Constant Pool
+cospi8sqrt2minus1 DCD 0x4e7b4e7b
+sinpi8sqrt2       DCD 0x8a8c8a8c
+
+    END
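The constant pool duplicates each 16-bit IDCT constant into both halfwords of a word, so a single load of d0 provides per-lane operands for vqdmulh. Numerically, 0x4e7b = 20091, roughly (cos(pi/8)*sqrt(2) - 1) * 2^16, and 0x8a8c = 35468, roughly sin(pi/8)*sqrt(2) * 2^16. In scalar terms the two multiplies are as sketched below (ignoring the NEON saturation details):

/* Scalar form of the VP8 IDCT constant multiplies:
 * 20091 = round((cos(pi/8)*sqrt(2) - 1) * 65536)
 * 35468 = round( sin(pi/8)*sqrt(2)      * 65536) */
static int mul_cospi8sqrt2minus1(int x) { return (x * 20091) >> 16; }
static int mul_sinpi8sqrt2(int x)       { return (x * 35468) >> 16; }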
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/dequant_idct_neon.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-static const int16_t cospi8sqrt2minus1 = 20091;
-static const int16_t sinpi8sqrt2       = 35468;
-
-void vp8_dequant_idct_add_neon(
-        int16_t *input,
-        int16_t *dq,
-        unsigned char *dst,
-        int stride) {
-    unsigned char *dst0;
-    int32x2_t d14, d15;
-    int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
-    int16x8_t q1, q2, q3, q4, q5, q6;
-    int16x8_t qEmpty = vdupq_n_s16(0);
-    int32x2x2_t d2tmp0, d2tmp1;
-    int16x4x2_t d2tmp2, d2tmp3;
-
-    d14 = d15 = vdup_n_s32(0);
-
-    // load input
-    q3 = vld1q_s16(input);
-    vst1q_s16(input, qEmpty);
-    input += 8;
-    q4 = vld1q_s16(input);
-    vst1q_s16(input, qEmpty);
-
-    // load dq
-    q5 = vld1q_s16(dq);
-    dq += 8;
-    q6 = vld1q_s16(dq);
-
-    // load src from dst
-    dst0 = dst;
-    d14 = vld1_lane_s32((const int32_t *)dst0, d14, 0);
-    dst0 += stride;
-    d14 = vld1_lane_s32((const int32_t *)dst0, d14, 1);
-    dst0 += stride;
-    d15 = vld1_lane_s32((const int32_t *)dst0, d15, 0);
-    dst0 += stride;
-    d15 = vld1_lane_s32((const int32_t *)dst0, d15, 1);
-
-    q1 = vreinterpretq_s16_u16(vmulq_u16(vreinterpretq_u16_s16(q3),
-                                         vreinterpretq_u16_s16(q5)));
-    q2 = vreinterpretq_s16_u16(vmulq_u16(vreinterpretq_u16_s16(q4),
-                                         vreinterpretq_u16_s16(q6)));
-
-    d12 = vqadd_s16(vget_low_s16(q1), vget_low_s16(q2));
-    d13 = vqsub_s16(vget_low_s16(q1), vget_low_s16(q2));
-
-    q2 = vcombine_s16(vget_high_s16(q1), vget_high_s16(q2));
-
-    q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);
-    q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);
-
-    q3 = vshrq_n_s16(q3, 1);
-    q4 = vshrq_n_s16(q4, 1);
-
-    q3 = vqaddq_s16(q3, q2);
-    q4 = vqaddq_s16(q4, q2);
-
-    d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
-    d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));
-
-    d2 = vqadd_s16(d12, d11);
-    d3 = vqadd_s16(d13, d10);
-    d4 = vqsub_s16(d13, d10);
-    d5 = vqsub_s16(d12, d11);
-
-    d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
-    d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
-    d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]),
-                      vreinterpret_s16_s32(d2tmp1.val[0]));
-    d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]),
-                      vreinterpret_s16_s32(d2tmp1.val[1]));
-
-    // loop 2
-    q2 = vcombine_s16(d2tmp2.val[1], d2tmp3.val[1]);
-
-    q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);
-    q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);
-
-    d12 = vqadd_s16(d2tmp2.val[0], d2tmp3.val[0]);
-    d13 = vqsub_s16(d2tmp2.val[0], d2tmp3.val[0]);
-
-    q3 = vshrq_n_s16(q3, 1);
-    q4 = vshrq_n_s16(q4, 1);
-
-    q3 = vqaddq_s16(q3, q2);
-    q4 = vqaddq_s16(q4, q2);
-
-    d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
-    d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));
-
-    d2 = vqadd_s16(d12, d11);
-    d3 = vqadd_s16(d13, d10);
-    d4 = vqsub_s16(d13, d10);
-    d5 = vqsub_s16(d12, d11);
-
-    d2 = vrshr_n_s16(d2, 3);
-    d3 = vrshr_n_s16(d3, 3);
-    d4 = vrshr_n_s16(d4, 3);
-    d5 = vrshr_n_s16(d5, 3);
-
-    d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
-    d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
-    d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]),
-                      vreinterpret_s16_s32(d2tmp1.val[0]));
-    d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]),
-                      vreinterpret_s16_s32(d2tmp1.val[1]));
-
-    q1 = vcombine_s16(d2tmp2.val[0], d2tmp2.val[1]);
-    q2 = vcombine_s16(d2tmp3.val[0], d2tmp3.val[1]);
-
-    q1 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q1),
-                                        vreinterpret_u8_s32(d14)));
-    q2 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2),
-                                        vreinterpret_u8_s32(d15)));
-
-    d14 = vreinterpret_s32_u8(vqmovun_s16(q1));
-    d15 = vreinterpret_s32_u8(vqmovun_s16(q2));
-
-    dst0 = dst;
-    vst1_lane_s32((int32_t *)dst0, d14, 0);
-    dst0 += stride;
-    vst1_lane_s32((int32_t *)dst0, d14, 1);
-    dst0 += stride;
-    vst1_lane_s32((int32_t *)dst0, d15, 0);
-    dst0 += stride;
-    vst1_lane_s32((int32_t *)dst0, d15, 1);
-    return;
-}
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/dequantizeb_neon.asm
@@ -0,0 +1,34 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_dequantize_b_loop_neon|
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+; r0    short *Q,
+; r1    short *DQC
+; r2    short *DQ
+|vp8_dequantize_b_loop_neon| PROC
+    vld1.16         {q0, q1}, [r0]
+    vld1.16         {q2, q3}, [r1]
+
+    vmul.i16        q4, q0, q2
+    vmul.i16        q5, q1, q3
+
+    vst1.16         {q4, q5}, [r2]
+
+    bx             lr
+
+    ENDP
+
+    END
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/dequantizeb_neon.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-#include "vp8/common/blockd.h"
-
-void vp8_dequantize_b_neon(BLOCKD *d, short *DQC) {
-    int16x8x2_t qQ, qDQC, qDQ;
-
-    qQ   = vld2q_s16(d->qcoeff);
-    qDQC = vld2q_s16(DQC);
-
-    qDQ.val[0] = vmulq_s16(qQ.val[0], qDQC.val[0]);
-    qDQ.val[1] = vmulq_s16(qQ.val[1], qDQC.val[1]);
-
-    vst2q_s16(d->dqcoeff, qDQ);
-}
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.asm
@@ -0,0 +1,79 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license and patent
+;  grant that can be found in the LICENSE file in the root of the source
+;  tree. All contributing project authors may be found in the AUTHORS
+;  file in the root of the source tree.
+;
+
+
+    EXPORT  |idct_dequant_0_2x_neon|
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+;void idct_dequant_0_2x_neon(short *q, short dq,
+;                            unsigned char *dst, int stride);
+; r0   *q
+; r1   dq
+; r2   *dst
+; r3   stride
+|idct_dequant_0_2x_neon| PROC
+    push            {r4, r5}
+
+    add             r12, r2, #4
+    vld1.32         {d2[0]}, [r2], r3
+    vld1.32         {d8[0]}, [r12], r3
+    vld1.32         {d2[1]}, [r2], r3
+    vld1.32         {d8[1]}, [r12], r3
+    vld1.32         {d4[0]}, [r2], r3
+    vld1.32         {d10[0]}, [r12], r3
+    vld1.32         {d4[1]}, [r2], r3
+    vld1.32         {d10[1]}, [r12], r3
+
+    ldrh            r12, [r0]               ; lo q
+    ldrh            r4, [r0, #32]           ; hi q
+    mov             r5, #0
+    strh            r5, [r0]
+    strh            r5, [r0, #32]
+
+    sxth            r12, r12                ; lo
+    mul             r0, r12, r1
+    add             r0, r0, #4
+    asr             r0, r0, #3
+    vdup.16         q0, r0
+    sxth            r4, r4                  ; hi
+    mul             r0, r4, r1
+    add             r0, r0, #4
+    asr             r0, r0, #3
+    vdup.16         q3, r0
+
+    vaddw.u8        q1, q0, d2              ; lo
+    vaddw.u8        q2, q0, d4
+    vaddw.u8        q4, q3, d8              ; hi
+    vaddw.u8        q5, q3, d10
+
+    sub             r2, r2, r3, lsl #2      ; dst - 4*stride
+    add             r0, r2, #4
+
+    vqmovun.s16     d2, q1                  ; lo
+    vqmovun.s16     d4, q2
+    vqmovun.s16     d8, q4                  ; hi
+    vqmovun.s16     d10, q5
+
+    vst1.32         {d2[0]}, [r2], r3       ; lo
+    vst1.32         {d8[0]}, [r0], r3       ; hi
+    vst1.32         {d2[1]}, [r2], r3
+    vst1.32         {d8[1]}, [r0], r3
+    vst1.32         {d4[0]}, [r2], r3
+    vst1.32         {d10[0]}, [r0], r3
+    vst1.32         {d4[1]}, [r2]
+    vst1.32         {d10[1]}, [r0]
+
+    pop             {r4, r5}
+    bx              lr
+
+    ENDP            ; |idct_dequant_0_2x_neon|
+    END
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-void idct_dequant_0_2x_neon(
-        int16_t *q,
-        int16_t dq,
-        unsigned char *dst,
-        int stride) {
-    unsigned char *dst0;
-    int i, a0, a1;
-    int16x8x2_t q2Add;
-    int32x2_t d2s32, d4s32;
-    uint8x8_t d2u8, d4u8;
-    uint16x8_t q1u16, q2u16;
-
-    a0 = ((q[0] * dq) + 4) >> 3;
-    a1 = ((q[16] * dq) + 4) >> 3;
-    q[0] = q[16] = 0;
-    q2Add.val[0] = vdupq_n_s16((int16_t)a0);
-    q2Add.val[1] = vdupq_n_s16((int16_t)a1);
-
-    for (i = 0; i < 2; i++, dst += 4) {
-        dst0 = dst;
-        d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 0);
-        dst0 += stride;
-        d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 1);
-        dst0 += stride;
-        d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 0);
-        dst0 += stride;
-        d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 1);
-
-        q1u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
-                         vreinterpret_u8_s32(d2s32));
-        q2u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
-                         vreinterpret_u8_s32(d4s32));
-
-        d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
-        d4u8 = vqmovun_s16(vreinterpretq_s16_u16(q2u16));
-
-        d2s32 = vreinterpret_s32_u8(d2u8);
-        d4s32 = vreinterpret_s32_u8(d4u8);
-
-        dst0 = dst;
-        vst1_lane_s32((int32_t *)dst0, d2s32, 0);
-        dst0 += stride;
-        vst1_lane_s32((int32_t *)dst0, d2s32, 1);
-        dst0 += stride;
-        vst1_lane_s32((int32_t *)dst0, d4s32, 0);
-        dst0 += stride;
-        vst1_lane_s32((int32_t *)dst0, d4s32, 1);
-    }
-    return;
-}
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.asm
@@ -0,0 +1,196 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |idct_dequant_full_2x_neon|
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+;void idct_dequant_full_2x_neon(short *q, short *dq,
+;                               unsigned char *dst, int stride);
+; r0    *q,
+; r1    *dq,
+; r2    *dst
+; r3    stride
+|idct_dequant_full_2x_neon| PROC
+    vld1.16         {q0, q1}, [r1]          ; dq (same l/r)
+    vld1.16         {q2, q3}, [r0]          ; l q
+    add             r0, r0, #32
+    vld1.16         {q4, q5}, [r0]          ; r q
+    add             r12, r2, #4
+
+    ; interleave the predictors
+    vld1.32         {d28[0]}, [r2],  r3     ; l pre
+    vld1.32         {d28[1]}, [r12], r3     ; r pre
+    vld1.32         {d29[0]}, [r2],  r3
+    vld1.32         {d29[1]}, [r12], r3
+    vld1.32         {d30[0]}, [r2],  r3
+    vld1.32         {d30[1]}, [r12], r3
+    vld1.32         {d31[0]}, [r2],  r3
+    vld1.32         {d31[1]}, [r12]
+
+    adr             r1, cospi8sqrt2minus1   ; pointer to the first constant
+
+    ; dequant: q[i] = q[i] * dq[i]
+    vmul.i16        q2, q2, q0
+    vmul.i16        q3, q3, q1
+    vmul.i16        q4, q4, q0
+    vmul.i16        q5, q5, q1
+
+    vld1.16         {d0}, [r1]
+
+    ; q2: l0r0  q3: l8r8
+    ; q4: l4r4  q5: l12r12
+    vswp            d5, d8
+    vswp            d7, d10
+
+    ; _CONSTANTS_ * 4,12 >> 16
+    ; q6:  4 * sinpi : c1/temp1
+    ; q7: 12 * sinpi : d1/temp2
+    ; q8:  4 * cospi
+    ; q9: 12 * cospi
+    vqdmulh.s16     q6, q4, d0[2]           ; sinpi8sqrt2
+    vqdmulh.s16     q7, q5, d0[2]
+    vqdmulh.s16     q8, q4, d0[0]           ; cospi8sqrt2minus1
+    vqdmulh.s16     q9, q5, d0[0]
+
+    vqadd.s16       q10, q2, q3             ; a1 = 0 + 8
+    vqsub.s16       q11, q2, q3             ; b1 = 0 - 8
+
+    ; vqdmulh only accepts signed values. This was a problem because
+    ; our constant had the high bit set and was treated as a negative value.
+    ; vqdmulh also doubles the value before shifting by 16, which we need
+    ; to compensate for. In the case of sinpi8sqrt2 the lowest bit is 0,
+    ; so we can pre-shift the constant without losing precision. This
+    ; avoids a second shift afterward and sidesteps the sign issue.
+    ; For cospi8sqrt2minus1 the lowest bit is 1, so pre-shifting it
+    ; would lose precision.
+    vshr.s16        q8, q8, #1
+    vshr.s16        q9, q9, #1
+
+    ; q4:  4 +  4 * cospi : d1/temp1
+    ; q5: 12 + 12 * cospi : c1/temp2
+    vqadd.s16       q4, q4, q8
+    vqadd.s16       q5, q5, q9
+
+    ; c1 = temp1 - temp2
+    ; d1 = temp1 + temp2
+    vqsub.s16       q2, q6, q5
+    vqadd.s16       q3, q4, q7
+
+    ; [0]: a1+d1
+    ; [1]: b1+c1
+    ; [2]: b1-c1
+    ; [3]: a1-d1
+    vqadd.s16       q4, q10, q3
+    vqadd.s16       q5, q11, q2
+    vqsub.s16       q6, q11, q2
+    vqsub.s16       q7, q10, q3
+
+    ; rotate
+    vtrn.32         q4, q6
+    vtrn.32         q5, q7
+    vtrn.16         q4, q5
+    vtrn.16         q6, q7
+    ; idct loop 2
+    ; q4: l 0, 4, 8,12 r 0, 4, 8,12
+    ; q5: l 1, 5, 9,13 r 1, 5, 9,13
+    ; q6: l 2, 6,10,14 r 2, 6,10,14
+    ; q7: l 3, 7,11,15 r 3, 7,11,15
+
+    ; q8:  1 * sinpi : c1/temp1
+    ; q9:  3 * sinpi : d1/temp2
+    ; q10: 1 * cospi
+    ; q11: 3 * cospi
+    vqdmulh.s16     q8, q5, d0[2]           ; sinpi8sqrt2
+    vqdmulh.s16     q9, q7, d0[2]
+    vqdmulh.s16     q10, q5, d0[0]          ; cospi8sqrt2minus1
+    vqdmulh.s16     q11, q7, d0[0]
+
+    vqadd.s16       q2, q4, q6             ; a1 = 0 + 2
+    vqsub.s16       q3, q4, q6             ; b1 = 0 - 2
+
+    ; see note on shifting above
+    vshr.s16        q10, q10, #1
+    vshr.s16        q11, q11, #1
+
+    ; q10: 1 + 1 * cospi : d1/temp1
+    ; q11: 3 + 3 * cospi : c1/temp2
+    vqadd.s16       q10, q5, q10
+    vqadd.s16       q11, q7, q11
+
+    ; q8: c1 = temp1 - temp2
+    ; q9: d1 = temp1 + temp2
+    vqsub.s16       q8, q8, q11
+    vqadd.s16       q9, q10, q9
+
+    ; a1+d1
+    ; b1+c1
+    ; b1-c1
+    ; a1-d1
+    vqadd.s16       q4, q2, q9
+    vqadd.s16       q5, q3, q8
+    vqsub.s16       q6, q3, q8
+    vqsub.s16       q7, q2, q9
+
+    ; +4 >> 3 (rounding)
+    vrshr.s16       q4, q4, #3              ; lo
+    vrshr.s16       q5, q5, #3
+    vrshr.s16       q6, q6, #3              ; hi
+    vrshr.s16       q7, q7, #3
+
+    vtrn.32         q4, q6
+    vtrn.32         q5, q7
+    vtrn.16         q4, q5
+    vtrn.16         q6, q7
+
+    ; adding pre
+    ; input is still packed. pre was read interleaved
+    vaddw.u8        q4, q4, d28
+    vaddw.u8        q5, q5, d29
+    vaddw.u8        q6, q6, d30
+    vaddw.u8        q7, q7, d31
+
+    vmov.i16        q14, #0
+    vmov            q15, q14
+    vst1.16         {q14, q15}, [r0]        ; write over high input
+    sub             r0, r0, #32
+    vst1.16         {q14, q15}, [r0]        ; write over low input
+
+    sub             r2, r2, r3, lsl #2      ; dst - 4*stride
+    add             r1, r2, #4              ; hi
+
+    ;saturate and narrow
+    vqmovun.s16     d0, q4                  ; lo
+    vqmovun.s16     d1, q5
+    vqmovun.s16     d2, q6                  ; hi
+    vqmovun.s16     d3, q7
+
+    vst1.32         {d0[0]}, [r2], r3       ; lo
+    vst1.32         {d0[1]}, [r1], r3       ; hi
+    vst1.32         {d1[0]}, [r2], r3
+    vst1.32         {d1[1]}, [r1], r3
+    vst1.32         {d2[0]}, [r2], r3
+    vst1.32         {d2[1]}, [r1], r3
+    vst1.32         {d3[0]}, [r2]
+    vst1.32         {d3[1]}, [r1]
+
+    bx             lr
+
+    ENDP           ; |idct_dequant_full_2x_neon|
+
+; Constant Pool
+cospi8sqrt2minus1 DCD 0x4e7b
+; because the lowest bit in 0x8a8c is 0, we can pre-shift this
+sinpi8sqrt2       DCD 0x4546
+
+    END
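For reference, the pre-shift trick described in the comments above can be sketched in scalar C. vqdmulh.s16 returns the high half of a doubling multiply, i.e. (2 * a * b) >> 16, so the even constant 35468 (0x8a8c) is stored pre-halved as 0x4546 and used directly, while the odd constant 20091 (0x4e7b) is kept as-is and the doubled product is shifted right by one afterward. This is a minimal illustrative sketch, not part of the patch; the helper names are invented and saturation is omitted.

#include <stdint.h>

/* models vqdmulh.s16: (2 * a * b) >> 16, saturation omitted for brevity */
static int16_t vqdmulh_model(int16_t a, int16_t b) {
    return (int16_t)(((int32_t)a * b) >> 15);
}

/* (x * 35468) >> 16: 35468 is even, so the halved constant 17734 is exact */
static int16_t mul_sinpi8sqrt2(int16_t x) {
    return vqdmulh_model(x, 17734);
}

/* (x * 20091) >> 16: 20091 is odd, so shift the doubled result instead */
static int16_t mul_cospi8sqrt2minus1(int16_t x) {
    return (int16_t)(vqdmulh_model(x, 20091) >> 1);
}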
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-static const int16_t cospi8sqrt2minus1 = 20091;
-static const int16_t sinpi8sqrt2       = 17734;
-// because the lowest bit in 0x8a8c is 0, we can pre-shift this
-
-void idct_dequant_full_2x_neon(
-        int16_t *q,
-        int16_t *dq,
-        unsigned char *dst,
-        int stride) {
-    unsigned char *dst0, *dst1;
-    int32x2_t d28, d29, d30, d31;
-    int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;
-    int16x8_t qEmpty = vdupq_n_s16(0);
-    int32x4x2_t q2tmp0, q2tmp1;
-    int16x8x2_t q2tmp2, q2tmp3;
-    int16x4_t dLow0, dLow1, dHigh0, dHigh1;
-
-    d28 = d29 = d30 = d31 = vdup_n_s32(0);
-
-    // load dq
-    q0 = vld1q_s16(dq);
-    dq += 8;
-    q1 = vld1q_s16(dq);
-
-    // load q
-    q2 = vld1q_s16(q);
-    vst1q_s16(q, qEmpty);
-    q += 8;
-    q3 = vld1q_s16(q);
-    vst1q_s16(q, qEmpty);
-    q += 8;
-    q4 = vld1q_s16(q);
-    vst1q_s16(q, qEmpty);
-    q += 8;
-    q5 = vld1q_s16(q);
-    vst1q_s16(q, qEmpty);
-
-    // load src from dst
-    dst0 = dst;
-    dst1 = dst + 4;
-    d28 = vld1_lane_s32((const int32_t *)dst0, d28, 0);
-    dst0 += stride;
-    d28 = vld1_lane_s32((const int32_t *)dst1, d28, 1);
-    dst1 += stride;
-    d29 = vld1_lane_s32((const int32_t *)dst0, d29, 0);
-    dst0 += stride;
-    d29 = vld1_lane_s32((const int32_t *)dst1, d29, 1);
-    dst1 += stride;
-
-    d30 = vld1_lane_s32((const int32_t *)dst0, d30, 0);
-    dst0 += stride;
-    d30 = vld1_lane_s32((const int32_t *)dst1, d30, 1);
-    dst1 += stride;
-    d31 = vld1_lane_s32((const int32_t *)dst0, d31, 0);
-    d31 = vld1_lane_s32((const int32_t *)dst1, d31, 1);
-
-    q2 = vmulq_s16(q2, q0);
-    q3 = vmulq_s16(q3, q1);
-    q4 = vmulq_s16(q4, q0);
-    q5 = vmulq_s16(q5, q1);
-
-    // vswp
-    dLow0 = vget_low_s16(q2);
-    dHigh0 = vget_high_s16(q2);
-    dLow1 = vget_low_s16(q4);
-    dHigh1 = vget_high_s16(q4);
-    q2 = vcombine_s16(dLow0, dLow1);
-    q4 = vcombine_s16(dHigh0, dHigh1);
-
-    dLow0 = vget_low_s16(q3);
-    dHigh0 = vget_high_s16(q3);
-    dLow1 = vget_low_s16(q5);
-    dHigh1 = vget_high_s16(q5);
-    q3 = vcombine_s16(dLow0, dLow1);
-    q5 = vcombine_s16(dHigh0, dHigh1);
-
-    q6 = vqdmulhq_n_s16(q4, sinpi8sqrt2);
-    q7 = vqdmulhq_n_s16(q5, sinpi8sqrt2);
-    q8 = vqdmulhq_n_s16(q4, cospi8sqrt2minus1);
-    q9 = vqdmulhq_n_s16(q5, cospi8sqrt2minus1);
-
-    q10 = vqaddq_s16(q2, q3);
-    q11 = vqsubq_s16(q2, q3);
-
-    q8 = vshrq_n_s16(q8, 1);
-    q9 = vshrq_n_s16(q9, 1);
-
-    q4 = vqaddq_s16(q4, q8);
-    q5 = vqaddq_s16(q5, q9);
-
-    q2 = vqsubq_s16(q6, q5);
-    q3 = vqaddq_s16(q7, q4);
-
-    q4 = vqaddq_s16(q10, q3);
-    q5 = vqaddq_s16(q11, q2);
-    q6 = vqsubq_s16(q11, q2);
-    q7 = vqsubq_s16(q10, q3);
-
-    q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
-    q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
-    q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
-                       vreinterpretq_s16_s32(q2tmp1.val[0]));
-    q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
-                       vreinterpretq_s16_s32(q2tmp1.val[1]));
-
-    // loop 2
-    q8  = vqdmulhq_n_s16(q2tmp2.val[1], sinpi8sqrt2);
-    q9  = vqdmulhq_n_s16(q2tmp3.val[1], sinpi8sqrt2);
-    q10 = vqdmulhq_n_s16(q2tmp2.val[1], cospi8sqrt2minus1);
-    q11 = vqdmulhq_n_s16(q2tmp3.val[1], cospi8sqrt2minus1);
-
-    q2 = vqaddq_s16(q2tmp2.val[0], q2tmp3.val[0]);
-    q3 = vqsubq_s16(q2tmp2.val[0], q2tmp3.val[0]);
-
-    q10 = vshrq_n_s16(q10, 1);
-    q11 = vshrq_n_s16(q11, 1);
-
-    q10 = vqaddq_s16(q2tmp2.val[1], q10);
-    q11 = vqaddq_s16(q2tmp3.val[1], q11);
-
-    q8 = vqsubq_s16(q8, q11);
-    q9 = vqaddq_s16(q9, q10);
-
-    q4 = vqaddq_s16(q2, q9);
-    q5 = vqaddq_s16(q3, q8);
-    q6 = vqsubq_s16(q3, q8);
-    q7 = vqsubq_s16(q2, q9);
-
-    q4 = vrshrq_n_s16(q4, 3);
-    q5 = vrshrq_n_s16(q5, 3);
-    q6 = vrshrq_n_s16(q6, 3);
-    q7 = vrshrq_n_s16(q7, 3);
-
-    q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
-    q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
-    q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
-                       vreinterpretq_s16_s32(q2tmp1.val[0]));
-    q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
-                       vreinterpretq_s16_s32(q2tmp1.val[1]));
-
-    q4 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[0]),
-                                          vreinterpret_u8_s32(d28)));
-    q5 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[1]),
-                                          vreinterpret_u8_s32(d29)));
-    q6 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[0]),
-                                          vreinterpret_u8_s32(d30)));
-    q7 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[1]),
-                                          vreinterpret_u8_s32(d31)));
-
-    d28 = vreinterpret_s32_u8(vqmovun_s16(q4));
-    d29 = vreinterpret_s32_u8(vqmovun_s16(q5));
-    d30 = vreinterpret_s32_u8(vqmovun_s16(q6));
-    d31 = vreinterpret_s32_u8(vqmovun_s16(q7));
-
-    dst0 = dst;
-    dst1 = dst + 4;
-    vst1_lane_s32((int32_t *)dst0, d28, 0);
-    dst0 += stride;
-    vst1_lane_s32((int32_t *)dst1, d28, 1);
-    dst1 += stride;
-    vst1_lane_s32((int32_t *)dst0, d29, 0);
-    dst0 += stride;
-    vst1_lane_s32((int32_t *)dst1, d29, 1);
-    dst1 += stride;
-
-    vst1_lane_s32((int32_t *)dst0, d30, 0);
-    dst0 += stride;
-    vst1_lane_s32((int32_t *)dst1, d30, 1);
-    dst1 += stride;
-    vst1_lane_s32((int32_t *)dst0, d31, 0);
-    vst1_lane_s32((int32_t *)dst1, d31, 1);
-    return;
-}
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/iwalsh_neon.asm
@@ -0,0 +1,87 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+    EXPORT  |vp8_short_inv_walsh4x4_neon|
+
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA    |.text|, CODE, READONLY  ; name this block of code
+
+;void vp8_short_inv_walsh4x4_neon(short *input, short *mb_dqcoeff)
+|vp8_short_inv_walsh4x4_neon| PROC
+
+    ; read in all four lines of values: d0->d3
+    vld1.i16 {q0-q1}, [r0@128]
+
+    ; first for loop
+    vadd.s16 d4, d0, d3 ;a = [0] + [12]
+    vadd.s16 d6, d1, d2 ;b = [4] + [8]
+    vsub.s16 d5, d0, d3 ;d = [0] - [12]
+    vsub.s16 d7, d1, d2 ;c = [4] - [8]
+
+    vadd.s16 q0, q2, q3 ; a+b d+c
+    vsub.s16 q1, q2, q3 ; a-b d-c
+
+    vtrn.32 d0, d2 ;d0:  0  1  8  9
+                   ;d2:  2  3 10 11
+    vtrn.32 d1, d3 ;d1:  4  5 12 13
+                   ;d3:  6  7 14 15
+
+    vtrn.16 d0, d1 ;d0:  0  4  8 12
+                   ;d1:  1  5  9 13
+    vtrn.16 d2, d3 ;d2:  2  6 10 14
+                   ;d3:  3  7 11 15
+
+    ; second for loop
+
+    vadd.s16 d4, d0, d3 ;a = [0] + [3]
+    vadd.s16 d6, d1, d2 ;b = [1] + [2]
+    vsub.s16 d5, d0, d3 ;d = [0] - [3]
+    vsub.s16 d7, d1, d2 ;c = [1] - [2]
+
+    vmov.i16 q8, #3
+
+    vadd.s16 q0, q2, q3 ; a+b d+c
+    vsub.s16 q1, q2, q3 ; a-b d-c
+
+    vadd.i16 q0, q0, q8 ;e/f += 3
+    vadd.i16 q1, q1, q8 ;g/h += 3
+
+    vshr.s16 q0, q0, #3 ;e/f >> 3
+    vshr.s16 q1, q1, #3 ;g/h >> 3
+
+    mov      r2, #64                ; store stride: 64 bytes = 32 coeffs
+    add      r3, r1, #32            ; second pointer 16 coeffs ahead, so the
+                                    ; interleaved stores hit every 16th dqcoeff
+
+    vst1.i16 d0[0], [r1],r2
+    vst1.i16 d1[0], [r3],r2
+    vst1.i16 d2[0], [r1],r2
+    vst1.i16 d3[0], [r3],r2
+
+    vst1.i16 d0[1], [r1],r2
+    vst1.i16 d1[1], [r3],r2
+    vst1.i16 d2[1], [r1],r2
+    vst1.i16 d3[1], [r3],r2
+
+    vst1.i16 d0[2], [r1],r2
+    vst1.i16 d1[2], [r3],r2
+    vst1.i16 d2[2], [r1],r2
+    vst1.i16 d3[2], [r3],r2
+
+    vst1.i16 d0[3], [r1],r2
+    vst1.i16 d1[3], [r3],r2
+    vst1.i16 d2[3], [r1]
+    vst1.i16 d3[3], [r3]
+
+    bx lr
+    ENDP    ; |vp8_short_inv_walsh4x4_neon|
+
+    END
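The routine above vectorizes the 4x4 inverse Walsh-Hadamard transform. As a reading aid, a scalar sketch of the same butterflies, with the (x + 3) >> 3 rounding and the scatter of each result to every 16th dqcoeff entry that the strided stores implement, might look as follows (illustrative only; the helper name is invented):

#include <stdint.h>

static void inv_walsh4x4_sketch(const int16_t *input, int16_t *mb_dqcoeff) {
    int16_t out[16];
    int i;
    for (i = 0; i < 4; i++) {                   /* first pass: columns */
        int a = input[i] + input[i + 12];
        int b = input[i + 4] + input[i + 8];
        int c = input[i + 4] - input[i + 8];
        int d = input[i] - input[i + 12];
        out[i]      = (int16_t)(a + b);
        out[i + 4]  = (int16_t)(c + d);
        out[i + 8]  = (int16_t)(a - b);
        out[i + 12] = (int16_t)(d - c);
    }
    for (i = 0; i < 4; i++) {                   /* second pass: rows */
        int a = out[4 * i] + out[4 * i + 3];
        int b = out[4 * i + 1] + out[4 * i + 2];
        int c = out[4 * i + 1] - out[4 * i + 2];
        int d = out[4 * i] - out[4 * i + 3];
        mb_dqcoeff[(4 * i + 0) * 16] = (int16_t)((a + b + 3) >> 3);
        mb_dqcoeff[(4 * i + 1) * 16] = (int16_t)((c + d + 3) >> 3);
        mb_dqcoeff[(4 * i + 2) * 16] = (int16_t)((a - b + 3) >> 3);
        mb_dqcoeff[(4 * i + 3) * 16] = (int16_t)((d - c + 3) >> 3);
    }
}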
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/iwalsh_neon.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-void vp8_short_inv_walsh4x4_neon(
-        int16_t *input,
-        int16_t *mb_dqcoeff) {
-    int16x8_t q0s16, q1s16, q2s16, q3s16;
-    int16x4_t d4s16, d5s16, d6s16, d7s16;
-    int16x4x2_t v2tmp0, v2tmp1;
-    int32x2x2_t v2tmp2, v2tmp3;
-    int16x8_t qAdd3;
-
-    q0s16 = vld1q_s16(input);
-    q1s16 = vld1q_s16(input + 8);
-
-    // 1st for loop
-    d4s16 = vadd_s16(vget_low_s16(q0s16), vget_high_s16(q1s16));
-    d6s16 = vadd_s16(vget_high_s16(q0s16), vget_low_s16(q1s16));
-    d5s16 = vsub_s16(vget_low_s16(q0s16), vget_high_s16(q1s16));
-    d7s16 = vsub_s16(vget_high_s16(q0s16), vget_low_s16(q1s16));
-
-    q2s16 = vcombine_s16(d4s16, d5s16);
-    q3s16 = vcombine_s16(d6s16, d7s16);
-
-    q0s16 = vaddq_s16(q2s16, q3s16);
-    q1s16 = vsubq_s16(q2s16, q3s16);
-
-    v2tmp2 = vtrn_s32(vreinterpret_s32_s16(vget_low_s16(q0s16)),
-                      vreinterpret_s32_s16(vget_low_s16(q1s16)));
-    v2tmp3 = vtrn_s32(vreinterpret_s32_s16(vget_high_s16(q0s16)),
-                      vreinterpret_s32_s16(vget_high_s16(q1s16)));
-    v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]),
-                      vreinterpret_s16_s32(v2tmp3.val[0]));
-    v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]),
-                      vreinterpret_s16_s32(v2tmp3.val[1]));
-
-    // 2nd for loop
-    d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]);
-    d6s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]);
-    d5s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]);
-    d7s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]);
-    q2s16 = vcombine_s16(d4s16, d5s16);
-    q3s16 = vcombine_s16(d6s16, d7s16);
-
-    qAdd3 = vdupq_n_s16(3);
-
-    q0s16 = vaddq_s16(q2s16, q3s16);
-    q1s16 = vsubq_s16(q2s16, q3s16);
-
-    q0s16 = vaddq_s16(q0s16, qAdd3);
-    q1s16 = vaddq_s16(q1s16, qAdd3);
-
-    q0s16 = vshrq_n_s16(q0s16, 3);
-    q1s16 = vshrq_n_s16(q1s16, 3);
-
-    // store
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16),  0);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 0);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16),  0);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 0);
-    mb_dqcoeff += 16;
-
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16),  1);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 1);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16),  1);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 1);
-    mb_dqcoeff += 16;
-
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16),  2);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 2);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16),  2);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 2);
-    mb_dqcoeff += 16;
-
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16),  3);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 3);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16),  3);
-    mb_dqcoeff += 16;
-    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 3);
-    mb_dqcoeff += 16;
-    return;
-}
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/loopfilter_neon.asm
@@ -0,0 +1,397 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_loop_filter_horizontal_edge_y_neon|
+    EXPORT  |vp8_loop_filter_horizontal_edge_uv_neon|
+    EXPORT  |vp8_loop_filter_vertical_edge_y_neon|
+    EXPORT  |vp8_loop_filter_vertical_edge_uv_neon|
+    ARM
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+
+; r0    unsigned char *src
+; r1    int pitch
+; r2    unsigned char blimit
+; r3    unsigned char limit
+; sp    unsigned char thresh
+|vp8_loop_filter_horizontal_edge_y_neon| PROC
+    push        {lr}
+    vdup.u8     q0, r2                     ; duplicate blimit
+    vdup.u8     q1, r3                     ; duplicate limit
+    sub         r2, r0, r1, lsl #2         ; move src pointer down by 4 lines
+    ldr         r3, [sp, #4]               ; load thresh
+    add         r12, r2, r1                ; second src pointer, one line below
+    add         r1, r1, r1                 ; double the pitch: r2/r12 alternate lines
+
+    vdup.u8     q2, r3                     ; duplicate thresh
+
+    vld1.u8     {q3}, [r2@128], r1              ; p3
+    vld1.u8     {q4}, [r12@128], r1             ; p2
+    vld1.u8     {q5}, [r2@128], r1              ; p1
+    vld1.u8     {q6}, [r12@128], r1             ; p0
+    vld1.u8     {q7}, [r2@128], r1              ; q0
+    vld1.u8     {q8}, [r12@128], r1             ; q1
+    vld1.u8     {q9}, [r2@128]                  ; q2
+    vld1.u8     {q10}, [r12@128]                ; q3
+
+    sub         r2, r2, r1, lsl #1
+    sub         r12, r12, r1, lsl #1
+
+    bl          vp8_loop_filter_neon
+
+    vst1.u8     {q5}, [r2@128], r1              ; store op1
+    vst1.u8     {q6}, [r12@128], r1             ; store op0
+    vst1.u8     {q7}, [r2@128], r1              ; store oq0
+    vst1.u8     {q8}, [r12@128], r1             ; store oq1
+
+    pop         {pc}
+    ENDP        ; |vp8_loop_filter_horizontal_edge_y_neon|
+
+
+; r0    unsigned char *u,
+; r1    int pitch,
+; r2    unsigned char blimit
+; r3    unsigned char limit
+; sp    unsigned char thresh,
+; sp+4  unsigned char *v
+|vp8_loop_filter_horizontal_edge_uv_neon| PROC
+    push        {lr}
+    vdup.u8     q0, r2                      ; duplicate blimit
+    vdup.u8     q1, r3                      ; duplicate limit
+    ldr         r12, [sp, #4]               ; load thresh
+    ldr         r2, [sp, #8]                ; load v ptr
+    vdup.u8     q2, r12                     ; duplicate thresh
+
+    sub         r3, r0, r1, lsl #2          ; move u pointer down by 4 lines
+    sub         r12, r2, r1, lsl #2         ; move v pointer down by 4 lines
+
+    vld1.u8     {d6}, [r3@64], r1              ; p3
+    vld1.u8     {d7}, [r12@64], r1             ; p3
+    vld1.u8     {d8}, [r3@64], r1              ; p2
+    vld1.u8     {d9}, [r12@64], r1             ; p2
+    vld1.u8     {d10}, [r3@64], r1             ; p1
+    vld1.u8     {d11}, [r12@64], r1            ; p1
+    vld1.u8     {d12}, [r3@64], r1             ; p0
+    vld1.u8     {d13}, [r12@64], r1            ; p0
+    vld1.u8     {d14}, [r3@64], r1             ; q0
+    vld1.u8     {d15}, [r12@64], r1            ; q0
+    vld1.u8     {d16}, [r3@64], r1             ; q1
+    vld1.u8     {d17}, [r12@64], r1            ; q1
+    vld1.u8     {d18}, [r3@64], r1             ; q2
+    vld1.u8     {d19}, [r12@64], r1            ; q2
+    vld1.u8     {d20}, [r3@64]                 ; q3
+    vld1.u8     {d21}, [r12@64]                ; q3
+
+    bl          vp8_loop_filter_neon
+
+    sub         r0, r0, r1, lsl #1
+    sub         r2, r2, r1, lsl #1
+
+    vst1.u8     {d10}, [r0@64], r1             ; store u op1
+    vst1.u8     {d11}, [r2@64], r1             ; store v op1
+    vst1.u8     {d12}, [r0@64], r1             ; store u op0
+    vst1.u8     {d13}, [r2@64], r1             ; store v op0
+    vst1.u8     {d14}, [r0@64], r1             ; store u oq0
+    vst1.u8     {d15}, [r2@64], r1             ; store v oq0
+    vst1.u8     {d16}, [r0@64]                 ; store u oq1
+    vst1.u8     {d17}, [r2@64]                 ; store v oq1
+
+    pop         {pc}
+    ENDP        ; |vp8_loop_filter_horizontal_edge_uv_neon|
+
+; void vp8_loop_filter_vertical_edge_y_neon(unsigned char *src, int pitch,
+;                                           unsigned char blimit,
+;                                           unsigned char limit,
+;                                           unsigned char thresh)
+; r0    unsigned char *src
+; r1    int pitch
+; r2    unsigned char blimit
+; r3    unsigned char limit
+; sp    unsigned char thresh
+
+|vp8_loop_filter_vertical_edge_y_neon| PROC
+    push        {lr}
+    vdup.u8     q0, r2                     ; duplicate blimit
+    vdup.u8     q1, r3                     ; duplicate limit
+    sub         r2, r0, #4                 ; src ptr down by 4 columns
+    add         r1, r1, r1                 ; double the pitch: r2/r12 alternate rows
+    ldr         r3, [sp, #4]               ; load thresh
+    add         r12, r2, r1, asr #1
+
+    vld1.u8     {d6}, [r2], r1
+    vld1.u8     {d8}, [r12], r1
+    vld1.u8     {d10}, [r2], r1
+    vld1.u8     {d12}, [r12], r1
+    vld1.u8     {d14}, [r2], r1
+    vld1.u8     {d16}, [r12], r1
+    vld1.u8     {d18}, [r2], r1
+    vld1.u8     {d20}, [r12], r1
+
+    vld1.u8     {d7}, [r2], r1              ; load second 8-line src data
+    vld1.u8     {d9}, [r12], r1
+    vld1.u8     {d11}, [r2], r1
+    vld1.u8     {d13}, [r12], r1
+    vld1.u8     {d15}, [r2], r1
+    vld1.u8     {d17}, [r12], r1
+    vld1.u8     {d19}, [r2]
+    vld1.u8     {d21}, [r12]
+
+    ;transpose to 8x16 matrix
+    vtrn.32     q3, q7
+    vtrn.32     q4, q8
+    vtrn.32     q5, q9
+    vtrn.32     q6, q10
+
+    vdup.u8     q2, r3                     ; duplicate thresh
+
+    vtrn.16     q3, q5
+    vtrn.16     q4, q6
+    vtrn.16     q7, q9
+    vtrn.16     q8, q10
+
+    vtrn.8      q3, q4
+    vtrn.8      q5, q6
+    vtrn.8      q7, q8
+    vtrn.8      q9, q10
+
+    bl          vp8_loop_filter_neon
+
+    vswp        d12, d11
+    vswp        d16, d13
+
+    sub         r0, r0, #2                 ; dst ptr
+
+    vswp        d14, d12
+    vswp        d16, d15
+
+    add         r12, r0, r1, asr #1
+
+    ;store op1, op0, oq0, oq1
+    vst4.8      {d10[0], d11[0], d12[0], d13[0]}, [r0], r1
+    vst4.8      {d10[1], d11[1], d12[1], d13[1]}, [r12], r1
+    vst4.8      {d10[2], d11[2], d12[2], d13[2]}, [r0], r1
+    vst4.8      {d10[3], d11[3], d12[3], d13[3]}, [r12], r1
+    vst4.8      {d10[4], d11[4], d12[4], d13[4]}, [r0], r1
+    vst4.8      {d10[5], d11[5], d12[5], d13[5]}, [r12], r1
+    vst4.8      {d10[6], d11[6], d12[6], d13[6]}, [r0], r1
+    vst4.8      {d10[7], d11[7], d12[7], d13[7]}, [r12], r1
+
+    vst4.8      {d14[0], d15[0], d16[0], d17[0]}, [r0], r1
+    vst4.8      {d14[1], d15[1], d16[1], d17[1]}, [r12], r1
+    vst4.8      {d14[2], d15[2], d16[2], d17[2]}, [r0], r1
+    vst4.8      {d14[3], d15[3], d16[3], d17[3]}, [r12], r1
+    vst4.8      {d14[4], d15[4], d16[4], d17[4]}, [r0], r1
+    vst4.8      {d14[5], d15[5], d16[5], d17[5]}, [r12], r1
+    vst4.8      {d14[6], d15[6], d16[6], d17[6]}, [r0]
+    vst4.8      {d14[7], d15[7], d16[7], d17[7]}, [r12]
+
+    pop         {pc}
+    ENDP        ; |vp8_loop_filter_vertical_edge_y_neon|
+
+; void vp8_loop_filter_vertical_edge_uv_neon(unsigned char *u, int pitch,
+;                                            unsigned char blimit,
+;                                            unsigned char limit,
+;                                            unsigned char thresh,
+;                                            unsigned char *v)
+; r0    unsigned char *u,
+; r1    int pitch,
+; r2    unsigned char blimit
+; r3    unsigned char limit
+; sp    unsigned char thresh,
+; sp+4  unsigned char *v
+|vp8_loop_filter_vertical_edge_uv_neon| PROC
+    push        {lr}
+    vdup.u8     q0, r2                      ; duplicate blimit
+    sub         r12, r0, #4                 ; move u pointer down by 4 columns
+    ldr         r2, [sp, #8]                ; load v ptr
+    vdup.u8     q1, r3                      ; duplicate limit
+    sub         r3, r2, #4                  ; move v pointer down by 4 columns
+
+    vld1.u8     {d6}, [r12], r1             ;load u data
+    vld1.u8     {d7}, [r3], r1              ;load v data
+    vld1.u8     {d8}, [r12], r1
+    vld1.u8     {d9}, [r3], r1
+    vld1.u8     {d10}, [r12], r1
+    vld1.u8     {d11}, [r3], r1
+    vld1.u8     {d12}, [r12], r1
+    vld1.u8     {d13}, [r3], r1
+    vld1.u8     {d14}, [r12], r1
+    vld1.u8     {d15}, [r3], r1
+    vld1.u8     {d16}, [r12], r1
+    vld1.u8     {d17}, [r3], r1
+    vld1.u8     {d18}, [r12], r1
+    vld1.u8     {d19}, [r3], r1
+    vld1.u8     {d20}, [r12]
+    vld1.u8     {d21}, [r3]
+
+    ldr        r12, [sp, #4]               ; load thresh
+
+    ;transpose to 8x16 matrix
+    vtrn.32     q3, q7
+    vtrn.32     q4, q8
+    vtrn.32     q5, q9
+    vtrn.32     q6, q10
+
+    vdup.u8     q2, r12                     ; duplicate thresh
+
+    vtrn.16     q3, q5
+    vtrn.16     q4, q6
+    vtrn.16     q7, q9
+    vtrn.16     q8, q10
+
+    vtrn.8      q3, q4
+    vtrn.8      q5, q6
+    vtrn.8      q7, q8
+    vtrn.8      q9, q10
+
+    bl          vp8_loop_filter_neon
+
+    vswp        d12, d11
+    vswp        d16, d13
+    vswp        d14, d12
+    vswp        d16, d15
+
+    sub         r0, r0, #2
+    sub         r2, r2, #2
+
+    ;store op1, op0, oq0, oq1
+    vst4.8      {d10[0], d11[0], d12[0], d13[0]}, [r0], r1
+    vst4.8      {d14[0], d15[0], d16[0], d17[0]}, [r2], r1
+    vst4.8      {d10[1], d11[1], d12[1], d13[1]}, [r0], r1
+    vst4.8      {d14[1], d15[1], d16[1], d17[1]}, [r2], r1
+    vst4.8      {d10[2], d11[2], d12[2], d13[2]}, [r0], r1
+    vst4.8      {d14[2], d15[2], d16[2], d17[2]}, [r2], r1
+    vst4.8      {d10[3], d11[3], d12[3], d13[3]}, [r0], r1
+    vst4.8      {d14[3], d15[3], d16[3], d17[3]}, [r2], r1
+    vst4.8      {d10[4], d11[4], d12[4], d13[4]}, [r0], r1
+    vst4.8      {d14[4], d15[4], d16[4], d17[4]}, [r2], r1
+    vst4.8      {d10[5], d11[5], d12[5], d13[5]}, [r0], r1
+    vst4.8      {d14[5], d15[5], d16[5], d17[5]}, [r2], r1
+    vst4.8      {d10[6], d11[6], d12[6], d13[6]}, [r0], r1
+    vst4.8      {d14[6], d15[6], d16[6], d17[6]}, [r2], r1
+    vst4.8      {d10[7], d11[7], d12[7], d13[7]}, [r0]
+    vst4.8      {d14[7], d15[7], d16[7], d17[7]}, [r2]
+
+    pop         {pc}
+    ENDP        ; |vp8_loop_filter_vertical_edge_uv_neon|
+
+; void vp8_loop_filter_neon();
+; This is a helper function for the loopfilters. The individual functions do
+; the necessary load, transpose (if necessary) and store.
+
+; r0-r3 PRESERVE
+; q0    flimit
+; q1    limit
+; q2    thresh
+; q3    p3
+; q4    p2
+; q5    p1
+; q6    p0
+; q7    q0
+; q8    q1
+; q9    q2
+; q10   q3
+|vp8_loop_filter_neon| PROC
+
+    ; vp8_filter_mask
+    vabd.u8     q11, q3, q4                 ; abs(p3 - p2)
+    vabd.u8     q12, q4, q5                 ; abs(p2 - p1)
+    vabd.u8     q13, q5, q6                 ; abs(p1 - p0)
+    vabd.u8     q14, q8, q7                 ; abs(q1 - q0)
+    vabd.u8     q3, q9, q8                  ; abs(q2 - q1)
+    vabd.u8     q4, q10, q9                 ; abs(q3 - q2)
+
+    vmax.u8     q11, q11, q12
+    vmax.u8     q12, q13, q14
+    vmax.u8     q3, q3, q4
+    vmax.u8     q15, q11, q12
+
+    vabd.u8     q9, q6, q7                  ; abs(p0 - q0)
+
+    ; vp8_hevmask
+    vcgt.u8     q13, q13, q2                ; (abs(p1 - p0) > thresh)*-1
+    vcgt.u8     q14, q14, q2                ; (abs(q1 - q0) > thresh)*-1
+    vmax.u8     q15, q15, q3
+
+    vmov.u8     q10, #0x80                   ; 0x80
+
+    vabd.u8     q2, q5, q8                  ; a = abs(p1 - q1)
+    vqadd.u8    q9, q9, q9                  ; b = abs(p0 - q0) * 2
+
+    vcge.u8     q15, q1, q15                ; (max abs diff <= limit)*-1
+
+    ; vp8_filter() function
+    ; convert to signed
+    veor        q7, q7, q10                 ; qs0
+    vshr.u8     q2, q2, #1                  ; a = a / 2
+    veor        q6, q6, q10                 ; ps0
+
+    veor        q5, q5, q10                 ; ps1
+    vqadd.u8    q9, q9, q2                  ; a = b + a
+
+    veor        q8, q8, q10                 ; qs1
+
+    vmov.u8     q10, #3                     ; #3
+
+    vsubl.s8    q2, d14, d12                ; ( qs0 - ps0)
+    vsubl.s8    q11, d15, d13
+
+    vcge.u8     q9, q0, q9                  ; (a <= blimit)*-1, blimit = flimit * 2 + limit
+
+    vmovl.u8    q4, d20
+
+    vqsub.s8    q1, q5, q8                  ; vp8_filter = clamp(ps1-qs1)
+    vorr        q14, q13, q14               ; vp8_hevmask
+
+    vmul.i16    q2, q2, q4                  ; 3 * ( qs0 - ps0)
+    vmul.i16    q11, q11, q4
+
+    vand        q1, q1, q14                 ; vp8_filter &= hev
+    vand        q15, q15, q9                ; vp8_filter_mask
+
+    vaddw.s8    q2, q2, d2
+    vaddw.s8    q11, q11, d3
+
+    vmov.u8     q9, #4                      ; #4
+
+    ; vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
+    vqmovn.s16  d2, q2
+    vqmovn.s16  d3, q11
+    vand        q1, q1, q15                 ; vp8_filter &= mask
+
+    vqadd.s8    q2, q1, q10                 ; Filter2 = clamp(vp8_filter+3)
+    vqadd.s8    q1, q1, q9                  ; Filter1 = clamp(vp8_filter+4)
+    vshr.s8     q2, q2, #3                  ; Filter2 >>= 3
+    vshr.s8     q1, q1, #3                  ; Filter1 >>= 3
+
+
+    vqadd.s8    q11, q6, q2                 ; u = clamp(ps0 + Filter2)
+    vqsub.s8    q10, q7, q1                 ; u = clamp(qs0 - Filter1)
+
+    ; outer tap adjustments: ++vp8_filter >> 1
+    vrshr.s8    q1, q1, #1
+    vbic        q1, q1, q14                 ; vp8_filter &= ~hev
+    vmov.u8     q0, #0x80                   ; 0x80
+    vqadd.s8    q13, q5, q1                 ; u = clamp(ps1 + vp8_filter)
+    vqsub.s8    q12, q8, q1                 ; u = clamp(qs1 - vp8_filter)
+
+    veor        q6, q11, q0                 ; *op0 = u^0x80
+    veor        q7, q10, q0                 ; *oq0 = u^0x80
+    veor        q5, q13, q0                 ; *op1 = u^0x80
+    veor        q8, q12, q0                 ; *oq1 = u^0x80
+
+    bx          lr
+    ENDP        ; |vp8_loop_filter_neon|
+
+;-----------------
+
+    END
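Read per pixel position, the shared helper above is the standard VP8 four-tap filter. The following scalar sketch mirrors the arithmetic in its comments; it is illustrative only (invented names, early returns instead of the branchless masking the NEON code uses, and clamp8 modelling the saturating narrow/add ops):

#include <stdlib.h>

static signed char clamp8(int v) {
    return (signed char)(v < -128 ? -128 : v > 127 ? 127 : v);
}

static void loop_filter_pixel_sketch(unsigned char blimit, unsigned char limit,
                                     unsigned char thresh,
                                     unsigned char p3, unsigned char p2,
                                     unsigned char *p1, unsigned char *p0,
                                     unsigned char *q0, unsigned char *q1,
                                     unsigned char q2, unsigned char q3) {
    /* vp8_filter_mask: all neighbour deltas within limit, edge within blimit */
    int mask = abs(p3 - p2) <= limit && abs(p2 - *p1) <= limit &&
               abs(*p1 - *p0) <= limit && abs(*q1 - *q0) <= limit &&
               abs(q2 - *q1) <= limit && abs(q3 - q2) <= limit &&
               abs(*p0 - *q0) * 2 + abs(*p1 - *q1) / 2 <= blimit;
    /* vp8_hevmask: high edge variance adds the ps1-qs1 term, skips outer taps */
    int hev = abs(*p1 - *p0) > thresh || abs(*q1 - *q0) > thresh;
    signed char ps1 = (signed char)(*p1 ^ 0x80), ps0 = (signed char)(*p0 ^ 0x80);
    signed char qs0 = (signed char)(*q0 ^ 0x80), qs1 = (signed char)(*q1 ^ 0x80);
    signed char f, f1, f2;

    if (!mask)
        return;
    f = hev ? clamp8(ps1 - qs1) : 0;          /* vp8_filter &= hev */
    f = clamp8(f + 3 * (qs0 - ps0));          /* &= mask handled by the return */
    f1 = (signed char)(clamp8(f + 4) >> 3);   /* Filter1 */
    f2 = (signed char)(clamp8(f + 3) >> 3);   /* Filter2 */
    *q0 = (unsigned char)(clamp8(qs0 - f1) ^ 0x80);
    *p0 = (unsigned char)(clamp8(ps0 + f2) ^ 0x80);
    f = (signed char)((f1 + 1) >> 1);         /* outer tap: rounded Filter1 >> 1 */
    if (hev)
        f = 0;                                /* vp8_filter &= ~hev */
    *q1 = (unsigned char)(clamp8(qs1 - f) ^ 0x80);
    *p1 = (unsigned char)(clamp8(ps1 + f) ^ 0x80);
}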
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/loopfilter_neon.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include "./vpx_config.h"
-#include "vpx_ports/arm.h"
-
-static INLINE void vp8_loop_filter_neon(
-        uint8x16_t qblimit,  // flimit
-        uint8x16_t qlimit,   // limit
-        uint8x16_t qthresh,  // thresh
-        uint8x16_t q3,       // p3
-        uint8x16_t q4,       // p2
-        uint8x16_t q5,       // p1
-        uint8x16_t q6,       // p0
-        uint8x16_t q7,       // q0
-        uint8x16_t q8,       // q1
-        uint8x16_t q9,       // q2
-        uint8x16_t q10,      // q3
-        uint8x16_t *q5r,     // p1
-        uint8x16_t *q6r,     // p0
-        uint8x16_t *q7r,     // q0
-        uint8x16_t *q8r) {   // q1
-    uint8x16_t q0u8, q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8;
-    int16x8_t q2s16, q11s16;
-    uint16x8_t q4u16;
-    int8x16_t q1s8, q2s8, q10s8, q11s8, q12s8, q13s8;
-    int8x8_t d2s8, d3s8;
-
-    q11u8 = vabdq_u8(q3, q4);
-    q12u8 = vabdq_u8(q4, q5);
-    q13u8 = vabdq_u8(q5, q6);
-    q14u8 = vabdq_u8(q8, q7);
-    q3    = vabdq_u8(q9, q8);
-    q4    = vabdq_u8(q10, q9);
-
-    q11u8 = vmaxq_u8(q11u8, q12u8);
-    q12u8 = vmaxq_u8(q13u8, q14u8);
-    q3    = vmaxq_u8(q3, q4);
-    q15u8 = vmaxq_u8(q11u8, q12u8);
-
-    q9 = vabdq_u8(q6, q7);
-
-    // vp8_hevmask
-    q13u8 = vcgtq_u8(q13u8, qthresh);
-    q14u8 = vcgtq_u8(q14u8, qthresh);
-    q15u8 = vmaxq_u8(q15u8, q3);
-
-    q2u8 = vabdq_u8(q5, q8);
-    q9 = vqaddq_u8(q9, q9);
-
-    q15u8 = vcgeq_u8(qlimit, q15u8);
-
-    // vp8_filter() function
-    // convert to signed
-    q10 = vdupq_n_u8(0x80);
-    q8 = veorq_u8(q8, q10);
-    q7 = veorq_u8(q7, q10);
-    q6 = veorq_u8(q6, q10);
-    q5 = veorq_u8(q5, q10);
-
-    q2u8 = vshrq_n_u8(q2u8, 1);
-    q9 = vqaddq_u8(q9, q2u8);
-
-    q10 = vdupq_n_u8(3);
-
-    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
-                     vget_low_s8(vreinterpretq_s8_u8(q6)));
-    q11s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
-                      vget_high_s8(vreinterpretq_s8_u8(q6)));
-
-    q9 = vcgeq_u8(qblimit, q9);
-
-    q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5),
-                    vreinterpretq_s8_u8(q8));
-
-    q14u8 = vorrq_u8(q13u8, q14u8);
-
-    q4u16 = vmovl_u8(vget_low_u8(q10));
-    q2s16 = vmulq_s16(q2s16, vreinterpretq_s16_u16(q4u16));
-    q11s16 = vmulq_s16(q11s16, vreinterpretq_s16_u16(q4u16));
-
-    q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q14u8);
-    q15u8 = vandq_u8(q15u8, q9);
-
-    q1s8 = vreinterpretq_s8_u8(q1u8);
-    q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8));
-    q11s16 = vaddw_s8(q11s16, vget_high_s8(q1s8));
-
-    q9 = vdupq_n_u8(4);
-    // vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
-    d2s8 = vqmovn_s16(q2s16);
-    d3s8 = vqmovn_s16(q11s16);
-    q1s8 = vcombine_s8(d2s8, d3s8);
-    q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q15u8);
-    q1s8 = vreinterpretq_s8_u8(q1u8);
-
-    q2s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q10));
-    q1s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q9));
-    q2s8 = vshrq_n_s8(q2s8, 3);
-    q1s8 = vshrq_n_s8(q1s8, 3);
-
-    q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q2s8);
-    q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q1s8);
-
-    q1s8 = vrshrq_n_s8(q1s8, 1);
-    q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
-
-    q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8);
-    q12s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q1s8);
-
-    q0u8 = vdupq_n_u8(0x80);
-    *q8r = veorq_u8(vreinterpretq_u8_s8(q12s8), q0u8);
-    *q7r = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
-    *q6r = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
-    *q5r = veorq_u8(vreinterpretq_u8_s8(q13s8), q0u8);
-    return;
-}
-
-void vp8_loop_filter_horizontal_edge_y_neon(
-        unsigned char *src,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh) {
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit  = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-    src -= (pitch << 2);
-
-    q3 = vld1q_u8(src);
-    src += pitch;
-    q4 = vld1q_u8(src);
-    src += pitch;
-    q5 = vld1q_u8(src);
-    src += pitch;
-    q6 = vld1q_u8(src);
-    src += pitch;
-    q7 = vld1q_u8(src);
-    src += pitch;
-    q8 = vld1q_u8(src);
-    src += pitch;
-    q9 = vld1q_u8(src);
-    src += pitch;
-    q10 = vld1q_u8(src);
-
-    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q5, &q6, &q7, &q8);
-
-    src -= (pitch * 5);
-    vst1q_u8(src, q5);
-    src += pitch;
-    vst1q_u8(src, q6);
-    src += pitch;
-    vst1q_u8(src, q7);
-    src += pitch;
-    vst1q_u8(src, q8);
-    return;
-}
-
-void vp8_loop_filter_horizontal_edge_uv_neon(
-        unsigned char *u,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh,
-        unsigned char *v) {
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit  = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    u -= (pitch << 2);
-    v -= (pitch << 2);
-
-    d6  = vld1_u8(u);
-    u += pitch;
-    d7  = vld1_u8(v);
-    v += pitch;
-    d8  = vld1_u8(u);
-    u += pitch;
-    d9  = vld1_u8(v);
-    v += pitch;
-    d10 = vld1_u8(u);
-    u += pitch;
-    d11 = vld1_u8(v);
-    v += pitch;
-    d12 = vld1_u8(u);
-    u += pitch;
-    d13 = vld1_u8(v);
-    v += pitch;
-    d14 = vld1_u8(u);
-    u += pitch;
-    d15 = vld1_u8(v);
-    v += pitch;
-    d16 = vld1_u8(u);
-    u += pitch;
-    d17 = vld1_u8(v);
-    v += pitch;
-    d18 = vld1_u8(u);
-    u += pitch;
-    d19 = vld1_u8(v);
-    v += pitch;
-    d20 = vld1_u8(u);
-    d21 = vld1_u8(v);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q5, &q6, &q7, &q8);
-
-    u -= (pitch * 5);
-    vst1_u8(u, vget_low_u8(q5));
-    u += pitch;
-    vst1_u8(u, vget_low_u8(q6));
-    u += pitch;
-    vst1_u8(u, vget_low_u8(q7));
-    u += pitch;
-    vst1_u8(u, vget_low_u8(q8));
-
-    v -= (pitch * 5);
-    vst1_u8(v, vget_high_u8(q5));
-    v += pitch;
-    vst1_u8(v, vget_high_u8(q6));
-    v += pitch;
-    vst1_u8(v, vget_high_u8(q7));
-    v += pitch;
-    vst1_u8(v, vget_high_u8(q8));
-    return;
-}
-
-static INLINE void write_4x8(unsigned char *dst, int pitch,
-                             const uint8x8x4_t result) {
-#ifdef VPX_INCOMPATIBLE_GCC
-    /*
-     * uint8x8x4_t result
-    00 01 02 03 | 04 05 06 07
-    10 11 12 13 | 14 15 16 17
-    20 21 22 23 | 24 25 26 27
-    30 31 32 33 | 34 35 36 37
-    ---
-    * after vtrn_u16
-    00 01 20 21 | 04 05 24 25
-    02 03 22 23 | 06 07 26 27
-    10 11 30 31 | 14 15 34 35
-    12 13 32 33 | 16 17 36 37
-    ---
-    * after vtrn_u8
-    00 10 20 30 | 04 14 24 34
-    01 11 21 31 | 05 15 25 35
-    02 12 22 32 | 06 16 26 36
-    03 13 23 33 | 07 17 27 37
-    */
-    const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u8(result.val[0]),
-                                          vreinterpret_u16_u8(result.val[2]));
-    const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u8(result.val[1]),
-                                          vreinterpret_u16_u8(result.val[3]));
-    const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
-                                       vreinterpret_u8_u16(r13_u16.val[0]));
-    const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
-                                       vreinterpret_u8_u16(r13_u16.val[1]));
-    const uint32x2_t x_0_4 = vreinterpret_u32_u8(r01_u8.val[0]);
-    const uint32x2_t x_1_5 = vreinterpret_u32_u8(r01_u8.val[1]);
-    const uint32x2_t x_2_6 = vreinterpret_u32_u8(r23_u8.val[0]);
-    const uint32x2_t x_3_7 = vreinterpret_u32_u8(r23_u8.val[1]);
-    vst1_lane_u32((uint32_t *)dst, x_0_4, 0);
-    dst += pitch;
-    vst1_lane_u32((uint32_t *)dst, x_1_5, 0);
-    dst += pitch;
-    vst1_lane_u32((uint32_t *)dst, x_2_6, 0);
-    dst += pitch;
-    vst1_lane_u32((uint32_t *)dst, x_3_7, 0);
-    dst += pitch;
-    vst1_lane_u32((uint32_t *)dst, x_0_4, 1);
-    dst += pitch;
-    vst1_lane_u32((uint32_t *)dst, x_1_5, 1);
-    dst += pitch;
-    vst1_lane_u32((uint32_t *)dst, x_2_6, 1);
-    dst += pitch;
-    vst1_lane_u32((uint32_t *)dst, x_3_7, 1);
-#else
-    vst4_lane_u8(dst, result, 0);
-    dst += pitch;
-    vst4_lane_u8(dst, result, 1);
-    dst += pitch;
-    vst4_lane_u8(dst, result, 2);
-    dst += pitch;
-    vst4_lane_u8(dst, result, 3);
-    dst += pitch;
-    vst4_lane_u8(dst, result, 4);
-    dst += pitch;
-    vst4_lane_u8(dst, result, 5);
-    dst += pitch;
-    vst4_lane_u8(dst, result, 6);
-    dst += pitch;
-    vst4_lane_u8(dst, result, 7);
-#endif  // VPX_INCOMPATIBLE_GCC
-}
-
-void vp8_loop_filter_vertical_edge_y_neon(
-        unsigned char *src,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh) {
-    unsigned char *s, *d;
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
-    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
-    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
-    uint8x8x4_t q4ResultH, q4ResultL;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit  = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    s = src - 4;
-    d6  = vld1_u8(s);
-    s += pitch;
-    d8  = vld1_u8(s);
-    s += pitch;
-    d10 = vld1_u8(s);
-    s += pitch;
-    d12 = vld1_u8(s);
-    s += pitch;
-    d14 = vld1_u8(s);
-    s += pitch;
-    d16 = vld1_u8(s);
-    s += pitch;
-    d18 = vld1_u8(s);
-    s += pitch;
-    d20 = vld1_u8(s);
-    s += pitch;
-    d7  = vld1_u8(s);
-    s += pitch;
-    d9  = vld1_u8(s);
-    s += pitch;
-    d11 = vld1_u8(s);
-    s += pitch;
-    d13 = vld1_u8(s);
-    s += pitch;
-    d15 = vld1_u8(s);
-    s += pitch;
-    d17 = vld1_u8(s);
-    s += pitch;
-    d19 = vld1_u8(s);
-    s += pitch;
-    d21 = vld1_u8(s);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
-    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
-    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
-    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
-
-    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
-                       vreinterpretq_u16_u32(q2tmp2.val[0]));
-    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
-                       vreinterpretq_u16_u32(q2tmp3.val[0]));
-    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
-                       vreinterpretq_u16_u32(q2tmp2.val[1]));
-    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
-                       vreinterpretq_u16_u32(q2tmp3.val[1]));
-
-    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
-                       vreinterpretq_u8_u16(q2tmp5.val[0]));
-    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
-                       vreinterpretq_u8_u16(q2tmp5.val[1]));
-    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
-                       vreinterpretq_u8_u16(q2tmp7.val[0]));
-    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
-                       vreinterpretq_u8_u16(q2tmp7.val[1]));
-
-    q3 = q2tmp8.val[0];
-    q4 = q2tmp8.val[1];
-    q5 = q2tmp9.val[0];
-    q6 = q2tmp9.val[1];
-    q7 = q2tmp10.val[0];
-    q8 = q2tmp10.val[1];
-    q9 = q2tmp11.val[0];
-    q10 = q2tmp11.val[1];
-
-    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q5, &q6, &q7, &q8);
-
-    q4ResultL.val[0] = vget_low_u8(q5);   // d10
-    q4ResultL.val[1] = vget_low_u8(q6);   // d12
-    q4ResultL.val[2] = vget_low_u8(q7);   // d14
-    q4ResultL.val[3] = vget_low_u8(q8);   // d16
-    q4ResultH.val[0] = vget_high_u8(q5);  // d11
-    q4ResultH.val[1] = vget_high_u8(q6);  // d13
-    q4ResultH.val[2] = vget_high_u8(q7);  // d15
-    q4ResultH.val[3] = vget_high_u8(q8);  // d17
-
-    d = src - 2;
-    write_4x8(d, pitch, q4ResultL);
-    d += pitch * 8;
-    write_4x8(d, pitch, q4ResultH);
-}
-
-void vp8_loop_filter_vertical_edge_uv_neon(
-        unsigned char *u,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh,
-        unsigned char *v) {
-    unsigned char *us, *ud;
-    unsigned char *vs, *vd;
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
-    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
-    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
-    uint8x8x4_t q4ResultH, q4ResultL;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit  = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    us = u - 4;
-    d6 = vld1_u8(us);
-    us += pitch;
-    d8 = vld1_u8(us);
-    us += pitch;
-    d10 = vld1_u8(us);
-    us += pitch;
-    d12 = vld1_u8(us);
-    us += pitch;
-    d14 = vld1_u8(us);
-    us += pitch;
-    d16 = vld1_u8(us);
-    us += pitch;
-    d18 = vld1_u8(us);
-    us += pitch;
-    d20 = vld1_u8(us);
-
-    vs = v - 4;
-    d7 = vld1_u8(vs);
-    vs += pitch;
-    d9 = vld1_u8(vs);
-    vs += pitch;
-    d11 = vld1_u8(vs);
-    vs += pitch;
-    d13 = vld1_u8(vs);
-    vs += pitch;
-    d15 = vld1_u8(vs);
-    vs += pitch;
-    d17 = vld1_u8(vs);
-    vs += pitch;
-    d19 = vld1_u8(vs);
-    vs += pitch;
-    d21 = vld1_u8(vs);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
-    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
-    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
-    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
-
-    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
-                       vreinterpretq_u16_u32(q2tmp2.val[0]));
-    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
-                       vreinterpretq_u16_u32(q2tmp3.val[0]));
-    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
-                       vreinterpretq_u16_u32(q2tmp2.val[1]));
-    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
-                       vreinterpretq_u16_u32(q2tmp3.val[1]));
-
-    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
-                       vreinterpretq_u8_u16(q2tmp5.val[0]));
-    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
-                       vreinterpretq_u8_u16(q2tmp5.val[1]));
-    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
-                       vreinterpretq_u8_u16(q2tmp7.val[0]));
-    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
-                       vreinterpretq_u8_u16(q2tmp7.val[1]));
-
-    q3 = q2tmp8.val[0];
-    q4 = q2tmp8.val[1];
-    q5 = q2tmp9.val[0];
-    q6 = q2tmp9.val[1];
-    q7 = q2tmp10.val[0];
-    q8 = q2tmp10.val[1];
-    q9 = q2tmp11.val[0];
-    q10 = q2tmp11.val[1];
-
-    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q5, &q6, &q7, &q8);
-
-    q4ResultL.val[0] = vget_low_u8(q5);   // d10
-    q4ResultL.val[1] = vget_low_u8(q6);   // d12
-    q4ResultL.val[2] = vget_low_u8(q7);   // d14
-    q4ResultL.val[3] = vget_low_u8(q8);   // d16
-    ud = u - 2;
-    write_4x8(ud, pitch, q4ResultL);
-
-    q4ResultH.val[0] = vget_high_u8(q5);  // d11
-    q4ResultH.val[1] = vget_high_u8(q6);  // d13
-    q4ResultH.val[2] = vget_high_u8(q7);  // d15
-    q4ResultH.val[3] = vget_high_u8(q8);  // d17
-    vd = v - 2;
-    write_4x8(vd, pitch, q4ResultH);
-}
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.asm
@@ -0,0 +1,117 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    ;EXPORT  |vp8_loop_filter_simple_horizontal_edge_neon|
+    EXPORT  |vp8_loop_filter_bhs_neon|
+    EXPORT  |vp8_loop_filter_mbhs_neon|
+    ARM
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+
+; r0    unsigned char *s, PRESERVE
+; r1    int p, PRESERVE
+; q1    limit, PRESERVE
+
+|vp8_loop_filter_simple_horizontal_edge_neon| PROC
+
+    sub         r3, r0, r1, lsl #1          ; move src pointer down by 2 lines
+
+    vld1.u8     {q7}, [r0@128], r1          ; q0
+    vld1.u8     {q5}, [r3@128], r1          ; p0
+    vld1.u8     {q8}, [r0@128]              ; q1
+    vld1.u8     {q6}, [r3@128]              ; p1
+
+    vabd.u8     q15, q6, q7                 ; abs(p0 - q0)
+    vabd.u8     q14, q5, q8                 ; abs(p1 - q1)
+
+    vqadd.u8    q15, q15, q15               ; abs(p0 - q0) * 2
+    vshr.u8     q14, q14, #1                ; abs(p1 - q1) / 2
+    vmov.u8     q0, #0x80                   ; 0x80
+    vmov.s16    q13, #3
+    vqadd.u8    q15, q15, q14               ; abs(p0 - q0) * 2 + abs(p1 - q1) / 2
+
+    veor        q7, q7, q0                  ; qs0: q0 offset to convert to a signed value
+    veor        q6, q6, q0                  ; ps0: p0 offset to convert to a signed value
+    veor        q5, q5, q0                  ; ps1: p1 offset to convert to a signed value
+    veor        q8, q8, q0                  ; qs1: q1 offset to convert to a signed value
+
+    vcge.u8     q15, q1, q15                ; (abs(p0 - q0)*2 + abs(p1-q1)/2 <= blimit)*-1
+
+    vsubl.s8    q2, d14, d12                ; ( qs0 - ps0)
+    vsubl.s8    q3, d15, d13
+
+    vqsub.s8    q4, q5, q8                  ; q4: vp8_filter = vp8_signed_char_clamp(ps1-qs1)
+
+    vmul.s16    q2, q2, q13                 ;  3 * ( qs0 - ps0)
+    vmul.s16    q3, q3, q13
+
+    vmov.u8     q10, #0x03                  ; 0x03
+    vmov.u8     q9, #0x04                   ; 0x04
+
+    vaddw.s8    q2, q2, d8                  ; vp8_filter + 3 * ( qs0 - ps0)
+    vaddw.s8    q3, q3, d9
+
+    vqmovn.s16  d8, q2                      ; vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
+    vqmovn.s16  d9, q3
+
+    vand        q14, q4, q15                ; vp8_filter &= mask
+
+    vqadd.s8    q2, q14, q10                ; Filter2 = vp8_signed_char_clamp(vp8_filter+3)
+    vqadd.s8    q3, q14, q9                 ; Filter1 = vp8_signed_char_clamp(vp8_filter+4)
+    vshr.s8     q2, q2, #3                  ; Filter2 >>= 3
+    vshr.s8     q4, q3, #3                  ; Filter1 >>= 3
+
+    sub         r0, r0, r1
+
+    ;calculate output
+    vqadd.s8    q11, q6, q2                 ; u = vp8_signed_char_clamp(ps0 + Filter2)
+    vqsub.s8    q10, q7, q4                 ; u = vp8_signed_char_clamp(qs0 - Filter1)
+
+    veor        q6, q11, q0                 ; *op0 = u^0x80
+    veor        q7, q10, q0                 ; *oq0 = u^0x80
+
+    vst1.u8     {q6}, [r3@128]              ; store op0
+    vst1.u8     {q7}, [r0@128]              ; store oq0
+
+    bx          lr
+    ENDP        ; |vp8_loop_filter_simple_horizontal_edge_neon|
+
+; r0    unsigned char *y
+; r1    int ystride
+; r2    const unsigned char *blimit
+
+|vp8_loop_filter_bhs_neon| PROC
+    push        {r4, lr}
+    ldrb        r3, [r2]                    ; load blim from mem
+    vdup.s8     q1, r3                      ; duplicate blim
+
+    add         r0, r0, r1, lsl #2          ; src = y_ptr + 4 * y_stride
+    bl          vp8_loop_filter_simple_horizontal_edge_neon
+    ; vp8_loop_filter_simple_horizontal_edge_neon preserves r0, r1 and q1
+    add         r0, r0, r1, lsl #2          ; src = y_ptr + 8 * y_stride
+    bl          vp8_loop_filter_simple_horizontal_edge_neon
+    add         r0, r0, r1, lsl #2          ; src = y_ptr + 12 * y_stride
+    pop         {r4, lr}
+    b           vp8_loop_filter_simple_horizontal_edge_neon
+    ENDP        ;|vp8_loop_filter_bhs_neon|
+
+; r0    unsigned char *y
+; r1    int ystride
+; r2    const unsigned char *blimit
+
+|vp8_loop_filter_mbhs_neon| PROC
+    ldrb        r3, [r2]                   ; load mblim from mem
+    vdup.s8     q1, r3                     ; duplicate mblim
+    b           vp8_loop_filter_simple_horizontal_edge_neon
+    ENDP        ;|vp8_loop_filter_mbhs_neon|
+
+    END
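The simple filter above only adjusts p0 and q0. A scalar sketch of the arithmetic spelled out in its comments (illustrative only; clamp8 models the saturating NEON ops, as in the earlier sketch):

#include <stdlib.h>

static signed char clamp8(int v) {
    return (signed char)(v < -128 ? -128 : v > 127 ? 127 : v);
}

static void simple_filter_pixel_sketch(unsigned char blimit,
                                       unsigned char p1, unsigned char *p0,
                                       unsigned char *q0, unsigned char q1) {
    signed char ps1 = (signed char)(p1 ^ 0x80), ps0 = (signed char)(*p0 ^ 0x80);
    signed char qs0 = (signed char)(*q0 ^ 0x80), qs1 = (signed char)(q1 ^ 0x80);
    signed char f, f1, f2;

    /* mask: abs(p0 - q0) * 2 + abs(p1 - q1) / 2 must not exceed blimit */
    if (abs(*p0 - *q0) * 2 + abs(p1 - q1) / 2 > blimit)
        return;
    f = clamp8(clamp8(ps1 - qs1) + 3 * (qs0 - ps0));
    f1 = (signed char)(clamp8(f + 4) >> 3);   /* Filter1 */
    f2 = (signed char)(clamp8(f + 3) >> 3);   /* Filter2 */
    *q0 = (unsigned char)(clamp8(qs0 - f1) ^ 0x80);
    *p0 = (unsigned char)(clamp8(ps0 + f2) ^ 0x80);
}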
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include "./vpx_config.h"
-
-static INLINE void vp8_loop_filter_simple_horizontal_edge_neon(
-        unsigned char *s,
-        int p,
-        const unsigned char *blimit) {
-    uint8_t *sp;
-    uint8x16_t qblimit, q0u8;
-    uint8x16_t q5u8, q6u8, q7u8, q8u8, q9u8, q10u8, q14u8, q15u8;
-    int16x8_t q2s16, q3s16, q13s16;
-    int8x8_t d8s8, d9s8;
-    int8x16_t q2s8, q3s8, q4s8, q10s8, q11s8, q14s8;
-
-    qblimit = vdupq_n_u8(*blimit);
-
-    sp = s - (p << 1);
-    q5u8 = vld1q_u8(sp);
-    sp += p;
-    q6u8 = vld1q_u8(sp);
-    sp += p;
-    q7u8 = vld1q_u8(sp);
-    sp += p;
-    q8u8 = vld1q_u8(sp);
-
-    q15u8 = vabdq_u8(q6u8, q7u8);
-    q14u8 = vabdq_u8(q5u8, q8u8);
-
-    q15u8 = vqaddq_u8(q15u8, q15u8);
-    q14u8 = vshrq_n_u8(q14u8, 1);
-    q0u8 = vdupq_n_u8(0x80);
-    q13s16 = vdupq_n_s16(3);
-    q15u8 = vqaddq_u8(q15u8, q14u8);
-
-    q5u8 = veorq_u8(q5u8, q0u8);
-    q6u8 = veorq_u8(q6u8, q0u8);
-    q7u8 = veorq_u8(q7u8, q0u8);
-    q8u8 = veorq_u8(q8u8, q0u8);
-
-    q15u8 = vcgeq_u8(qblimit, q15u8);
-
-    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7u8)),
-                     vget_low_s8(vreinterpretq_s8_u8(q6u8)));
-    q3s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7u8)),
-                     vget_high_s8(vreinterpretq_s8_u8(q6u8)));
-
-    q4s8 = vqsubq_s8(vreinterpretq_s8_u8(q5u8),
-                     vreinterpretq_s8_u8(q8u8));
-
-    q2s16 = vmulq_s16(q2s16, q13s16);
-    q3s16 = vmulq_s16(q3s16, q13s16);
-
-    q10u8 = vdupq_n_u8(3);
-    q9u8 = vdupq_n_u8(4);
-
-    q2s16 = vaddw_s8(q2s16, vget_low_s8(q4s8));
-    q3s16 = vaddw_s8(q3s16, vget_high_s8(q4s8));
-
-    d8s8 = vqmovn_s16(q2s16);
-    d9s8 = vqmovn_s16(q3s16);
-    q4s8 = vcombine_s8(d8s8, d9s8);
-
-    q14s8 = vandq_s8(q4s8, vreinterpretq_s8_u8(q15u8));
-
-    q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q10u8));
-    q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q9u8));
-    q2s8 = vshrq_n_s8(q2s8, 3);
-    q3s8 = vshrq_n_s8(q3s8, 3);
-
-    q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6u8), q2s8);
-    q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7u8), q3s8);
-
-    q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
-    q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
-
-    vst1q_u8(s, q7u8);
-    s -= p;
-    vst1q_u8(s, q6u8);
-    return;
-}
-
-void vp8_loop_filter_bhs_neon(
-        unsigned char *y_ptr,
-        int y_stride,
-        const unsigned char *blimit) {
-    y_ptr += y_stride * 4;
-    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
-    y_ptr += y_stride * 4;
-    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
-    y_ptr += y_stride * 4;
-    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
-    return;
-}
-
-void vp8_loop_filter_mbhs_neon(
-        unsigned char *y_ptr,
-        int y_stride,
-        const unsigned char *blimit) {
-    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
-    return;
-}
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.asm
@@ -0,0 +1,154 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    ;EXPORT  |vp8_loop_filter_simple_vertical_edge_neon|
+    EXPORT |vp8_loop_filter_bvs_neon|
+    EXPORT |vp8_loop_filter_mbvs_neon|
+    ARM
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+
+; r0    unsigned char *s, PRESERVE
+; r1    int p, PRESERVE
+; q1    limit, PRESERVE
+
+|vp8_loop_filter_simple_vertical_edge_neon| PROC
+    sub         r0, r0, #2                  ; move src pointer down by 2 columns
+    add         r12, r1, r1                 ; double the pitch: r0/r3 alternate rows
+    add         r3, r0, r1
+
+    vld4.8      {d6[0], d7[0], d8[0], d9[0]}, [r0], r12
+    vld4.8      {d6[1], d7[1], d8[1], d9[1]}, [r3], r12
+    vld4.8      {d6[2], d7[2], d8[2], d9[2]}, [r0], r12
+    vld4.8      {d6[3], d7[3], d8[3], d9[3]}, [r3], r12
+    vld4.8      {d6[4], d7[4], d8[4], d9[4]}, [r0], r12
+    vld4.8      {d6[5], d7[5], d8[5], d9[5]}, [r3], r12
+    vld4.8      {d6[6], d7[6], d8[6], d9[6]}, [r0], r12
+    vld4.8      {d6[7], d7[7], d8[7], d9[7]}, [r3], r12
+
+    vld4.8      {d10[0], d11[0], d12[0], d13[0]}, [r0], r12
+    vld4.8      {d10[1], d11[1], d12[1], d13[1]}, [r3], r12
+    vld4.8      {d10[2], d11[2], d12[2], d13[2]}, [r0], r12
+    vld4.8      {d10[3], d11[3], d12[3], d13[3]}, [r3], r12
+    vld4.8      {d10[4], d11[4], d12[4], d13[4]}, [r0], r12
+    vld4.8      {d10[5], d11[5], d12[5], d13[5]}, [r3], r12
+    vld4.8      {d10[6], d11[6], d12[6], d13[6]}, [r0], r12
+    vld4.8      {d10[7], d11[7], d12[7], d13[7]}, [r3]
+
+    vswp        d7, d10
+    vswp        d12, d9
+
+    ;vp8_filter_mask() function
+    ;vp8_hevmask() function
+    sub         r0, r0, r1, lsl #4
+    vabd.u8     q15, q5, q4                 ; abs(p0 - q0)
+    vabd.u8     q14, q3, q6                 ; abs(p1 - q1)
+
+    vqadd.u8    q15, q15, q15               ; abs(p0 - q0) * 2
+    vshr.u8     q14, q14, #1                ; abs(p1 - q1) / 2
+    vmov.u8     q0, #0x80                   ; 0x80
+    vmov.s16    q11, #3
+    vqadd.u8    q15, q15, q14               ; abs(p0 - q0) * 2 + abs(p1 - q1) / 2
+
+    veor        q4, q4, q0                  ; qs0: q0 offset to convert to a signed value
+    veor        q5, q5, q0                  ; ps0: p0 offset to convert to a signed value
+    veor        q3, q3, q0                  ; ps1: p1 offset to convert to a signed value
+    veor        q6, q6, q0                  ; qs1: q1 offset to convert to a signed value
+
+    vcge.u8     q15, q1, q15                ; (abs(p0 - q0)*2 + abs(p1-q1)/2 <= blimit)*-1
+
+    vsubl.s8    q2, d8, d10                 ; ( qs0 - ps0)
+    vsubl.s8    q13, d9, d11
+
+    vqsub.s8    q14, q3, q6                  ; vp8_filter = vp8_signed_char_clamp(ps1-qs1)
+
+    vmul.s16    q2, q2, q11                 ;  3 * ( qs0 - ps0)
+    vmul.s16    q13, q13, q11
+
+    vmov.u8     q11, #0x03                  ; 0x03
+    vmov.u8     q12, #0x04                  ; 0x04
+
+    vaddw.s8    q2, q2, d28                  ; vp8_filter + 3 * ( qs0 - ps0)
+    vaddw.s8    q13, q13, d29
+
+    vqmovn.s16  d28, q2                      ; vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
+    vqmovn.s16  d29, q13
+
+    add         r0, r0, #1
+    add         r3, r0, r1
+
+    vand        q14, q14, q15               ; vp8_filter &= mask
+
+    vqadd.s8    q2, q14, q11                ; Filter2 = vp8_signed_char_clamp(vp8_filter+3)
+    vqadd.s8    q3, q14, q12                ; Filter1 = vp8_signed_char_clamp(vp8_filter+4)
+    vshr.s8     q2, q2, #3                  ; Filter2 >>= 3
+    vshr.s8     q14, q3, #3                 ; Filter1 >>= 3
+
+    ;calculate output
+    vqadd.s8    q11, q5, q2                 ; u = vp8_signed_char_clamp(ps0 + Filter2)
+    vqsub.s8    q10, q4, q14                ; u = vp8_signed_char_clamp(qs0 - Filter1)
+
+    veor        q6, q11, q0                 ; *op0 = u^0x80
+    veor        q7, q10, q0                 ; *oq0 = u^0x80
+    add         r12, r1, r1                 ; r12 = 2 * pitch
+    vswp        d13, d14                    ; pair op0/oq0 halves for the vst2 stores
+
+    ;store op0, oq0 (the simple filter only writes the two pixels at the edge)
+    vst2.8      {d12[0], d13[0]}, [r0], r12
+    vst2.8      {d12[1], d13[1]}, [r3], r12
+    vst2.8      {d12[2], d13[2]}, [r0], r12
+    vst2.8      {d12[3], d13[3]}, [r3], r12
+    vst2.8      {d12[4], d13[4]}, [r0], r12
+    vst2.8      {d12[5], d13[5]}, [r3], r12
+    vst2.8      {d12[6], d13[6]}, [r0], r12
+    vst2.8      {d12[7], d13[7]}, [r3], r12
+    vst2.8      {d14[0], d15[0]}, [r0], r12
+    vst2.8      {d14[1], d15[1]}, [r3], r12
+    vst2.8      {d14[2], d15[2]}, [r0], r12
+    vst2.8      {d14[3], d15[3]}, [r3], r12
+    vst2.8      {d14[4], d15[4]}, [r0], r12
+    vst2.8      {d14[5], d15[5]}, [r3], r12
+    vst2.8      {d14[6], d15[6]}, [r0], r12
+    vst2.8      {d14[7], d15[7]}, [r3]
+
+    bx          lr
+    ENDP        ; |vp8_loop_filter_simple_vertical_edge_neon|
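+
+; Equivalent scalar arithmetic for one pixel position (a sketch: clamp()
+; stands for vp8_signed_char_clamp to [-128, 127], and the code above does
+; the 3 * (qs0 - ps0) sum at 16-bit precision before narrowing):
+;   mask = (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit) ? -1 : 0;
+;   ps1 = p1 ^ 0x80;  ps0 = p0 ^ 0x80;      /* bias to signed */
+;   qs0 = q0 ^ 0x80;  qs1 = q1 ^ 0x80;
+;   w = clamp(clamp(ps1 - qs1) + 3 * (qs0 - ps0)) & mask;
+;   Filter1 = clamp(w + 4) >> 3;
+;   Filter2 = clamp(w + 3) >> 3;
+;   q0 = clamp(qs0 - Filter1) ^ 0x80;       /* only p0 and q0 change */
+;   p0 = clamp(ps0 + Filter2) ^ 0x80;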
+
+; r0    unsigned char *y
+; r1    int ystride
+; r2    const unsigned char *blimit
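+;
+; Filters the three internal vertical block edges of the macroblock
+; (columns 4, 8 and 12); the last edge is reached by a tail branch, so the
+; core routine's return goes straight back to our caller.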
+
+|vp8_loop_filter_bvs_neon| PROC
+    push        {r4, lr}
+    ldrb        r3, [r2]                   ; load blim from mem
+    mov         r4, r0
+    add         r0, r0, #4
+    vdup.s8     q1, r3                     ; duplicate blim
+    bl          vp8_loop_filter_simple_vertical_edge_neon
+    ; vp8_loop_filter_simple_vertical_edge_neon preserves  r1 and q1
+    add         r0, r4, #8
+    bl          vp8_loop_filter_simple_vertical_edge_neon
+    add         r0, r4, #12
+    pop         {r4, lr}
+    b           vp8_loop_filter_simple_vertical_edge_neon
+    ENDP        ;|vp8_loop_filter_bvs_neon|
+
+; r0    unsigned char *y
+; r1    int ystride
+; r2    const unsigned char *blimit
+
+|vp8_loop_filter_mbvs_neon| PROC
+    ldrb        r3, [r2]                   ; load mblim from mem
+    vdup.s8     q1, r3                     ; duplicate mblim
+    b           vp8_loop_filter_simple_vertical_edge_neon
+    ENDP        ;|vp8_loop_filter_mbvs_neon|
+    END
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include "./vpx_config.h"
-#include "vpx_ports/arm.h"
-
-#ifdef VPX_INCOMPATIBLE_GCC
-static INLINE void write_2x4(unsigned char *dst, int pitch,
-                             const uint8x8x2_t result) {
-    /*
-     * uint8x8x2_t result
-    00 01 02 03 | 04 05 06 07
-    10 11 12 13 | 14 15 16 17
-    ---
-    * after vtrn_u8
-    00 10 02 12 | 04 14 06 16
-    01 11 03 13 | 05 15 07 17
-    */
-    const uint8x8x2_t r01_u8 = vtrn_u8(result.val[0],
-                                       result.val[1]);
-    const uint16x4_t x_0_4 = vreinterpret_u16_u8(r01_u8.val[0]);
-    const uint16x4_t x_1_5 = vreinterpret_u16_u8(r01_u8.val[1]);
-    vst1_lane_u16((uint16_t *)dst, x_0_4, 0);
-    dst += pitch;
-    vst1_lane_u16((uint16_t *)dst, x_1_5, 0);
-    dst += pitch;
-    vst1_lane_u16((uint16_t *)dst, x_0_4, 1);
-    dst += pitch;
-    vst1_lane_u16((uint16_t *)dst, x_1_5, 1);
-    dst += pitch;
-    vst1_lane_u16((uint16_t *)dst, x_0_4, 2);
-    dst += pitch;
-    vst1_lane_u16((uint16_t *)dst, x_1_5, 2);
-    dst += pitch;
-    vst1_lane_u16((uint16_t *)dst, x_0_4, 3);
-    dst += pitch;
-    vst1_lane_u16((uint16_t *)dst, x_1_5, 3);
-}
-
-static INLINE void write_2x8(unsigned char *dst, int pitch,
-                             const uint8x8x2_t result,
-                             const uint8x8x2_t result2) {
-  write_2x4(dst, pitch, result);
-  dst += pitch * 8;
-  write_2x4(dst, pitch, result2);
-}
-#else
-static INLINE void write_2x8(unsigned char *dst, int pitch,
-                             const uint8x8x2_t result,
-                             const uint8x8x2_t result2) {
-  vst2_lane_u8(dst, result, 0);
-  dst += pitch;
-  vst2_lane_u8(dst, result, 1);
-  dst += pitch;
-  vst2_lane_u8(dst, result, 2);
-  dst += pitch;
-  vst2_lane_u8(dst, result, 3);
-  dst += pitch;
-  vst2_lane_u8(dst, result, 4);
-  dst += pitch;
-  vst2_lane_u8(dst, result, 5);
-  dst += pitch;
-  vst2_lane_u8(dst, result, 6);
-  dst += pitch;
-  vst2_lane_u8(dst, result, 7);
-  dst += pitch;
-
-  vst2_lane_u8(dst, result2, 0);
-  dst += pitch;
-  vst2_lane_u8(dst, result2, 1);
-  dst += pitch;
-  vst2_lane_u8(dst, result2, 2);
-  dst += pitch;
-  vst2_lane_u8(dst, result2, 3);
-  dst += pitch;
-  vst2_lane_u8(dst, result2, 4);
-  dst += pitch;
-  vst2_lane_u8(dst, result2, 5);
-  dst += pitch;
-  vst2_lane_u8(dst, result2, 6);
-  dst += pitch;
-  vst2_lane_u8(dst, result2, 7);
-}
-#endif  // VPX_INCOMPATIBLE_GCC
-
-
-#ifdef VPX_INCOMPATIBLE_GCC
-static INLINE
-uint8x8x4_t read_4x8(unsigned char *src, int pitch, uint8x8x4_t x) {
-    const uint8x8_t a = vld1_u8(src);
-    const uint8x8_t b = vld1_u8(src + pitch * 1);
-    const uint8x8_t c = vld1_u8(src + pitch * 2);
-    const uint8x8_t d = vld1_u8(src + pitch * 3);
-    const uint8x8_t e = vld1_u8(src + pitch * 4);
-    const uint8x8_t f = vld1_u8(src + pitch * 5);
-    const uint8x8_t g = vld1_u8(src + pitch * 6);
-    const uint8x8_t h = vld1_u8(src + pitch * 7);
-    const uint32x2x2_t r04_u32 = vtrn_u32(vreinterpret_u32_u8(a),
-                                          vreinterpret_u32_u8(e));
-    const uint32x2x2_t r15_u32 = vtrn_u32(vreinterpret_u32_u8(b),
-                                          vreinterpret_u32_u8(f));
-    const uint32x2x2_t r26_u32 = vtrn_u32(vreinterpret_u32_u8(c),
-                                          vreinterpret_u32_u8(g));
-    const uint32x2x2_t r37_u32 = vtrn_u32(vreinterpret_u32_u8(d),
-                                          vreinterpret_u32_u8(h));
-    const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u32(r04_u32.val[0]),
-                                          vreinterpret_u16_u32(r26_u32.val[0]));
-    const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u32(r15_u32.val[0]),
-                                          vreinterpret_u16_u32(r37_u32.val[0]));
-    const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
-                                       vreinterpret_u8_u16(r13_u16.val[0]));
-    const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
-                                       vreinterpret_u8_u16(r13_u16.val[1]));
-    /*
-     * after vtrn_u32
-    00 01 02 03 | 40 41 42 43
-    10 11 12 13 | 50 51 52 53
-    20 21 22 23 | 60 61 62 63
-    30 31 32 33 | 70 71 72 73
-    ---
-    * after vtrn_u16
-    00 01 20 21 | 40 41 60 61
-    02 03 22 23 | 42 43 62 63
-    10 11 30 31 | 50 51 70 71
-    12 13 32 33 | 52 52 72 73
-
-    00 01 20 21 | 40 41 60 61
-    10 11 30 31 | 50 51 70 71
-    02 03 22 23 | 42 43 62 63
-    12 13 32 33 | 52 52 72 73
-    ---
-    * after vtrn_u8
-    00 10 20 30 | 40 50 60 70
-    01 11 21 31 | 41 51 61 71
-    02 12 22 32 | 42 52 62 72
-    03 13 23 33 | 43 53 63 73
-    */
-    x.val[0] = r01_u8.val[0];
-    x.val[1] = r01_u8.val[1];
-    x.val[2] = r23_u8.val[0];
-    x.val[3] = r23_u8.val[1];
-
-    return x;
-}
-#else
-static INLINE
-uint8x8x4_t read_4x8(unsigned char *src, int pitch, uint8x8x4_t x) {
-    x = vld4_lane_u8(src, x, 0);
-    src += pitch;
-    x = vld4_lane_u8(src, x, 1);
-    src += pitch;
-    x = vld4_lane_u8(src, x, 2);
-    src += pitch;
-    x = vld4_lane_u8(src, x, 3);
-    src += pitch;
-    x = vld4_lane_u8(src, x, 4);
-    src += pitch;
-    x = vld4_lane_u8(src, x, 5);
-    src += pitch;
-    x = vld4_lane_u8(src, x, 6);
-    src += pitch;
-    x = vld4_lane_u8(src, x, 7);
-    return x;
-}
-#endif  // VPX_INCOMPATIBLE_GCC
-
-static INLINE void vp8_loop_filter_simple_vertical_edge_neon(
-        unsigned char *s,
-        int p,
-        const unsigned char *blimit) {
-    unsigned char *src1;
-    uint8x16_t qblimit, q0u8;
-    uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q11u8, q12u8, q14u8, q15u8;
-    int16x8_t q2s16, q13s16, q11s16;
-    int8x8_t d28s8, d29s8;
-    int8x16_t q2s8, q3s8, q10s8, q11s8, q14s8;
-    uint8x8x4_t d0u8x4;  // d6, d7, d8, d9
-    uint8x8x4_t d1u8x4;  // d10, d11, d12, d13
-    uint8x8x2_t d2u8x2;  // d12, d13
-    uint8x8x2_t d3u8x2;  // d14, d15
-
-    qblimit = vdupq_n_u8(*blimit);
-
-    src1 = s - 2;
-    d0u8x4 = read_4x8(src1, p, d0u8x4);
-    src1 += p * 8;
-    d1u8x4 = read_4x8(src1, p, d1u8x4);
-
-    q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]);  // d6 d10
-    q4u8 = vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]);  // d8 d12
-    q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]);  // d7 d11
-    q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]);  // d9 d13
-
-    q15u8 = vabdq_u8(q5u8, q4u8);
-    q14u8 = vabdq_u8(q3u8, q6u8);
-
-    q15u8 = vqaddq_u8(q15u8, q15u8);
-    q14u8 = vshrq_n_u8(q14u8, 1);
-    q0u8 = vdupq_n_u8(0x80);
-    q11s16 = vdupq_n_s16(3);
-    q15u8 = vqaddq_u8(q15u8, q14u8);
-
-    q3u8 = veorq_u8(q3u8, q0u8);
-    q4u8 = veorq_u8(q4u8, q0u8);
-    q5u8 = veorq_u8(q5u8, q0u8);
-    q6u8 = veorq_u8(q6u8, q0u8);
-
-    q15u8 = vcgeq_u8(qblimit, q15u8);
-
-    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q4u8)),
-                     vget_low_s8(vreinterpretq_s8_u8(q5u8)));
-    q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q4u8)),
-                      vget_high_s8(vreinterpretq_s8_u8(q5u8)));
-
-    q14s8 = vqsubq_s8(vreinterpretq_s8_u8(q3u8),
-                      vreinterpretq_s8_u8(q6u8));
-
-    q2s16 = vmulq_s16(q2s16, q11s16);
-    q13s16 = vmulq_s16(q13s16, q11s16);
-
-    q11u8 = vdupq_n_u8(3);
-    q12u8 = vdupq_n_u8(4);
-
-    q2s16 = vaddw_s8(q2s16, vget_low_s8(q14s8));
-    q13s16 = vaddw_s8(q13s16, vget_high_s8(q14s8));
-
-    d28s8 = vqmovn_s16(q2s16);
-    d29s8 = vqmovn_s16(q13s16);
-    q14s8 = vcombine_s8(d28s8, d29s8);
-
-    q14s8 = vandq_s8(q14s8, vreinterpretq_s8_u8(q15u8));
-
-    q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q11u8));
-    q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q12u8));
-    q2s8 = vshrq_n_s8(q2s8, 3);
-    q14s8 = vshrq_n_s8(q3s8, 3);
-
-    q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q5u8), q2s8);
-    q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q4u8), q14s8);
-
-    q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
-    q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
-
-    d2u8x2.val[0] = vget_low_u8(q6u8);   // d12
-    d2u8x2.val[1] = vget_low_u8(q7u8);   // d14
-    d3u8x2.val[0] = vget_high_u8(q6u8);  // d13
-    d3u8x2.val[1] = vget_high_u8(q7u8);  // d15
-
-    src1 = s - 1;
-    write_2x8(src1, p, d2u8x2, d3u8x2);
-}
-
-void vp8_loop_filter_bvs_neon(
-        unsigned char *y_ptr,
-        int y_stride,
-        const unsigned char *blimit) {
-    y_ptr += 4;
-    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
-    y_ptr += 4;
-    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
-    y_ptr += 4;
-    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
-    return;
-}
-
-void vp8_loop_filter_mbvs_neon(
-        unsigned char *y_ptr,
-        int y_stride,
-        const unsigned char *blimit) {
-    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
-    return;
-}
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/mbloopfilter_neon.asm
@@ -0,0 +1,469 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_mbloop_filter_horizontal_edge_y_neon|
+    EXPORT  |vp8_mbloop_filter_horizontal_edge_uv_neon|
+    EXPORT  |vp8_mbloop_filter_vertical_edge_y_neon|
+    EXPORT  |vp8_mbloop_filter_vertical_edge_uv_neon|
+    ARM
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+
+; void vp8_mbloop_filter_horizontal_edge_y_neon(unsigned char *src, int pitch,
+;                                               unsigned char blimit,
+;                                               unsigned char limit,
+;                                               unsigned char thresh)
+; r0    unsigned char *src
+; r1    int pitch
+; r2    unsigned char blimit
+; r3    unsigned char limit
+; sp    unsigned char thresh
+|vp8_mbloop_filter_horizontal_edge_y_neon| PROC
+    push        {lr}
+    add         r1, r1, r1                  ; double stride
+    ldr         r12, [sp, #4]               ; load thresh
+    sub         r0, r0, r1, lsl #1          ; move src pointer down by 4 lines
+    vdup.u8     q2, r12                     ; thresh
+    add         r12, r0, r1,  lsr #1        ; move src pointer up by 1 line
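+    ; r0 and r12 now point one line apart and both step by the doubled
+    ; stride, so the loads below alternate between two address registers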
+
+    vld1.u8     {q3}, [r0@128], r1              ; p3
+    vld1.u8     {q4}, [r12@128], r1             ; p2
+    vld1.u8     {q5}, [r0@128], r1              ; p1
+    vld1.u8     {q6}, [r12@128], r1             ; p0
+    vld1.u8     {q7}, [r0@128], r1              ; q0
+    vld1.u8     {q8}, [r12@128], r1             ; q1
+    vld1.u8     {q9}, [r0@128], r1              ; q2
+    vld1.u8     {q10}, [r12@128], r1            ; q3
+
+    bl          vp8_mbloop_filter_neon
+
+    sub         r12, r12, r1, lsl #2
+    add         r0, r12, r1, lsr #1
+
+    vst1.u8     {q4}, [r12@128], r1         ; store op2
+    vst1.u8     {q5}, [r0@128], r1          ; store op1
+    vst1.u8     {q6}, [r12@128], r1         ; store op0
+    vst1.u8     {q7}, [r0@128], r1          ; store oq0
+    vst1.u8     {q8}, [r12@128]             ; store oq1
+    vst1.u8     {q9}, [r0@128]              ; store oq2
+
+    pop         {pc}
+    ENDP        ; |vp8_mbloop_filter_horizontal_edge_y_neon|
+
+; void vp8_mbloop_filter_horizontal_edge_uv_neon(unsigned char *u, int pitch,
+;                                                unsigned char blimit,
+;                                                unsigned char limit,
+;                                                unsigned char thresh,
+;                                                unsigned char *v)
+; r0    unsigned char *u
+; r1    int pitch
+; r2    unsigned char blimit
+; r3    unsigned char limit
+; sp    unsigned char thresh
+; sp+4  unsigned char *v
+
+|vp8_mbloop_filter_horizontal_edge_uv_neon| PROC
+    push        {lr}
+    ldr         r12, [sp, #4]                 ; load thresh
+    sub         r0, r0, r1, lsl #2            ; move u pointer down by 4 lines
+    vdup.u8     q2, r12                       ; thresh
+    ldr         r12, [sp, #8]                 ; load v ptr
+    sub         r12, r12, r1, lsl #2          ; move v pointer down by 4 lines
+
+    vld1.u8     {d6}, [r0@64], r1           ; u p3
+    vld1.u8     {d7}, [r12@64], r1          ; v p3
+    vld1.u8     {d8}, [r0@64], r1           ; u p2
+    vld1.u8     {d9}, [r12@64], r1          ; v p2
+    vld1.u8     {d10}, [r0@64], r1          ; u p1
+    vld1.u8     {d11}, [r12@64], r1         ; v p1
+    vld1.u8     {d12}, [r0@64], r1          ; u p0
+    vld1.u8     {d13}, [r12@64], r1         ; v p0
+    vld1.u8     {d14}, [r0@64], r1          ; u q0
+    vld1.u8     {d15}, [r12@64], r1         ; v q0
+    vld1.u8     {d16}, [r0@64], r1          ; u q1
+    vld1.u8     {d17}, [r12@64], r1         ; v q1
+    vld1.u8     {d18}, [r0@64], r1          ; u q2
+    vld1.u8     {d19}, [r12@64], r1         ; v q2
+    vld1.u8     {d20}, [r0@64], r1          ; u q3
+    vld1.u8     {d21}, [r12@64], r1         ; v q3
+
+    bl          vp8_mbloop_filter_neon
+
+    sub         r0, r0, r1, lsl #3
+    sub         r12, r12, r1, lsl #3
+
+    add         r0, r0, r1
+    add         r12, r12, r1
+
+    vst1.u8     {d8}, [r0@64], r1           ; store u op2
+    vst1.u8     {d9}, [r12@64], r1          ; store v op2
+    vst1.u8     {d10}, [r0@64], r1          ; store u op1
+    vst1.u8     {d11}, [r12@64], r1         ; store v op1
+    vst1.u8     {d12}, [r0@64], r1          ; store u op0
+    vst1.u8     {d13}, [r12@64], r1         ; store v op0
+    vst1.u8     {d14}, [r0@64], r1          ; store u oq0
+    vst1.u8     {d15}, [r12@64], r1         ; store v oq0
+    vst1.u8     {d16}, [r0@64], r1          ; store u oq1
+    vst1.u8     {d17}, [r12@64], r1         ; store v oq1
+    vst1.u8     {d18}, [r0@64], r1          ; store u oq2
+    vst1.u8     {d19}, [r12@64], r1         ; store v oq2
+
+    pop         {pc}
+    ENDP        ; |vp8_mbloop_filter_horizontal_edge_uv_neon|
+
+; void vp8_mbloop_filter_vertical_edge_y_neon(unsigned char *src, int pitch,
+;                                             unsigned char blimit,
+;                                             unsigned char limit,
+;                                             unsigned char thresh)
+; r0    unsigned char *src
+; r1    int pitch
+; r2    unsigned char blimit
+; r3    unsigned char limit
+; sp    unsigned char thresh
+|vp8_mbloop_filter_vertical_edge_y_neon| PROC
+    push        {lr}
+    ldr         r12, [sp, #4]               ; load thresh
+    sub         r0, r0, #4                  ; move src pointer back by 4 columns
+    vdup.s8     q2, r12                     ; thresh
+    add         r12, r0, r1, lsl #3         ; move src pointer down by 8 lines
+
+    vld1.u8     {d6}, [r0], r1              ; load first 8-line src data
+    vld1.u8     {d7}, [r12], r1             ; load second 8-line src data
+    vld1.u8     {d8}, [r0], r1
+    vld1.u8     {d9}, [r12], r1
+    vld1.u8     {d10}, [r0], r1
+    vld1.u8     {d11}, [r12], r1
+    vld1.u8     {d12}, [r0], r1
+    vld1.u8     {d13}, [r12], r1
+    vld1.u8     {d14}, [r0], r1
+    vld1.u8     {d15}, [r12], r1
+    vld1.u8     {d16}, [r0], r1
+    vld1.u8     {d17}, [r12], r1
+    vld1.u8     {d18}, [r0], r1
+    vld1.u8     {d19}, [r12], r1
+    vld1.u8     {d20}, [r0], r1
+    vld1.u8     {d21}, [r12], r1
+
+    ;transpose to 8x16 matrix
+    vtrn.32     q3, q7
+    vtrn.32     q4, q8
+    vtrn.32     q5, q9
+    vtrn.32     q6, q10
+
+    vtrn.16     q3, q5
+    vtrn.16     q4, q6
+    vtrn.16     q7, q9
+    vtrn.16     q8, q10
+
+    vtrn.8      q3, q4
+    vtrn.8      q5, q6
+    vtrn.8      q7, q8
+    vtrn.8      q9, q10
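+    ; the vtrn.32/.16/.8 cascade transposes the 16x8 block: q3..q10 now
+    ; each hold one column (p3 .. q3) across all 16 rows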
+
+    sub         r0, r0, r1, lsl #3
+
+    bl          vp8_mbloop_filter_neon
+
+    sub         r12, r12, r1, lsl #3
+
+    ;transpose to 16x8 matrix
+    vtrn.32     q3, q7
+    vtrn.32     q4, q8
+    vtrn.32     q5, q9
+    vtrn.32     q6, q10
+
+    vtrn.16     q3, q5
+    vtrn.16     q4, q6
+    vtrn.16     q7, q9
+    vtrn.16     q8, q10
+
+    vtrn.8      q3, q4
+    vtrn.8      q5, q6
+    vtrn.8      q7, q8
+    vtrn.8      q9, q10
+
+    ;store op2, op1, op0, oq0, oq1, oq2
+    vst1.8      {d6}, [r0], r1
+    vst1.8      {d7}, [r12], r1
+    vst1.8      {d8}, [r0], r1
+    vst1.8      {d9}, [r12], r1
+    vst1.8      {d10}, [r0], r1
+    vst1.8      {d11}, [r12], r1
+    vst1.8      {d12}, [r0], r1
+    vst1.8      {d13}, [r12], r1
+    vst1.8      {d14}, [r0], r1
+    vst1.8      {d15}, [r12], r1
+    vst1.8      {d16}, [r0], r1
+    vst1.8      {d17}, [r12], r1
+    vst1.8      {d18}, [r0], r1
+    vst1.8      {d19}, [r12], r1
+    vst1.8      {d20}, [r0]
+    vst1.8      {d21}, [r12]
+
+    pop         {pc}
+    ENDP        ; |vp8_mbloop_filter_vertical_edge_y_neon|
+
+; void vp8_mbloop_filter_vertical_edge_uv_neon(unsigned char *u, int pitch,
+;                                              unsigned char blimit,
+;                                              unsigned char limit,
+;                                              unsigned char thresh,
+;                                              unsigned char *v)
+; r0    unsigned char *u
+; r1    int pitch
+; r2    unsigned char blimit
+; r3    unsigned char limit
+; sp    unsigned char thresh
+; sp+4  unsigned char *v
+|vp8_mbloop_filter_vertical_edge_uv_neon| PROC
+    push        {lr}
+    ldr         r12, [sp, #4]               ; load thresh
+    sub         r0, r0, #4                  ; move u pointer back by 4 columns
+    vdup.u8     q2, r12                     ; thresh
+    ldr         r12, [sp, #8]               ; load v ptr
+    sub         r12, r12, #4                ; move v pointer back by 4 columns
+
+    vld1.u8     {d6}, [r0], r1              ;load u data
+    vld1.u8     {d7}, [r12], r1             ;load v data
+    vld1.u8     {d8}, [r0], r1
+    vld1.u8     {d9}, [r12], r1
+    vld1.u8     {d10}, [r0], r1
+    vld1.u8     {d11}, [r12], r1
+    vld1.u8     {d12}, [r0], r1
+    vld1.u8     {d13}, [r12], r1
+    vld1.u8     {d14}, [r0], r1
+    vld1.u8     {d15}, [r12], r1
+    vld1.u8     {d16}, [r0], r1
+    vld1.u8     {d17}, [r12], r1
+    vld1.u8     {d18}, [r0], r1
+    vld1.u8     {d19}, [r12], r1
+    vld1.u8     {d20}, [r0], r1
+    vld1.u8     {d21}, [r12], r1
+
+    ;transpose to 8x16 matrix
+    vtrn.32     q3, q7
+    vtrn.32     q4, q8
+    vtrn.32     q5, q9
+    vtrn.32     q6, q10
+
+    vtrn.16     q3, q5
+    vtrn.16     q4, q6
+    vtrn.16     q7, q9
+    vtrn.16     q8, q10
+
+    vtrn.8      q3, q4
+    vtrn.8      q5, q6
+    vtrn.8      q7, q8
+    vtrn.8      q9, q10
+
+    sub         r0, r0, r1, lsl #3
+
+    bl          vp8_mbloop_filter_neon
+
+    sub         r12, r12, r1, lsl #3
+
+    ;transpose to 16x8 matrix
+    vtrn.32     q3, q7
+    vtrn.32     q4, q8
+    vtrn.32     q5, q9
+    vtrn.32     q6, q10
+
+    vtrn.16     q3, q5
+    vtrn.16     q4, q6
+    vtrn.16     q7, q9
+    vtrn.16     q8, q10
+
+    vtrn.8      q3, q4
+    vtrn.8      q5, q6
+    vtrn.8      q7, q8
+    vtrn.8      q9, q10
+
+    ;store op2, op1, op0, oq0, oq1, oq2
+    vst1.8      {d6}, [r0], r1
+    vst1.8      {d7}, [r12], r1
+    vst1.8      {d8}, [r0], r1
+    vst1.8      {d9}, [r12], r1
+    vst1.8      {d10}, [r0], r1
+    vst1.8      {d11}, [r12], r1
+    vst1.8      {d12}, [r0], r1
+    vst1.8      {d13}, [r12], r1
+    vst1.8      {d14}, [r0], r1
+    vst1.8      {d15}, [r12], r1
+    vst1.8      {d16}, [r0], r1
+    vst1.8      {d17}, [r12], r1
+    vst1.8      {d18}, [r0], r1
+    vst1.8      {d19}, [r12], r1
+    vst1.8      {d20}, [r0]
+    vst1.8      {d21}, [r12]
+
+    pop         {pc}
+    ENDP        ; |vp8_mbloop_filter_vertical_edge_uv_neon|
+
+; void vp8_mbloop_filter_neon()
+; This is a helper function for the macroblock loopfilters. The individual
+; functions do the necessary load, transpose (if necessary), preserve (if
+; necessary) and store.
+
+; r0,r1 PRESERVE
+; r2    mblimit
+; r3    limit
+
+; q2    thresh
+; q3    p3 PRESERVE
+; q4    p2
+; q5    p1
+; q6    p0
+; q7    q0
+; q8    q1
+; q9    q2
+; q10   q3 PRESERVE
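+;
+; On return the filtered pixels are in q4..q9 (op2, op1, op0, oq0, oq1, oq2).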
+
+|vp8_mbloop_filter_neon| PROC
+
+    ; vp8_filter_mask
+    vabd.u8     q11, q3, q4                 ; abs(p3 - p2)
+    vabd.u8     q12, q4, q5                 ; abs(p2 - p1)
+    vabd.u8     q13, q5, q6                 ; abs(p1 - p0)
+    vabd.u8     q14, q8, q7                 ; abs(q1 - q0)
+    vabd.u8     q1, q9, q8                  ; abs(q2 - q1)
+    vabd.u8     q0, q10, q9                 ; abs(q3 - q2)
+
+    vmax.u8     q11, q11, q12
+    vmax.u8     q12, q13, q14
+    vmax.u8     q1, q1, q0
+    vmax.u8     q15, q11, q12
+
+    vabd.u8     q12, q6, q7                 ; abs(p0 - q0)
+
+    ; vp8_hevmask
+    vcgt.u8     q13, q13, q2                ; (abs(p1 - p0) > thresh) * -1
+    vcgt.u8     q14, q14, q2                ; (abs(q1 - q0) > thresh) * -1
+    vmax.u8     q15, q15, q1
+
+    vdup.u8     q1, r3                      ; limit
+    vdup.u8     q2, r2                      ; mblimit
+
+    vmov.u8     q0, #0x80                   ; 0x80
+
+    vcge.u8     q15, q1, q15                ; (limit >= max of the abs deltas) * -1
+
+    vabd.u8     q1, q5, q8                  ; a = abs(p1 - q1)
+    vqadd.u8    q12, q12, q12               ; b = abs(p0 - q0) * 2
+    vmov.u16    q11, #3                     ; #3
+
+    ; vp8_filter
+    ; convert to signed
+    veor        q7, q7, q0                  ; qs0
+    vshr.u8     q1, q1, #1                  ; a = a / 2
+    veor        q6, q6, q0                  ; ps0
+    veor        q5, q5, q0                  ; ps1
+
+    vqadd.u8    q12, q12, q1                ; a = b + a
+
+    veor        q8, q8, q0                  ; qs1
+    veor        q4, q4, q0                  ; ps2
+    veor        q9, q9, q0                  ; qs2
+
+    vorr        q14, q13, q14               ; vp8_hevmask
+
+    vcge.u8     q12, q2, q12                ; (mblimit >= abs(p0 - q0)*2 + abs(p1 - q1)/2) * -1
+
+    vsubl.s8    q2, d14, d12                ; qs0 - ps0
+    vsubl.s8    q13, d15, d13
+
+    vqsub.s8    q1, q5, q8                  ; vp8_filter = clamp(ps1-qs1)
+
+    vmul.i16    q2, q2, q11                 ; 3 * ( qs0 - ps0)
+
+    vand        q15, q15, q12               ; vp8_filter_mask
+
+    vmul.i16    q13, q13, q11
+
+    vmov.u8     q12, #3                     ; #3
+
+    vaddw.s8    q2, q2, d2                  ; vp8_filter + 3 * ( qs0 - ps0)
+    vaddw.s8    q13, q13, d3
+
+    vmov.u8     q11, #4                     ; #4
+
+    ; vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
+    vqmovn.s16  d2, q2
+    vqmovn.s16  d3, q13
+
+    vand        q1, q1, q15                 ; vp8_filter &= mask
+
+    vmov.u16    q15, #63                    ; #63, rounding bias for the >>7 below
+
+    vand        q13, q1, q14                ; Filter2 &= hev
+
+    vqadd.s8    q2, q13, q11                ; Filter1 = clamp(Filter2+4)
+    vqadd.s8    q13, q13, q12               ; Filter2 = clamp(Filter2+3)
+
+    vmov        q0, q15
+
+    vshr.s8     q2, q2, #3                  ; Filter1 >>= 3
+    vshr.s8     q13, q13, #3                ; Filter2 >>= 3
+
+    vmov        q11, q15
+    vmov        q12, q15
+
+    vqsub.s8    q7, q7, q2                  ; qs0 = clamp(qs0 - Filter1)
+
+    vqadd.s8    q6, q6, q13                 ; ps0 = clamp(ps0 + Filter2)
+
+    vbic        q1, q1, q14                 ; vp8_filter &= ~hev
+
+    ; the taps below move roughly 1/7th, 2/7th and 3/7th of the filter
+    ; value across the boundary: w*9/128 per side on p2/q2 is ~w/7 across
+    ; the pair, w*18 gives ~2w/7 on p1/q1, and w*27 gives ~3w/7 on p0/q0
+
+    vmov.u8     d5, #9                      ; #9
+    vmov.u8     d4, #18                     ; #18
+
+    vmov        q13, q15
+    vmov        q14, q15
+
+    vmlal.s8    q0, d2, d5                  ; 63 + Filter2 * 9
+    vmlal.s8    q11, d3, d5
+    vmov.u8     d5, #27                     ; #27
+    vmlal.s8    q12, d2, d4                 ; 63 + Filter2 * 18
+    vmlal.s8    q13, d3, d4
+    vmlal.s8    q14, d2, d5                 ; 63 + Filter2 * 27
+    vmlal.s8    q15, d3, d5
+
+    vqshrn.s16  d0, q0, #7                  ; u = clamp((63 + Filter2 * 9)>>7)
+    vqshrn.s16  d1, q11, #7
+    vqshrn.s16  d24, q12, #7                ; u = clamp((63 + Filter2 * 18)>>7)
+    vqshrn.s16  d25, q13, #7
+    vqshrn.s16  d28, q14, #7                ; u = clamp((63 + Filter2 * 27)>>7)
+    vqshrn.s16  d29, q15, #7
+
+    vmov.u8     q1, #0x80                   ; 0x80
+
+    vqsub.s8    q11, q9, q0                 ; s = clamp(qs2 - u)
+    vqadd.s8    q0, q4, q0                  ; s = clamp(ps2 + u)
+    vqsub.s8    q13, q8, q12                ; s = clamp(qs1 - u)
+    vqadd.s8    q12, q5, q12                ; s = clamp(ps1 + u)
+    vqsub.s8    q15, q7, q14                ; s = clamp(qs0 - u)
+    vqadd.s8    q14, q6, q14                ; s = clamp(ps0 + u)
+
+    veor        q9, q11, q1                 ; *oq2 = s^0x80
+    veor        q4, q0, q1                  ; *op2 = s^0x80
+    veor        q8, q13, q1                 ; *oq1 = s^0x80
+    veor        q5, q12, q1                 ; *op1 = s^0x80
+    veor        q7, q15, q1                 ; *oq0 = s^0x80
+    veor        q6, q14, q1                 ; *op0 = s^0x80
+
+    bx          lr
+    ENDP        ; |vp8_mbloop_filter_neon|
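+
+; Equivalent scalar arithmetic for one pixel position (a sketch: clamp()
+; stands for vp8_signed_char_clamp, w for the mask-ANDed filter value and
+; hev for the high-edge-variance mask computed above; results are XORed
+; with 0x80 on the way out, as in the veors above):
+;   Filter2 = clamp((w & hev) + 3) >> 3;  ps0 = clamp(ps0 + Filter2);
+;   Filter1 = clamp((w & hev) + 4) >> 3;  qs0 = clamp(qs0 - Filter1);
+;   w &= ~hev;
+;   u = clamp((63 + w * 27) >> 7);  ps0 = clamp(ps0 + u);  qs0 = clamp(qs0 - u);
+;   u = clamp((63 + w * 18) >> 7);  ps1 = clamp(ps1 + u);  qs1 = clamp(qs1 - u);
+;   u = clamp((63 + w *  9) >> 7);  ps2 = clamp(ps2 + u);  qs2 = clamp(qs2 - u);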
+
+;-----------------
+
+    END
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/mbloopfilter_neon.c
+++ /dev/null
@@ -1,625 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include "./vpx_config.h"
-
-static INLINE void vp8_mbloop_filter_neon(
-        uint8x16_t qblimit,  // mblimit
-        uint8x16_t qlimit,   // limit
-        uint8x16_t qthresh,  // thresh
-        uint8x16_t q3,       // p2
-        uint8x16_t q4,       // p2
-        uint8x16_t q5,       // p1
-        uint8x16_t q6,       // p0
-        uint8x16_t q7,       // q0
-        uint8x16_t q8,       // q1
-        uint8x16_t q9,       // q2
-        uint8x16_t q10,      // q3
-        uint8x16_t *q4r,     // p1
-        uint8x16_t *q5r,     // p1
-        uint8x16_t *q6r,     // p0
-        uint8x16_t *q7r,     // q0
-        uint8x16_t *q8r,     // q1
-        uint8x16_t *q9r) {   // q1
-    uint8x16_t q0u8, q1u8, q11u8, q12u8, q13u8, q14u8, q15u8;
-    int16x8_t q0s16, q2s16, q11s16, q12s16, q13s16, q14s16, q15s16;
-    int8x16_t q1s8, q6s8, q7s8, q2s8, q11s8, q13s8;
-    uint16x8_t q0u16, q11u16, q12u16, q13u16, q14u16, q15u16;
-    int8x16_t q0s8, q12s8, q14s8, q15s8;
-    int8x8_t d0, d1, d2, d3, d4, d5, d24, d25, d28, d29;
-
-    q11u8 = vabdq_u8(q3, q4);
-    q12u8 = vabdq_u8(q4, q5);
-    q13u8 = vabdq_u8(q5, q6);
-    q14u8 = vabdq_u8(q8, q7);
-    q1u8  = vabdq_u8(q9, q8);
-    q0u8  = vabdq_u8(q10, q9);
-
-    q11u8 = vmaxq_u8(q11u8, q12u8);
-    q12u8 = vmaxq_u8(q13u8, q14u8);
-    q1u8  = vmaxq_u8(q1u8, q0u8);
-    q15u8 = vmaxq_u8(q11u8, q12u8);
-
-    q12u8 = vabdq_u8(q6, q7);
-
-    // vp8_hevmask
-    q13u8 = vcgtq_u8(q13u8, qthresh);
-    q14u8 = vcgtq_u8(q14u8, qthresh);
-    q15u8 = vmaxq_u8(q15u8, q1u8);
-
-    q15u8 = vcgeq_u8(qlimit, q15u8);
-
-    q1u8 = vabdq_u8(q5, q8);
-    q12u8 = vqaddq_u8(q12u8, q12u8);
-
-    // vp8_filter() function
-    // convert to signed
-    q0u8 = vdupq_n_u8(0x80);
-    q9 = veorq_u8(q9, q0u8);
-    q8 = veorq_u8(q8, q0u8);
-    q7 = veorq_u8(q7, q0u8);
-    q6 = veorq_u8(q6, q0u8);
-    q5 = veorq_u8(q5, q0u8);
-    q4 = veorq_u8(q4, q0u8);
-
-    q1u8 = vshrq_n_u8(q1u8, 1);
-    q12u8 = vqaddq_u8(q12u8, q1u8);
-
-    q14u8 = vorrq_u8(q13u8, q14u8);
-    q12u8 = vcgeq_u8(qblimit, q12u8);
-
-    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
-                     vget_low_s8(vreinterpretq_s8_u8(q6)));
-    q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
-                      vget_high_s8(vreinterpretq_s8_u8(q6)));
-
-    q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5),
-                     vreinterpretq_s8_u8(q8));
-
-    q11s16 = vdupq_n_s16(3);
-    q2s16  = vmulq_s16(q2s16, q11s16);
-    q13s16 = vmulq_s16(q13s16, q11s16);
-
-    q15u8 = vandq_u8(q15u8, q12u8);
-
-    q2s16  = vaddw_s8(q2s16, vget_low_s8(q1s8));
-    q13s16 = vaddw_s8(q13s16, vget_high_s8(q1s8));
-
-    q12u8 = vdupq_n_u8(3);
-    q11u8 = vdupq_n_u8(4);
-    // vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
-    d2 = vqmovn_s16(q2s16);
-    d3 = vqmovn_s16(q13s16);
-    q1s8 = vcombine_s8(d2, d3);
-    q1s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q15u8));
-    q13s8 = vandq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
-
-    q2s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q11u8));
-    q13s8 = vqaddq_s8(q13s8, vreinterpretq_s8_u8(q12u8));
-    q2s8 = vshrq_n_s8(q2s8, 3);
-    q13s8 = vshrq_n_s8(q13s8, 3);
-
-    q7s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q2s8);
-    q6s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q13s8);
-
-    q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
-
-    q0u16 = q11u16 = q12u16 = q13u16 = q14u16 = q15u16 = vdupq_n_u16(63);
-    d5 = vdup_n_s8(9);
-    d4 = vdup_n_s8(18);
-
-    q0s16  = vmlal_s8(vreinterpretq_s16_u16(q0u16),  vget_low_s8(q1s8),  d5);
-    q11s16 = vmlal_s8(vreinterpretq_s16_u16(q11u16), vget_high_s8(q1s8), d5);
-    d5 = vdup_n_s8(27);
-    q12s16 = vmlal_s8(vreinterpretq_s16_u16(q12u16), vget_low_s8(q1s8),  d4);
-    q13s16 = vmlal_s8(vreinterpretq_s16_u16(q13u16), vget_high_s8(q1s8), d4);
-    q14s16 = vmlal_s8(vreinterpretq_s16_u16(q14u16), vget_low_s8(q1s8),  d5);
-    q15s16 = vmlal_s8(vreinterpretq_s16_u16(q15u16), vget_high_s8(q1s8), d5);
-
-    d0  = vqshrn_n_s16(q0s16 , 7);
-    d1  = vqshrn_n_s16(q11s16, 7);
-    d24 = vqshrn_n_s16(q12s16, 7);
-    d25 = vqshrn_n_s16(q13s16, 7);
-    d28 = vqshrn_n_s16(q14s16, 7);
-    d29 = vqshrn_n_s16(q15s16, 7);
-
-    q0s8  = vcombine_s8(d0, d1);
-    q12s8 = vcombine_s8(d24, d25);
-    q14s8 = vcombine_s8(d28, d29);
-
-    q11s8 = vqsubq_s8(vreinterpretq_s8_u8(q9), q0s8);
-    q0s8  = vqaddq_s8(vreinterpretq_s8_u8(q4), q0s8);
-    q13s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q12s8);
-    q12s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q12s8);
-    q15s8 = vqsubq_s8((q7s8), q14s8);
-    q14s8 = vqaddq_s8((q6s8), q14s8);
-
-    q1u8 = vdupq_n_u8(0x80);
-    *q9r = veorq_u8(vreinterpretq_u8_s8(q11s8), q1u8);
-    *q8r = veorq_u8(vreinterpretq_u8_s8(q13s8), q1u8);
-    *q7r = veorq_u8(vreinterpretq_u8_s8(q15s8), q1u8);
-    *q6r = veorq_u8(vreinterpretq_u8_s8(q14s8), q1u8);
-    *q5r = veorq_u8(vreinterpretq_u8_s8(q12s8), q1u8);
-    *q4r = veorq_u8(vreinterpretq_u8_s8(q0s8), q1u8);
-    return;
-}
-
-void vp8_mbloop_filter_horizontal_edge_y_neon(
-        unsigned char *src,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh) {
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    src -= (pitch << 2);
-
-    q3 = vld1q_u8(src);
-    src += pitch;
-    q4 = vld1q_u8(src);
-    src += pitch;
-    q5 = vld1q_u8(src);
-    src += pitch;
-    q6 = vld1q_u8(src);
-    src += pitch;
-    q7 = vld1q_u8(src);
-    src += pitch;
-    q8 = vld1q_u8(src);
-    src += pitch;
-    q9 = vld1q_u8(src);
-    src += pitch;
-    q10 = vld1q_u8(src);
-
-    vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q4, &q5, &q6, &q7, &q8, &q9);
-
-    src -= (pitch * 6);
-    vst1q_u8(src, q4);
-    src += pitch;
-    vst1q_u8(src, q5);
-    src += pitch;
-    vst1q_u8(src, q6);
-    src += pitch;
-    vst1q_u8(src, q7);
-    src += pitch;
-    vst1q_u8(src, q8);
-    src += pitch;
-    vst1q_u8(src, q9);
-    return;
-}
-
-void vp8_mbloop_filter_horizontal_edge_uv_neon(
-        unsigned char *u,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh,
-        unsigned char *v) {
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    u -= (pitch << 2);
-    v -= (pitch << 2);
-
-    d6 = vld1_u8(u);
-    u += pitch;
-    d7 = vld1_u8(v);
-    v += pitch;
-    d8 = vld1_u8(u);
-    u += pitch;
-    d9 = vld1_u8(v);
-    v += pitch;
-    d10 = vld1_u8(u);
-    u += pitch;
-    d11 = vld1_u8(v);
-    v += pitch;
-    d12 = vld1_u8(u);
-    u += pitch;
-    d13 = vld1_u8(v);
-    v += pitch;
-    d14 = vld1_u8(u);
-    u += pitch;
-    d15 = vld1_u8(v);
-    v += pitch;
-    d16 = vld1_u8(u);
-    u += pitch;
-    d17 = vld1_u8(v);
-    v += pitch;
-    d18 = vld1_u8(u);
-    u += pitch;
-    d19 = vld1_u8(v);
-    v += pitch;
-    d20 = vld1_u8(u);
-    d21 = vld1_u8(v);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q4, &q5, &q6, &q7, &q8, &q9);
-
-    u -= (pitch * 6);
-    v -= (pitch * 6);
-    vst1_u8(u, vget_low_u8(q4));
-    u += pitch;
-    vst1_u8(v, vget_high_u8(q4));
-    v += pitch;
-    vst1_u8(u, vget_low_u8(q5));
-    u += pitch;
-    vst1_u8(v, vget_high_u8(q5));
-    v += pitch;
-    vst1_u8(u, vget_low_u8(q6));
-    u += pitch;
-    vst1_u8(v, vget_high_u8(q6));
-    v += pitch;
-    vst1_u8(u, vget_low_u8(q7));
-    u += pitch;
-    vst1_u8(v, vget_high_u8(q7));
-    v += pitch;
-    vst1_u8(u, vget_low_u8(q8));
-    u += pitch;
-    vst1_u8(v, vget_high_u8(q8));
-    v += pitch;
-    vst1_u8(u, vget_low_u8(q9));
-    vst1_u8(v, vget_high_u8(q9));
-    return;
-}
-
-void vp8_mbloop_filter_vertical_edge_y_neon(
-        unsigned char *src,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh) {
-    unsigned char *s1, *s2;
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
-    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
-    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    s1 = src - 4;
-    s2 = s1 + 8 * pitch;
-    d6  = vld1_u8(s1);
-    s1 += pitch;
-    d7  = vld1_u8(s2);
-    s2 += pitch;
-    d8  = vld1_u8(s1);
-    s1 += pitch;
-    d9  = vld1_u8(s2);
-    s2 += pitch;
-    d10 = vld1_u8(s1);
-    s1 += pitch;
-    d11 = vld1_u8(s2);
-    s2 += pitch;
-    d12 = vld1_u8(s1);
-    s1 += pitch;
-    d13 = vld1_u8(s2);
-    s2 += pitch;
-    d14 = vld1_u8(s1);
-    s1 += pitch;
-    d15 = vld1_u8(s2);
-    s2 += pitch;
-    d16 = vld1_u8(s1);
-    s1 += pitch;
-    d17 = vld1_u8(s2);
-    s2 += pitch;
-    d18 = vld1_u8(s1);
-    s1 += pitch;
-    d19 = vld1_u8(s2);
-    s2 += pitch;
-    d20 = vld1_u8(s1);
-    d21 = vld1_u8(s2);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
-    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
-    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
-    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
-
-    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
-                       vreinterpretq_u16_u32(q2tmp2.val[0]));
-    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
-                       vreinterpretq_u16_u32(q2tmp3.val[0]));
-    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
-                       vreinterpretq_u16_u32(q2tmp2.val[1]));
-    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
-                       vreinterpretq_u16_u32(q2tmp3.val[1]));
-
-    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
-                       vreinterpretq_u8_u16(q2tmp5.val[0]));
-    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
-                       vreinterpretq_u8_u16(q2tmp5.val[1]));
-    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
-                       vreinterpretq_u8_u16(q2tmp7.val[0]));
-    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
-                       vreinterpretq_u8_u16(q2tmp7.val[1]));
-
-    q3 = q2tmp8.val[0];
-    q4 = q2tmp8.val[1];
-    q5 = q2tmp9.val[0];
-    q6 = q2tmp9.val[1];
-    q7 = q2tmp10.val[0];
-    q8 = q2tmp10.val[1];
-    q9 = q2tmp11.val[0];
-    q10 = q2tmp11.val[1];
-
-    vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q4, &q5, &q6, &q7, &q8, &q9);
-
-    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
-    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
-    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
-    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
-
-    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
-                       vreinterpretq_u16_u32(q2tmp2.val[0]));
-    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
-                       vreinterpretq_u16_u32(q2tmp3.val[0]));
-    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
-                       vreinterpretq_u16_u32(q2tmp2.val[1]));
-    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
-                       vreinterpretq_u16_u32(q2tmp3.val[1]));
-
-    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
-                       vreinterpretq_u8_u16(q2tmp5.val[0]));
-    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
-                       vreinterpretq_u8_u16(q2tmp5.val[1]));
-    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
-                       vreinterpretq_u8_u16(q2tmp7.val[0]));
-    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
-                       vreinterpretq_u8_u16(q2tmp7.val[1]));
-
-    q3 = q2tmp8.val[0];
-    q4 = q2tmp8.val[1];
-    q5 = q2tmp9.val[0];
-    q6 = q2tmp9.val[1];
-    q7 = q2tmp10.val[0];
-    q8 = q2tmp10.val[1];
-    q9 = q2tmp11.val[0];
-    q10 = q2tmp11.val[1];
-
-    s1 -= 7 * pitch;
-    s2 -= 7 * pitch;
-
-    vst1_u8(s1, vget_low_u8(q3));
-    s1 += pitch;
-    vst1_u8(s2, vget_high_u8(q3));
-    s2 += pitch;
-    vst1_u8(s1, vget_low_u8(q4));
-    s1 += pitch;
-    vst1_u8(s2, vget_high_u8(q4));
-    s2 += pitch;
-    vst1_u8(s1, vget_low_u8(q5));
-    s1 += pitch;
-    vst1_u8(s2, vget_high_u8(q5));
-    s2 += pitch;
-    vst1_u8(s1, vget_low_u8(q6));
-    s1 += pitch;
-    vst1_u8(s2, vget_high_u8(q6));
-    s2 += pitch;
-    vst1_u8(s1, vget_low_u8(q7));
-    s1 += pitch;
-    vst1_u8(s2, vget_high_u8(q7));
-    s2 += pitch;
-    vst1_u8(s1, vget_low_u8(q8));
-    s1 += pitch;
-    vst1_u8(s2, vget_high_u8(q8));
-    s2 += pitch;
-    vst1_u8(s1, vget_low_u8(q9));
-    s1 += pitch;
-    vst1_u8(s2, vget_high_u8(q9));
-    s2 += pitch;
-    vst1_u8(s1, vget_low_u8(q10));
-    vst1_u8(s2, vget_high_u8(q10));
-    return;
-}
-
-void vp8_mbloop_filter_vertical_edge_uv_neon(
-        unsigned char *u,
-        int pitch,
-        unsigned char blimit,
-        unsigned char limit,
-        unsigned char thresh,
-        unsigned char *v) {
-    unsigned char *us, *ud;
-    unsigned char *vs, *vd;
-    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
-    uint8x16_t q5, q6, q7, q8, q9, q10;
-    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
-    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
-    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
-    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
-    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
-
-    qblimit = vdupq_n_u8(blimit);
-    qlimit = vdupq_n_u8(limit);
-    qthresh = vdupq_n_u8(thresh);
-
-    us = u - 4;
-    vs = v - 4;
-    d6 = vld1_u8(us);
-    us += pitch;
-    d7 = vld1_u8(vs);
-    vs += pitch;
-    d8 = vld1_u8(us);
-    us += pitch;
-    d9 = vld1_u8(vs);
-    vs += pitch;
-    d10 = vld1_u8(us);
-    us += pitch;
-    d11 = vld1_u8(vs);
-    vs += pitch;
-    d12 = vld1_u8(us);
-    us += pitch;
-    d13 = vld1_u8(vs);
-    vs += pitch;
-    d14 = vld1_u8(us);
-    us += pitch;
-    d15 = vld1_u8(vs);
-    vs += pitch;
-    d16 = vld1_u8(us);
-    us += pitch;
-    d17 = vld1_u8(vs);
-    vs += pitch;
-    d18 = vld1_u8(us);
-    us += pitch;
-    d19 = vld1_u8(vs);
-    vs += pitch;
-    d20 = vld1_u8(us);
-    d21 = vld1_u8(vs);
-
-    q3 = vcombine_u8(d6, d7);
-    q4 = vcombine_u8(d8, d9);
-    q5 = vcombine_u8(d10, d11);
-    q6 = vcombine_u8(d12, d13);
-    q7 = vcombine_u8(d14, d15);
-    q8 = vcombine_u8(d16, d17);
-    q9 = vcombine_u8(d18, d19);
-    q10 = vcombine_u8(d20, d21);
-
-    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
-    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
-    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
-    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
-
-    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
-                       vreinterpretq_u16_u32(q2tmp2.val[0]));
-    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
-                       vreinterpretq_u16_u32(q2tmp3.val[0]));
-    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
-                       vreinterpretq_u16_u32(q2tmp2.val[1]));
-    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
-                       vreinterpretq_u16_u32(q2tmp3.val[1]));
-
-    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
-                       vreinterpretq_u8_u16(q2tmp5.val[0]));
-    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
-                       vreinterpretq_u8_u16(q2tmp5.val[1]));
-    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
-                       vreinterpretq_u8_u16(q2tmp7.val[0]));
-    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
-                       vreinterpretq_u8_u16(q2tmp7.val[1]));
-
-    q3 = q2tmp8.val[0];
-    q4 = q2tmp8.val[1];
-    q5 = q2tmp9.val[0];
-    q6 = q2tmp9.val[1];
-    q7 = q2tmp10.val[0];
-    q8 = q2tmp10.val[1];
-    q9 = q2tmp11.val[0];
-    q10 = q2tmp11.val[1];
-
-    vp8_mbloop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
-                         q5, q6, q7, q8, q9, q10,
-                         &q4, &q5, &q6, &q7, &q8, &q9);
-
-    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
-    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
-    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
-    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
-
-    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
-                       vreinterpretq_u16_u32(q2tmp2.val[0]));
-    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
-                       vreinterpretq_u16_u32(q2tmp3.val[0]));
-    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
-                       vreinterpretq_u16_u32(q2tmp2.val[1]));
-    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
-                       vreinterpretq_u16_u32(q2tmp3.val[1]));
-
-    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
-                       vreinterpretq_u8_u16(q2tmp5.val[0]));
-    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
-                       vreinterpretq_u8_u16(q2tmp5.val[1]));
-    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
-                       vreinterpretq_u8_u16(q2tmp7.val[0]));
-    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
-                       vreinterpretq_u8_u16(q2tmp7.val[1]));
-
-    q3 = q2tmp8.val[0];
-    q4 = q2tmp8.val[1];
-    q5 = q2tmp9.val[0];
-    q6 = q2tmp9.val[1];
-    q7 = q2tmp10.val[0];
-    q8 = q2tmp10.val[1];
-    q9 = q2tmp11.val[0];
-    q10 = q2tmp11.val[1];
-
-    ud = u - 4;
-    vst1_u8(ud, vget_low_u8(q3));
-    ud += pitch;
-    vst1_u8(ud, vget_low_u8(q4));
-    ud += pitch;
-    vst1_u8(ud, vget_low_u8(q5));
-    ud += pitch;
-    vst1_u8(ud, vget_low_u8(q6));
-    ud += pitch;
-    vst1_u8(ud, vget_low_u8(q7));
-    ud += pitch;
-    vst1_u8(ud, vget_low_u8(q8));
-    ud += pitch;
-    vst1_u8(ud, vget_low_u8(q9));
-    ud += pitch;
-    vst1_u8(ud, vget_low_u8(q10));
-
-    vd = v - 4;
-    vst1_u8(vd, vget_high_u8(q3));
-    vd += pitch;
-    vst1_u8(vd, vget_high_u8(q4));
-    vd += pitch;
-    vst1_u8(vd, vget_high_u8(q5));
-    vd += pitch;
-    vst1_u8(vd, vget_high_u8(q6));
-    vd += pitch;
-    vst1_u8(vd, vget_high_u8(q7));
-    vd += pitch;
-    vst1_u8(vd, vget_high_u8(q8));
-    vd += pitch;
-    vst1_u8(vd, vget_high_u8(q9));
-    vd += pitch;
-    vst1_u8(vd, vget_high_u8(q10));
-    return;
-}
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/reconintra_neon.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-#include "vp8/common/blockd.h"
-
-void vp8_build_intra_predictors_mby_s_neon(MACROBLOCKD *x,
-                                           unsigned char * yabove_row,
-                                           unsigned char * yleft,
-                                           int left_stride,
-                                           unsigned char * ypred_ptr,
-                                           int y_stride) {
-  const int mode = x->mode_info_context->mbmi.mode;
-  int i;
-
-  switch (mode) {
-    case DC_PRED:
-    {
-      int shift = x->up_available + x->left_available;
-      uint8x16_t v_expected_dc = vdupq_n_u8(128);
-
-      if (shift) {
-        unsigned int average = 0;
-        int expected_dc;
-        if (x->up_available) {
-          const uint8x16_t v_above = vld1q_u8(yabove_row);
-          const uint16x8_t a = vpaddlq_u8(v_above);
-          const uint32x4_t b = vpaddlq_u16(a);
-          const uint64x2_t c = vpaddlq_u32(b);
-          const uint32x2_t d = vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)),
-                                        vreinterpret_u32_u64(vget_high_u64(c)));
-          average = vget_lane_u32(d, 0);
-        }
-        if (x->left_available) {
-          for (i = 0; i < 16; ++i) {
-              average += yleft[0];
-              yleft += left_stride;
-          }
-        }
-        shift += 3;
-        expected_dc = (average + (1 << (shift - 1))) >> shift;
-        v_expected_dc = vmovq_n_u8((uint8_t)expected_dc);
-      }
-      for (i = 0; i < 16; ++i) {
-        vst1q_u8(ypred_ptr, v_expected_dc);
-        ypred_ptr += y_stride;
-      }
-    }
-    break;
-    case V_PRED:
-    {
-      const uint8x16_t v_above = vld1q_u8(yabove_row);
-      for (i = 0; i < 16; ++i) {
-        vst1q_u8(ypred_ptr, v_above);
-        ypred_ptr += y_stride;
-      }
-    }
-    break;
-    case H_PRED:
-    {
-      for (i = 0; i < 16; ++i) {
-        const uint8x16_t v_yleft = vmovq_n_u8((uint8_t)yleft[0]);
-        yleft += left_stride;
-        vst1q_u8(ypred_ptr, v_yleft);
-        ypred_ptr += y_stride;
-      }
-    }
-    break;
-    case TM_PRED:
-    {
-      const uint16x8_t v_ytop_left = vmovq_n_u16((int16_t)yabove_row[-1]);
-      const uint8x16_t v_above = vld1q_u8(yabove_row);
-      for (i = 0; i < 16; ++i) {
-        const uint8x8_t v_yleft = vmov_n_u8((int8_t)yleft[0]);
-        const uint16x8_t a_lo = vaddl_u8(vget_low_u8(v_above), v_yleft);
-        const uint16x8_t a_hi = vaddl_u8(vget_high_u8(v_above), v_yleft);
-        const int16x8_t b_lo = vsubq_s16(vreinterpretq_s16_u16(a_lo),
-                                         vreinterpretq_s16_u16(v_ytop_left));
-        const int16x8_t b_hi = vsubq_s16(vreinterpretq_s16_u16(a_hi),
-                                         vreinterpretq_s16_u16(v_ytop_left));
-        const uint8x8_t pred_lo = vqmovun_s16(b_lo);
-        const uint8x8_t pred_hi = vqmovun_s16(b_hi);
-
-        vst1q_u8(ypred_ptr, vcombine_u8(pred_lo, pred_hi));
-        ypred_ptr += y_stride;
-        yleft += left_stride;
-      }
-    }
-    break;
-  }
-}
-
-void vp8_build_intra_predictors_mbuv_s_neon(MACROBLOCKD *x,
-                                            unsigned char * uabove_row,
-                                            unsigned char * vabove_row,
-                                            unsigned char * uleft,
-                                            unsigned char * vleft,
-                                            int left_stride,
-                                            unsigned char * upred_ptr,
-                                            unsigned char * vpred_ptr,
-                                            int pred_stride) {
-  const int mode = x->mode_info_context->mbmi.uv_mode;
-  int i;
-
-  switch (mode) {
-    case DC_PRED:
-    {
-      int shift = x->up_available + x->left_available;
-      uint8x8_t v_expected_udc = vdup_n_u8(128);
-      uint8x8_t v_expected_vdc = vdup_n_u8(128);
-
-      if (shift) {
-        unsigned int average_u = 0;
-        unsigned int average_v = 0;
-        int expected_udc;
-        int expected_vdc;
-        if (x->up_available) {
-          const uint8x8_t v_uabove = vld1_u8(uabove_row);
-          const uint8x8_t v_vabove = vld1_u8(vabove_row);
-          const uint16x8_t a = vpaddlq_u8(vcombine_u8(v_uabove, v_vabove));
-          const uint32x4_t b = vpaddlq_u16(a);
-          const uint64x2_t c = vpaddlq_u32(b);
-          average_u = vgetq_lane_u32(vreinterpretq_u32_u64((c)), 0);
-          average_v = vgetq_lane_u32(vreinterpretq_u32_u64((c)), 2);
-        }
-        if (x->left_available) {
-          for (i = 0; i < 8; ++i) {
-              average_u += uleft[0];
-              uleft += left_stride;
-              average_v += vleft[0];
-              vleft += left_stride;
-          }
-        }
-        shift += 2;
-        expected_udc = (average_u + (1 << (shift - 1))) >> shift;
-        expected_vdc = (average_v + (1 << (shift - 1))) >> shift;
-        v_expected_udc = vmov_n_u8((uint8_t)expected_udc);
-        v_expected_vdc = vmov_n_u8((uint8_t)expected_vdc);
-      }
-      for (i = 0; i < 8; ++i) {
-        vst1_u8(upred_ptr, v_expected_udc);
-        upred_ptr += pred_stride;
-        vst1_u8(vpred_ptr, v_expected_vdc);
-        vpred_ptr += pred_stride;
-      }
-    }
-    break;
-    case V_PRED:
-    {
-      const uint8x8_t v_uabove = vld1_u8(uabove_row);
-      const uint8x8_t v_vabove = vld1_u8(vabove_row);
-      for (i = 0; i < 8; ++i) {
-        vst1_u8(upred_ptr, v_uabove);
-        upred_ptr += pred_stride;
-        vst1_u8(vpred_ptr, v_vabove);
-        vpred_ptr += pred_stride;
-      }
-    }
-    break;
-    case H_PRED:
-    {
-      for (i = 0; i < 8; ++i) {
-        const uint8x8_t v_uleft = vmov_n_u8((uint8_t)uleft[0]);
-        const uint8x8_t v_vleft = vmov_n_u8((uint8_t)vleft[0]);
-        uleft += left_stride;
-        vleft += left_stride;
-        vst1_u8(upred_ptr, v_uleft);
-        upred_ptr += pred_stride;
-        vst1_u8(vpred_ptr, v_vleft);
-        vpred_ptr += pred_stride;
-      }
-    }
-    break;
-    case TM_PRED:
-    {
-      const uint16x8_t v_utop_left = vmovq_n_u16((int16_t)uabove_row[-1]);
-      const uint16x8_t v_vtop_left = vmovq_n_u16((int16_t)vabove_row[-1]);
-      const uint8x8_t v_uabove = vld1_u8(uabove_row);
-      const uint8x8_t v_vabove = vld1_u8(vabove_row);
-      for (i = 0; i < 8; ++i) {
-        const uint8x8_t v_uleft = vmov_n_u8((int8_t)uleft[0]);
-        const uint8x8_t v_vleft = vmov_n_u8((int8_t)vleft[0]);
-        const uint16x8_t a_u = vaddl_u8(v_uabove, v_uleft);
-        const uint16x8_t a_v = vaddl_u8(v_vabove, v_vleft);
-        const int16x8_t b_u = vsubq_s16(vreinterpretq_s16_u16(a_u),
-                                        vreinterpretq_s16_u16(v_utop_left));
-        const int16x8_t b_v = vsubq_s16(vreinterpretq_s16_u16(a_v),
-                                        vreinterpretq_s16_u16(v_vtop_left));
-        const uint8x8_t pred_u = vqmovun_s16(b_u);
-        const uint8x8_t pred_v = vqmovun_s16(b_v);
-
-        vst1_u8(upred_ptr, pred_u);
-        vst1_u8(vpred_ptr, pred_v);
-        upred_ptr += pred_stride;
-        vpred_ptr += pred_stride;
-        uleft += left_stride;
-        vleft += left_stride;
-      }
-    }
-    break;
-  }
-}
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/sad16_neon.asm
@@ -0,0 +1,207 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_sad16x16_neon|
+    EXPORT  |vp8_sad16x8_neon|
+
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+
+; r0    unsigned char *src_ptr
+; r1    int  src_stride
+; r2    unsigned char *ref_ptr
+; r3    int  ref_stride
+|vp8_sad16x16_neon| PROC
+;;
+    vld1.8          {q0}, [r0], r1
+    vld1.8          {q4}, [r2], r3
+
+    vld1.8          {q1}, [r0], r1
+    vld1.8          {q5}, [r2], r3
+
+    vabdl.u8        q12, d0, d8
+    vabdl.u8        q13, d1, d9
+
+    vld1.8          {q2}, [r0], r1
+    vld1.8          {q6}, [r2], r3
+
+    vabal.u8        q12, d2, d10
+    vabal.u8        q13, d3, d11
+
+    vld1.8          {q3}, [r0], r1
+    vld1.8          {q7}, [r2], r3
+
+    vabal.u8        q12, d4, d12
+    vabal.u8        q13, d5, d13
+
+;;
+    vld1.8          {q0}, [r0], r1
+    vld1.8          {q4}, [r2], r3
+
+    vabal.u8        q12, d6, d14
+    vabal.u8        q13, d7, d15
+
+    vld1.8          {q1}, [r0], r1
+    vld1.8          {q5}, [r2], r3
+
+    vabal.u8        q12, d0, d8
+    vabal.u8        q13, d1, d9
+
+    vld1.8          {q2}, [r0], r1
+    vld1.8          {q6}, [r2], r3
+
+    vabal.u8        q12, d2, d10
+    vabal.u8        q13, d3, d11
+
+    vld1.8          {q3}, [r0], r1
+    vld1.8          {q7}, [r2], r3
+
+    vabal.u8        q12, d4, d12
+    vabal.u8        q13, d5, d13
+
+;;
+    vld1.8          {q0}, [r0], r1
+    vld1.8          {q4}, [r2], r3
+
+    vabal.u8        q12, d6, d14
+    vabal.u8        q13, d7, d15
+
+    vld1.8          {q1}, [r0], r1
+    vld1.8          {q5}, [r2], r3
+
+    vabal.u8        q12, d0, d8
+    vabal.u8        q13, d1, d9
+
+    vld1.8          {q2}, [r0], r1
+    vld1.8          {q6}, [r2], r3
+
+    vabal.u8        q12, d2, d10
+    vabal.u8        q13, d3, d11
+
+    vld1.8          {q3}, [r0], r1
+    vld1.8          {q7}, [r2], r3
+
+    vabal.u8        q12, d4, d12
+    vabal.u8        q13, d5, d13
+
+;;
+    vld1.8          {q0}, [r0], r1
+    vld1.8          {q4}, [r2], r3
+
+    vabal.u8        q12, d6, d14
+    vabal.u8        q13, d7, d15
+
+    vld1.8          {q1}, [r0], r1
+    vld1.8          {q5}, [r2], r3
+
+    vabal.u8        q12, d0, d8
+    vabal.u8        q13, d1, d9
+
+    vld1.8          {q2}, [r0], r1
+    vld1.8          {q6}, [r2], r3
+
+    vabal.u8        q12, d2, d10
+    vabal.u8        q13, d3, d11
+
+    vld1.8          {q3}, [r0]
+    vld1.8          {q7}, [r2]
+
+    vabal.u8        q12, d4, d12
+    vabal.u8        q13, d5, d13
+
+    vabal.u8        q12, d6, d14
+    vabal.u8        q13, d7, d15
+
+    vadd.u16        q0, q12, q13
+
+    vpaddl.u16      q1, q0
+    vpaddl.u32      q0, q1
+
+    vadd.u32        d0, d0, d1
+
+    vmov.32         r0, d0[0]
+
+    bx              lr
+
+    ENDP
+
+;==============================
+;unsigned int vp8_sad16x8_c(
+;    unsigned char *src_ptr,
+;    int  src_stride,
+;    unsigned char *ref_ptr,
+;    int  ref_stride)
+|vp8_sad16x8_neon| PROC
+    vld1.8          {q0}, [r0], r1
+    vld1.8          {q4}, [r2], r3
+
+    vld1.8          {q1}, [r0], r1
+    vld1.8          {q5}, [r2], r3
+
+    vabdl.u8        q12, d0, d8
+    vabdl.u8        q13, d1, d9
+
+    vld1.8          {q2}, [r0], r1
+    vld1.8          {q6}, [r2], r3
+
+    vabal.u8        q12, d2, d10
+    vabal.u8        q13, d3, d11
+
+    vld1.8          {q3}, [r0], r1
+    vld1.8          {q7}, [r2], r3
+
+    vabal.u8        q12, d4, d12
+    vabal.u8        q13, d5, d13
+
+    vld1.8          {q0}, [r0], r1
+    vld1.8          {q4}, [r2], r3
+
+    vabal.u8        q12, d6, d14
+    vabal.u8        q13, d7, d15
+
+    vld1.8          {q1}, [r0], r1
+    vld1.8          {q5}, [r2], r3
+
+    vabal.u8        q12, d0, d8
+    vabal.u8        q13, d1, d9
+
+    vld1.8          {q2}, [r0], r1
+    vld1.8          {q6}, [r2], r3
+
+    vabal.u8        q12, d2, d10
+    vabal.u8        q13, d3, d11
+
+    vld1.8          {q3}, [r0], r1
+    vld1.8          {q7}, [r2], r3
+
+    vabal.u8        q12, d4, d12
+    vabal.u8        q13, d5, d13
+
+    vabal.u8        q12, d6, d14
+    vabal.u8        q13, d7, d15
+
+    vadd.u16        q0, q12, q13
+
+    vpaddl.u16      q1, q0
+    vpaddl.u32      q0, q1
+
+    vadd.u32        d0, d0, d1
+
+    vmov.32         r0, d0[0]
+
+    bx              lr
+
+    ENDP
+
+    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/sad8_neon.asm
@@ -0,0 +1,209 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_sad8x8_neon|
+    EXPORT  |vp8_sad8x16_neon|
+    EXPORT  |vp8_sad4x4_neon|
+
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+; unsigned int vp8_sad8x8_c(
+;    unsigned char *src_ptr,
+;    int  src_stride,
+;    unsigned char *ref_ptr,
+;    int  ref_stride)
+
+|vp8_sad8x8_neon| PROC
+    vld1.8          {d0}, [r0], r1
+    vld1.8          {d8}, [r2], r3
+
+    vld1.8          {d2}, [r0], r1
+    vld1.8          {d10}, [r2], r3
+
+    vabdl.u8        q12, d0, d8
+
+    vld1.8          {d4}, [r0], r1
+    vld1.8          {d12}, [r2], r3
+
+    vabal.u8        q12, d2, d10
+
+    vld1.8          {d6}, [r0], r1
+    vld1.8          {d14}, [r2], r3
+
+    vabal.u8        q12, d4, d12
+
+    vld1.8          {d0}, [r0], r1
+    vld1.8          {d8}, [r2], r3
+
+    vabal.u8        q12, d6, d14
+
+    vld1.8          {d2}, [r0], r1
+    vld1.8          {d10}, [r2], r3
+
+    vabal.u8        q12, d0, d8
+
+    vld1.8          {d4}, [r0], r1
+    vld1.8          {d12}, [r2], r3
+
+    vabal.u8        q12, d2, d10
+
+    vld1.8          {d6}, [r0], r1
+    vld1.8          {d14}, [r2], r3
+
+    vabal.u8        q12, d4, d12
+    vabal.u8        q12, d6, d14
+
+    vpaddl.u16      q1, q12
+    vpaddl.u32      q0, q1
+    vadd.u32        d0, d0, d1
+
+    vmov.32         r0, d0[0]
+
+    bx              lr
+
+    ENDP
+
+;============================
+;unsigned int vp8_sad8x16_c(
+;    unsigned char *src_ptr,
+;    int  src_stride,
+;    unsigned char *ref_ptr,
+;    int  ref_stride)
+
+|vp8_sad8x16_neon| PROC
+    vld1.8          {d0}, [r0], r1
+    vld1.8          {d8}, [r2], r3
+
+    vld1.8          {d2}, [r0], r1
+    vld1.8          {d10}, [r2], r3
+
+    vabdl.u8        q12, d0, d8
+
+    vld1.8          {d4}, [r0], r1
+    vld1.8          {d12}, [r2], r3
+
+    vabal.u8        q12, d2, d10
+
+    vld1.8          {d6}, [r0], r1
+    vld1.8          {d14}, [r2], r3
+
+    vabal.u8        q12, d4, d12
+
+    vld1.8          {d0}, [r0], r1
+    vld1.8          {d8}, [r2], r3
+
+    vabal.u8        q12, d6, d14
+
+    vld1.8          {d2}, [r0], r1
+    vld1.8          {d10}, [r2], r3
+
+    vabal.u8        q12, d0, d8
+
+    vld1.8          {d4}, [r0], r1
+    vld1.8          {d12}, [r2], r3
+
+    vabal.u8        q12, d2, d10
+
+    vld1.8          {d6}, [r0], r1
+    vld1.8          {d14}, [r2], r3
+
+    vabal.u8        q12, d4, d12
+
+    vld1.8          {d0}, [r0], r1
+    vld1.8          {d8}, [r2], r3
+
+    vabal.u8        q12, d6, d14
+
+    vld1.8          {d2}, [r0], r1
+    vld1.8          {d10}, [r2], r3
+
+    vabal.u8        q12, d0, d8
+
+    vld1.8          {d4}, [r0], r1
+    vld1.8          {d12}, [r2], r3
+
+    vabal.u8        q12, d2, d10
+
+    vld1.8          {d6}, [r0], r1
+    vld1.8          {d14}, [r2], r3
+
+    vabal.u8        q12, d4, d12
+
+    vld1.8          {d0}, [r0], r1
+    vld1.8          {d8}, [r2], r3
+
+    vabal.u8        q12, d6, d14
+
+    vld1.8          {d2}, [r0], r1
+    vld1.8          {d10}, [r2], r3
+
+    vabal.u8        q12, d0, d8
+
+    vld1.8          {d4}, [r0], r1
+    vld1.8          {d12}, [r2], r3
+
+    vabal.u8        q12, d2, d10
+
+    vld1.8          {d6}, [r0], r1
+    vld1.8          {d14}, [r2], r3
+
+    vabal.u8        q12, d4, d12
+    vabal.u8        q12, d6, d14
+
+    vpaddl.u16      q1, q12
+    vpaddl.u32      q0, q1
+    vadd.u32        d0, d0, d1
+
+    vmov.32         r0, d0[0]
+
+    bx              lr
+
+    ENDP
+
+;===========================
+;unsigned int vp8_sad4x4_c(
+;    unsigned char *src_ptr,
+;    int  src_stride,
+;    unsigned char *ref_ptr,
+;    int  ref_stride)
+
+|vp8_sad4x4_neon| PROC
+    vld1.8          {d0}, [r0], r1
+    vld1.8          {d8}, [r2], r3
+
+    vld1.8          {d2}, [r0], r1
+    vld1.8          {d10}, [r2], r3
+
+    vabdl.u8        q12, d0, d8
+
+    vld1.8          {d4}, [r0], r1
+    vld1.8          {d12}, [r2], r3
+
+    vabal.u8        q12, d2, d10
+
+    vld1.8          {d6}, [r0], r1
+    vld1.8          {d14}, [r2], r3
+
+    vabal.u8        q12, d4, d12
+    vabal.u8        q12, d6, d14
+
+    vpaddl.u16      d1, d24
+    vpaddl.u32      d0, d1
+    vmov.32         r0, d0[0]
+
+    bx              lr
+
+    ENDP
+
+    END
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/sad_neon.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-unsigned int vp8_sad8x8_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x8_t d0, d8;
-    uint16x8_t q12;
-    uint32x4_t q1;
-    uint64x2_t q3;
-    uint32x2_t d5;
-    int i;
-
-    d0 = vld1_u8(src_ptr);
-    src_ptr += src_stride;
-    d8 = vld1_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(d0, d8);
-
-    for (i = 0; i < 7; i++) {
-        d0 = vld1_u8(src_ptr);
-        src_ptr += src_stride;
-        d8 = vld1_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, d0, d8);
-    }
-
-    q1 = vpaddlq_u16(q12);
-    q3 = vpaddlq_u32(q1);
-    d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
-                  vreinterpret_u32_u64(vget_high_u64(q3)));
-
-    return vget_lane_u32(d5, 0);
-}
-
-unsigned int vp8_sad8x16_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x8_t d0, d8;
-    uint16x8_t q12;
-    uint32x4_t q1;
-    uint64x2_t q3;
-    uint32x2_t d5;
-    int i;
-
-    d0 = vld1_u8(src_ptr);
-    src_ptr += src_stride;
-    d8 = vld1_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(d0, d8);
-
-    for (i = 0; i < 15; i++) {
-        d0 = vld1_u8(src_ptr);
-        src_ptr += src_stride;
-        d8 = vld1_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, d0, d8);
-    }
-
-    q1 = vpaddlq_u16(q12);
-    q3 = vpaddlq_u32(q1);
-    d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
-                  vreinterpret_u32_u64(vget_high_u64(q3)));
-
-    return vget_lane_u32(d5, 0);
-}
-
-unsigned int vp8_sad4x4_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x8_t d0, d8;
-    uint16x8_t q12;
-    uint32x2_t d1;
-    uint64x1_t d3;
-    int i;
-
-    d0 = vld1_u8(src_ptr);
-    src_ptr += src_stride;
-    d8 = vld1_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(d0, d8);
-
-    for (i = 0; i < 3; i++) {
-        d0 = vld1_u8(src_ptr);
-        src_ptr += src_stride;
-        d8 = vld1_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, d0, d8);
-    }
-
-    d1 = vpaddl_u16(vget_low_u16(q12));
-    d3 = vpaddl_u32(d1);
-
-    return vget_lane_u32(vreinterpret_u32_u64(d3), 0);
-}
-
-unsigned int vp8_sad16x16_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x16_t q0, q4;
-    uint16x8_t q12, q13;
-    uint32x4_t q1;
-    uint64x2_t q3;
-    uint32x2_t d5;
-    int i;
-
-    q0 = vld1q_u8(src_ptr);
-    src_ptr += src_stride;
-    q4 = vld1q_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(vget_low_u8(q0), vget_low_u8(q4));
-    q13 = vabdl_u8(vget_high_u8(q0), vget_high_u8(q4));
-
-    for (i = 0; i < 15; i++) {
-        q0 = vld1q_u8(src_ptr);
-        src_ptr += src_stride;
-        q4 = vld1q_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, vget_low_u8(q0), vget_low_u8(q4));
-        q13 = vabal_u8(q13, vget_high_u8(q0), vget_high_u8(q4));
-    }
-
-    q12 = vaddq_u16(q12, q13);
-    q1 = vpaddlq_u16(q12);
-    q3 = vpaddlq_u32(q1);
-    d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
-                  vreinterpret_u32_u64(vget_high_u64(q3)));
-
-    return vget_lane_u32(d5, 0);
-}
-
-unsigned int vp8_sad16x8_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x16_t q0, q4;
-    uint16x8_t q12, q13;
-    uint32x4_t q1;
-    uint64x2_t q3;
-    uint32x2_t d5;
-    int i;
-
-    q0 = vld1q_u8(src_ptr);
-    src_ptr += src_stride;
-    q4 = vld1q_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(vget_low_u8(q0), vget_low_u8(q4));
-    q13 = vabdl_u8(vget_high_u8(q0), vget_high_u8(q4));
-
-    for (i = 0; i < 7; i++) {
-        q0 = vld1q_u8(src_ptr);
-        src_ptr += src_stride;
-        q4 = vld1q_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, vget_low_u8(q0), vget_low_u8(q4));
-        q13 = vabal_u8(q13, vget_high_u8(q0), vget_high_u8(q4));
-    }
-
-    q12 = vaddq_u16(q12, q13);
-    q1 = vpaddlq_u16(q12);
-    q3 = vpaddlq_u32(q1);
-    d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
-                  vreinterpret_u32_u64(vget_high_u64(q3)