Bug 1151175 - Update libvpx to 1.4.0. r=rillian
author: Jan Gerber <j@mailb.org>
Wed, 17 Jun 2015 20:46:25 +0200
changeset 280440 de1ddaec57b7434b11528068e9a59c8c5716dd0f
parent 280439 13d883caaf74900fbe66a32a87bfc361daf3466a
child 280441 4aefa21c8eadfd1594267e0aacd9a2b6b3964d85
push id: 4932
push user: jlund@mozilla.com
push date: Mon, 10 Aug 2015 18:23:06 +0000
treeherder: mozilla-beta@6dd5a4f5f745 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: rillian
bugs: 1151175
milestone: 41.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1151175 - Update libvpx to 1.4.0. r=rillian vp9_thread.c has changed directories, which mach doesn't handle properly, resulting in "No rule to make target" for the old source file path. Bump clobber for libvpx file renames.
CLOBBER
media/libvpx/PATENTS
media/libvpx/README_MOZILLA
media/libvpx/build/make/obj_int_extract.c
media/libvpx/build/make/thumb.pm
media/libvpx/sources.mozbuild
media/libvpx/third_party/x86inc/x86inc.asm
media/libvpx/vp8/common/arm/armv6/vp8_variance16x16_armv6.asm
media/libvpx/vp8/common/arm/armv6/vp8_variance8x8_armv6.asm
media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm
media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm
media/libvpx/vp8/common/arm/dequantize_arm.c
media/libvpx/vp8/common/arm/loopfilter_arm.c
media/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c
media/libvpx/vp8/common/arm/neon/buildintrapredictorsmby_neon.asm
media/libvpx/vp8/common/arm/neon/copymem16x16_neon.asm
media/libvpx/vp8/common/arm/neon/copymem8x4_neon.asm
media/libvpx/vp8/common/arm/neon/copymem8x8_neon.asm
media/libvpx/vp8/common/arm/neon/copymem_neon.c
media/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.asm
media/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c
media/libvpx/vp8/common/arm/neon/dequant_idct_neon.asm
media/libvpx/vp8/common/arm/neon/dequant_idct_neon.c
media/libvpx/vp8/common/arm/neon/dequantizeb_neon.asm
media/libvpx/vp8/common/arm/neon/dequantizeb_neon.c
media/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.asm
media/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.c
media/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.asm
media/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.c
media/libvpx/vp8/common/arm/neon/iwalsh_neon.asm
media/libvpx/vp8/common/arm/neon/iwalsh_neon.c
media/libvpx/vp8/common/arm/neon/loopfilter_neon.asm
media/libvpx/vp8/common/arm/neon/loopfilter_neon.c
media/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.asm
media/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c
media/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.asm
media/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c
media/libvpx/vp8/common/arm/neon/mbloopfilter_neon.asm
media/libvpx/vp8/common/arm/neon/mbloopfilter_neon.c
media/libvpx/vp8/common/arm/neon/reconintra_neon.c
media/libvpx/vp8/common/arm/neon/sad16_neon.asm
media/libvpx/vp8/common/arm/neon/sad8_neon.asm
media/libvpx/vp8/common/arm/neon/sad_neon.c
media/libvpx/vp8/common/arm/neon/save_reg_neon.asm
media/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.asm
media/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.c
media/libvpx/vp8/common/arm/neon/sixtappredict16x16_neon.asm
media/libvpx/vp8/common/arm/neon/sixtappredict4x4_neon.asm
media/libvpx/vp8/common/arm/neon/sixtappredict8x4_neon.asm
media/libvpx/vp8/common/arm/neon/sixtappredict8x8_neon.asm
media/libvpx/vp8/common/arm/neon/sixtappredict_neon.c
media/libvpx/vp8/common/arm/neon/variance_neon.asm
media/libvpx/vp8/common/arm/neon/variance_neon.c
media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm
media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance16x16s_neon.asm
media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm
media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance_neon.c
media/libvpx/vp8/common/arm/reconintra_arm.c
media/libvpx/vp8/common/blockd.h
media/libvpx/vp8/common/common.h
media/libvpx/vp8/common/loopfilter.c
media/libvpx/vp8/common/onyx.h
media/libvpx/vp8/common/postproc.c
media/libvpx/vp8/common/postproc.h
media/libvpx/vp8/common/pragmas.h
media/libvpx/vp8/common/x86/loopfilter_block_sse2.asm
media/libvpx/vp8/common/x86/loopfilter_block_sse2_x86_64.asm
media/libvpx/vp8/common/x86/loopfilter_mmx.asm
media/libvpx/vp8/common/x86/loopfilter_sse2.asm
media/libvpx/vp8/common/x86/postproc_mmx.asm
media/libvpx/vp8/common/x86/postproc_sse2.asm
media/libvpx/vp8/common/x86/postproc_x86.c
media/libvpx/vp8/common/x86/recon_sse2.asm
media/libvpx/vp8/common/x86/variance_impl_mmx.asm
media/libvpx/vp8/common/x86/variance_mmx.c
media/libvpx/vp8/common/x86/variance_sse2.c
media/libvpx/vp8/common/x86/variance_ssse3.c
media/libvpx/vp8/decoder/dboolhuff.c
media/libvpx/vp8/decoder/dboolhuff.h
media/libvpx/vp8/decoder/decodeframe.c
media/libvpx/vp8/decoder/decodemv.c
media/libvpx/vp8/decoder/error_concealment.c
media/libvpx/vp8/decoder/onyxd_if.c
media/libvpx/vp8/decoder/onyxd_int.h
media/libvpx/vp8/decoder/threading.c
media/libvpx/vp8/encoder/arm/armv5te/boolhuff_armv5te.asm
media/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm
media/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm
media/libvpx/vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm
media/libvpx/vp8/encoder/arm/armv6/vp8_fast_quantize_b_armv6.asm
media/libvpx/vp8/encoder/arm/armv6/vp8_subtract_armv6.asm
media/libvpx/vp8/encoder/arm/boolhuff_arm.c
media/libvpx/vp8/encoder/arm/neon/denoising_neon.c
media/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.asm
media/libvpx/vp8/encoder/arm/neon/fastquantizeb_neon.c
media/libvpx/vp8/encoder/arm/neon/picklpf_arm.c
media/libvpx/vp8/encoder/arm/neon/shortfdct_neon.asm
media/libvpx/vp8/encoder/arm/neon/shortfdct_neon.c
media/libvpx/vp8/encoder/arm/neon/subtract_neon.asm
media/libvpx/vp8/encoder/arm/neon/subtract_neon.c
media/libvpx/vp8/encoder/arm/neon/vp8_memcpy_neon.asm
media/libvpx/vp8/encoder/arm/neon/vp8_mse16x16_neon.asm
media/libvpx/vp8/encoder/arm/neon/vp8_mse16x16_neon.c
media/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.asm
media/libvpx/vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.c
media/libvpx/vp8/encoder/arm/quantize_arm.c
media/libvpx/vp8/encoder/bitstream.c
media/libvpx/vp8/encoder/bitstream.h
media/libvpx/vp8/encoder/block.h
media/libvpx/vp8/encoder/boolhuff.h
media/libvpx/vp8/encoder/denoising.c
media/libvpx/vp8/encoder/denoising.h
media/libvpx/vp8/encoder/encodeframe.c
media/libvpx/vp8/encoder/encodemb.c
media/libvpx/vp8/encoder/ethreading.c
media/libvpx/vp8/encoder/firstpass.c
media/libvpx/vp8/encoder/mcomp.c
media/libvpx/vp8/encoder/mr_dissim.c
media/libvpx/vp8/encoder/onyx_if.c
media/libvpx/vp8/encoder/onyx_int.h
media/libvpx/vp8/encoder/pickinter.c
media/libvpx/vp8/encoder/picklpf.c
media/libvpx/vp8/encoder/psnr.c
media/libvpx/vp8/encoder/psnr.h
media/libvpx/vp8/encoder/quantize.c
media/libvpx/vp8/encoder/quantize.h
media/libvpx/vp8/encoder/ratectrl.c
media/libvpx/vp8/encoder/rdopt.c
media/libvpx/vp8/encoder/rdopt.h
media/libvpx/vp8/encoder/temporal_filter.c
media/libvpx/vp8/encoder/tokenize.c
media/libvpx/vp8/encoder/vp8_asm_enc_offsets.c
media/libvpx/vp8/encoder/x86/denoising_sse2.c
media/libvpx/vp8/encoder/x86/quantize_sse2.c
media/libvpx/vp8/encoder/x86/quantize_sse4.asm
media/libvpx/vp8/encoder/x86/quantize_sse4.c
media/libvpx/vp8/encoder/x86/quantize_ssse3.asm
media/libvpx/vp8/encoder/x86/quantize_ssse3.c
media/libvpx/vp8/encoder/x86/ssim_opt_x86_64.asm
media/libvpx/vp8/vp8_cx_iface.c
media/libvpx/vp8/vp8_dx_iface.c
media/libvpx/vp8_rtcd_armv7-android-gcc.h
media/libvpx/vp8_rtcd_generic-gnu.h
media/libvpx/vp8_rtcd_x86-darwin9-gcc.h
media/libvpx/vp8_rtcd_x86-linux-gcc.h
media/libvpx/vp8_rtcd_x86-win32-gcc.h
media/libvpx/vp8_rtcd_x86-win32-vs12.h
media/libvpx/vp8_rtcd_x86_64-darwin9-gcc.h
media/libvpx/vp8_rtcd_x86_64-linux-gcc.h
media/libvpx/vp8_rtcd_x86_64-win64-gcc.h
media/libvpx/vp8_rtcd_x86_64-win64-vs12.h
media/libvpx/vp9/common/arm/neon/vp9_avg_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_avg_neon.c
media/libvpx/vp9/common/arm/neon/vp9_avg_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_convolve8_avg_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_convolve8_avg_neon.c
media/libvpx/vp9/common/arm/neon/vp9_convolve8_avg_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_convolve8_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_convolve8_neon.c
media/libvpx/vp9/common/arm/neon/vp9_convolve8_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_convolve_neon.c
media/libvpx/vp9/common/arm/neon/vp9_copy_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_copy_neon.c
media/libvpx/vp9/common/arm/neon/vp9_copy_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_dc_only_idct_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct16x16_1_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct16x16_1_add_neon.c
media/libvpx/vp9/common/arm/neon/vp9_idct16x16_1_add_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_idct16x16_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct16x16_add_neon.c
media/libvpx/vp9/common/arm/neon/vp9_idct16x16_add_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_idct16x16_neon.c
media/libvpx/vp9/common/arm/neon/vp9_idct32x32_1_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct32x32_1_add_neon.c
media/libvpx/vp9/common/arm/neon/vp9_idct32x32_1_add_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_idct32x32_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct32x32_add_neon.c
media/libvpx/vp9/common/arm/neon/vp9_idct32x32_add_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_idct4x4_1_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct4x4_1_add_neon.c
media/libvpx/vp9/common/arm/neon/vp9_idct4x4_1_add_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_idct4x4_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct4x4_add_neon.c
media/libvpx/vp9/common/arm/neon/vp9_idct4x4_add_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_idct8x8_1_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct8x8_1_add_neon.c
media/libvpx/vp9/common/arm/neon/vp9_idct8x8_1_add_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_idct8x8_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_idct8x8_add_neon.c
media/libvpx/vp9/common/arm/neon/vp9_idct8x8_add_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_iht4x4_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_iht4x4_add_neon.c
media/libvpx/vp9/common/arm/neon/vp9_iht8x8_add_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_iht8x8_add_neon.c
media/libvpx/vp9/common/arm/neon/vp9_loopfilter_16_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_loopfilter_16_neon.c
media/libvpx/vp9/common/arm/neon/vp9_loopfilter_16_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_loopfilter_4_neon.c
media/libvpx/vp9/common/arm/neon/vp9_loopfilter_4_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_loopfilter_8_neon.c
media/libvpx/vp9/common/arm/neon/vp9_loopfilter_8_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_loopfilter_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_loopfilter_neon.c
media/libvpx/vp9/common/arm/neon/vp9_reconintra_neon.asm
media/libvpx/vp9/common/arm/neon/vp9_reconintra_neon.c
media/libvpx/vp9/common/arm/neon/vp9_reconintra_neon_asm.asm
media/libvpx/vp9/common/generic/vp9_systemdependent.c
media/libvpx/vp9/common/vp9_alloccommon.c
media/libvpx/vp9/common/vp9_alloccommon.h
media/libvpx/vp9/common/vp9_blockd.c
media/libvpx/vp9/common/vp9_blockd.h
media/libvpx/vp9/common/vp9_common.h
media/libvpx/vp9/common/vp9_common_data.c
media/libvpx/vp9/common/vp9_common_data.h
media/libvpx/vp9/common/vp9_convolve.c
media/libvpx/vp9/common/vp9_convolve.h
media/libvpx/vp9/common/vp9_debugmodes.c
media/libvpx/vp9/common/vp9_entropy.c
media/libvpx/vp9/common/vp9_entropy.h
media/libvpx/vp9/common/vp9_entropymode.c
media/libvpx/vp9/common/vp9_entropymode.h
media/libvpx/vp9/common/vp9_entropymv.c
media/libvpx/vp9/common/vp9_entropymv.h
media/libvpx/vp9/common/vp9_enums.h
media/libvpx/vp9/common/vp9_filter.c
media/libvpx/vp9/common/vp9_filter.h
media/libvpx/vp9/common/vp9_frame_buffers.c
media/libvpx/vp9/common/vp9_idct.c
media/libvpx/vp9/common/vp9_idct.h
media/libvpx/vp9/common/vp9_loopfilter.c
media/libvpx/vp9/common/vp9_loopfilter.h
media/libvpx/vp9/common/vp9_loopfilter_filters.c
media/libvpx/vp9/common/vp9_mfqe.c
media/libvpx/vp9/common/vp9_mfqe.h
media/libvpx/vp9/common/vp9_mv.h
media/libvpx/vp9/common/vp9_mvref_common.c
media/libvpx/vp9/common/vp9_mvref_common.h
media/libvpx/vp9/common/vp9_onyx.h
media/libvpx/vp9/common/vp9_onyxc_int.h
media/libvpx/vp9/common/vp9_postproc.c
media/libvpx/vp9/common/vp9_postproc.h
media/libvpx/vp9/common/vp9_ppflags.h
media/libvpx/vp9/common/vp9_pragmas.h
media/libvpx/vp9/common/vp9_pred_common.c
media/libvpx/vp9/common/vp9_pred_common.h
media/libvpx/vp9/common/vp9_prob.c
media/libvpx/vp9/common/vp9_prob.h
media/libvpx/vp9/common/vp9_quant_common.c
media/libvpx/vp9/common/vp9_quant_common.h
media/libvpx/vp9/common/vp9_reconinter.c
media/libvpx/vp9/common/vp9_reconinter.h
media/libvpx/vp9/common/vp9_reconintra.c
media/libvpx/vp9/common/vp9_reconintra.h
media/libvpx/vp9/common/vp9_rtcd.c
media/libvpx/vp9/common/vp9_scale.c
media/libvpx/vp9/common/vp9_scale.h
media/libvpx/vp9/common/vp9_scan.c
media/libvpx/vp9/common/vp9_scan.h
media/libvpx/vp9/common/vp9_systemdependent.h
media/libvpx/vp9/common/vp9_thread.c
media/libvpx/vp9/common/vp9_thread.h
media/libvpx/vp9/common/vp9_thread_common.c
media/libvpx/vp9/common/vp9_thread_common.h
media/libvpx/vp9/common/vp9_tile_common.c
media/libvpx/vp9/common/vp9_tile_common.h
media/libvpx/vp9/common/x86/vp9_asm_stubs.c
media/libvpx/vp9/common/x86/vp9_copy_sse2.asm
media/libvpx/vp9/common/x86/vp9_high_intrapred_sse2.asm
media/libvpx/vp9/common/x86/vp9_high_loopfilter_intrin_sse2.c
media/libvpx/vp9/common/x86/vp9_high_subpixel_8t_sse2.asm
media/libvpx/vp9/common/x86/vp9_high_subpixel_bilinear_sse2.asm
media/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c
media/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.h
media/libvpx/vp9/common/x86/vp9_idct_intrin_ssse3.c
media/libvpx/vp9/common/x86/vp9_idct_ssse3_x86_64.asm
media/libvpx/vp9/common/x86/vp9_loopfilter_intrin_avx2.c
media/libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c
media/libvpx/vp9/common/x86/vp9_loopfilter_mmx.asm
media/libvpx/vp9/common/x86/vp9_mfqe_sse2.asm
media/libvpx/vp9/common/x86/vp9_postproc_mmx.asm
media/libvpx/vp9/common/x86/vp9_postproc_sse2.asm
media/libvpx/vp9/common/x86/vp9_postproc_x86.h
media/libvpx/vp9/common/x86/vp9_subpixel_8t_intrin_avx2.c
media/libvpx/vp9/common/x86/vp9_subpixel_8t_intrin_ssse3.c
media/libvpx/vp9/common/x86/vp9_subpixel_8t_ssse3.asm
media/libvpx/vp9/decoder/vp9_decodeframe.c
media/libvpx/vp9/decoder/vp9_decodeframe.h
media/libvpx/vp9/decoder/vp9_decodemv.c
media/libvpx/vp9/decoder/vp9_decodemv.h
media/libvpx/vp9/decoder/vp9_decoder.c
media/libvpx/vp9/decoder/vp9_decoder.h
media/libvpx/vp9/decoder/vp9_detokenize.c
media/libvpx/vp9/decoder/vp9_detokenize.h
media/libvpx/vp9/decoder/vp9_dsubexp.c
media/libvpx/vp9/decoder/vp9_dthread.c
media/libvpx/vp9/decoder/vp9_dthread.h
media/libvpx/vp9/decoder/vp9_onyxd.h
media/libvpx/vp9/decoder/vp9_onyxd_if.c
media/libvpx/vp9/decoder/vp9_onyxd_int.h
media/libvpx/vp9/decoder/vp9_read_bit_buffer.c
media/libvpx/vp9/decoder/vp9_read_bit_buffer.h
media/libvpx/vp9/decoder/vp9_reader.c
media/libvpx/vp9/decoder/vp9_reader.h
media/libvpx/vp9/decoder/vp9_thread.c
media/libvpx/vp9/decoder/vp9_thread.h
media/libvpx/vp9/encoder/arm/neon/vp9_dct_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_sad4d_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_sad_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_subtract_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_variance_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9enc_avg_neon.c
media/libvpx/vp9/encoder/vp9_aq_complexity.c
media/libvpx/vp9/encoder/vp9_aq_complexity.h
media/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.c
media/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.h
media/libvpx/vp9/encoder/vp9_aq_variance.c
media/libvpx/vp9/encoder/vp9_aq_variance.h
media/libvpx/vp9/encoder/vp9_avg.c
media/libvpx/vp9/encoder/vp9_bitstream.c
media/libvpx/vp9/encoder/vp9_bitstream.h
media/libvpx/vp9/encoder/vp9_block.h
media/libvpx/vp9/encoder/vp9_context_tree.c
media/libvpx/vp9/encoder/vp9_context_tree.h
media/libvpx/vp9/encoder/vp9_cost.c
media/libvpx/vp9/encoder/vp9_cost.h
media/libvpx/vp9/encoder/vp9_dct.c
media/libvpx/vp9/encoder/vp9_dct.h
media/libvpx/vp9/encoder/vp9_denoiser.c
media/libvpx/vp9/encoder/vp9_denoiser.h
media/libvpx/vp9/encoder/vp9_encodeframe.c
media/libvpx/vp9/encoder/vp9_encodeframe.h
media/libvpx/vp9/encoder/vp9_encodemb.c
media/libvpx/vp9/encoder/vp9_encodemb.h
media/libvpx/vp9/encoder/vp9_encodemv.c
media/libvpx/vp9/encoder/vp9_encodemv.h
media/libvpx/vp9/encoder/vp9_encoder.c
media/libvpx/vp9/encoder/vp9_encoder.h
media/libvpx/vp9/encoder/vp9_ethread.c
media/libvpx/vp9/encoder/vp9_ethread.h
media/libvpx/vp9/encoder/vp9_extend.c
media/libvpx/vp9/encoder/vp9_firstpass.c
media/libvpx/vp9/encoder/vp9_firstpass.h
media/libvpx/vp9/encoder/vp9_lookahead.c
media/libvpx/vp9/encoder/vp9_lookahead.h
media/libvpx/vp9/encoder/vp9_mbgraph.c
media/libvpx/vp9/encoder/vp9_mbgraph.h
media/libvpx/vp9/encoder/vp9_mcomp.c
media/libvpx/vp9/encoder/vp9_mcomp.h
media/libvpx/vp9/encoder/vp9_onyx_if.c
media/libvpx/vp9/encoder/vp9_onyx_int.h
media/libvpx/vp9/encoder/vp9_picklpf.c
media/libvpx/vp9/encoder/vp9_picklpf.h
media/libvpx/vp9/encoder/vp9_pickmode.c
media/libvpx/vp9/encoder/vp9_pickmode.h
media/libvpx/vp9/encoder/vp9_psnr.c
media/libvpx/vp9/encoder/vp9_psnr.h
media/libvpx/vp9/encoder/vp9_quantize.c
media/libvpx/vp9/encoder/vp9_quantize.h
media/libvpx/vp9/encoder/vp9_ratectrl.c
media/libvpx/vp9/encoder/vp9_ratectrl.h
media/libvpx/vp9/encoder/vp9_rd.c
media/libvpx/vp9/encoder/vp9_rd.h
media/libvpx/vp9/encoder/vp9_rdopt.c
media/libvpx/vp9/encoder/vp9_rdopt.h
media/libvpx/vp9/encoder/vp9_resize.c
media/libvpx/vp9/encoder/vp9_resize.h
media/libvpx/vp9/encoder/vp9_sad.c
media/libvpx/vp9/encoder/vp9_segmentation.c
media/libvpx/vp9/encoder/vp9_segmentation.h
media/libvpx/vp9/encoder/vp9_skin_detection.c
media/libvpx/vp9/encoder/vp9_skin_detection.h
media/libvpx/vp9/encoder/vp9_speed_features.c
media/libvpx/vp9/encoder/vp9_speed_features.h
media/libvpx/vp9/encoder/vp9_ssim.h
media/libvpx/vp9/encoder/vp9_subexp.c
media/libvpx/vp9/encoder/vp9_subexp.h
media/libvpx/vp9/encoder/vp9_svc_layercontext.c
media/libvpx/vp9/encoder/vp9_svc_layercontext.h
media/libvpx/vp9/encoder/vp9_temporal_filter.c
media/libvpx/vp9/encoder/vp9_temporal_filter.h
media/libvpx/vp9/encoder/vp9_tokenize.c
media/libvpx/vp9/encoder/vp9_tokenize.h
media/libvpx/vp9/encoder/vp9_treewriter.c
media/libvpx/vp9/encoder/vp9_treewriter.h
media/libvpx/vp9/encoder/vp9_vaq.c
media/libvpx/vp9/encoder/vp9_vaq.h
media/libvpx/vp9/encoder/vp9_variance.c
media/libvpx/vp9/encoder/vp9_variance.h
media/libvpx/vp9/encoder/vp9_write_bit_buffer.c
media/libvpx/vp9/encoder/vp9_write_bit_buffer.h
media/libvpx/vp9/encoder/vp9_writer.c
media/libvpx/vp9/encoder/vp9_writer.h
media/libvpx/vp9/encoder/x86/vp9_avg_intrin_sse2.c
media/libvpx/vp9/encoder/x86/vp9_dct32x32_avx2.c
media/libvpx/vp9/encoder/x86/vp9_dct32x32_sse2.c
media/libvpx/vp9/encoder/x86/vp9_dct_avx2.c
media/libvpx/vp9/encoder/x86/vp9_dct_impl_sse2.c
media/libvpx/vp9/encoder/x86/vp9_dct_mmx.asm
media/libvpx/vp9/encoder/x86/vp9_dct_sse2.c
media/libvpx/vp9/encoder/x86/vp9_dct_sse2.h
media/libvpx/vp9/encoder/x86/vp9_dct_ssse3.c
media/libvpx/vp9/encoder/x86/vp9_dct_ssse3_x86_64.asm
media/libvpx/vp9/encoder/x86/vp9_denoiser_sse2.c
media/libvpx/vp9/encoder/x86/vp9_error_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_highbd_block_error_intrin_sse2.c
media/libvpx/vp9/encoder/x86/vp9_highbd_quantize_intrin_sse2.c
media/libvpx/vp9/encoder/x86/vp9_highbd_sad4d_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_highbd_sad_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_highbd_subpel_variance.asm
media/libvpx/vp9/encoder/x86/vp9_highbd_variance_impl_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_highbd_variance_sse2.c
media/libvpx/vp9/encoder/x86/vp9_mcomp_x86.h
media/libvpx/vp9/encoder/x86/vp9_quantize_sse2.c
media/libvpx/vp9/encoder/x86/vp9_quantize_ssse3.asm
media/libvpx/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
media/libvpx/vp9/encoder/x86/vp9_sad4d_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_sad_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_sad_mmx.asm
media/libvpx/vp9/encoder/x86/vp9_ssim_opt_x86_64.asm
media/libvpx/vp9/encoder/x86/vp9_subpel_variance.asm
media/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_temporal_filter_apply_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_variance_avx2.c
media/libvpx/vp9/encoder/x86/vp9_variance_impl_mmx.asm
media/libvpx/vp9/encoder/x86/vp9_variance_impl_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_variance_mmx.c
media/libvpx/vp9/encoder/x86/vp9_variance_sse2.c
media/libvpx/vp9/vp9_cx_iface.c
media/libvpx/vp9/vp9_dx_iface.c
media/libvpx/vp9/vp9_iface_common.h
media/libvpx/vp9_rtcd_armv7-android-gcc.h
media/libvpx/vp9_rtcd_generic-gnu.h
media/libvpx/vp9_rtcd_x86-darwin9-gcc.h
media/libvpx/vp9_rtcd_x86-linux-gcc.h
media/libvpx/vp9_rtcd_x86-win32-gcc.h
media/libvpx/vp9_rtcd_x86-win32-vs12.h
media/libvpx/vp9_rtcd_x86_64-darwin9-gcc.h
media/libvpx/vp9_rtcd_x86_64-linux-gcc.h
media/libvpx/vp9_rtcd_x86_64-win64-gcc.h
media/libvpx/vp9_rtcd_x86_64-win64-vs12.h
media/libvpx/vpx/internal/vpx_codec_internal.h
media/libvpx/vpx/internal/vpx_psnr.h
media/libvpx/vpx/src/svc_encodeframe.c
media/libvpx/vpx/src/vpx_codec.c
media/libvpx/vpx/src/vpx_decoder.c
media/libvpx/vpx/src/vpx_encoder.c
media/libvpx/vpx/src/vpx_image.c
media/libvpx/vpx/src/vpx_psnr.c
media/libvpx/vpx/svc_context.h
media/libvpx/vpx/vp8cx.h
media/libvpx/vpx/vp8dx.h
media/libvpx/vpx/vpx_codec.h
media/libvpx/vpx/vpx_decoder.h
media/libvpx/vpx/vpx_encoder.h
media/libvpx/vpx/vpx_frame_buffer.h
media/libvpx/vpx/vpx_image.h
media/libvpx/vpx/vpx_integer.h
media/libvpx/vpx_config_armv7-android-gcc.asm
media/libvpx/vpx_config_armv7-android-gcc.h
media/libvpx/vpx_config_generic-gnu.asm
media/libvpx/vpx_config_generic-gnu.h
media/libvpx/vpx_config_x86-darwin9-gcc.asm
media/libvpx/vpx_config_x86-darwin9-gcc.h
media/libvpx/vpx_config_x86-linux-gcc.asm
media/libvpx/vpx_config_x86-linux-gcc.h
media/libvpx/vpx_config_x86-win32-gcc.asm
media/libvpx/vpx_config_x86-win32-gcc.h
media/libvpx/vpx_config_x86-win32-vs12.asm
media/libvpx/vpx_config_x86-win32-vs12.h
media/libvpx/vpx_config_x86_64-darwin9-gcc.asm
media/libvpx/vpx_config_x86_64-darwin9-gcc.h
media/libvpx/vpx_config_x86_64-linux-gcc.asm
media/libvpx/vpx_config_x86_64-linux-gcc.h
media/libvpx/vpx_config_x86_64-win64-gcc.asm
media/libvpx/vpx_config_x86_64-win64-gcc.h
media/libvpx/vpx_config_x86_64-win64-vs12.asm
media/libvpx/vpx_config_x86_64-win64-vs12.h
media/libvpx/vpx_mem/vpx_mem.c
media/libvpx/vpx_mem/vpx_mem.h
media/libvpx/vpx_ports/arm.h
media/libvpx/vpx_ports/arm_cpudetect.c
media/libvpx/vpx_ports/asm_offsets.h
media/libvpx/vpx_ports/mem.h
media/libvpx/vpx_ports/mem_ops.h
media/libvpx/vpx_ports/mem_ops_aligned.h
media/libvpx/vpx_ports/vpx_once.h
media/libvpx/vpx_ports/vpx_timer.h
media/libvpx/vpx_ports/x86.h
media/libvpx/vpx_ports/x86_abi_support.asm
media/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm
media/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm
media/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_copysrcframe_func_neon.asm
media/libvpx/vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm
media/libvpx/vpx_scale/arm/neon/yv12extend_arm.c
media/libvpx/vpx_scale/generic/yv12config.c
media/libvpx/vpx_scale/generic/yv12extend.c
media/libvpx/vpx_scale/vpx_scale_asm_offsets.c
media/libvpx/vpx_scale/yv12config.h
media/libvpx/vpx_scale_rtcd_armv7-android-gcc.h
media/libvpx/vpx_scale_rtcd_generic-gnu.h
media/libvpx/vpx_scale_rtcd_x86-darwin9-gcc.h
media/libvpx/vpx_scale_rtcd_x86-linux-gcc.h
media/libvpx/vpx_scale_rtcd_x86-win32-gcc.h
media/libvpx/vpx_scale_rtcd_x86-win32-vs12.h
media/libvpx/vpx_scale_rtcd_x86_64-darwin9-gcc.h
media/libvpx/vpx_scale_rtcd_x86_64-linux-gcc.h
media/libvpx/vpx_scale_rtcd_x86_64-win64-gcc.h
media/libvpx/vpx_scale_rtcd_x86_64-win64-vs12.h
media/libvpx/vpx_version.h
--- a/CLOBBER
+++ b/CLOBBER
@@ -17,9 +17,9 @@
 #
 # Modifying this file will now automatically clobber the buildbot machines \o/
 #
 
 # Are you updating CLOBBER because you think it's needed for your WebIDL
 # changes to stick? As of bug 928195, this shouldn't be necessary! Please
 # don't change CLOBBER for WebIDL changes any more.
 
-Bug 1167064: Switch to bluetooth APIv2.
+Bug 1151175 - Update libvpx to 1.4.0.
--- a/media/libvpx/PATENTS
+++ b/media/libvpx/PATENTS
@@ -1,22 +1,23 @@
 Additional IP Rights Grant (Patents)
+------------------------------------
 
-"This implementation" means the copyrightable works distributed by
-Google as part of the WebM Project.
+"These implementations" means the copyrightable works that implement the WebM
+codecs distributed by Google as part of the WebM Project.
 
-Google hereby grants to you a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer, and otherwise run, modify and propagate the contents of this
-implementation of VP8, where such license applies only to those patent
-claims, both currently owned by Google and acquired in the future,
-licensable by Google that are necessarily infringed by this
-implementation of VP8. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of VP8 or any code incorporated within this
-implementation of VP8 constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of VP8
-shall terminate as of the date such litigation is filed.
+Google hereby grants to you a perpetual, worldwide, non-exclusive, no-charge,
+royalty-free, irrevocable (except as stated in this section) patent license to
+make, have made, use, offer to sell, sell, import, transfer, and otherwise
+run, modify and propagate the contents of these implementations of WebM, where
+such license applies only to those patent claims, both currently owned by
+Google and acquired in the future, licensable by Google that are necessarily
+infringed by these implementations of WebM. This grant does not include claims
+that would be infringed only as a consequence of further modification of these
+implementations. If you or your agent or exclusive licensee institute or order
+or agree to the institution of patent litigation or any other patent
+enforcement activity against any entity (including a cross-claim or
+counterclaim in a lawsuit) alleging that any of these implementations of WebM
+or any code incorporated within any of these implementations of WebM
+constitutes direct or contributory patent infringement, or inducement of
+patent infringement, then any patent rights granted to you under this License
+for these implementations of WebM shall terminate as of the date such
+litigation is filed.
--- a/media/libvpx/README_MOZILLA
+++ b/media/libvpx/README_MOZILLA
@@ -3,9 +3,9 @@ git repository using the update.py scrip
 made were those applied by update.py and the addition of
 moz.build and Makefile.in build files for the
 Mozilla build system.
 
 The libvpx git repository is:
 
     https://gerrit.chromium.org/gerrit/webm/libvpx
 
-The git commit ID used was afad1a84c15b9af8298a37c0fa449e0af40931fd
+The git commit ID used was c74bf6d889992c3cabe017ec353ca85c323107cd
deleted file mode 100644
--- a/media/libvpx/build/make/obj_int_extract.c
+++ /dev/null
@@ -1,845 +0,0 @@
-/*
- *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "vpx_config.h"
-#include "vpx/vpx_integer.h"
-
-typedef enum {
-  OUTPUT_FMT_PLAIN,
-  OUTPUT_FMT_RVDS,
-  OUTPUT_FMT_GAS,
-  OUTPUT_FMT_C_HEADER,
-} output_fmt_t;
-
-int log_msg(const char *fmt, ...) {
-  int res;
-  va_list ap;
-  va_start(ap, fmt);
-  res = vfprintf(stderr, fmt, ap);
-  va_end(ap);
-  return res;
-}
-
-#if defined(__GNUC__) && __GNUC__
-#if defined(__MACH__)
-
-#include <mach-o/loader.h>
-#include <mach-o/nlist.h>
-
-int print_macho_equ(output_fmt_t mode, uint8_t* name, int val) {
-  switch (mode) {
-    case OUTPUT_FMT_RVDS:
-      printf("%-40s EQU %5d\n", name, val);
-      return 0;
-    case OUTPUT_FMT_GAS:
-      printf(".set %-40s, %5d\n", name, val);
-      return 0;
-    case OUTPUT_FMT_C_HEADER:
-      printf("#define %-40s %5d\n", name, val);
-      return 0;
-    default:
-      log_msg("Unsupported mode: %d", mode);
-      return 1;
-  }
-}
-
-int parse_macho(uint8_t *base_buf, size_t sz, output_fmt_t mode) {
-  int i, j;
-  struct mach_header header;
-  uint8_t *buf = base_buf;
-  int base_data_section = 0;
-  int bits = 0;
-
-  /* We can read in mach_header for 32 and 64 bit architectures
-   * because it's identical to mach_header_64 except for the last
-   * element (uint32_t reserved), which we don't use. Then, when
-   * we know which architecture we're looking at, increment buf
-   * appropriately.
-   */
-  memcpy(&header, buf, sizeof(struct mach_header));
-
-  if (header.magic == MH_MAGIC) {
-    if (header.cputype == CPU_TYPE_ARM
-        || header.cputype == CPU_TYPE_X86) {
-      bits = 32;
-      buf += sizeof(struct mach_header);
-    } else {
-      log_msg("Bad cputype for object file. Currently only tested for CPU_TYPE_[ARM|X86].\n");
-      goto bail;
-    }
-  } else if (header.magic == MH_MAGIC_64) {
-    if (header.cputype == CPU_TYPE_X86_64) {
-      bits = 64;
-      buf += sizeof(struct mach_header_64);
-    } else {
-      log_msg("Bad cputype for object file. Currently only tested for CPU_TYPE_X86_64.\n");
-      goto bail;
-    }
-  } else {
-    log_msg("Bad magic number for object file. 0x%x or 0x%x expected, 0x%x found.\n",
-            MH_MAGIC, MH_MAGIC_64, header.magic);
-    goto bail;
-  }
-
-  if (header.filetype != MH_OBJECT) {
-    log_msg("Bad filetype for object file. Currently only tested for MH_OBJECT.\n");
-    goto bail;
-  }
-
-  for (i = 0; i < header.ncmds; i++) {
-    struct load_command lc;
-
-    memcpy(&lc, buf, sizeof(struct load_command));
-
-    if (lc.cmd == LC_SEGMENT) {
-      uint8_t *seg_buf = buf;
-      struct section s;
-      struct segment_command seg_c;
-
-      memcpy(&seg_c, seg_buf, sizeof(struct segment_command));
-      seg_buf += sizeof(struct segment_command);
-
-      /* Although each section is given it's own offset, nlist.n_value
-       * references the offset of the first section. This isn't
-       * apparent without debug information because the offset of the
-       * data section is the same as the first section. However, with
-       * debug sections mixed in, the offset of the debug section
-       * increases but n_value still references the first section.
-       */
-      if (seg_c.nsects < 1) {
-        log_msg("Not enough sections\n");
-        goto bail;
-      }
-
-      memcpy(&s, seg_buf, sizeof(struct section));
-      base_data_section = s.offset;
-    } else if (lc.cmd == LC_SEGMENT_64) {
-      uint8_t *seg_buf = buf;
-      struct section_64 s;
-      struct segment_command_64 seg_c;
-
-      memcpy(&seg_c, seg_buf, sizeof(struct segment_command_64));
-      seg_buf += sizeof(struct segment_command_64);
-
-      /* Explanation in LG_SEGMENT */
-      if (seg_c.nsects < 1) {
-        log_msg("Not enough sections\n");
-        goto bail;
-      }
-
-      memcpy(&s, seg_buf, sizeof(struct section_64));
-      base_data_section = s.offset;
-    } else if (lc.cmd == LC_SYMTAB) {
-      if (base_data_section != 0) {
-        struct symtab_command sc;
-        uint8_t *sym_buf = base_buf;
-        uint8_t *str_buf = base_buf;
-
-        memcpy(&sc, buf, sizeof(struct symtab_command));
-
-        if (sc.cmdsize != sizeof(struct symtab_command)) {
-          log_msg("Can't find symbol table!\n");
-          goto bail;
-        }
-
-        sym_buf += sc.symoff;
-        str_buf += sc.stroff;
-
-        for (j = 0; j < sc.nsyms; j++) {
-          /* Location of string is cacluated each time from the
-           * start of the string buffer.  On darwin the symbols
-           * are prefixed by "_", so we bump the pointer by 1.
-           * The target value is defined as an int in *_asm_*_offsets.c,
-           * which is 4 bytes on all targets we currently use.
-           */
-          if (bits == 32) {
-            struct nlist nl;
-            int val;
-
-            memcpy(&nl, sym_buf, sizeof(struct nlist));
-            sym_buf += sizeof(struct nlist);
-
-            memcpy(&val, base_buf + base_data_section + nl.n_value,
-                   sizeof(val));
-            print_macho_equ(mode, str_buf + nl.n_un.n_strx + 1, val);
-          } else { /* if (bits == 64) */
-            struct nlist_64 nl;
-            int val;
-
-            memcpy(&nl, sym_buf, sizeof(struct nlist_64));
-            sym_buf += sizeof(struct nlist_64);
-
-            memcpy(&val, base_buf + base_data_section + nl.n_value,
-                   sizeof(val));
-            print_macho_equ(mode, str_buf + nl.n_un.n_strx + 1, val);
-          }
-        }
-      }
-    }
-
-    buf += lc.cmdsize;
-  }
-
-  return 0;
-bail:
-  return 1;
-
-}
-
-#elif defined(__ELF__)
-#include "elf.h"
-
-#define COPY_STRUCT(dst, buf, ofst, sz) do {\
-    if(ofst + sizeof((*(dst))) > sz) goto bail;\
-    memcpy(dst, buf+ofst, sizeof((*(dst))));\
-  } while(0)
-
-#define ENDIAN_ASSIGN(val, memb) do {\
-    if(!elf->le_data) {log_msg("Big Endian data not supported yet!\n");goto bail;}\
-    (val) = (memb);\
-  } while(0)
-
-#define ENDIAN_ASSIGN_IN_PLACE(memb) do {\
-    ENDIAN_ASSIGN(memb, memb);\
-  } while(0)
-
-typedef struct {
-  uint8_t      *buf; /* Buffer containing ELF data */
-  size_t        sz;  /* Buffer size */
-  int           le_data; /* Data is little-endian */
-  unsigned char e_ident[EI_NIDENT]; /* Magic number and other info */
-  int           bits; /* 32 or 64 */
-  Elf32_Ehdr    hdr32;
-  Elf64_Ehdr    hdr64;
-} elf_obj_t;
-
-int parse_elf_header(elf_obj_t *elf) {
-  int res;
-  /* Verify ELF Magic numbers */
-  COPY_STRUCT(&elf->e_ident, elf->buf, 0, elf->sz);
-  res = elf->e_ident[EI_MAG0] == ELFMAG0;
-  res &= elf->e_ident[EI_MAG1] == ELFMAG1;
-  res &= elf->e_ident[EI_MAG2] == ELFMAG2;
-  res &= elf->e_ident[EI_MAG3] == ELFMAG3;
-  res &= elf->e_ident[EI_CLASS] == ELFCLASS32
-         || elf->e_ident[EI_CLASS] == ELFCLASS64;
-  res &= elf->e_ident[EI_DATA] == ELFDATA2LSB;
-
-  if (!res) goto bail;
-
-  elf->le_data = elf->e_ident[EI_DATA] == ELFDATA2LSB;
-
-  /* Read in relevant values */
-  if (elf->e_ident[EI_CLASS] == ELFCLASS32) {
-    elf->bits = 32;
-    COPY_STRUCT(&elf->hdr32, elf->buf, 0, elf->sz);
-
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_type);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_machine);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_version);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_entry);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phoff);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shoff);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_flags);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_ehsize);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phentsize);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_phnum);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shentsize);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shnum);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr32.e_shstrndx);
-  } else { /* if (elf->e_ident[EI_CLASS] == ELFCLASS64) */
-    elf->bits = 64;
-    COPY_STRUCT(&elf->hdr64, elf->buf, 0, elf->sz);
-
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_type);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_machine);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_version);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_entry);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phoff);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shoff);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_flags);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_ehsize);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phentsize);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_phnum);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shentsize);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shnum);
-    ENDIAN_ASSIGN_IN_PLACE(elf->hdr64.e_shstrndx);
-  }
-
-  return 0;
-bail:
-  log_msg("Failed to parse ELF file header");
-  return 1;
-}
-
-int parse_elf_section(elf_obj_t *elf, int idx, Elf32_Shdr *hdr32, Elf64_Shdr *hdr64) {
-  if (hdr32) {
-    if (idx >= elf->hdr32.e_shnum)
-      goto bail;
-
-    COPY_STRUCT(hdr32, elf->buf, elf->hdr32.e_shoff + idx * elf->hdr32.e_shentsize,
-                elf->sz);
-    ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_name);
-    ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_type);
-    ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_flags);
-    ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_addr);
-    ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_offset);
-    ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_size);
-    ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_link);
-    ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_info);
-    ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_addralign);
-    ENDIAN_ASSIGN_IN_PLACE(hdr32->sh_entsize);
-  } else { /* if (hdr64) */
-    if (idx >= elf->hdr64.e_shnum)
-      goto bail;
-
-    COPY_STRUCT(hdr64, elf->buf, elf->hdr64.e_shoff + idx * elf->hdr64.e_shentsize,
-                elf->sz);
-    ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_name);
-    ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_type);
-    ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_flags);
-    ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_addr);
-    ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_offset);
-    ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_size);
-    ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_link);
-    ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_info);
-    ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_addralign);
-    ENDIAN_ASSIGN_IN_PLACE(hdr64->sh_entsize);
-  }
-
-  return 0;
-bail:
-  return 1;
-}
-
-const char *parse_elf_string_table(elf_obj_t *elf, int s_idx, int idx) {
-  if (elf->bits == 32) {
-    Elf32_Shdr shdr;
-
-    if (parse_elf_section(elf, s_idx, &shdr, NULL)) {
-      log_msg("Failed to parse ELF string table: section %d, index %d\n",
-              s_idx, idx);
-      return "";
-    }
-
-    return (char *)(elf->buf + shdr.sh_offset + idx);
-  } else { /* if (elf->bits == 64) */
-    Elf64_Shdr shdr;
-
-    if (parse_elf_section(elf, s_idx, NULL, &shdr)) {
-      log_msg("Failed to parse ELF string table: section %d, index %d\n",
-              s_idx, idx);
-      return "";
-    }
-
-    return (char *)(elf->buf + shdr.sh_offset + idx);
-  }
-}
-
-int parse_elf_symbol(elf_obj_t *elf, unsigned int ofst, Elf32_Sym *sym32, Elf64_Sym *sym64) {
-  if (sym32) {
-    COPY_STRUCT(sym32, elf->buf, ofst, elf->sz);
-    ENDIAN_ASSIGN_IN_PLACE(sym32->st_name);
-    ENDIAN_ASSIGN_IN_PLACE(sym32->st_value);
-    ENDIAN_ASSIGN_IN_PLACE(sym32->st_size);
-    ENDIAN_ASSIGN_IN_PLACE(sym32->st_info);
-    ENDIAN_ASSIGN_IN_PLACE(sym32->st_other);
-    ENDIAN_ASSIGN_IN_PLACE(sym32->st_shndx);
-  } else { /* if (sym64) */
-    COPY_STRUCT(sym64, elf->buf, ofst, elf->sz);
-    ENDIAN_ASSIGN_IN_PLACE(sym64->st_name);
-    ENDIAN_ASSIGN_IN_PLACE(sym64->st_value);
-    ENDIAN_ASSIGN_IN_PLACE(sym64->st_size);
-    ENDIAN_ASSIGN_IN_PLACE(sym64->st_info);
-    ENDIAN_ASSIGN_IN_PLACE(sym64->st_other);
-    ENDIAN_ASSIGN_IN_PLACE(sym64->st_shndx);
-  }
-  return 0;
-bail:
-  return 1;
-}
-
-int parse_elf(uint8_t *buf, size_t sz, output_fmt_t mode) {
-  elf_obj_t    elf;
-  unsigned int ofst;
-  int          i;
-  Elf32_Off    strtab_off32;
-  Elf64_Off    strtab_off64; /* save String Table offset for later use */
-
-  memset(&elf, 0, sizeof(elf));
-  elf.buf = buf;
-  elf.sz = sz;
-
-  /* Parse Header */
-  if (parse_elf_header(&elf))
-    goto bail;
-
-  if (elf.bits == 32) {
-    Elf32_Shdr shdr;
-    for (i = 0; i < elf.hdr32.e_shnum; i++) {
-      parse_elf_section(&elf, i, &shdr, NULL);
-
-      if (shdr.sh_type == SHT_STRTAB) {
-        char strtsb_name[128];
-
-        strcpy(strtsb_name, (char *)(elf.buf + shdr.sh_offset + shdr.sh_name));
-
-        if (!(strcmp(strtsb_name, ".shstrtab"))) {
-          /* log_msg("found section: %s\n", strtsb_name); */
-          strtab_off32 = shdr.sh_offset;
-          break;
-        }
-      }
-    }
-  } else { /* if (elf.bits == 64) */
-    Elf64_Shdr shdr;
-    for (i = 0; i < elf.hdr64.e_shnum; i++) {
-      parse_elf_section(&elf, i, NULL, &shdr);
-
-      if (shdr.sh_type == SHT_STRTAB) {
-        char strtsb_name[128];
-
-        strcpy(strtsb_name, (char *)(elf.buf + shdr.sh_offset + shdr.sh_name));
-
-        if (!(strcmp(strtsb_name, ".shstrtab"))) {
-          /* log_msg("found section: %s\n", strtsb_name); */
-          strtab_off64 = shdr.sh_offset;
-          break;
-        }
-      }
-    }
-  }
-
-  /* Parse all Symbol Tables */
-  if (elf.bits == 32) {
-    Elf32_Shdr shdr;
-    for (i = 0; i < elf.hdr32.e_shnum; i++) {
-      parse_elf_section(&elf, i, &shdr, NULL);
-
-      if (shdr.sh_type == SHT_SYMTAB) {
-        for (ofst = shdr.sh_offset;
-             ofst < shdr.sh_offset + shdr.sh_size;
-             ofst += shdr.sh_entsize) {
-          Elf32_Sym sym;
-
-          parse_elf_symbol(&elf, ofst, &sym, NULL);
-
-          /* For all OBJECTS (data objects), extract the value from the
-           * proper data segment.
-           */
-          /* if (ELF32_ST_TYPE(sym.st_info) == STT_OBJECT && sym.st_name)
-              log_msg("found data object %s\n",
-                      parse_elf_string_table(&elf,
-                                             shdr.sh_link,
-                                             sym.st_name));
-           */
-
-          if (ELF32_ST_TYPE(sym.st_info) == STT_OBJECT
-              && sym.st_size == 4) {
-            Elf32_Shdr dhdr;
-            int val = 0;
-            char section_name[128];
-
-            parse_elf_section(&elf, sym.st_shndx, &dhdr, NULL);
-
-            /* For explanition - refer to _MSC_VER version of code */
-            strcpy(section_name, (char *)(elf.buf + strtab_off32 + dhdr.sh_name));
-            /* log_msg("Section_name: %s, Section_type: %d\n", section_name, dhdr.sh_type); */
-
-            if (strcmp(section_name, ".bss")) {
-              if (sizeof(val) != sym.st_size) {
-                /* The target value is declared as an int in
-                 * *_asm_*_offsets.c, which is 4 bytes on all
-                 * targets we currently use. Complain loudly if
-                 * this is not true.
-                 */
-                log_msg("Symbol size is wrong\n");
-                goto bail;
-              }
-
-              memcpy(&val,
-                     elf.buf + dhdr.sh_offset + sym.st_value,
-                     sym.st_size);
-            }
-
-            if (!elf.le_data) {
-              log_msg("Big Endian data not supported yet!\n");
-              goto bail;
-            }
-
-            switch (mode) {
-              case OUTPUT_FMT_RVDS:
-                printf("%-40s EQU %5d\n",
-                       parse_elf_string_table(&elf,
-                                              shdr.sh_link,
-                                              sym.st_name),
-                       val);
-                break;
-              case OUTPUT_FMT_GAS:
-                printf(".equ %-40s, %5d\n",
-                       parse_elf_string_table(&elf,
-                                              shdr.sh_link,
-                                              sym.st_name),
-                       val);
-                break;
-              case OUTPUT_FMT_C_HEADER:
-                printf("#define %-40s %5d\n",
-                       parse_elf_string_table(&elf,
-                                              shdr.sh_link,
-                                              sym.st_name),
-                       val);
-                break;
-              default:
-                printf("%s = %d\n",
-                       parse_elf_string_table(&elf,
-                                              shdr.sh_link,
-                                              sym.st_name),
-                       val);
-            }
-          }
-        }
-      }
-    }
-  } else { /* if (elf.bits == 64) */
-    Elf64_Shdr shdr;
-    for (i = 0; i < elf.hdr64.e_shnum; i++) {
-      parse_elf_section(&elf, i, NULL, &shdr);
-
-      if (shdr.sh_type == SHT_SYMTAB) {
-        for (ofst = shdr.sh_offset;
-             ofst < shdr.sh_offset + shdr.sh_size;
-             ofst += shdr.sh_entsize) {
-          Elf64_Sym sym;
-
-          parse_elf_symbol(&elf, ofst, NULL, &sym);
-
-          /* For all OBJECTS (data objects), extract the value from the
-           * proper data segment.
-           */
-          /* if (ELF64_ST_TYPE(sym.st_info) == STT_OBJECT && sym.st_name)
-              log_msg("found data object %s\n",
-                      parse_elf_string_table(&elf,
-                                             shdr.sh_link,
-                                             sym.st_name));
-           */
-
-          if (ELF64_ST_TYPE(sym.st_info) == STT_OBJECT
-              && sym.st_size == 4) {
-            Elf64_Shdr dhdr;
-            int val = 0;
-            char section_name[128];
-
-            parse_elf_section(&elf, sym.st_shndx, NULL, &dhdr);
-
-            /* For explanition - refer to _MSC_VER version of code */
-            strcpy(section_name, (char *)(elf.buf + strtab_off64 + dhdr.sh_name));
-            /* log_msg("Section_name: %s, Section_type: %d\n", section_name, dhdr.sh_type); */
-
-            if ((strcmp(section_name, ".bss"))) {
-              if (sizeof(val) != sym.st_size) {
-                /* The target value is declared as an int in
-                 * *_asm_*_offsets.c, which is 4 bytes on all
-                 * targets we currently use. Complain loudly if
-                 * this is not true.
-                 */
-                log_msg("Symbol size is wrong\n");
-                goto bail;
-              }
-
-              memcpy(&val,
-                     elf.buf + dhdr.sh_offset + sym.st_value,
-                     sym.st_size);
-            }
-
-            if (!elf.le_data) {
-              log_msg("Big Endian data not supported yet!\n");
-              goto bail;
-            }
-
-            switch (mode) {
-              case OUTPUT_FMT_RVDS:
-                printf("%-40s EQU %5d\n",
-                       parse_elf_string_table(&elf,
-                                              shdr.sh_link,
-                                              sym.st_name),
-                       val);
-                break;
-              case OUTPUT_FMT_GAS:
-                printf(".equ %-40s, %5d\n",
-                       parse_elf_string_table(&elf,
-                                              shdr.sh_link,
-                                              sym.st_name),
-                       val);
-                break;
-              default:
-                printf("%s = %d\n",
-                       parse_elf_string_table(&elf,
-                                              shdr.sh_link,
-                                              sym.st_name),
-                       val);
-            }
-          }
-        }
-      }
-    }
-  }
-
-  if (mode == OUTPUT_FMT_RVDS)
-    printf("    END\n");
-
-  return 0;
-bail:
-  log_msg("Parse error: File does not appear to be valid ELF32 or ELF64\n");
-  return 1;
-}
-
-#endif
-#endif /* defined(__GNUC__) && __GNUC__ */
-
-
-#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__CYGWIN__)
-/*  See "Microsoft Portable Executable and Common Object File Format Specification"
-    for reference.
-*/
-#define get_le32(x) ((*(x)) | (*(x+1)) << 8 |(*(x+2)) << 16 | (*(x+3)) << 24 )
-#define get_le16(x) ((*(x)) | (*(x+1)) << 8)
-
-int parse_coff(uint8_t *buf, size_t sz) {
-  unsigned int nsections, symtab_ptr, symtab_sz, strtab_ptr;
-  unsigned int sectionrawdata_ptr;
-  unsigned int i;
-  uint8_t *ptr;
-  uint32_t symoffset;
-
-  char **sectionlist;  // this array holds all section names in their correct order.
-  // it is used to check if the symbol is in .bss or .rdata section.
-
-  nsections = get_le16(buf + 2);
-  symtab_ptr = get_le32(buf + 8);
-  symtab_sz = get_le32(buf + 12);
-  strtab_ptr = symtab_ptr + symtab_sz * 18;
-
-  if (nsections > 96) {
-    log_msg("Too many sections\n");
-    return 1;
-  }
-
-  sectionlist = malloc(nsections * sizeof(sectionlist));
-
-  if (sectionlist == NULL) {
-    log_msg("Allocating first level of section list failed\n");
-    return 1;
-  }
-
-  // log_msg("COFF: Found %u symbols in %u sections.\n", symtab_sz, nsections);
-
-  /*
-  The size of optional header is always zero for an obj file. So, the section header
-  follows the file header immediately.
-  */
-
-  ptr = buf + 20;     // section header
-
-  for (i = 0; i < nsections; i++) {
-    char sectionname[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
-    strncpy(sectionname, ptr, 8);
-    // log_msg("COFF: Parsing section %s\n",sectionname);
-
-    sectionlist[i] = malloc(strlen(sectionname) + 1);
-
-    if (sectionlist[i] == NULL) {
-      log_msg("Allocating storage for %s failed\n", sectionname);
-      goto bail;
-    }
-    strcpy(sectionlist[i], sectionname);
-
-    // check if it's .rdata and is not a COMDAT section.
-    if (!strcmp(sectionname, ".rdata") &&
-        (get_le32(ptr + 36) & 0x1000) == 0) {
-      sectionrawdata_ptr = get_le32(ptr + 20);
-    }
-
-    ptr += 40;
-  }
-
-  // log_msg("COFF: Symbol table at offset %u\n", symtab_ptr);
-  // log_msg("COFF: raw data pointer ofset for section .rdata is %u\n", sectionrawdata_ptr);
-
-  /*  The compiler puts the data with non-zero offset in .rdata section, but puts the data with
-      zero offset in .bss section. So, if the data in in .bss section, set offset=0.
-      Note from Wiki: In an object module compiled from C, the bss section contains
-      the local variables (but not functions) that were declared with the static keyword,
-      except for those with non-zero initial values. (In C, static variables are initialized
-      to zero by default.) It also contains the non-local (both extern and static) variables
-      that are also initialized to zero (either explicitly or by default).
-      */
-  // move to symbol table
-  /* COFF symbol table:
-      offset      field
-      0           Name(*)
-      8           Value
-      12          SectionNumber
-      14          Type
-      16          StorageClass
-      17          NumberOfAuxSymbols
-      */
-  ptr = buf + symtab_ptr;
-
-  for (i = 0; i < symtab_sz; i++) {
-    int16_t section = get_le16(ptr + 12); // section number
-
-    if (section > 0 && ptr[16] == 2) {
-      // if(section > 0 && ptr[16] == 3 && get_le32(ptr+8)) {
-
-      if (get_le32(ptr)) {
-        char name[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
-        strncpy(name, ptr, 8);
-        // log_msg("COFF: Parsing symbol %s\n",name);
-        /* The 64bit Windows compiler doesn't prefix with an _.
-         * Check what's there, and bump if necessary
-         */
-        if (name[0] == '_')
-          printf("%-40s EQU ", name + 1);
-        else
-          printf("%-40s EQU ", name);
-      } else {
-        // log_msg("COFF: Parsing symbol %s\n",
-        //        buf + strtab_ptr + get_le32(ptr+4));
-        if ((buf + strtab_ptr + get_le32(ptr + 4))[0] == '_')
-          printf("%-40s EQU ",
-                 buf + strtab_ptr + get_le32(ptr + 4) + 1);
-        else
-          printf("%-40s EQU ", buf + strtab_ptr + get_le32(ptr + 4));
-      }
-
-      if (!(strcmp(sectionlist[section - 1], ".bss"))) {
-        symoffset = 0;
-      } else {
-        symoffset = get_le32(buf + sectionrawdata_ptr + get_le32(ptr + 8));
-      }
-
-      // log_msg("      Section: %d\n",section);
-      // log_msg("      Class:   %d\n",ptr[16]);
-      // log_msg("      Address: %u\n",get_le32(ptr+8));
-      // log_msg("      Offset: %u\n", symoffset);
-
-      printf("%5d\n", symoffset);
-    }
-
-    ptr += 18;
-  }
-
-  printf("    END\n");
-
-  for (i = 0; i < nsections; i++) {
-    free(sectionlist[i]);
-  }
-
-  free(sectionlist);
-
-  return 0;
-bail:
-
-  for (i = 0; i < nsections; i++) {
-    free(sectionlist[i]);
-  }
-
-  free(sectionlist);
-
-  return 1;
-}
-#endif /* defined(_MSC_VER) || defined(__MINGW32__) || defined(__CYGWIN__) */
-
-int main(int argc, char **argv) {
-  output_fmt_t mode = OUTPUT_FMT_PLAIN;
-  const char *f;
-  uint8_t *file_buf;
-  int res;
-  FILE *fp;
-  long int file_size;
-
-  if (argc < 2 || argc > 3) {
-    fprintf(stderr, "Usage: %s [output format] <obj file>\n\n", argv[0]);
-    fprintf(stderr, "  <obj file>\tobject file to parse\n");
-    fprintf(stderr, "Output Formats:\n");
-    fprintf(stderr, "  gas  - compatible with GNU assembler\n");
-    fprintf(stderr, "  rvds - compatible with armasm\n");
-    fprintf(stderr, "  cheader - c/c++ header file\n");
-    goto bail;
-  }
-
-  f = argv[2];
-
-  if (!strcmp(argv[1], "rvds"))
-    mode = OUTPUT_FMT_RVDS;
-  else if (!strcmp(argv[1], "gas"))
-    mode = OUTPUT_FMT_GAS;
-  else if (!strcmp(argv[1], "cheader"))
-    mode = OUTPUT_FMT_C_HEADER;
-  else
-    f = argv[1];
-
-  fp = fopen(f, "rb");
-
-  if (!fp) {
-    perror("Unable to open file");
-    goto bail;
-  }
-
-  if (fseek(fp, 0, SEEK_END)) {
-    perror("stat");
-    goto bail;
-  }
-
-  file_size = ftell(fp);
-  file_buf = malloc(file_size);
-
-  if (!file_buf) {
-    perror("malloc");
-    goto bail;
-  }
-
-  rewind(fp);
-
-  if (fread(file_buf, sizeof(char), file_size, fp) != file_size) {
-    perror("read");
-    goto bail;
-  }
-
-  if (fclose(fp)) {
-    perror("close");
-    goto bail;
-  }
-
-#if defined(__GNUC__) && __GNUC__
-#if defined(__MACH__)
-  res = parse_macho(file_buf, file_size, mode);
-#elif defined(__ELF__)
-  res = parse_elf(file_buf, file_size, mode);
-#endif
-#endif
-#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__CYGWIN__)
-  res = parse_coff(file_buf, file_size);
-#endif
-
-  free(file_buf);
-
-  if (!res)
-    return EXIT_SUCCESS;
-
-bail:
-  return EXIT_FAILURE;
-}
--- a/media/libvpx/build/make/thumb.pm
+++ b/media/libvpx/build/make/thumb.pm
@@ -1,9 +1,9 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
 ##
 ##  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
 ##
 ##  Use of this source code is governed by a BSD-style license
 ##  that can be found in the LICENSE file in the root of the source
 ##  tree. An additional intellectual property rights grant can be found
 ##  in the file PATENTS.  All contributing project authors may
 ##  be found in the AUTHORS file in the root of the source tree.
@@ -46,17 +46,17 @@ sub FixThumbInstructions($$)
     # "addne src, src, pstep, lsl #1". In a couple of cases where
     # this is used, it's used for two subsequent load instructions,
     # where a hand-written version of it could merge two subsequent
     # add and sub instructions.
     s/^(\s*)((ldr|str|pld)(ne)?)(\s+)(r\d+,\s*)?\[(\w+), -([^\]]+)\]/$1sub$4$5$7, $7, $8\n$1$2$5$6\[$7\]\n$1add$4$5$7, $7, $8/g;
 
     # Convert register post indexing to a separate add instruction.
     # This converts "ldrneb r9, [r0], r2" into "ldrneb r9, [r0]",
-    # "add r0, r2".
+    # "addne r0, r0, r2".
     s/^(\s*)((ldr|str)(ne)?[bhd]?)(\s+)(\w+),(\s*\w+,)?\s*\[(\w+)\],\s*(\w+)/$1$2$5$6,$7 [$8]\n$1add$4$5$8, $8, $9/g;
 
     # Convert a conditional addition to the pc register into a series of
     # instructions. This converts "addlt pc, pc, r3, lsl #2" into
     # "itttt lt", "movlt.n r12, pc", "addlt.w r12, #12",
     # "addlt.w r12, r12, r3, lsl #2", "movlt.n pc, r12".
     # This assumes that r12 is free at this point.
     s/^(\s*)addlt(\s+)pc,\s*pc,\s*(\w+),\s*lsl\s*#(\d+)/$1itttt$2lt\n$1movlt.n$2r12, pc\n$1addlt.w$2r12, #12\n$1addlt.w$2r12, r12, $3, lsl #($4-$branch_shift_offset)\n$1movlt.n$2pc, r12/g;
--- a/media/libvpx/sources.mozbuild
+++ b/media/libvpx/sources.mozbuild
@@ -20,98 +20,84 @@ files = {
              'vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm',
              'vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm',
              'vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm',
              'vp8/common/arm/bilinearfilter_arm.c',
              'vp8/common/arm/dequantize_arm.c',
              'vp8/common/arm/filter_arm.c',
              'vp8/common/arm/loopfilter_arm.c',
              'vp8/common/arm/neon/bilinearpredict_neon.c',
-             'vp8/common/arm/neon/buildintrapredictorsmby_neon.asm',
-             'vp8/common/arm/neon/copymem16x16_neon.asm',
-             'vp8/common/arm/neon/copymem8x4_neon.asm',
-             'vp8/common/arm/neon/copymem8x8_neon.asm',
-             'vp8/common/arm/neon/dc_only_idct_add_neon.asm',
-             'vp8/common/arm/neon/dequant_idct_neon.asm',
-             'vp8/common/arm/neon/dequantizeb_neon.asm',
+             'vp8/common/arm/neon/copymem_neon.c',
+             'vp8/common/arm/neon/dc_only_idct_add_neon.c',
+             'vp8/common/arm/neon/dequant_idct_neon.c',
+             'vp8/common/arm/neon/dequantizeb_neon.c',
              'vp8/common/arm/neon/idct_blk_neon.c',
-             'vp8/common/arm/neon/idct_dequant_0_2x_neon.asm',
-             'vp8/common/arm/neon/idct_dequant_full_2x_neon.asm',
-             'vp8/common/arm/neon/iwalsh_neon.asm',
-             'vp8/common/arm/neon/loopfilter_neon.asm',
-             'vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.asm',
-             'vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.asm',
-             'vp8/common/arm/neon/mbloopfilter_neon.asm',
-             'vp8/common/arm/neon/sad16_neon.asm',
-             'vp8/common/arm/neon/sad8_neon.asm',
-             'vp8/common/arm/neon/save_reg_neon.asm',
-             'vp8/common/arm/neon/shortidct4x4llm_neon.asm',
-             'vp8/common/arm/neon/sixtappredict16x16_neon.asm',
-             'vp8/common/arm/neon/sixtappredict4x4_neon.asm',
-             'vp8/common/arm/neon/sixtappredict8x4_neon.asm',
-             'vp8/common/arm/neon/sixtappredict8x8_neon.asm',
-             'vp8/common/arm/neon/variance_neon.asm',
-             'vp8/common/arm/neon/vp8_subpixelvariance16x16_neon.asm',
-             'vp8/common/arm/neon/vp8_subpixelvariance16x16s_neon.asm',
-             'vp8/common/arm/neon/vp8_subpixelvariance8x8_neon.asm',
-             'vp8/common/arm/reconintra_arm.c',
+             'vp8/common/arm/neon/idct_dequant_0_2x_neon.c',
+             'vp8/common/arm/neon/idct_dequant_full_2x_neon.c',
+             'vp8/common/arm/neon/iwalsh_neon.c',
+             'vp8/common/arm/neon/loopfilter_neon.c',
+             'vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c',
+             'vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c',
+             'vp8/common/arm/neon/mbloopfilter_neon.c',
+             'vp8/common/arm/neon/reconintra_neon.c',
+             'vp8/common/arm/neon/sad_neon.c',
+             'vp8/common/arm/neon/shortidct4x4llm_neon.c',
+             'vp8/common/arm/neon/sixtappredict_neon.c',
+             'vp8/common/arm/neon/variance_neon.c',
+             'vp8/common/arm/neon/vp8_subpixelvariance_neon.c',
              'vp8/common/arm/variance_arm.c',
-             'vp8/encoder/arm/armv5te/boolhuff_armv5te.asm',
-             'vp8/encoder/arm/armv5te/vp8_packtokens_armv5.asm',
-             'vp8/encoder/arm/armv5te/vp8_packtokens_mbrow_armv5.asm',
-             'vp8/encoder/arm/armv5te/vp8_packtokens_partitions_armv5.asm',
-             'vp8/encoder/arm/armv6/vp8_fast_quantize_b_armv6.asm',
              'vp8/encoder/arm/armv6/vp8_mse16x16_armv6.asm',
              'vp8/encoder/arm/armv6/vp8_short_fdct4x4_armv6.asm',
-             'vp8/encoder/arm/armv6/vp8_subtract_armv6.asm',
              'vp8/encoder/arm/armv6/walsh_v6.asm',
-             'vp8/encoder/arm/boolhuff_arm.c',
              'vp8/encoder/arm/dct_arm.c',
              'vp8/encoder/arm/neon/denoising_neon.c',
-             'vp8/encoder/arm/neon/fastquantizeb_neon.asm',
-             'vp8/encoder/arm/neon/picklpf_arm.c',
-             'vp8/encoder/arm/neon/shortfdct_neon.asm',
-             'vp8/encoder/arm/neon/subtract_neon.asm',
-             'vp8/encoder/arm/neon/vp8_memcpy_neon.asm',
-             'vp8/encoder/arm/neon/vp8_mse16x16_neon.asm',
-             'vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.asm',
-             'vp8/encoder/arm/quantize_arm.c',
-             'vp9/common/arm/neon/vp9_avg_neon.asm',
-             'vp9/common/arm/neon/vp9_convolve8_avg_neon.asm',
-             'vp9/common/arm/neon/vp9_convolve8_neon.asm',
+             'vp8/encoder/arm/neon/fastquantizeb_neon.c',
+             'vp8/encoder/arm/neon/shortfdct_neon.c',
+             'vp8/encoder/arm/neon/subtract_neon.c',
+             'vp8/encoder/arm/neon/vp8_mse16x16_neon.c',
+             'vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.c',
+             'vp9/common/arm/neon/vp9_avg_neon_asm.asm',
+             'vp9/common/arm/neon/vp9_convolve8_avg_neon_asm.asm',
+             'vp9/common/arm/neon/vp9_convolve8_neon_asm.asm',
              'vp9/common/arm/neon/vp9_convolve_neon.c',
-             'vp9/common/arm/neon/vp9_copy_neon.asm',
-             'vp9/common/arm/neon/vp9_dc_only_idct_add_neon.asm',
-             'vp9/common/arm/neon/vp9_idct16x16_1_add_neon.asm',
-             'vp9/common/arm/neon/vp9_idct16x16_add_neon.asm',
+             'vp9/common/arm/neon/vp9_copy_neon_asm.asm',
+             'vp9/common/arm/neon/vp9_idct16x16_1_add_neon_asm.asm',
+             'vp9/common/arm/neon/vp9_idct16x16_add_neon_asm.asm',
              'vp9/common/arm/neon/vp9_idct16x16_neon.c',
-             'vp9/common/arm/neon/vp9_idct32x32_1_add_neon.asm',
-             'vp9/common/arm/neon/vp9_idct32x32_add_neon.asm',
-             'vp9/common/arm/neon/vp9_idct4x4_1_add_neon.asm',
-             'vp9/common/arm/neon/vp9_idct4x4_add_neon.asm',
-             'vp9/common/arm/neon/vp9_idct8x8_1_add_neon.asm',
-             'vp9/common/arm/neon/vp9_idct8x8_add_neon.asm',
-             'vp9/common/arm/neon/vp9_iht4x4_add_neon.asm',
-             'vp9/common/arm/neon/vp9_iht8x8_add_neon.asm',
-             'vp9/common/arm/neon/vp9_loopfilter_16_neon.asm',
-             'vp9/common/arm/neon/vp9_loopfilter_16_neon.c',
-             'vp9/common/arm/neon/vp9_loopfilter_neon.asm',
+             'vp9/common/arm/neon/vp9_idct32x32_1_add_neon_asm.asm',
+             'vp9/common/arm/neon/vp9_idct32x32_add_neon_asm.asm',
+             'vp9/common/arm/neon/vp9_idct4x4_1_add_neon_asm.asm',
+             'vp9/common/arm/neon/vp9_idct4x4_add_neon_asm.asm',
+             'vp9/common/arm/neon/vp9_idct8x8_1_add_neon_asm.asm',
+             'vp9/common/arm/neon/vp9_idct8x8_add_neon_asm.asm',
+             'vp9/common/arm/neon/vp9_iht4x4_add_neon.c',
+             'vp9/common/arm/neon/vp9_iht8x8_add_neon.c',
+             'vp9/common/arm/neon/vp9_loopfilter_16_neon_asm.asm',
+             'vp9/common/arm/neon/vp9_loopfilter_4_neon_asm.asm',
+             'vp9/common/arm/neon/vp9_loopfilter_8_neon_asm.asm',
+             'vp9/common/arm/neon/vp9_loopfilter_neon.c',
              'vp9/common/arm/neon/vp9_mb_lpf_neon.asm',
-             'vp9/common/arm/neon/vp9_reconintra_neon.asm',
+             'vp9/common/arm/neon/vp9_reconintra_neon_asm.asm',
              'vp9/common/arm/neon/vp9_save_reg_neon.asm',
-             'vpx_ports/arm_cpudetect.c',
-             'vpx_scale/arm/neon/vp8_vpxyv12_copy_y_neon.asm',
-             'vpx_scale/arm/neon/vp8_vpxyv12_copyframe_func_neon.asm',
-             'vpx_scale/arm/neon/vp8_vpxyv12_copysrcframe_func_neon.asm',
-             'vpx_scale/arm/neon/vp8_vpxyv12_extendframeborders_neon.asm',
-             'vpx_scale/arm/neon/yv12extend_arm.c'],
+             'vp9/encoder/arm/neon/vp9_dct_neon.c',
+             'vp9/encoder/arm/neon/vp9_quantize_neon.c',
+             'vp9/encoder/arm/neon/vp9_sad4d_neon.c',
+             'vp9/encoder/arm/neon/vp9_sad_neon.c',
+             'vp9/encoder/arm/neon/vp9_subtract_neon.c',
+             'vp9/encoder/arm/neon/vp9_variance_neon.c',
+             'vp9/encoder/arm/neon/vp9enc_avg_neon.c',
+             'vpx_ports/arm_cpudetect.c'],
  'AVX2': ['vp9/common/x86/vp9_loopfilter_intrin_avx2.c',
           'vp9/common/x86/vp9_subpixel_8t_intrin_avx2.c',
           'vp9/encoder/x86/vp9_dct32x32_avx2.c',
           'vp9/encoder/x86/vp9_dct_avx2.c',
+          'vp9/encoder/x86/vp9_error_intrin_avx2.c',
+          'vp9/encoder/x86/vp9_sad4d_intrin_avx2.c',
+          'vp9/encoder/x86/vp9_sad_intrin_avx2.c',
+          'vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c',
           'vp9/encoder/x86/vp9_variance_avx2.c',
           'vp9/encoder/x86/vp9_variance_impl_intrin_avx2.c'],
  'ERROR_CONCEALMENT': ['vp8/decoder/error_concealment.c'],
  'EXPORTS': ['vpx/vp8.h',
              'vpx/vp8cx.h',
              'vpx/vp8dx.h',
              'vpx/vpx_codec.h',
              'vpx/vpx_decoder.h',
@@ -171,27 +157,25 @@ files = {
              'vp8/encoder/firstpass.c',
              'vp8/encoder/lookahead.c',
              'vp8/encoder/mcomp.c',
              'vp8/encoder/modecosts.c',
              'vp8/encoder/mr_dissim.c',
              'vp8/encoder/onyx_if.c',
              'vp8/encoder/pickinter.c',
              'vp8/encoder/picklpf.c',
-             'vp8/encoder/psnr.c',
              'vp8/encoder/quantize.c',
              'vp8/encoder/ratectrl.c',
              'vp8/encoder/rdopt.c',
              'vp8/encoder/segmentation.c',
              'vp8/encoder/temporal_filter.c',
              'vp8/encoder/tokenize.c',
              'vp8/encoder/treewriter.c',
              'vp8/vp8_cx_iface.c',
              'vp8/vp8_dx_iface.c',
-             'vp9/common/generic/vp9_systemdependent.c',
              'vp9/common/vp9_alloccommon.c',
              'vp9/common/vp9_blockd.c',
              'vp9/common/vp9_common_data.c',
              'vp9/common/vp9_convolve.c',
              'vp9/common/vp9_debugmodes.c',
              'vp9/common/vp9_entropy.c',
              'vp9/common/vp9_entropymode.c',
              'vp9/common/vp9_entropymv.c',
@@ -205,85 +189,102 @@ files = {
              'vp9/common/vp9_prob.c',
              'vp9/common/vp9_quant_common.c',
              'vp9/common/vp9_reconinter.c',
              'vp9/common/vp9_reconintra.c',
              'vp9/common/vp9_rtcd.c',
              'vp9/common/vp9_scale.c',
              'vp9/common/vp9_scan.c',
              'vp9/common/vp9_seg_common.c',
+             'vp9/common/vp9_thread.c',
+             'vp9/common/vp9_thread_common.c',
              'vp9/common/vp9_tile_common.c',
              'vp9/decoder/vp9_decodeframe.c',
              'vp9/decoder/vp9_decodemv.c',
+             'vp9/decoder/vp9_decoder.c',
              'vp9/decoder/vp9_detokenize.c',
              'vp9/decoder/vp9_dsubexp.c',
              'vp9/decoder/vp9_dthread.c',
-             'vp9/decoder/vp9_onyxd_if.c',
+             'vp9/decoder/vp9_read_bit_buffer.c',
              'vp9/decoder/vp9_reader.c',
-             'vp9/decoder/vp9_thread.c',
+             'vp9/encoder/vp9_aq_complexity.c',
+             'vp9/encoder/vp9_aq_cyclicrefresh.c',
+             'vp9/encoder/vp9_aq_variance.c',
+             'vp9/encoder/vp9_avg.c',
              'vp9/encoder/vp9_bitstream.c',
+             'vp9/encoder/vp9_context_tree.c',
+             'vp9/encoder/vp9_cost.c',
              'vp9/encoder/vp9_dct.c',
              'vp9/encoder/vp9_encodeframe.c',
              'vp9/encoder/vp9_encodemb.c',
              'vp9/encoder/vp9_encodemv.c',
+             'vp9/encoder/vp9_encoder.c',
+             'vp9/encoder/vp9_ethread.c',
              'vp9/encoder/vp9_extend.c',
              'vp9/encoder/vp9_firstpass.c',
              'vp9/encoder/vp9_lookahead.c',
              'vp9/encoder/vp9_mbgraph.c',
              'vp9/encoder/vp9_mcomp.c',
-             'vp9/encoder/vp9_onyx_if.c',
              'vp9/encoder/vp9_picklpf.c',
              'vp9/encoder/vp9_pickmode.c',
-             'vp9/encoder/vp9_psnr.c',
              'vp9/encoder/vp9_quantize.c',
              'vp9/encoder/vp9_ratectrl.c',
+             'vp9/encoder/vp9_rd.c',
              'vp9/encoder/vp9_rdopt.c',
              'vp9/encoder/vp9_resize.c',
              'vp9/encoder/vp9_sad.c',
              'vp9/encoder/vp9_segmentation.c',
+             'vp9/encoder/vp9_skin_detection.c',
+             'vp9/encoder/vp9_speed_features.c',
              'vp9/encoder/vp9_subexp.c',
+             'vp9/encoder/vp9_svc_layercontext.c',
              'vp9/encoder/vp9_temporal_filter.c',
              'vp9/encoder/vp9_tokenize.c',
              'vp9/encoder/vp9_treewriter.c',
-             'vp9/encoder/vp9_vaq.c',
              'vp9/encoder/vp9_variance.c',
+             'vp9/encoder/vp9_write_bit_buffer.c',
              'vp9/encoder/vp9_writer.c',
              'vp9/vp9_cx_iface.c',
              'vp9/vp9_dx_iface.c',
-             'vpx/src/svc_encodeframe.c',
              'vpx/src/vpx_codec.c',
              'vpx/src/vpx_decoder.c',
              'vpx/src/vpx_encoder.c',
              'vpx/src/vpx_image.c',
+             'vpx/src/vpx_psnr.c',
              'vpx_mem/vpx_mem.c',
              'vpx_scale/generic/gen_scalers.c',
              'vpx_scale/generic/vpx_scale.c',
              'vpx_scale/generic/yv12config.c',
              'vpx_scale/generic/yv12extend.c',
              'vpx_scale/vpx_scale_rtcd.c'],
  'VP8_POSTPROC': ['vp8/common/mfqe.c', 'vp8/common/postproc.c'],
- 'VP9_POSTPROC': ['vp9/common/vp9_postproc.c'],
+ 'VP9_POSTPROC': ['vp9/common/vp9_mfqe.c',
+                  'vp9/common/vp9_postproc.c',
+                  'vp9/common/x86/vp9_mfqe_sse2.asm'],
  'X86-64_ASM': ['third_party/x86inc/x86inc.asm',
-                'vp8/common/x86/loopfilter_block_sse2.asm',
-                'vp9/encoder/x86/vp9_quantize_ssse3.asm'],
+                'vp8/common/x86/loopfilter_block_sse2_x86_64.asm',
+                'vp8/encoder/x86/ssim_opt_x86_64.asm',
+                'vp9/common/x86/vp9_idct_ssse3_x86_64.asm',
+                'vp9/encoder/x86/vp9_dct_ssse3_x86_64.asm',
+                'vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm',
+                'vp9/encoder/x86/vp9_ssim_opt_x86_64.asm'],
  'X86_ASM': ['vp8/common/x86/dequantize_mmx.asm',
              'vp8/common/x86/filter_x86.c',
              'vp8/common/x86/idct_blk_mmx.c',
              'vp8/common/x86/idct_blk_sse2.c',
              'vp8/common/x86/idctllm_mmx.asm',
              'vp8/common/x86/idctllm_sse2.asm',
              'vp8/common/x86/iwalsh_mmx.asm',
              'vp8/common/x86/iwalsh_sse2.asm',
              'vp8/common/x86/loopfilter_mmx.asm',
              'vp8/common/x86/loopfilter_sse2.asm',
              'vp8/common/x86/loopfilter_x86.c',
              'vp8/common/x86/mfqe_sse2.asm',
              'vp8/common/x86/postproc_mmx.asm',
              'vp8/common/x86/postproc_sse2.asm',
-             'vp8/common/x86/postproc_x86.c',
              'vp8/common/x86/recon_mmx.asm',
              'vp8/common/x86/recon_sse2.asm',
              'vp8/common/x86/recon_wrapper_sse2.c',
              'vp8/common/x86/sad_mmx.asm',
              'vp8/common/x86/sad_sse2.asm',
              'vp8/common/x86/sad_sse3.asm',
              'vp8/common/x86/sad_sse4.asm',
              'vp8/common/x86/sad_ssse3.asm',
@@ -299,45 +300,47 @@ files = {
              'vp8/common/x86/vp8_asm_stubs.c',
              'vp8/encoder/x86/dct_mmx.asm',
              'vp8/encoder/x86/dct_sse2.asm',
              'vp8/encoder/x86/denoising_sse2.c',
              'vp8/encoder/x86/encodeopt.asm',
              'vp8/encoder/x86/fwalsh_sse2.asm',
              'vp8/encoder/x86/quantize_mmx.asm',
              'vp8/encoder/x86/quantize_sse2.c',
-             'vp8/encoder/x86/quantize_sse4.asm',
-             'vp8/encoder/x86/quantize_ssse3.asm',
+             'vp8/encoder/x86/quantize_sse4.c',
+             'vp8/encoder/x86/quantize_ssse3.c',
              'vp8/encoder/x86/subtract_mmx.asm',
              'vp8/encoder/x86/subtract_sse2.asm',
              'vp8/encoder/x86/temporal_filter_apply_sse2.asm',
              'vp8/encoder/x86/vp8_enc_stubs_mmx.c',
              'vp8/encoder/x86/vp8_enc_stubs_sse2.c',
              'vp9/common/x86/vp9_asm_stubs.c',
              'vp9/common/x86/vp9_copy_sse2.asm',
              'vp9/common/x86/vp9_idct_intrin_sse2.c',
+             'vp9/common/x86/vp9_idct_intrin_ssse3.c',
              'vp9/common/x86/vp9_intrapred_sse2.asm',
              'vp9/common/x86/vp9_intrapred_ssse3.asm',
              'vp9/common/x86/vp9_loopfilter_intrin_sse2.c',
              'vp9/common/x86/vp9_loopfilter_mmx.asm',
+             'vp9/common/x86/vp9_subpixel_8t_intrin_ssse3.c',
              'vp9/common/x86/vp9_subpixel_8t_sse2.asm',
              'vp9/common/x86/vp9_subpixel_8t_ssse3.asm',
              'vp9/common/x86/vp9_subpixel_bilinear_sse2.asm',
              'vp9/common/x86/vp9_subpixel_bilinear_ssse3.asm',
+             'vp9/encoder/x86/vp9_avg_intrin_sse2.c',
              'vp9/encoder/x86/vp9_dct32x32_sse2.c',
+             'vp9/encoder/x86/vp9_dct_impl_sse2.c',
+             'vp9/encoder/x86/vp9_dct_mmx.asm',
              'vp9/encoder/x86/vp9_dct_sse2.c',
+             'vp9/encoder/x86/vp9_dct_ssse3.c',
              'vp9/encoder/x86/vp9_error_sse2.asm',
+             'vp9/encoder/x86/vp9_quantize_sse2.c',
              'vp9/encoder/x86/vp9_sad4d_sse2.asm',
-             'vp9/encoder/x86/vp9_sad_mmx.asm',
              'vp9/encoder/x86/vp9_sad_sse2.asm',
              'vp9/encoder/x86/vp9_sad_sse3.asm',
              'vp9/encoder/x86/vp9_sad_sse4.asm',
              'vp9/encoder/x86/vp9_sad_ssse3.asm',
              'vp9/encoder/x86/vp9_subpel_variance.asm',
-             'vp9/encoder/x86/vp9_subpel_variance_impl_sse2.asm',
              'vp9/encoder/x86/vp9_subtract_sse2.asm',
              'vp9/encoder/x86/vp9_temporal_filter_apply_sse2.asm',
-             'vp9/encoder/x86/vp9_variance_impl_mmx.asm',
-             'vp9/encoder/x86/vp9_variance_impl_sse2.asm',
-             'vp9/encoder/x86/vp9_variance_mmx.c',
              'vp9/encoder/x86/vp9_variance_sse2.c',
              'vpx_ports/emms.asm']
 }
--- a/media/libvpx/third_party/x86inc/x86inc.asm
+++ b/media/libvpx/third_party/x86inc/x86inc.asm
@@ -73,16 +73,19 @@
 ; and x264's strides are all positive), but is not guaranteed by the ABI.
 
 ; Name of the .rodata section.
 ; Kludge: Something on OS X fails to align .rodata even given an align attribute,
 ; so use a different read-only section.
 %macro SECTION_RODATA 0-1 16
     %ifidn __OUTPUT_FORMAT__,macho64
         SECTION .text align=%1
+    %elifidn __OUTPUT_FORMAT__,macho32
+        SECTION .text align=%1
+        fakegot:
     %elifidn __OUTPUT_FORMAT__,macho
         SECTION .text align=%1
         fakegot:
     %elifidn __OUTPUT_FORMAT__,aout
         section .text
     %else
         SECTION .rodata align=%1
     %endif
@@ -229,20 +232,20 @@ ALIGNMODE k7
     %define r%1d %3
     %define r%1w %4
     %define r%1b %5
     %if %0 == 5
         %define r%1m  %3
         %define r%1mp %2
     %elif ARCH_X86_64 ; memory
         %define r%1m [rsp + stack_offset + %6]
-        %define r%1mp qword r %+ %1m
+        %define r%1mp qword r %+ %1 %+ m
     %else
         %define r%1m [esp + stack_offset + %6]
-        %define r%1mp dword r %+ %1m
+        %define r%1mp dword r %+ %1 %+ m
     %endif
     %define r%1  %2
 %endmacro
 
 %macro DECLARE_REG_SIZE 2
     %define r%1q r%1
     %define e%1q r%1
     %define r%1d e%1
@@ -390,16 +393,33 @@ DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9
         CAT_XDEFINE arg_name, %%i, %1
         %assign %%i %%i+1
         %rotate 1
     %endrep
     %xdefine stack_offset %%stack_offset
     %assign n_arg_names %0
 %endmacro
 
+%if ARCH_X86_64
+%macro ALLOC_STACK 2  ; stack_size, num_regs
+  %assign %%stack_aligment ((mmsize + 15) & ~15)
+  %assign stack_size_padded %1
+
+  %assign %%reg_num (%2 - 1)
+  %xdefine rsp_tmp r %+ %%reg_num
+  mov  rsp_tmp, rsp
+  sub  rsp, stack_size_padded
+  and  rsp, ~(%%stack_aligment - 1)
+%endmacro
+
+%macro RESTORE_STACK 0  ; reset rsp register
+  mov  rsp, rsp_tmp
+%endmacro
+%endif
+
 %if WIN64 ; Windows x64 ;=================================================
 
 DECLARE_REG 0,  rcx, ecx,  cx,   cl
 DECLARE_REG 1,  rdx, edx,  dx,   dl
 DECLARE_REG 2,  R8,  R8D,  R8W,  R8B
 DECLARE_REG 3,  R9,  R9D,  R9W,  R9B
 DECLARE_REG 4,  R10, R10D, R10W, R10B, 40
 DECLARE_REG 5,  R11, R11D, R11W, R11B, 48
@@ -587,26 +607,38 @@ DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
 %endmacro
 %macro cglobal_internal 1-2+
     %ifndef cglobaled_%1
         %xdefine %1 mangle(program_name %+ _ %+ %1)
         %xdefine %1.skip_prologue %1 %+ .skip_prologue
         CAT_XDEFINE cglobaled_, %1, 1
     %endif
     %xdefine current_function %1
-    %ifidn __OUTPUT_FORMAT__,elf
-        global %1:function hidden
-    %elifidn __OUTPUT_FORMAT__,elf32
-        global %1:function hidden
-    %elifidn __OUTPUT_FORMAT__,elf64
-        global %1:function hidden
-    %elifidn __OUTPUT_FORMAT__,macho32
-        global %1:private_extern
-    %elifidn __OUTPUT_FORMAT__,macho64
-        global %1:private_extern
+    %ifdef CHROMIUM
+        %ifidn __OUTPUT_FORMAT__,elf
+            global %1:function hidden
+        %elifidn __OUTPUT_FORMAT__,elf32
+            global %1:function hidden
+        %elifidn __OUTPUT_FORMAT__,elf64
+            global %1:function hidden
+        %elifidn __OUTPUT_FORMAT__,macho32
+            %ifdef __NASM_VER__
+                global %1
+            %else
+                global %1:private_extern
+            %endif
+        %elifidn __OUTPUT_FORMAT__,macho64
+            %ifdef __NASM_VER__
+                global %1
+            %else
+                global %1:private_extern
+            %endif
+        %else
+            global %1
+        %endif
     %else
         global %1
     %endif
     align function_align
     %1:
     RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
     %assign stack_offset 0
     %if %0 > 1
--- a/media/libvpx/vp8/common/arm/armv6/vp8_variance16x16_armv6.asm
+++ b/media/libvpx/vp8/common/arm/armv6/vp8_variance16x16_armv6.asm
@@ -48,17 +48,17 @@ loop
     sel     r6, r9, lr          ; select bytes with negative difference
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
     ; calculate total sum
     adds    r8, r8, r4          ; add positive differences to sum
-    subs    r8, r8, r5          ; substract negative differences from sum
+    subs    r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r10, r6, ror #8     ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 2nd 4 pixels
     ldr     r4, [r0, #4]        ; load 4 src pixels
@@ -72,17 +72,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; substract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r10, r6, ror #8     ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 3rd 4 pixels
     ldr     r4, [r0, #8]        ; load 4 src pixels
@@ -96,17 +96,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; substract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r10, r6, ror #8     ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 4th 4 pixels
     ldr     r4, [r0, #12]       ; load 4 src pixels
@@ -122,17 +122,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; substract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r10, r6, ror #8     ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
     smlad   r11, r10, r10, r11  ; dual signed multiply, add and accumulate (2)
 
 
--- a/media/libvpx/vp8/common/arm/armv6/vp8_variance8x8_armv6.asm
+++ b/media/libvpx/vp8/common/arm/armv6/vp8_variance8x8_armv6.asm
@@ -46,17 +46,17 @@ loop
     sel     r8, r9, lr          ; select bytes with negative difference
 
     ; calculate partial sums
     usad8   r6, r10, lr         ; calculate sum of positive differences
     usad8   r7, r8, lr          ; calculate sum of negative differences
     orr     r8, r8, r10         ; differences of all 4 pixels
     ; calculate total sum
     add    r4, r4, r6           ; add positive differences to sum
-    sub    r4, r4, r7           ; substract negative differences from sum
+    sub    r4, r4, r7           ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r7, r8              ; byte (two pixels) to halfwords
     uxtb16  r10, r8, ror #8     ; another two pixels to halfwords
     smlad   r5, r7, r7, r5      ; dual signed multiply, add and accumulate (1)
 
     ; 2nd 4 pixels
     ldr     r6, [r0, #0x4]      ; load 4 src pixels
@@ -72,17 +72,17 @@ loop
 
     ; calculate partial sums
     usad8   r6, r10, lr         ; calculate sum of positive differences
     usad8   r7, r8, lr          ; calculate sum of negative differences
     orr     r8, r8, r10         ; differences of all 4 pixels
 
     ; calculate total sum
     add     r4, r4, r6          ; add positive differences to sum
-    sub     r4, r4, r7          ; substract negative differences from sum
+    sub     r4, r4, r7          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r7, r8              ; byte (two pixels) to halfwords
     uxtb16  r10, r8, ror #8     ; another two pixels to halfwords
     smlad   r5, r7, r7, r5      ; dual signed multiply, add and accumulate (1)
     subs    r12, r12, #1        ; next row
     smlad   r5, r10, r10, r5    ; dual signed multiply, add and accumulate (2)
 
--- a/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
+++ b/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm
@@ -53,17 +53,17 @@ loop
     sel     r6, r6, lr          ; select bytes with negative difference
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
     ; calculate total sum
     adds    r8, r8, r4          ; add positive differences to sum
-    subs    r8, r8, r5          ; substract negative differences from sum
+    subs    r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 2nd 4 pixels
     ldr     r4, [r0, #4]        ; load 4 src pixels
@@ -84,17 +84,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; substract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 3rd 4 pixels
     ldr     r4, [r0, #8]        ; load 4 src pixels
@@ -115,17 +115,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; substract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 4th 4 pixels
     ldr     r4, [r0, #12]       ; load 4 src pixels
@@ -148,17 +148,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; substract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
     smlad   r11, r7, r7, r11    ; dual signed multiply, add and accumulate (2)
 
     subs    r12, r12, #1
--- a/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm
+++ b/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm
@@ -64,17 +64,17 @@ loop
     sel     r6, r6, lr          ; select bytes with negative difference
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
     ; calculate total sum
     adds    r8, r8, r4          ; add positive differences to sum
-    subs    r8, r8, r5          ; substract negative differences from sum
+    subs    r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 2nd 4 pixels
     ldr     r4, [r0, #4]        ; load source pixels a, row N
@@ -106,17 +106,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; substract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 3rd 4 pixels
     ldr     r4, [r0, #8]        ; load source pixels a, row N
@@ -148,17 +148,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; substract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 4th 4 pixels
     ldr     r4, [r0, #12]       ; load source pixels a, row N
@@ -190,17 +190,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; substract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
     subs    r12, r12, #1
     smlad   r11, r7, r7, r11    ; dual signed multiply, add and accumulate (2)
 
--- a/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm
+++ b/media/libvpx/vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm
@@ -54,17 +54,17 @@ loop
     sel     r6, r6, lr          ; select bytes with negative difference
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
     ; calculate total sum
     adds    r8, r8, r4          ; add positive differences to sum
-    subs    r8, r8, r5          ; substract negative differences from sum
+    subs    r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 2nd 4 pixels
     ldr     r4, [r0, #4]        ; load 4 src pixels
@@ -85,17 +85,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; substract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 3rd 4 pixels
     ldr     r4, [r0, #8]        ; load 4 src pixels
@@ -116,17 +116,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; substract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
 
     ; 4th 4 pixels
     ldr     r4, [r0, #12]       ; load 4 src pixels
@@ -149,17 +149,17 @@ loop
 
     ; calculate partial sums
     usad8   r4, r7, lr          ; calculate sum of positive differences
     usad8   r5, r6, lr          ; calculate sum of negative differences
     orr     r6, r6, r7          ; differences of all 4 pixels
 
     ; calculate total sum
     add     r8, r8, r4          ; add positive differences to sum
-    sub     r8, r8, r5          ; substract negative differences from sum
+    sub     r8, r8, r5          ; subtract negative differences from sum
 
     ; calculate sse
     uxtb16  r5, r6              ; byte (two pixels) to halfwords
     uxtb16  r7, r6, ror #8      ; another two pixels to halfwords
     smlad   r11, r5, r5, r11    ; dual signed multiply, add and accumulate (1)
     smlad   r11, r7, r7, r11    ; dual signed multiply, add and accumulate (2)
 
 
--- a/media/libvpx/vp8/common/arm/dequantize_arm.c
+++ b/media/libvpx/vp8/common/arm/dequantize_arm.c
@@ -7,36 +7,19 @@
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 
 #include "vpx_config.h"
 #include "vp8/common/blockd.h"
 
-#if HAVE_NEON
-extern void vp8_dequantize_b_loop_neon(short *Q, short *DQC, short *DQ);
-#endif
-
 #if HAVE_MEDIA
 extern void vp8_dequantize_b_loop_v6(short *Q, short *DQC, short *DQ);
-#endif
 
-#if HAVE_NEON
-
-void vp8_dequantize_b_neon(BLOCKD *d, short *DQC)
-{
-    short *DQ  = d->dqcoeff;
-    short *Q   = d->qcoeff;
-
-    vp8_dequantize_b_loop_neon(Q, DQC, DQ);
-}
-#endif
-
-#if HAVE_MEDIA
 void vp8_dequantize_b_v6(BLOCKD *d, short *DQC)
 {
     short *DQ  = d->dqcoeff;
     short *Q   = d->qcoeff;
 
     vp8_dequantize_b_loop_v6(Q, DQC, DQ);
 }
 #endif
--- a/media/libvpx/vp8/common/arm/loopfilter_arm.c
+++ b/media/libvpx/vp8/common/arm/loopfilter_arm.c
@@ -29,21 +29,21 @@ extern prototype_loopfilter(vp8_mbloop_f
 typedef void loopfilter_y_neon(unsigned char *src, int pitch,
         unsigned char blimit, unsigned char limit, unsigned char thresh);
 typedef void loopfilter_uv_neon(unsigned char *u, int pitch,
         unsigned char blimit, unsigned char limit, unsigned char thresh,
         unsigned char *v);
 
 extern loopfilter_y_neon vp8_loop_filter_horizontal_edge_y_neon;
 extern loopfilter_y_neon vp8_loop_filter_vertical_edge_y_neon;
+extern loopfilter_uv_neon vp8_loop_filter_horizontal_edge_uv_neon;
+extern loopfilter_uv_neon vp8_loop_filter_vertical_edge_uv_neon;
+
 extern loopfilter_y_neon vp8_mbloop_filter_horizontal_edge_y_neon;
 extern loopfilter_y_neon vp8_mbloop_filter_vertical_edge_y_neon;
-
-extern loopfilter_uv_neon vp8_loop_filter_horizontal_edge_uv_neon;
-extern loopfilter_uv_neon vp8_loop_filter_vertical_edge_uv_neon;
 extern loopfilter_uv_neon vp8_mbloop_filter_horizontal_edge_uv_neon;
 extern loopfilter_uv_neon vp8_mbloop_filter_vertical_edge_uv_neon;
 #endif
 
 #if HAVE_MEDIA
 /* ARMV6/MEDIA loopfilter functions*/
 /* Horizontal MB filtering */
 void vp8_loop_filter_mbh_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
--- a/media/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c
+++ b/media/libvpx/vp8/common/arm/neon/bilinearpredict_neon.c
@@ -5,17 +5,17 @@
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include <arm_neon.h>
 
-static const uint16_t bifilter4_coeff[8][2] = {
+static const uint8_t bifilter4_coeff[8][2] = {
     {128,   0},
     {112,  16},
     { 96,  32},
     { 80,  48},
     { 64,  64},
     { 48,  80},
     { 32,  96},
     { 16, 112}
@@ -25,25 +25,28 @@ void vp8_bilinear_predict4x4_neon(
         unsigned char *src_ptr,
         int src_pixels_per_line,
         int xoffset,
         int yoffset,
         unsigned char *dst_ptr,
         int dst_pitch) {
     uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8;
     uint8x8_t d26u8, d27u8, d28u8, d29u8, d30u8;
-    uint32x2_t d28u32, d29u32, d30u32;
     uint8x16_t q1u8, q2u8;
     uint16x8_t q1u16, q2u16;
     uint16x8_t q7u16, q8u16, q9u16;
     uint64x2_t q4u64, q5u64;
     uint64x1_t d12u64;
     uint32x2x2_t d0u32x2, d1u32x2, d2u32x2, d3u32x2;
 
     if (xoffset == 0) {  // skip_1stpass_filter
+        uint32x2_t d28u32 = vdup_n_u32(0);
+        uint32x2_t d29u32 = vdup_n_u32(0);
+        uint32x2_t d30u32 = vdup_n_u32(0);
+
         d28u32 = vld1_lane_u32((const uint32_t *)src_ptr, d28u32, 0);
         src_ptr += src_pixels_per_line;
         d28u32 = vld1_lane_u32((const uint32_t *)src_ptr, d28u32, 1);
         src_ptr += src_pixels_per_line;
         d29u32 = vld1_lane_u32((const uint32_t *)src_ptr, d29u32, 0);
         src_ptr += src_pixels_per_line;
         d29u32 = vld1_lane_u32((const uint32_t *)src_ptr, d29u32, 1);
         src_ptr += src_pixels_per_line;
@@ -56,18 +59,18 @@ void vp8_bilinear_predict4x4_neon(
         d3u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
         d4u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
         d5u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
         d6u8 = vld1_u8(src_ptr);
 
         q1u8 = vcombine_u8(d2u8, d3u8);
         q2u8 = vcombine_u8(d4u8, d5u8);
 
-        d0u8 = vdup_n_u8((uint8_t)bifilter4_coeff[xoffset][0]);
-        d1u8 = vdup_n_u8((uint8_t)bifilter4_coeff[xoffset][1]);
+        d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
+        d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
 
         q4u64  = vshrq_n_u64(vreinterpretq_u64_u8(q1u8), 8);
         q5u64  = vshrq_n_u64(vreinterpretq_u64_u8(q2u8), 8);
         d12u64 = vshr_n_u64(vreinterpret_u64_u8(d6u8), 8);
 
         d0u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q1u8)),
                            vreinterpret_u32_u8(vget_high_u8(q1u8)));
         d1u32x2 = vzip_u32(vreinterpret_u32_u8(vget_low_u8(q2u8)),
@@ -147,18 +150,18 @@ void vp8_bilinear_predict8x4_neon(
         d26u8 = vld1_u8(src_ptr);
     } else {
         q1u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
         q2u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
         q3u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
         q4u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
         q5u8 = vld1q_u8(src_ptr);
 
-        d0u8 = vdup_n_u8((uint8_t)bifilter4_coeff[xoffset][0]);
-        d1u8 = vdup_n_u8((uint8_t)bifilter4_coeff[xoffset][1]);
+        d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
+        d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
 
         q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
         q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
         q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
         q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
         q10u16 = vmull_u8(vget_low_u8(q5u8), d0u8);
 
         d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);
@@ -237,18 +240,18 @@ void vp8_bilinear_predict8x8_neon(
         d29u8 = vld1_u8(src_ptr); src_ptr += src_pixels_per_line;
         d30u8 = vld1_u8(src_ptr);
     } else {
         q1u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
         q2u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
         q3u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
         q4u8 = vld1q_u8(src_ptr); src_ptr += src_pixels_per_line;
 
-        d0u8 = vdup_n_u8((uint8_t)bifilter4_coeff[xoffset][0]);
-        d1u8 = vdup_n_u8((uint8_t)bifilter4_coeff[xoffset][1]);
+        d0u8 = vdup_n_u8(bifilter4_coeff[xoffset][0]);
+        d1u8 = vdup_n_u8(bifilter4_coeff[xoffset][1]);
 
         q6u16 = vmull_u8(vget_low_u8(q1u8), d0u8);
         q7u16 = vmull_u8(vget_low_u8(q2u8), d0u8);
         q8u16 = vmull_u8(vget_low_u8(q3u8), d0u8);
         q9u16 = vmull_u8(vget_low_u8(q4u8), d0u8);
 
         d3u8 = vext_u8(vget_low_u8(q1u8), vget_high_u8(q1u8), 1);
         d5u8 = vext_u8(vget_low_u8(q2u8), vget_high_u8(q2u8), 1);
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/buildintrapredictorsmby_neon.asm
+++ /dev/null
@@ -1,584 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-    EXPORT  |vp8_build_intra_predictors_mby_neon_func|
-    EXPORT  |vp8_build_intra_predictors_mby_s_neon_func|
-
-    ARM
-    REQUIRE8
-    PRESERVE8
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-; r0    unsigned char *y_buffer
-; r1    unsigned char *ypred_ptr
-; r2    int y_stride
-; r3    int mode
-; stack int Up
-; stack int Left
-
-|vp8_build_intra_predictors_mby_neon_func| PROC
-    push            {r4-r8, lr}
-
-    cmp             r3, #0
-    beq             case_dc_pred
-    cmp             r3, #1
-    beq             case_v_pred
-    cmp             r3, #2
-    beq             case_h_pred
-    cmp             r3, #3
-    beq             case_tm_pred
-
-case_dc_pred
-    ldr             r4, [sp, #24]       ; Up
-    ldr             r5, [sp, #28]       ; Left
-
-    ; Default the DC average to 128
-    mov             r12, #128
-    vdup.u8         q0, r12
-
-    ; Zero out running sum
-    mov             r12, #0
-
-    ; compute shift and jump
-    adds            r7, r4, r5
-    beq             skip_dc_pred_up_left
-
-    ; Load above row, if it exists
-    cmp             r4, #0
-    beq             skip_dc_pred_up
-
-    sub             r6, r0, r2
-    vld1.8          {q1}, [r6]
-    vpaddl.u8       q2, q1
-    vpaddl.u16      q3, q2
-    vpaddl.u32      q4, q3
-
-    vmov.32         r4, d8[0]
-    vmov.32         r6, d9[0]
-
-    add             r12, r4, r6
-
-    ; Move back to interger registers
-
-skip_dc_pred_up
-
-    cmp             r5, #0
-    beq             skip_dc_pred_left
-
-    sub             r0, r0, #1
-
-    ; Load left row, if it exists
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-
-    add             r12, r12, r3
-    add             r12, r12, r4
-    add             r12, r12, r5
-    add             r12, r12, r6
-
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-
-    add             r12, r12, r3
-    add             r12, r12, r4
-    add             r12, r12, r5
-    add             r12, r12, r6
-
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-
-    add             r12, r12, r3
-    add             r12, r12, r4
-    add             r12, r12, r5
-    add             r12, r12, r6
-
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0]
-
-    add             r12, r12, r3
-    add             r12, r12, r4
-    add             r12, r12, r5
-    add             r12, r12, r6
-
-skip_dc_pred_left
-    add             r7, r7, #3          ; Shift
-    sub             r4, r7, #1
-    mov             r5, #1
-    add             r12, r12, r5, lsl r4
-    mov             r5, r12, lsr r7     ; expected_dc
-
-    vdup.u8         q0, r5
-
-skip_dc_pred_up_left
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-
-    pop             {r4-r8,pc}
-case_v_pred
-    ; Copy down above row
-    sub             r6, r0, r2
-    vld1.8          {q0}, [r6]
-
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q0}, [r1]!
-    pop             {r4-r8,pc}
-
-case_h_pred
-    ; Load 4x yleft_col
-    sub             r0, r0, #1
-
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-    vdup.u8         q0, r3
-    vdup.u8         q1, r4
-    vdup.u8         q2, r5
-    vdup.u8         q3, r6
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q1}, [r1]!
-    vst1.u8         {q2}, [r1]!
-    vst1.u8         {q3}, [r1]!
-
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-    vdup.u8         q0, r3
-    vdup.u8         q1, r4
-    vdup.u8         q2, r5
-    vdup.u8         q3, r6
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q1}, [r1]!
-    vst1.u8         {q2}, [r1]!
-    vst1.u8         {q3}, [r1]!
-
-
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-    vdup.u8         q0, r3
-    vdup.u8         q1, r4
-    vdup.u8         q2, r5
-    vdup.u8         q3, r6
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q1}, [r1]!
-    vst1.u8         {q2}, [r1]!
-    vst1.u8         {q3}, [r1]!
-
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-    vdup.u8         q0, r3
-    vdup.u8         q1, r4
-    vdup.u8         q2, r5
-    vdup.u8         q3, r6
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q1}, [r1]!
-    vst1.u8         {q2}, [r1]!
-    vst1.u8         {q3}, [r1]!
-
-    pop             {r4-r8,pc}
-
-case_tm_pred
-    ; Load yabove_row
-    sub             r3, r0, r2
-    vld1.8          {q8}, [r3]
-
-    ; Load ytop_left
-    sub             r3, r3, #1
-    ldrb            r7, [r3]
-
-    vdup.u16        q7, r7
-
-    ; Compute yabove_row - ytop_left
-    mov             r3, #1
-    vdup.u8         q0, r3
-
-    vmull.u8        q4, d16, d0
-    vmull.u8        q5, d17, d0
-
-    vsub.s16        q4, q4, q7
-    vsub.s16        q5, q5, q7
-
-    ; Load 4x yleft_col
-    sub             r0, r0, #1
-    mov             r12, #4
-
-case_tm_pred_loop
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-    vdup.u16        q0, r3
-    vdup.u16        q1, r4
-    vdup.u16        q2, r5
-    vdup.u16        q3, r6
-
-    vqadd.s16       q8, q0, q4
-    vqadd.s16       q9, q0, q5
-
-    vqadd.s16       q10, q1, q4
-    vqadd.s16       q11, q1, q5
-
-    vqadd.s16       q12, q2, q4
-    vqadd.s16       q13, q2, q5
-
-    vqadd.s16       q14, q3, q4
-    vqadd.s16       q15, q3, q5
-
-    vqshrun.s16     d0, q8, #0
-    vqshrun.s16     d1, q9, #0
-
-    vqshrun.s16     d2, q10, #0
-    vqshrun.s16     d3, q11, #0
-
-    vqshrun.s16     d4, q12, #0
-    vqshrun.s16     d5, q13, #0
-
-    vqshrun.s16     d6, q14, #0
-    vqshrun.s16     d7, q15, #0
-
-    vst1.u8         {q0}, [r1]!
-    vst1.u8         {q1}, [r1]!
-    vst1.u8         {q2}, [r1]!
-    vst1.u8         {q3}, [r1]!
-
-    subs            r12, r12, #1
-    bne             case_tm_pred_loop
-
-    pop             {r4-r8,pc}
-
-    ENDP
-
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-; r0    unsigned char *y_buffer
-; r1    unsigned char *ypred_ptr
-; r2    int y_stride
-; r3    int mode
-; stack int Up
-; stack int Left
-
-|vp8_build_intra_predictors_mby_s_neon_func| PROC
-    push            {r4-r8, lr}
-
-    mov             r1, r0      ;   unsigned char *ypred_ptr = x->dst.y_buffer; //x->Predictor;
-
-    cmp             r3, #0
-    beq             case_dc_pred_s
-    cmp             r3, #1
-    beq             case_v_pred_s
-    cmp             r3, #2
-    beq             case_h_pred_s
-    cmp             r3, #3
-    beq             case_tm_pred_s
-
-case_dc_pred_s
-    ldr             r4, [sp, #24]       ; Up
-    ldr             r5, [sp, #28]       ; Left
-
-    ; Default the DC average to 128
-    mov             r12, #128
-    vdup.u8         q0, r12
-
-    ; Zero out running sum
-    mov             r12, #0
-
-    ; compute shift and jump
-    adds            r7, r4, r5
-    beq             skip_dc_pred_up_left_s
-
-    ; Load above row, if it exists
-    cmp             r4, #0
-    beq             skip_dc_pred_up_s
-
-    sub             r6, r0, r2
-    vld1.8          {q1}, [r6]
-    vpaddl.u8       q2, q1
-    vpaddl.u16      q3, q2
-    vpaddl.u32      q4, q3
-
-    vmov.32         r4, d8[0]
-    vmov.32         r6, d9[0]
-
-    add             r12, r4, r6
-
-    ; Move back to interger registers
-
-skip_dc_pred_up_s
-
-    cmp             r5, #0
-    beq             skip_dc_pred_left_s
-
-    sub             r0, r0, #1
-
-    ; Load left row, if it exists
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-
-    add             r12, r12, r3
-    add             r12, r12, r4
-    add             r12, r12, r5
-    add             r12, r12, r6
-
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-
-    add             r12, r12, r3
-    add             r12, r12, r4
-    add             r12, r12, r5
-    add             r12, r12, r6
-
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-
-    add             r12, r12, r3
-    add             r12, r12, r4
-    add             r12, r12, r5
-    add             r12, r12, r6
-
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0]
-
-    add             r12, r12, r3
-    add             r12, r12, r4
-    add             r12, r12, r5
-    add             r12, r12, r6
-
-skip_dc_pred_left_s
-    add             r7, r7, #3          ; Shift
-    sub             r4, r7, #1
-    mov             r5, #1
-    add             r12, r12, r5, lsl r4
-    mov             r5, r12, lsr r7     ; expected_dc
-
-    vdup.u8         q0, r5
-
-skip_dc_pred_up_left_s
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-
-    pop             {r4-r8,pc}
-case_v_pred_s
-    ; Copy down above row
-    sub             r6, r0, r2
-    vld1.8          {q0}, [r6]
-
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q0}, [r1], r2
-    pop             {r4-r8,pc}
-
-case_h_pred_s
-    ; Load 4x yleft_col
-    sub             r0, r0, #1
-
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-    vdup.u8         q0, r3
-    vdup.u8         q1, r4
-    vdup.u8         q2, r5
-    vdup.u8         q3, r6
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q1}, [r1], r2
-    vst1.u8         {q2}, [r1], r2
-    vst1.u8         {q3}, [r1], r2
-
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-    vdup.u8         q0, r3
-    vdup.u8         q1, r4
-    vdup.u8         q2, r5
-    vdup.u8         q3, r6
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q1}, [r1], r2
-    vst1.u8         {q2}, [r1], r2
-    vst1.u8         {q3}, [r1], r2
-
-
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-    vdup.u8         q0, r3
-    vdup.u8         q1, r4
-    vdup.u8         q2, r5
-    vdup.u8         q3, r6
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q1}, [r1], r2
-    vst1.u8         {q2}, [r1], r2
-    vst1.u8         {q3}, [r1], r2
-
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-    vdup.u8         q0, r3
-    vdup.u8         q1, r4
-    vdup.u8         q2, r5
-    vdup.u8         q3, r6
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q1}, [r1], r2
-    vst1.u8         {q2}, [r1], r2
-    vst1.u8         {q3}, [r1], r2
-
-    pop             {r4-r8,pc}
-
-case_tm_pred_s
-    ; Load yabove_row
-    sub             r3, r0, r2
-    vld1.8          {q8}, [r3]
-
-    ; Load ytop_left
-    sub             r3, r3, #1
-    ldrb            r7, [r3]
-
-    vdup.u16        q7, r7
-
-    ; Compute yabove_row - ytop_left
-    mov             r3, #1
-    vdup.u8         q0, r3
-
-    vmull.u8        q4, d16, d0
-    vmull.u8        q5, d17, d0
-
-    vsub.s16        q4, q4, q7
-    vsub.s16        q5, q5, q7
-
-    ; Load 4x yleft_col
-    sub             r0, r0, #1
-    mov             r12, #4
-
-case_tm_pred_loop_s
-    ldrb            r3, [r0], r2
-    ldrb            r4, [r0], r2
-    ldrb            r5, [r0], r2
-    ldrb            r6, [r0], r2
-    vdup.u16        q0, r3
-    vdup.u16        q1, r4
-    vdup.u16        q2, r5
-    vdup.u16        q3, r6
-
-    vqadd.s16       q8, q0, q4
-    vqadd.s16       q9, q0, q5
-
-    vqadd.s16       q10, q1, q4
-    vqadd.s16       q11, q1, q5
-
-    vqadd.s16       q12, q2, q4
-    vqadd.s16       q13, q2, q5
-
-    vqadd.s16       q14, q3, q4
-    vqadd.s16       q15, q3, q5
-
-    vqshrun.s16     d0, q8, #0
-    vqshrun.s16     d1, q9, #0
-
-    vqshrun.s16     d2, q10, #0
-    vqshrun.s16     d3, q11, #0
-
-    vqshrun.s16     d4, q12, #0
-    vqshrun.s16     d5, q13, #0
-
-    vqshrun.s16     d6, q14, #0
-    vqshrun.s16     d7, q15, #0
-
-    vst1.u8         {q0}, [r1], r2
-    vst1.u8         {q1}, [r1], r2
-    vst1.u8         {q2}, [r1], r2
-    vst1.u8         {q3}, [r1], r2
-
-    subs            r12, r12, #1
-    bne             case_tm_pred_loop_s
-
-    pop             {r4-r8,pc}
-
-    ENDP
-
-
-    END
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/copymem16x16_neon.asm
+++ /dev/null
@@ -1,59 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-    EXPORT  |vp8_copy_mem16x16_neon|
-    ; ARM
-    ; REQUIRE8
-    ; PRESERVE8
-
-    AREA    Block, CODE, READONLY ; name this block of code
-;void copy_mem16x16_neon( unsigned char *src, int src_stride, unsigned char *dst, int dst_stride)
-;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-|vp8_copy_mem16x16_neon| PROC
-
-    vld1.u8     {q0}, [r0], r1
-    vld1.u8     {q1}, [r0], r1
-    vld1.u8     {q2}, [r0], r1
-    vst1.u8     {q0}, [r2], r3
-    vld1.u8     {q3}, [r0], r1
-    vst1.u8     {q1}, [r2], r3
-    vld1.u8     {q4}, [r0], r1
-    vst1.u8     {q2}, [r2], r3
-    vld1.u8     {q5}, [r0], r1
-    vst1.u8     {q3}, [r2], r3
-    vld1.u8     {q6}, [r0], r1
-    vst1.u8     {q4}, [r2], r3
-    vld1.u8     {q7}, [r0], r1
-    vst1.u8     {q5}, [r2], r3
-    vld1.u8     {q8}, [r0], r1
-    vst1.u8     {q6}, [r2], r3
-    vld1.u8     {q9}, [r0], r1
-    vst1.u8     {q7}, [r2], r3
-    vld1.u8     {q10}, [r0], r1
-    vst1.u8     {q8}, [r2], r3
-    vld1.u8     {q11}, [r0], r1
-    vst1.u8     {q9}, [r2], r3
-    vld1.u8     {q12}, [r0], r1
-    vst1.u8     {q10}, [r2], r3
-    vld1.u8     {q13}, [r0], r1
-    vst1.u8     {q11}, [r2], r3
-    vld1.u8     {q14}, [r0], r1
-    vst1.u8     {q12}, [r2], r3
-    vld1.u8     {q15}, [r0], r1
-    vst1.u8     {q13}, [r2], r3
-    vst1.u8     {q14}, [r2], r3
-    vst1.u8     {q15}, [r2], r3
-
-    mov     pc, lr
-
-    ENDP  ; |vp8_copy_mem16x16_neon|
-
-    END
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/copymem8x4_neon.asm
+++ /dev/null
@@ -1,34 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-    EXPORT  |vp8_copy_mem8x4_neon|
-    ; ARM
-    ; REQUIRE8
-    ; PRESERVE8
-
-    AREA    Block, CODE, READONLY ; name this block of code
-;void copy_mem8x4_neon( unsigned char *src, int src_stride, unsigned char *dst, int dst_stride)
-;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-|vp8_copy_mem8x4_neon| PROC
-    vld1.u8     {d0}, [r0], r1
-    vld1.u8     {d1}, [r0], r1
-    vst1.u8     {d0}, [r2], r3
-    vld1.u8     {d2}, [r0], r1
-    vst1.u8     {d1}, [r2], r3
-    vld1.u8     {d3}, [r0], r1
-    vst1.u8     {d2}, [r2], r3
-    vst1.u8     {d3}, [r2], r3
-
-    mov     pc, lr
-
-    ENDP  ; |vp8_copy_mem8x4_neon|
-
-    END
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/copymem8x8_neon.asm
+++ /dev/null
@@ -1,43 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-    EXPORT  |vp8_copy_mem8x8_neon|
-    ; ARM
-    ; REQUIRE8
-    ; PRESERVE8
-
-    AREA    Block, CODE, READONLY ; name this block of code
-;void copy_mem8x8_neon( unsigned char *src, int src_stride, unsigned char *dst, int dst_stride)
-;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
-|vp8_copy_mem8x8_neon| PROC
-
-    vld1.u8     {d0}, [r0], r1
-    vld1.u8     {d1}, [r0], r1
-    vst1.u8     {d0}, [r2], r3
-    vld1.u8     {d2}, [r0], r1
-    vst1.u8     {d1}, [r2], r3
-    vld1.u8     {d3}, [r0], r1
-    vst1.u8     {d2}, [r2], r3
-    vld1.u8     {d4}, [r0], r1
-    vst1.u8     {d3}, [r2], r3
-    vld1.u8     {d5}, [r0], r1
-    vst1.u8     {d4}, [r2], r3
-    vld1.u8     {d6}, [r0], r1
-    vst1.u8     {d5}, [r2], r3
-    vld1.u8     {d7}, [r0], r1
-    vst1.u8     {d6}, [r2], r3
-    vst1.u8     {d7}, [r2], r3
-
-    mov     pc, lr
-
-    ENDP  ; |vp8_copy_mem8x8_neon|
-
-    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/copymem_neon.c
@@ -0,0 +1,59 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+void vp8_copy_mem8x4_neon(
+        unsigned char *src,
+        int src_stride,
+        unsigned char *dst,
+        int dst_stride) {
+    uint8x8_t vtmp;
+    int r;
+
+    for (r = 0; r < 4; r++) {
+        vtmp = vld1_u8(src);
+        vst1_u8(dst, vtmp);
+        src += src_stride;
+        dst += dst_stride;
+    }
+}
+
+void vp8_copy_mem8x8_neon(
+        unsigned char *src,
+        int src_stride,
+        unsigned char *dst,
+        int dst_stride) {
+    uint8x8_t vtmp;
+    int r;
+
+    for (r = 0; r < 8; r++) {
+        vtmp = vld1_u8(src);
+        vst1_u8(dst, vtmp);
+        src += src_stride;
+        dst += dst_stride;
+    }
+}
+
+void vp8_copy_mem16x16_neon(
+        unsigned char *src,
+        int src_stride,
+        unsigned char *dst,
+        int dst_stride) {
+    int r;
+    uint8x16_t qtmp;
+
+    for (r = 0; r < 16; r++) {
+        qtmp = vld1q_u8(src);
+        vst1q_u8(dst, qtmp);
+        src += src_stride;
+        dst += dst_stride;
+    }
+}
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.asm
+++ /dev/null
@@ -1,54 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license and patent
-;  grant that can be found in the LICENSE file in the root of the source
-;  tree. All contributing project authors may be found in the AUTHORS
-;  file in the root of the source tree.
-;
-
-
-    EXPORT  |vp8_dc_only_idct_add_neon|
-    ARM
-    REQUIRE8
-    PRESERVE8
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-
-;void vp8_dc_only_idct_add_c(short input_dc, unsigned char *pred_ptr,
-;                            int pred_stride, unsigned char *dst_ptr,
-;                            int dst_stride)
-
-; r0  input_dc
-; r1  pred_ptr
-; r2  pred_stride
-; r3  dst_ptr
-; sp  dst_stride
-
-|vp8_dc_only_idct_add_neon| PROC
-    add             r0, r0, #4
-    asr             r0, r0, #3
-    ldr             r12, [sp]
-    vdup.16         q0, r0
-
-    vld1.32         {d2[0]}, [r1], r2
-    vld1.32         {d2[1]}, [r1], r2
-    vld1.32         {d4[0]}, [r1], r2
-    vld1.32         {d4[1]}, [r1]
-
-    vaddw.u8        q1, q0, d2
-    vaddw.u8        q2, q0, d4
-
-    vqmovun.s16     d2, q1
-    vqmovun.s16     d4, q2
-
-    vst1.32         {d2[0]}, [r3], r12
-    vst1.32         {d2[1]}, [r3], r12
-    vst1.32         {d4[0]}, [r3], r12
-    vst1.32         {d4[1]}, [r3]
-
-    bx              lr
-
-    ENDP
-
-    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.c
@@ -0,0 +1,42 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+void vp8_dc_only_idct_add_neon(
+        int16_t input_dc,
+        unsigned char *pred_ptr,
+        int pred_stride,
+        unsigned char *dst_ptr,
+        int dst_stride) {
+    int i;
+    uint16_t a1 = ((input_dc + 4) >> 3);
+    uint32x2_t d2u32 = vdup_n_u32(0);
+    uint8x8_t d2u8;
+    uint16x8_t q1u16;
+    uint16x8_t qAdd;
+
+    qAdd = vdupq_n_u16(a1);
+
+    for (i = 0; i < 2; i++) {
+        d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 0);
+        pred_ptr += pred_stride;
+        d2u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d2u32, 1);
+        pred_ptr += pred_stride;
+
+        q1u16 = vaddw_u8(qAdd, vreinterpret_u8_u32(d2u32));
+        d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
+
+        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 0);
+        dst_ptr += dst_stride;
+        vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d2u8), 1);
+        dst_ptr += dst_stride;
+    }
+}
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/dequant_idct_neon.asm
+++ /dev/null
@@ -1,131 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-    EXPORT  |vp8_dequant_idct_add_neon|
-    ARM
-    REQUIRE8
-    PRESERVE8
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-;void vp8_dequant_idct_add_neon(short *input, short *dq,
-;                           unsigned char *dest, int stride)
-; r0    short *input,
-; r1    short *dq,
-; r2    unsigned char *dest
-; r3    int stride
-
-|vp8_dequant_idct_add_neon| PROC
-    vld1.16         {q3, q4}, [r0]
-    vld1.16         {q5, q6}, [r1]
-
-    add             r1, r2, r3              ; r1 = dest + stride
-    lsl             r3, #1                  ; 2x stride
-
-    vld1.32         {d14[0]}, [r2], r3
-    vld1.32         {d14[1]}, [r1], r3
-    vld1.32         {d15[0]}, [r2]
-    vld1.32         {d15[1]}, [r1]
-
-    adr             r12, cospi8sqrt2minus1  ; pointer to the first constant
-
-    vmul.i16        q1, q3, q5              ;input for short_idct4x4llm_neon
-    vmul.i16        q2, q4, q6
-
-;|short_idct4x4llm_neon| PROC
-    vld1.16         {d0}, [r12]
-    vswp            d3, d4                  ;q2(vp[4] vp[12])
-
-    vqdmulh.s16     q3, q2, d0[2]
-    vqdmulh.s16     q4, q2, d0[0]
-
-    vqadd.s16       d12, d2, d3             ;a1
-    vqsub.s16       d13, d2, d3             ;b1
-
-    vshr.s16        q3, q3, #1
-    vshr.s16        q4, q4, #1
-
-    vqadd.s16       q3, q3, q2
-    vqadd.s16       q4, q4, q2
-
-    vqsub.s16       d10, d6, d9             ;c1
-    vqadd.s16       d11, d7, d8             ;d1
-
-    vqadd.s16       d2, d12, d11
-    vqadd.s16       d3, d13, d10
-    vqsub.s16       d4, d13, d10
-    vqsub.s16       d5, d12, d11
-
-    vtrn.32         d2, d4
-    vtrn.32         d3, d5
-    vtrn.16         d2, d3
-    vtrn.16         d4, d5
-
-; memset(input, 0, 32) -- 32bytes
-    vmov.i16        q14, #0
-
-    vswp            d3, d4
-    vqdmulh.s16     q3, q2, d0[2]
-    vqdmulh.s16     q4, q2, d0[0]
-
-    vqadd.s16       d12, d2, d3             ;a1
-    vqsub.s16       d13, d2, d3             ;b1
-
-    vmov            q15, q14
-
-    vshr.s16        q3, q3, #1
-    vshr.s16        q4, q4, #1
-
-    vqadd.s16       q3, q3, q2
-    vqadd.s16       q4, q4, q2
-
-    vqsub.s16       d10, d6, d9             ;c1
-    vqadd.s16       d11, d7, d8             ;d1
-
-    vqadd.s16       d2, d12, d11
-    vqadd.s16       d3, d13, d10
-    vqsub.s16       d4, d13, d10
-    vqsub.s16       d5, d12, d11
-
-    vst1.16         {q14, q15}, [r0]
-
-    vrshr.s16       d2, d2, #3
-    vrshr.s16       d3, d3, #3
-    vrshr.s16       d4, d4, #3
-    vrshr.s16       d5, d5, #3
-
-    vtrn.32         d2, d4
-    vtrn.32         d3, d5
-    vtrn.16         d2, d3
-    vtrn.16         d4, d5
-
-    vaddw.u8        q1, q1, d14
-    vaddw.u8        q2, q2, d15
-
-    sub             r2, r2, r3
-    sub             r1, r1, r3
-
-    vqmovun.s16     d0, q1
-    vqmovun.s16     d1, q2
-
-    vst1.32         {d0[0]}, [r2], r3
-    vst1.32         {d0[1]}, [r1], r3
-    vst1.32         {d1[0]}, [r2]
-    vst1.32         {d1[1]}, [r1]
-
-    bx             lr
-
-    ENDP           ; |vp8_dequant_idct_add_neon|
-
-; Constant Pool
-cospi8sqrt2minus1 DCD 0x4e7b4e7b
-sinpi8sqrt2       DCD 0x8a8c8a8c
-
-    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/dequant_idct_neon.c
@@ -0,0 +1,142 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+static const int16_t cospi8sqrt2minus1 = 20091;
+static const int16_t sinpi8sqrt2       = 35468;
+
+void vp8_dequant_idct_add_neon(
+        int16_t *input,
+        int16_t *dq,
+        unsigned char *dst,
+        int stride) {
+    unsigned char *dst0;
+    int32x2_t d14, d15;
+    int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
+    int16x8_t q1, q2, q3, q4, q5, q6;
+    int16x8_t qEmpty = vdupq_n_s16(0);
+    int32x2x2_t d2tmp0, d2tmp1;
+    int16x4x2_t d2tmp2, d2tmp3;
+
+    d14 = d15 = vdup_n_s32(0);
+
+    // load input
+    q3 = vld1q_s16(input);
+    vst1q_s16(input, qEmpty);
+    input += 8;
+    q4 = vld1q_s16(input);
+    vst1q_s16(input, qEmpty);
+
+    // load dq
+    q5 = vld1q_s16(dq);
+    dq += 8;
+    q6 = vld1q_s16(dq);
+
+    // load src from dst
+    dst0 = dst;
+    d14 = vld1_lane_s32((const int32_t *)dst0, d14, 0);
+    dst0 += stride;
+    d14 = vld1_lane_s32((const int32_t *)dst0, d14, 1);
+    dst0 += stride;
+    d15 = vld1_lane_s32((const int32_t *)dst0, d15, 0);
+    dst0 += stride;
+    d15 = vld1_lane_s32((const int32_t *)dst0, d15, 1);
+
+    q1 = vreinterpretq_s16_u16(vmulq_u16(vreinterpretq_u16_s16(q3),
+                                         vreinterpretq_u16_s16(q5)));
+    q2 = vreinterpretq_s16_u16(vmulq_u16(vreinterpretq_u16_s16(q4),
+                                         vreinterpretq_u16_s16(q6)));
+
+    d12 = vqadd_s16(vget_low_s16(q1), vget_low_s16(q2));
+    d13 = vqsub_s16(vget_low_s16(q1), vget_low_s16(q2));
+
+    q2 = vcombine_s16(vget_high_s16(q1), vget_high_s16(q2));
+
+    q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);
+    q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);
+
+    q3 = vshrq_n_s16(q3, 1);
+    q4 = vshrq_n_s16(q4, 1);
+
+    q3 = vqaddq_s16(q3, q2);
+    q4 = vqaddq_s16(q4, q2);
+
+    d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
+    d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));
+
+    d2 = vqadd_s16(d12, d11);
+    d3 = vqadd_s16(d13, d10);
+    d4 = vqsub_s16(d13, d10);
+    d5 = vqsub_s16(d12, d11);
+
+    d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
+    d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
+    d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]),
+                      vreinterpret_s16_s32(d2tmp1.val[0]));
+    d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]),
+                      vreinterpret_s16_s32(d2tmp1.val[1]));
+
+    // loop 2
+    q2 = vcombine_s16(d2tmp2.val[1], d2tmp3.val[1]);
+
+    q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);
+    q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);
+
+    d12 = vqadd_s16(d2tmp2.val[0], d2tmp3.val[0]);
+    d13 = vqsub_s16(d2tmp2.val[0], d2tmp3.val[0]);
+
+    q3 = vshrq_n_s16(q3, 1);
+    q4 = vshrq_n_s16(q4, 1);
+
+    q3 = vqaddq_s16(q3, q2);
+    q4 = vqaddq_s16(q4, q2);
+
+    d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
+    d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));
+
+    d2 = vqadd_s16(d12, d11);
+    d3 = vqadd_s16(d13, d10);
+    d4 = vqsub_s16(d13, d10);
+    d5 = vqsub_s16(d12, d11);
+
+    d2 = vrshr_n_s16(d2, 3);
+    d3 = vrshr_n_s16(d3, 3);
+    d4 = vrshr_n_s16(d4, 3);
+    d5 = vrshr_n_s16(d5, 3);
+
+    d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
+    d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
+    d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]),
+                      vreinterpret_s16_s32(d2tmp1.val[0]));
+    d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]),
+                      vreinterpret_s16_s32(d2tmp1.val[1]));
+
+    q1 = vcombine_s16(d2tmp2.val[0], d2tmp2.val[1]);
+    q2 = vcombine_s16(d2tmp3.val[0], d2tmp3.val[1]);
+
+    q1 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q1),
+                                        vreinterpret_u8_s32(d14)));
+    q2 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2),
+                                        vreinterpret_u8_s32(d15)));
+
+    d14 = vreinterpret_s32_u8(vqmovun_s16(q1));
+    d15 = vreinterpret_s32_u8(vqmovun_s16(q2));
+
+    dst0 = dst;
+    vst1_lane_s32((int32_t *)dst0, d14, 0);
+    dst0 += stride;
+    vst1_lane_s32((int32_t *)dst0, d14, 1);
+    dst0 += stride;
+    vst1_lane_s32((int32_t *)dst0, d15, 0);
+    dst0 += stride;
+    vst1_lane_s32((int32_t *)dst0, d15, 1);
+    return;
+}
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/dequantizeb_neon.asm
+++ /dev/null
@@ -1,34 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-    EXPORT  |vp8_dequantize_b_loop_neon|
-    ARM
-    REQUIRE8
-    PRESERVE8
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-; r0    short *Q,
-; r1    short *DQC
-; r2    short *DQ
-|vp8_dequantize_b_loop_neon| PROC
-    vld1.16         {q0, q1}, [r0]
-    vld1.16         {q2, q3}, [r1]
-
-    vmul.i16        q4, q0, q2
-    vmul.i16        q5, q1, q3
-
-    vst1.16         {q4, q5}, [r2]
-
-    bx             lr
-
-    ENDP
-
-    END
rename from media/libvpx/vp9/common/generic/vp9_systemdependent.c
rename to media/libvpx/vp8/common/arm/neon/dequantizeb_neon.c
--- a/media/libvpx/vp9/common/generic/vp9_systemdependent.c
+++ b/media/libvpx/vp8/common/arm/neon/dequantizeb_neon.c
@@ -1,19 +1,25 @@
 /*
- *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include <arm_neon.h>
 
-#include "./vpx_config.h"
-#include "./vp9_rtcd.h"
-#include "vp9/common/vp9_onyxc_int.h"
+#include "vp8/common/blockd.h"
+
+void vp8_dequantize_b_neon(BLOCKD *d, short *DQC) {
+    int16x8x2_t qQ, qDQC, qDQ;
 
-void vp9_machine_specific_config(VP9_COMMON *cm) {
-  (void)cm;
-  vp9_rtcd();
+    qQ   = vld2q_s16(d->qcoeff);
+    qDQC = vld2q_s16(DQC);
+
+    qDQ.val[0] = vmulq_s16(qQ.val[0], qDQC.val[0]);
+    qDQ.val[1] = vmulq_s16(qQ.val[1], qDQC.val[1]);
+
+    vst2q_s16(d->dqcoeff, qDQ);
 }
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.asm
+++ /dev/null
@@ -1,79 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license and patent
-;  grant that can be found in the LICENSE file in the root of the source
-;  tree. All contributing project authors may be found in the AUTHORS
-;  file in the root of the source tree.
-;
-
-
-    EXPORT  |idct_dequant_0_2x_neon|
-    ARM
-    REQUIRE8
-    PRESERVE8
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-;void idct_dequant_0_2x_neon(short *q, short dq,
-;                            unsigned char *dst, int stride);
-; r0   *q
-; r1   dq
-; r2   *dst
-; r3   stride
-|idct_dequant_0_2x_neon| PROC
-    push            {r4, r5}
-
-    add             r12, r2, #4
-    vld1.32         {d2[0]}, [r2], r3
-    vld1.32         {d8[0]}, [r12], r3
-    vld1.32         {d2[1]}, [r2], r3
-    vld1.32         {d8[1]}, [r12], r3
-    vld1.32         {d4[0]}, [r2], r3
-    vld1.32         {d10[0]}, [r12], r3
-    vld1.32         {d4[1]}, [r2], r3
-    vld1.32         {d10[1]}, [r12], r3
-
-    ldrh            r12, [r0]               ; lo q
-    ldrh            r4, [r0, #32]           ; hi q
-    mov             r5, #0
-    strh            r5, [r0]
-    strh            r5, [r0, #32]
-
-    sxth            r12, r12                ; lo
-    mul             r0, r12, r1
-    add             r0, r0, #4
-    asr             r0, r0, #3
-    vdup.16         q0, r0
-    sxth            r4, r4                  ; hi
-    mul             r0, r4, r1
-    add             r0, r0, #4
-    asr             r0, r0, #3
-    vdup.16         q3, r0
-
-    vaddw.u8        q1, q0, d2              ; lo
-    vaddw.u8        q2, q0, d4
-    vaddw.u8        q4, q3, d8              ; hi
-    vaddw.u8        q5, q3, d10
-
-    sub             r2, r2, r3, lsl #2      ; dst - 4*stride
-    add             r0, r2, #4
-
-    vqmovun.s16     d2, q1                  ; lo
-    vqmovun.s16     d4, q2
-    vqmovun.s16     d8, q4                  ; hi
-    vqmovun.s16     d10, q5
-
-    vst1.32         {d2[0]}, [r2], r3       ; lo
-    vst1.32         {d8[0]}, [r0], r3       ; hi
-    vst1.32         {d2[1]}, [r2], r3
-    vst1.32         {d8[1]}, [r0], r3
-    vst1.32         {d4[0]}, [r2], r3
-    vst1.32         {d10[0]}, [r0], r3
-    vst1.32         {d4[1]}, [r2]
-    vst1.32         {d10[1]}, [r0]
-
-    pop             {r4, r5}
-    bx              lr
-
-    ENDP            ; |idct_dequant_0_2x_neon|
-    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/idct_dequant_0_2x_neon.c
@@ -0,0 +1,62 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+void idct_dequant_0_2x_neon(
+        int16_t *q,
+        int16_t dq,
+        unsigned char *dst,
+        int stride) {
+    unsigned char *dst0;
+    int i, a0, a1;
+    int16x8x2_t q2Add;
+    int32x2_t d2s32, d4s32;
+    uint8x8_t d2u8, d4u8;
+    uint16x8_t q1u16, q2u16;
+
+    a0 = ((q[0] * dq) + 4) >> 3;
+    a1 = ((q[16] * dq) + 4) >> 3;
+    q[0] = q[16] = 0;
+    q2Add.val[0] = vdupq_n_s16((int16_t)a0);
+    q2Add.val[1] = vdupq_n_s16((int16_t)a1);
+
+    for (i = 0; i < 2; i++, dst += 4) {
+        dst0 = dst;
+        d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 0);
+        dst0 += stride;
+        d2s32 = vld1_lane_s32((const int32_t *)dst0, d2s32, 1);
+        dst0 += stride;
+        d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 0);
+        dst0 += stride;
+        d4s32 = vld1_lane_s32((const int32_t *)dst0, d4s32, 1);
+
+        q1u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
+                         vreinterpret_u8_s32(d2s32));
+        q2u16 = vaddw_u8(vreinterpretq_u16_s16(q2Add.val[i]),
+                         vreinterpret_u8_s32(d4s32));
+
+        d2u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));
+        d4u8 = vqmovun_s16(vreinterpretq_s16_u16(q2u16));
+
+        d2s32 = vreinterpret_s32_u8(d2u8);
+        d4s32 = vreinterpret_s32_u8(d4u8);
+
+        dst0 = dst;
+        vst1_lane_s32((int32_t *)dst0, d2s32, 0);
+        dst0 += stride;
+        vst1_lane_s32((int32_t *)dst0, d2s32, 1);
+        dst0 += stride;
+        vst1_lane_s32((int32_t *)dst0, d4s32, 0);
+        dst0 += stride;
+        vst1_lane_s32((int32_t *)dst0, d4s32, 1);
+    }
+    return;
+}
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.asm
+++ /dev/null
@@ -1,196 +0,0 @@
-;
-;  Copyright (c) 2010 The Webm project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-    EXPORT  |idct_dequant_full_2x_neon|
-    ARM
-    REQUIRE8
-    PRESERVE8
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-;void idct_dequant_full_2x_neon(short *q, short *dq,
-;                               unsigned char *dst, int stride);
-; r0    *q,
-; r1    *dq,
-; r2    *dst
-; r3    stride
-|idct_dequant_full_2x_neon| PROC
-    vld1.16         {q0, q1}, [r1]          ; dq (same l/r)
-    vld1.16         {q2, q3}, [r0]          ; l q
-    add             r0, r0, #32
-    vld1.16         {q4, q5}, [r0]          ; r q
-    add             r12, r2, #4
-
-    ; interleave the predictors
-    vld1.32         {d28[0]}, [r2],  r3     ; l pre
-    vld1.32         {d28[1]}, [r12], r3     ; r pre
-    vld1.32         {d29[0]}, [r2],  r3
-    vld1.32         {d29[1]}, [r12], r3
-    vld1.32         {d30[0]}, [r2],  r3
-    vld1.32         {d30[1]}, [r12], r3
-    vld1.32         {d31[0]}, [r2],  r3
-    vld1.32         {d31[1]}, [r12]
-
-    adr             r1, cospi8sqrt2minus1   ; pointer to the first constant
-
-    ; dequant: q[i] = q[i] * dq[i]
-    vmul.i16        q2, q2, q0
-    vmul.i16        q3, q3, q1
-    vmul.i16        q4, q4, q0
-    vmul.i16        q5, q5, q1
-
-    vld1.16         {d0}, [r1]
-
-    ; q2: l0r0  q3: l8r8
-    ; q4: l4r4  q5: l12r12
-    vswp            d5, d8
-    vswp            d7, d10
-
-    ; _CONSTANTS_ * 4,12 >> 16
-    ; q6:  4 * sinpi : c1/temp1
-    ; q7: 12 * sinpi : d1/temp2
-    ; q8:  4 * cospi
-    ; q9: 12 * cospi
-    vqdmulh.s16     q6, q4, d0[2]           ; sinpi8sqrt2
-    vqdmulh.s16     q7, q5, d0[2]
-    vqdmulh.s16     q8, q4, d0[0]           ; cospi8sqrt2minus1
-    vqdmulh.s16     q9, q5, d0[0]
-
-    vqadd.s16       q10, q2, q3             ; a1 = 0 + 8
-    vqsub.s16       q11, q2, q3             ; b1 = 0 - 8
-
-    ; vqdmulh only accepts signed values. this was a problem because
-    ; our constant had the high bit set, and was treated as a negative value.
-    ; vqdmulh also doubles the value before it shifts by 16. we need to
-    ; compensate for this. in the case of sinpi8sqrt2, the lowest bit is 0,
-    ; so we can shift the constant without losing precision. this avoids
-    ; shift again afterward, but also avoids the sign issue. win win!
-    ; for cospi8sqrt2minus1 the lowest bit is 1, so we lose precision if we
-    ; pre-shift it
-    vshr.s16        q8, q8, #1
-    vshr.s16        q9, q9, #1
-
-    ; q4:  4 +  4 * cospi : d1/temp1
-    ; q5: 12 + 12 * cospi : c1/temp2
-    vqadd.s16       q4, q4, q8
-    vqadd.s16       q5, q5, q9
-
-    ; c1 = temp1 - temp2
-    ; d1 = temp1 + temp2
-    vqsub.s16       q2, q6, q5
-    vqadd.s16       q3, q4, q7
-
-    ; [0]: a1+d1
-    ; [1]: b1+c1
-    ; [2]: b1-c1
-    ; [3]: a1-d1
-    vqadd.s16       q4, q10, q3
-    vqadd.s16       q5, q11, q2
-    vqsub.s16       q6, q11, q2
-    vqsub.s16       q7, q10, q3
-
-    ; rotate
-    vtrn.32         q4, q6
-    vtrn.32         q5, q7
-    vtrn.16         q4, q5
-    vtrn.16         q6, q7
-    ; idct loop 2
-    ; q4: l 0, 4, 8,12 r 0, 4, 8,12
-    ; q5: l 1, 5, 9,13 r 1, 5, 9,13
-    ; q6: l 2, 6,10,14 r 2, 6,10,14
-    ; q7: l 3, 7,11,15 r 3, 7,11,15
-
-    ; q8:  1 * sinpi : c1/temp1
-    ; q9:  3 * sinpi : d1/temp2
-    ; q10: 1 * cospi
-    ; q11: 3 * cospi
-    vqdmulh.s16     q8, q5, d0[2]           ; sinpi8sqrt2
-    vqdmulh.s16     q9, q7, d0[2]
-    vqdmulh.s16     q10, q5, d0[0]          ; cospi8sqrt2minus1
-    vqdmulh.s16     q11, q7, d0[0]
-
-    vqadd.s16       q2, q4, q6             ; a1 = 0 + 2
-    vqsub.s16       q3, q4, q6             ; b1 = 0 - 2
-
-    ; see note on shifting above
-    vshr.s16        q10, q10, #1
-    vshr.s16        q11, q11, #1
-
-    ; q10: 1 + 1 * cospi : d1/temp1
-    ; q11: 3 + 3 * cospi : c1/temp2
-    vqadd.s16       q10, q5, q10
-    vqadd.s16       q11, q7, q11
-
-    ; q8: c1 = temp1 - temp2
-    ; q9: d1 = temp1 + temp2
-    vqsub.s16       q8, q8, q11
-    vqadd.s16       q9, q10, q9
-
-    ; a1+d1
-    ; b1+c1
-    ; b1-c1
-    ; a1-d1
-    vqadd.s16       q4, q2, q9
-    vqadd.s16       q5, q3, q8
-    vqsub.s16       q6, q3, q8
-    vqsub.s16       q7, q2, q9
-
-    ; +4 >> 3 (rounding)
-    vrshr.s16       q4, q4, #3              ; lo
-    vrshr.s16       q5, q5, #3
-    vrshr.s16       q6, q6, #3              ; hi
-    vrshr.s16       q7, q7, #3
-
-    vtrn.32         q4, q6
-    vtrn.32         q5, q7
-    vtrn.16         q4, q5
-    vtrn.16         q6, q7
-
-    ; adding pre
-    ; input is still packed. pre was read interleaved
-    vaddw.u8        q4, q4, d28
-    vaddw.u8        q5, q5, d29
-    vaddw.u8        q6, q6, d30
-    vaddw.u8        q7, q7, d31
-
-    vmov.i16        q14, #0
-    vmov            q15, q14
-    vst1.16         {q14, q15}, [r0]        ; write over high input
-    sub             r0, r0, #32
-    vst1.16         {q14, q15}, [r0]        ; write over low input
-
-    sub             r2, r2, r3, lsl #2      ; dst - 4*stride
-    add             r1, r2, #4              ; hi
-
-    ;saturate and narrow
-    vqmovun.s16     d0, q4                  ; lo
-    vqmovun.s16     d1, q5
-    vqmovun.s16     d2, q6                  ; hi
-    vqmovun.s16     d3, q7
-
-    vst1.32         {d0[0]}, [r2], r3       ; lo
-    vst1.32         {d0[1]}, [r1], r3       ; hi
-    vst1.32         {d1[0]}, [r2], r3
-    vst1.32         {d1[1]}, [r1], r3
-    vst1.32         {d2[0]}, [r2], r3
-    vst1.32         {d2[1]}, [r1], r3
-    vst1.32         {d3[0]}, [r2]
-    vst1.32         {d3[1]}, [r1]
-
-    bx             lr
-
-    ENDP           ; |idct_dequant_full_2x_neon|
-
-; Constant Pool
-cospi8sqrt2minus1 DCD 0x4e7b
-; because the lowest bit in 0x8a8c is 0, we can pre-shift this
-sinpi8sqrt2       DCD 0x4546
-
-    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/idct_dequant_full_2x_neon.c
@@ -0,0 +1,185 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+static const int16_t cospi8sqrt2minus1 = 20091;
+static const int16_t sinpi8sqrt2       = 17734;
+// because the lowest bit in 0x8a8c is 0, we can pre-shift this
+
+void idct_dequant_full_2x_neon(
+        int16_t *q,
+        int16_t *dq,
+        unsigned char *dst,
+        int stride) {
+    unsigned char *dst0, *dst1;
+    int32x2_t d28, d29, d30, d31;
+    int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;
+    int16x8_t qEmpty = vdupq_n_s16(0);
+    int32x4x2_t q2tmp0, q2tmp1;
+    int16x8x2_t q2tmp2, q2tmp3;
+    int16x4_t dLow0, dLow1, dHigh0, dHigh1;
+
+    d28 = d29 = d30 = d31 = vdup_n_s32(0);
+
+    // load dq
+    q0 = vld1q_s16(dq);
+    dq += 8;
+    q1 = vld1q_s16(dq);
+
+    // load q
+    q2 = vld1q_s16(q);
+    vst1q_s16(q, qEmpty);
+    q += 8;
+    q3 = vld1q_s16(q);
+    vst1q_s16(q, qEmpty);
+    q += 8;
+    q4 = vld1q_s16(q);
+    vst1q_s16(q, qEmpty);
+    q += 8;
+    q5 = vld1q_s16(q);
+    vst1q_s16(q, qEmpty);
+
+    // load src from dst
+    dst0 = dst;
+    dst1 = dst + 4;
+    d28 = vld1_lane_s32((const int32_t *)dst0, d28, 0);
+    dst0 += stride;
+    d28 = vld1_lane_s32((const int32_t *)dst1, d28, 1);
+    dst1 += stride;
+    d29 = vld1_lane_s32((const int32_t *)dst0, d29, 0);
+    dst0 += stride;
+    d29 = vld1_lane_s32((const int32_t *)dst1, d29, 1);
+    dst1 += stride;
+
+    d30 = vld1_lane_s32((const int32_t *)dst0, d30, 0);
+    dst0 += stride;
+    d30 = vld1_lane_s32((const int32_t *)dst1, d30, 1);
+    dst1 += stride;
+    d31 = vld1_lane_s32((const int32_t *)dst0, d31, 0);
+    d31 = vld1_lane_s32((const int32_t *)dst1, d31, 1);
+
+    q2 = vmulq_s16(q2, q0);
+    q3 = vmulq_s16(q3, q1);
+    q4 = vmulq_s16(q4, q0);
+    q5 = vmulq_s16(q5, q1);
+
+    // vswp
+    dLow0 = vget_low_s16(q2);
+    dHigh0 = vget_high_s16(q2);
+    dLow1 = vget_low_s16(q4);
+    dHigh1 = vget_high_s16(q4);
+    q2 = vcombine_s16(dLow0, dLow1);
+    q4 = vcombine_s16(dHigh0, dHigh1);
+
+    dLow0 = vget_low_s16(q3);
+    dHigh0 = vget_high_s16(q3);
+    dLow1 = vget_low_s16(q5);
+    dHigh1 = vget_high_s16(q5);
+    q3 = vcombine_s16(dLow0, dLow1);
+    q5 = vcombine_s16(dHigh0, dHigh1);
+
+    q6 = vqdmulhq_n_s16(q4, sinpi8sqrt2);
+    q7 = vqdmulhq_n_s16(q5, sinpi8sqrt2);
+    q8 = vqdmulhq_n_s16(q4, cospi8sqrt2minus1);
+    q9 = vqdmulhq_n_s16(q5, cospi8sqrt2minus1);
+
+    q10 = vqaddq_s16(q2, q3);
+    q11 = vqsubq_s16(q2, q3);
+
+    q8 = vshrq_n_s16(q8, 1);
+    q9 = vshrq_n_s16(q9, 1);
+
+    q4 = vqaddq_s16(q4, q8);
+    q5 = vqaddq_s16(q5, q9);
+
+    q2 = vqsubq_s16(q6, q5);
+    q3 = vqaddq_s16(q7, q4);
+
+    q4 = vqaddq_s16(q10, q3);
+    q5 = vqaddq_s16(q11, q2);
+    q6 = vqsubq_s16(q11, q2);
+    q7 = vqsubq_s16(q10, q3);
+
+    q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
+    q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
+    q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
+                       vreinterpretq_s16_s32(q2tmp1.val[0]));
+    q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
+                       vreinterpretq_s16_s32(q2tmp1.val[1]));
+
+    // loop 2
+    q8  = vqdmulhq_n_s16(q2tmp2.val[1], sinpi8sqrt2);
+    q9  = vqdmulhq_n_s16(q2tmp3.val[1], sinpi8sqrt2);
+    q10 = vqdmulhq_n_s16(q2tmp2.val[1], cospi8sqrt2minus1);
+    q11 = vqdmulhq_n_s16(q2tmp3.val[1], cospi8sqrt2minus1);
+
+    q2 = vqaddq_s16(q2tmp2.val[0], q2tmp3.val[0]);
+    q3 = vqsubq_s16(q2tmp2.val[0], q2tmp3.val[0]);
+
+    q10 = vshrq_n_s16(q10, 1);
+    q11 = vshrq_n_s16(q11, 1);
+
+    q10 = vqaddq_s16(q2tmp2.val[1], q10);
+    q11 = vqaddq_s16(q2tmp3.val[1], q11);
+
+    q8 = vqsubq_s16(q8, q11);
+    q9 = vqaddq_s16(q9, q10);
+
+    q4 = vqaddq_s16(q2, q9);
+    q5 = vqaddq_s16(q3, q8);
+    q6 = vqsubq_s16(q3, q8);
+    q7 = vqsubq_s16(q2, q9);
+
+    q4 = vrshrq_n_s16(q4, 3);
+    q5 = vrshrq_n_s16(q5, 3);
+    q6 = vrshrq_n_s16(q6, 3);
+    q7 = vrshrq_n_s16(q7, 3);
+
+    q2tmp0 = vtrnq_s32(vreinterpretq_s32_s16(q4), vreinterpretq_s32_s16(q6));
+    q2tmp1 = vtrnq_s32(vreinterpretq_s32_s16(q5), vreinterpretq_s32_s16(q7));
+    q2tmp2 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[0]),
+                       vreinterpretq_s16_s32(q2tmp1.val[0]));
+    q2tmp3 = vtrnq_s16(vreinterpretq_s16_s32(q2tmp0.val[1]),
+                       vreinterpretq_s16_s32(q2tmp1.val[1]));
+
+    q4 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[0]),
+                                          vreinterpret_u8_s32(d28)));
+    q5 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp2.val[1]),
+                                          vreinterpret_u8_s32(d29)));
+    q6 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[0]),
+                                          vreinterpret_u8_s32(d30)));
+    q7 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2tmp3.val[1]),
+                                          vreinterpret_u8_s32(d31)));
+
+    d28 = vreinterpret_s32_u8(vqmovun_s16(q4));
+    d29 = vreinterpret_s32_u8(vqmovun_s16(q5));
+    d30 = vreinterpret_s32_u8(vqmovun_s16(q6));
+    d31 = vreinterpret_s32_u8(vqmovun_s16(q7));
+
+    dst0 = dst;
+    dst1 = dst + 4;
+    vst1_lane_s32((int32_t *)dst0, d28, 0);
+    dst0 += stride;
+    vst1_lane_s32((int32_t *)dst1, d28, 1);
+    dst1 += stride;
+    vst1_lane_s32((int32_t *)dst0, d29, 0);
+    dst0 += stride;
+    vst1_lane_s32((int32_t *)dst1, d29, 1);
+    dst1 += stride;
+
+    vst1_lane_s32((int32_t *)dst0, d30, 0);
+    dst0 += stride;
+    vst1_lane_s32((int32_t *)dst1, d30, 1);
+    dst1 += stride;
+    vst1_lane_s32((int32_t *)dst0, d31, 0);
+    vst1_lane_s32((int32_t *)dst1, d31, 1);
+    return;
+}
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/iwalsh_neon.asm
+++ /dev/null
@@ -1,87 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-    EXPORT  |vp8_short_inv_walsh4x4_neon|
-
-    ARM
-    REQUIRE8
-    PRESERVE8
-
-    AREA    |.text|, CODE, READONLY  ; name this block of code
-
-;short vp8_short_inv_walsh4x4_neon(short *input, short *mb_dqcoeff)
-|vp8_short_inv_walsh4x4_neon| PROC
-
-    ; read in all four lines of values: d0->d3
-    vld1.i16 {q0-q1}, [r0@128]
-
-    ; first for loop
-    vadd.s16 d4, d0, d3 ;a = [0] + [12]
-    vadd.s16 d6, d1, d2 ;b = [4] + [8]
-    vsub.s16 d5, d0, d3 ;d = [0] - [12]
-    vsub.s16 d7, d1, d2 ;c = [4] - [8]
-
-    vadd.s16 q0, q2, q3 ; a+b d+c
-    vsub.s16 q1, q2, q3 ; a-b d-c
-
-    vtrn.32 d0, d2 ;d0:  0  1  8  9
-                   ;d2:  2  3 10 11
-    vtrn.32 d1, d3 ;d1:  4  5 12 13
-                   ;d3:  6  7 14 15
-
-    vtrn.16 d0, d1 ;d0:  0  4  8 12
-                   ;d1:  1  5  9 13
-    vtrn.16 d2, d3 ;d2:  2  6 10 14
-                   ;d3:  3  7 11 15
-
-    ; second for loop
-
-    vadd.s16 d4, d0, d3 ;a = [0] + [3]
-    vadd.s16 d6, d1, d2 ;b = [1] + [2]
-    vsub.s16 d5, d0, d3 ;d = [0] - [3]
-    vsub.s16 d7, d1, d2 ;c = [1] - [2]
-
-    vmov.i16 q8, #3
-
-    vadd.s16 q0, q2, q3 ; a+b d+c
-    vsub.s16 q1, q2, q3 ; a-b d-c
-
-    vadd.i16 q0, q0, q8 ;e/f += 3
-    vadd.i16 q1, q1, q8 ;g/h += 3
-
-    vshr.s16 q0, q0, #3 ;e/f >> 3
-    vshr.s16 q1, q1, #3 ;g/h >> 3
-
-    mov      r2, #64
-    add      r3, r1, #32
-
-    vst1.i16 d0[0], [r1],r2
-    vst1.i16 d1[0], [r3],r2
-    vst1.i16 d2[0], [r1],r2
-    vst1.i16 d3[0], [r3],r2
-
-    vst1.i16 d0[1], [r1],r2
-    vst1.i16 d1[1], [r3],r2
-    vst1.i16 d2[1], [r1],r2
-    vst1.i16 d3[1], [r3],r2
-
-    vst1.i16 d0[2], [r1],r2
-    vst1.i16 d1[2], [r3],r2
-    vst1.i16 d2[2], [r1],r2
-    vst1.i16 d3[2], [r3],r2
-
-    vst1.i16 d0[3], [r1],r2
-    vst1.i16 d1[3], [r3],r2
-    vst1.i16 d2[3], [r1]
-    vst1.i16 d3[3], [r3]
-
-    bx lr
-    ENDP    ; |vp8_short_inv_walsh4x4_neon|
-
-    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/iwalsh_neon.c
@@ -0,0 +1,102 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+
+void vp8_short_inv_walsh4x4_neon(
+        int16_t *input,
+        int16_t *mb_dqcoeff) {
+    int16x8_t q0s16, q1s16, q2s16, q3s16;
+    int16x4_t d4s16, d5s16, d6s16, d7s16;
+    int16x4x2_t v2tmp0, v2tmp1;
+    int32x2x2_t v2tmp2, v2tmp3;
+    int16x8_t qAdd3;
+
+    q0s16 = vld1q_s16(input);
+    q1s16 = vld1q_s16(input + 8);
+
+    // 1st for loop
+    d4s16 = vadd_s16(vget_low_s16(q0s16), vget_high_s16(q1s16));
+    d6s16 = vadd_s16(vget_high_s16(q0s16), vget_low_s16(q1s16));
+    d5s16 = vsub_s16(vget_low_s16(q0s16), vget_high_s16(q1s16));
+    d7s16 = vsub_s16(vget_high_s16(q0s16), vget_low_s16(q1s16));
+
+    q2s16 = vcombine_s16(d4s16, d5s16);
+    q3s16 = vcombine_s16(d6s16, d7s16);
+
+    q0s16 = vaddq_s16(q2s16, q3s16);
+    q1s16 = vsubq_s16(q2s16, q3s16);
+
+    v2tmp2 = vtrn_s32(vreinterpret_s32_s16(vget_low_s16(q0s16)),
+                      vreinterpret_s32_s16(vget_low_s16(q1s16)));
+    v2tmp3 = vtrn_s32(vreinterpret_s32_s16(vget_high_s16(q0s16)),
+                      vreinterpret_s32_s16(vget_high_s16(q1s16)));
+    v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]),
+                      vreinterpret_s16_s32(v2tmp3.val[0]));
+    v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]),
+                      vreinterpret_s16_s32(v2tmp3.val[1]));
+
+    // 2nd for loop
+    d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[1]);
+    d6s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[0]);
+    d5s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[1]);
+    d7s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[0]);
+    q2s16 = vcombine_s16(d4s16, d5s16);
+    q3s16 = vcombine_s16(d6s16, d7s16);
+
+    qAdd3 = vdupq_n_s16(3);
+
+    q0s16 = vaddq_s16(q2s16, q3s16);
+    q1s16 = vsubq_s16(q2s16, q3s16);
+
+    q0s16 = vaddq_s16(q0s16, qAdd3);
+    q1s16 = vaddq_s16(q1s16, qAdd3);
+
+    q0s16 = vshrq_n_s16(q0s16, 3);
+    q1s16 = vshrq_n_s16(q1s16, 3);
+
+    // store
+    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16),  0);
+    mb_dqcoeff += 16;
+    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 0);
+    mb_dqcoeff += 16;
+    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16),  0);
+    mb_dqcoeff += 16;
+    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 0);
+    mb_dqcoeff += 16;
+
+    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16),  1);
+    mb_dqcoeff += 16;
+    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 1);
+    mb_dqcoeff += 16;
+    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16),  1);
+    mb_dqcoeff += 16;
+    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 1);
+    mb_dqcoeff += 16;
+
+    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16),  2);
+    mb_dqcoeff += 16;
+    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 2);
+    mb_dqcoeff += 16;
+    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16),  2);
+    mb_dqcoeff += 16;
+    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 2);
+    mb_dqcoeff += 16;
+
+    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q0s16),  3);
+    mb_dqcoeff += 16;
+    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q0s16), 3);
+    mb_dqcoeff += 16;
+    vst1_lane_s16(mb_dqcoeff, vget_low_s16(q1s16),  3);
+    mb_dqcoeff += 16;
+    vst1_lane_s16(mb_dqcoeff, vget_high_s16(q1s16), 3);
+    mb_dqcoeff += 16;
+    return;
+}
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/loopfilter_neon.asm
+++ /dev/null
@@ -1,397 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-    EXPORT  |vp8_loop_filter_horizontal_edge_y_neon|
-    EXPORT  |vp8_loop_filter_horizontal_edge_uv_neon|
-    EXPORT  |vp8_loop_filter_vertical_edge_y_neon|
-    EXPORT  |vp8_loop_filter_vertical_edge_uv_neon|
-    ARM
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-
-; r0    unsigned char *src
-; r1    int pitch
-; r2    unsigned char blimit
-; r3    unsigned char limit
-; sp    unsigned char thresh,
-|vp8_loop_filter_horizontal_edge_y_neon| PROC
-    push        {lr}
-    vdup.u8     q0, r2                     ; duplicate blimit
-    vdup.u8     q1, r3                     ; duplicate limit
-    sub         r2, r0, r1, lsl #2         ; move src pointer down by 4 lines
-    ldr         r3, [sp, #4]               ; load thresh
-    add         r12, r2, r1
-    add         r1, r1, r1
-
-    vdup.u8     q2, r3                     ; duplicate thresh
-
-    vld1.u8     {q3}, [r2@128], r1              ; p3
-    vld1.u8     {q4}, [r12@128], r1             ; p2
-    vld1.u8     {q5}, [r2@128], r1              ; p1
-    vld1.u8     {q6}, [r12@128], r1             ; p0
-    vld1.u8     {q7}, [r2@128], r1              ; q0
-    vld1.u8     {q8}, [r12@128], r1             ; q1
-    vld1.u8     {q9}, [r2@128]                  ; q2
-    vld1.u8     {q10}, [r12@128]                ; q3
-
-    sub         r2, r2, r1, lsl #1
-    sub         r12, r12, r1, lsl #1
-
-    bl          vp8_loop_filter_neon
-
-    vst1.u8     {q5}, [r2@128], r1              ; store op1
-    vst1.u8     {q6}, [r12@128], r1             ; store op0
-    vst1.u8     {q7}, [r2@128], r1              ; store oq0
-    vst1.u8     {q8}, [r12@128], r1             ; store oq1
-
-    pop         {pc}
-    ENDP        ; |vp8_loop_filter_horizontal_edge_y_neon|
-
-
-; r0    unsigned char *u,
-; r1    int pitch,
-; r2    unsigned char blimit
-; r3    unsigned char limit
-; sp    unsigned char thresh,
-; sp+4  unsigned char *v
-|vp8_loop_filter_horizontal_edge_uv_neon| PROC
-    push        {lr}
-    vdup.u8     q0, r2                      ; duplicate blimit
-    vdup.u8     q1, r3                      ; duplicate limit
-    ldr         r12, [sp, #4]               ; load thresh
-    ldr         r2, [sp, #8]                ; load v ptr
-    vdup.u8     q2, r12                     ; duplicate thresh
-
-    sub         r3, r0, r1, lsl #2          ; move u pointer down by 4 lines
-    sub         r12, r2, r1, lsl #2         ; move v pointer down by 4 lines
-
-    vld1.u8     {d6}, [r3@64], r1              ; p3
-    vld1.u8     {d7}, [r12@64], r1             ; p3
-    vld1.u8     {d8}, [r3@64], r1              ; p2
-    vld1.u8     {d9}, [r12@64], r1             ; p2
-    vld1.u8     {d10}, [r3@64], r1             ; p1
-    vld1.u8     {d11}, [r12@64], r1            ; p1
-    vld1.u8     {d12}, [r3@64], r1             ; p0
-    vld1.u8     {d13}, [r12@64], r1            ; p0
-    vld1.u8     {d14}, [r3@64], r1             ; q0
-    vld1.u8     {d15}, [r12@64], r1            ; q0
-    vld1.u8     {d16}, [r3@64], r1             ; q1
-    vld1.u8     {d17}, [r12@64], r1            ; q1
-    vld1.u8     {d18}, [r3@64], r1             ; q2
-    vld1.u8     {d19}, [r12@64], r1            ; q2
-    vld1.u8     {d20}, [r3@64]                 ; q3
-    vld1.u8     {d21}, [r12@64]                ; q3
-
-    bl          vp8_loop_filter_neon
-
-    sub         r0, r0, r1, lsl #1
-    sub         r2, r2, r1, lsl #1
-
-    vst1.u8     {d10}, [r0@64], r1             ; store u op1
-    vst1.u8     {d11}, [r2@64], r1             ; store v op1
-    vst1.u8     {d12}, [r0@64], r1             ; store u op0
-    vst1.u8     {d13}, [r2@64], r1             ; store v op0
-    vst1.u8     {d14}, [r0@64], r1             ; store u oq0
-    vst1.u8     {d15}, [r2@64], r1             ; store v oq0
-    vst1.u8     {d16}, [r0@64]                 ; store u oq1
-    vst1.u8     {d17}, [r2@64]                 ; store v oq1
-
-    pop         {pc}
-    ENDP        ; |vp8_loop_filter_horizontal_edge_uv_neon|
-
-; void vp8_loop_filter_vertical_edge_y_neon(unsigned char *src, int pitch,
-;                                           const signed char *flimit,
-;                                           const signed char *limit,
-;                                           const signed char *thresh,
-;                                           int count)
-; r0    unsigned char *src
-; r1    int pitch
-; r2    unsigned char blimit
-; r3    unsigned char limit
-; sp    unsigned char thresh,
-
-|vp8_loop_filter_vertical_edge_y_neon| PROC
-    push        {lr}
-    vdup.u8     q0, r2                     ; duplicate blimit
-    vdup.u8     q1, r3                     ; duplicate limit
-    sub         r2, r0, #4                 ; src ptr down by 4 columns
-    add         r1, r1, r1
-    ldr         r3, [sp, #4]               ; load thresh
-    add         r12, r2, r1, asr #1
-
-    vld1.u8     {d6}, [r2], r1
-    vld1.u8     {d8}, [r12], r1
-    vld1.u8     {d10}, [r2], r1
-    vld1.u8     {d12}, [r12], r1
-    vld1.u8     {d14}, [r2], r1
-    vld1.u8     {d16}, [r12], r1
-    vld1.u8     {d18}, [r2], r1
-    vld1.u8     {d20}, [r12], r1
-
-    vld1.u8     {d7}, [r2], r1              ; load second 8-line src data
-    vld1.u8     {d9}, [r12], r1
-    vld1.u8     {d11}, [r2], r1
-    vld1.u8     {d13}, [r12], r1
-    vld1.u8     {d15}, [r2], r1
-    vld1.u8     {d17}, [r12], r1
-    vld1.u8     {d19}, [r2]
-    vld1.u8     {d21}, [r12]
-
-    ;transpose to 8x16 matrix
-    vtrn.32     q3, q7
-    vtrn.32     q4, q8
-    vtrn.32     q5, q9
-    vtrn.32     q6, q10
-
-    vdup.u8     q2, r3                     ; duplicate thresh
-
-    vtrn.16     q3, q5
-    vtrn.16     q4, q6
-    vtrn.16     q7, q9
-    vtrn.16     q8, q10
-
-    vtrn.8      q3, q4
-    vtrn.8      q5, q6
-    vtrn.8      q7, q8
-    vtrn.8      q9, q10
-
-    bl          vp8_loop_filter_neon
-
-    vswp        d12, d11
-    vswp        d16, d13
-
-    sub         r0, r0, #2                 ; dst ptr
-
-    vswp        d14, d12
-    vswp        d16, d15
-
-    add         r12, r0, r1, asr #1
-
-    ;store op1, op0, oq0, oq1
-    vst4.8      {d10[0], d11[0], d12[0], d13[0]}, [r0], r1
-    vst4.8      {d10[1], d11[1], d12[1], d13[1]}, [r12], r1
-    vst4.8      {d10[2], d11[2], d12[2], d13[2]}, [r0], r1
-    vst4.8      {d10[3], d11[3], d12[3], d13[3]}, [r12], r1
-    vst4.8      {d10[4], d11[4], d12[4], d13[4]}, [r0], r1
-    vst4.8      {d10[5], d11[5], d12[5], d13[5]}, [r12], r1
-    vst4.8      {d10[6], d11[6], d12[6], d13[6]}, [r0], r1
-    vst4.8      {d10[7], d11[7], d12[7], d13[7]}, [r12], r1
-
-    vst4.8      {d14[0], d15[0], d16[0], d17[0]}, [r0], r1
-    vst4.8      {d14[1], d15[1], d16[1], d17[1]}, [r12], r1
-    vst4.8      {d14[2], d15[2], d16[2], d17[2]}, [r0], r1
-    vst4.8      {d14[3], d15[3], d16[3], d17[3]}, [r12], r1
-    vst4.8      {d14[4], d15[4], d16[4], d17[4]}, [r0], r1
-    vst4.8      {d14[5], d15[5], d16[5], d17[5]}, [r12], r1
-    vst4.8      {d14[6], d15[6], d16[6], d17[6]}, [r0]
-    vst4.8      {d14[7], d15[7], d16[7], d17[7]}, [r12]
-
-    pop         {pc}
-    ENDP        ; |vp8_loop_filter_vertical_edge_y_neon|
-
-; void vp8_loop_filter_vertical_edge_uv_neon(unsigned char *u, int pitch
-;                                            const signed char *flimit,
-;                                            const signed char *limit,
-;                                            const signed char *thresh,
-;                                            unsigned char *v)
-; r0    unsigned char *u,
-; r1    int pitch,
-; r2    unsigned char blimit
-; r3    unsigned char limit
-; sp    unsigned char thresh,
-; sp+4  unsigned char *v
-|vp8_loop_filter_vertical_edge_uv_neon| PROC
-    push        {lr}
-    vdup.u8     q0, r2                      ; duplicate blimit
-    sub         r12, r0, #4                 ; move u pointer down by 4 columns
-    ldr         r2, [sp, #8]                ; load v ptr
-    vdup.u8     q1, r3                      ; duplicate limit
-    sub         r3, r2, #4                  ; move v pointer down by 4 columns
-
-    vld1.u8     {d6}, [r12], r1             ;load u data
-    vld1.u8     {d7}, [r3], r1              ;load v data
-    vld1.u8     {d8}, [r12], r1
-    vld1.u8     {d9}, [r3], r1
-    vld1.u8     {d10}, [r12], r1
-    vld1.u8     {d11}, [r3], r1
-    vld1.u8     {d12}, [r12], r1
-    vld1.u8     {d13}, [r3], r1
-    vld1.u8     {d14}, [r12], r1
-    vld1.u8     {d15}, [r3], r1
-    vld1.u8     {d16}, [r12], r1
-    vld1.u8     {d17}, [r3], r1
-    vld1.u8     {d18}, [r12], r1
-    vld1.u8     {d19}, [r3], r1
-    vld1.u8     {d20}, [r12]
-    vld1.u8     {d21}, [r3]
-
-    ldr        r12, [sp, #4]               ; load thresh
-
-    ;transpose to 8x16 matrix
-    vtrn.32     q3, q7
-    vtrn.32     q4, q8
-    vtrn.32     q5, q9
-    vtrn.32     q6, q10
-
-    vdup.u8     q2, r12                     ; duplicate thresh
-
-    vtrn.16     q3, q5
-    vtrn.16     q4, q6
-    vtrn.16     q7, q9
-    vtrn.16     q8, q10
-
-    vtrn.8      q3, q4
-    vtrn.8      q5, q6
-    vtrn.8      q7, q8
-    vtrn.8      q9, q10
-
-    bl          vp8_loop_filter_neon
-
-    vswp        d12, d11
-    vswp        d16, d13
-    vswp        d14, d12
-    vswp        d16, d15
-
-    sub         r0, r0, #2
-    sub         r2, r2, #2
-
-    ;store op1, op0, oq0, oq1
-    vst4.8      {d10[0], d11[0], d12[0], d13[0]}, [r0], r1
-    vst4.8      {d14[0], d15[0], d16[0], d17[0]}, [r2], r1
-    vst4.8      {d10[1], d11[1], d12[1], d13[1]}, [r0], r1
-    vst4.8      {d14[1], d15[1], d16[1], d17[1]}, [r2], r1
-    vst4.8      {d10[2], d11[2], d12[2], d13[2]}, [r0], r1
-    vst4.8      {d14[2], d15[2], d16[2], d17[2]}, [r2], r1
-    vst4.8      {d10[3], d11[3], d12[3], d13[3]}, [r0], r1
-    vst4.8      {d14[3], d15[3], d16[3], d17[3]}, [r2], r1
-    vst4.8      {d10[4], d11[4], d12[4], d13[4]}, [r0], r1
-    vst4.8      {d14[4], d15[4], d16[4], d17[4]}, [r2], r1
-    vst4.8      {d10[5], d11[5], d12[5], d13[5]}, [r0], r1
-    vst4.8      {d14[5], d15[5], d16[5], d17[5]}, [r2], r1
-    vst4.8      {d10[6], d11[6], d12[6], d13[6]}, [r0], r1
-    vst4.8      {d14[6], d15[6], d16[6], d17[6]}, [r2], r1
-    vst4.8      {d10[7], d11[7], d12[7], d13[7]}, [r0]
-    vst4.8      {d14[7], d15[7], d16[7], d17[7]}, [r2]
-
-    pop         {pc}
-    ENDP        ; |vp8_loop_filter_vertical_edge_uv_neon|
-
-; void vp8_loop_filter_neon();
-; This is a helper function for the loopfilters. The invidual functions do the
-; necessary load, transpose (if necessary) and store.
-
-; r0-r3 PRESERVE
-; q0    flimit
-; q1    limit
-; q2    thresh
-; q3    p3
-; q4    p2
-; q5    p1
-; q6    p0
-; q7    q0
-; q8    q1
-; q9    q2
-; q10   q3
-|vp8_loop_filter_neon| PROC
-
-    ; vp8_filter_mask
-    vabd.u8     q11, q3, q4                 ; abs(p3 - p2)
-    vabd.u8     q12, q4, q5                 ; abs(p2 - p1)
-    vabd.u8     q13, q5, q6                 ; abs(p1 - p0)
-    vabd.u8     q14, q8, q7                 ; abs(q1 - q0)
-    vabd.u8     q3, q9, q8                  ; abs(q2 - q1)
-    vabd.u8     q4, q10, q9                 ; abs(q3 - q2)
-
-    vmax.u8     q11, q11, q12
-    vmax.u8     q12, q13, q14
-    vmax.u8     q3, q3, q4
-    vmax.u8     q15, q11, q12
-
-    vabd.u8     q9, q6, q7                  ; abs(p0 - q0)
-
-    ; vp8_hevmask
-    vcgt.u8     q13, q13, q2                ; (abs(p1 - p0) > thresh)*-1
-    vcgt.u8     q14, q14, q2                ; (abs(q1 - q0) > thresh)*-1
-    vmax.u8     q15, q15, q3
-
-    vmov.u8     q10, #0x80                   ; 0x80
-
-    vabd.u8     q2, q5, q8                  ; a = abs(p1 - q1)
-    vqadd.u8    q9, q9, q9                  ; b = abs(p0 - q0) * 2
-
-    vcge.u8     q15, q1, q15
-
-    ; vp8_filter() function
-    ; convert to signed
-    veor        q7, q7, q10                 ; qs0
-    vshr.u8     q2, q2, #1                  ; a = a / 2
-    veor        q6, q6, q10                 ; ps0
-
-    veor        q5, q5, q10                 ; ps1
-    vqadd.u8    q9, q9, q2                  ; a = b + a
-
-    veor        q8, q8, q10                 ; qs1
-
-    vmov.u8     q10, #3                     ; #3
-
-    vsubl.s8    q2, d14, d12                ; ( qs0 - ps0)
-    vsubl.s8    q11, d15, d13
-
-    vcge.u8     q9, q0, q9                  ; (a > flimit * 2 + limit) * -1
-
-    vmovl.u8    q4, d20
-
-    vqsub.s8    q1, q5, q8                  ; vp8_filter = clamp(ps1-qs1)
-    vorr        q14, q13, q14               ; vp8_hevmask
-
-    vmul.i16    q2, q2, q4                  ; 3 * ( qs0 - ps0)
-    vmul.i16    q11, q11, q4
-
-    vand        q1, q1, q14                 ; vp8_filter &= hev
-    vand        q15, q15, q9                ; vp8_filter_mask
-
-    vaddw.s8    q2, q2, d2
-    vaddw.s8    q11, q11, d3
-
-    vmov.u8     q9, #4                      ; #4
-
-    ; vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
-    vqmovn.s16  d2, q2
-    vqmovn.s16  d3, q11
-    vand        q1, q1, q15                 ; vp8_filter &= mask
-
-    vqadd.s8    q2, q1, q10                 ; Filter2 = clamp(vp8_filter+3)
-    vqadd.s8    q1, q1, q9                  ; Filter1 = clamp(vp8_filter+4)
-    vshr.s8     q2, q2, #3                  ; Filter2 >>= 3
-    vshr.s8     q1, q1, #3                  ; Filter1 >>= 3
-
-
-    vqadd.s8    q11, q6, q2                 ; u = clamp(ps0 + Filter2)
-    vqsub.s8    q10, q7, q1                 ; u = clamp(qs0 - Filter1)
-
-    ; outer tap adjustments: ++vp8_filter >> 1
-    vrshr.s8    q1, q1, #1
-    vbic        q1, q1, q14                 ; vp8_filter &= ~hev
-    vmov.u8     q0, #0x80                   ; 0x80
-    vqadd.s8    q13, q5, q1                 ; u = clamp(ps1 + vp8_filter)
-    vqsub.s8    q12, q8, q1                 ; u = clamp(qs1 - vp8_filter)
-
-    veor        q6, q11, q0                 ; *op0 = u^0x80
-    veor        q7, q10, q0                 ; *oq0 = u^0x80
-    veor        q5, q13, q0                 ; *op1 = u^0x80
-    veor        q8, q12, q0                 ; *oq1 = u^0x80
-
-    bx          lr
-    ENDP        ; |vp8_loop_filter_horizontal_edge_y_neon|
-
-;-----------------
-
-    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/loopfilter_neon.c
@@ -0,0 +1,550 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include "./vpx_config.h"
+#include "vpx_ports/arm.h"
+
+static INLINE void vp8_loop_filter_neon(
+        uint8x16_t qblimit,  // flimit
+        uint8x16_t qlimit,   // limit
+        uint8x16_t qthresh,  // thresh
+        uint8x16_t q3,       // p3
+        uint8x16_t q4,       // p2
+        uint8x16_t q5,       // p1
+        uint8x16_t q6,       // p0
+        uint8x16_t q7,       // q0
+        uint8x16_t q8,       // q1
+        uint8x16_t q9,       // q2
+        uint8x16_t q10,      // q3
+        uint8x16_t *q5r,     // p1
+        uint8x16_t *q6r,     // p0
+        uint8x16_t *q7r,     // q0
+        uint8x16_t *q8r) {   // q1
+    uint8x16_t q0u8, q1u8, q2u8, q11u8, q12u8, q13u8, q14u8, q15u8;
+    int16x8_t q2s16, q11s16;
+    uint16x8_t q4u16;
+    int8x16_t q1s8, q2s8, q10s8, q11s8, q12s8, q13s8;
+    int8x8_t d2s8, d3s8;
+
+    q11u8 = vabdq_u8(q3, q4);
+    q12u8 = vabdq_u8(q4, q5);
+    q13u8 = vabdq_u8(q5, q6);
+    q14u8 = vabdq_u8(q8, q7);
+    q3    = vabdq_u8(q9, q8);
+    q4    = vabdq_u8(q10, q9);
+
+    q11u8 = vmaxq_u8(q11u8, q12u8);
+    q12u8 = vmaxq_u8(q13u8, q14u8);
+    q3    = vmaxq_u8(q3, q4);
+    q15u8 = vmaxq_u8(q11u8, q12u8);
+
+    q9 = vabdq_u8(q6, q7);
+
+    // vp8_hevmask
+    q13u8 = vcgtq_u8(q13u8, qthresh);
+    q14u8 = vcgtq_u8(q14u8, qthresh);
+    q15u8 = vmaxq_u8(q15u8, q3);
+
+    q2u8 = vabdq_u8(q5, q8);
+    q9 = vqaddq_u8(q9, q9);
+
+    q15u8 = vcgeq_u8(qlimit, q15u8);
+
+    // vp8_filter() function
+    // convert to signed
+    q10 = vdupq_n_u8(0x80);
+    q8 = veorq_u8(q8, q10);
+    q7 = veorq_u8(q7, q10);
+    q6 = veorq_u8(q6, q10);
+    q5 = veorq_u8(q5, q10);
+
+    q2u8 = vshrq_n_u8(q2u8, 1);
+    q9 = vqaddq_u8(q9, q2u8);
+
+    q10 = vdupq_n_u8(3);
+
+    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7)),
+                     vget_low_s8(vreinterpretq_s8_u8(q6)));
+    q11s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7)),
+                      vget_high_s8(vreinterpretq_s8_u8(q6)));
+
+    q9 = vcgeq_u8(qblimit, q9);
+
+    q1s8 = vqsubq_s8(vreinterpretq_s8_u8(q5),
+                    vreinterpretq_s8_u8(q8));
+
+    q14u8 = vorrq_u8(q13u8, q14u8);
+
+    q4u16 = vmovl_u8(vget_low_u8(q10));
+    q2s16 = vmulq_s16(q2s16, vreinterpretq_s16_u16(q4u16));
+    q11s16 = vmulq_s16(q11s16, vreinterpretq_s16_u16(q4u16));
+
+    q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q14u8);
+    q15u8 = vandq_u8(q15u8, q9);
+
+    q1s8 = vreinterpretq_s8_u8(q1u8);
+    q2s16 = vaddw_s8(q2s16, vget_low_s8(q1s8));
+    q11s16 = vaddw_s8(q11s16, vget_high_s8(q1s8));
+
+    q9 = vdupq_n_u8(4);
+    // vp8_filter = clamp(vp8_filter + 3 * ( qs0 - ps0))
+    d2s8 = vqmovn_s16(q2s16);
+    d3s8 = vqmovn_s16(q11s16);
+    q1s8 = vcombine_s8(d2s8, d3s8);
+    q1u8 = vandq_u8(vreinterpretq_u8_s8(q1s8), q15u8);
+    q1s8 = vreinterpretq_s8_u8(q1u8);
+
+    q2s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q10));
+    q1s8 = vqaddq_s8(q1s8, vreinterpretq_s8_u8(q9));
+    q2s8 = vshrq_n_s8(q2s8, 3);
+    q1s8 = vshrq_n_s8(q1s8, 3);
+
+    q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6), q2s8);
+    q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7), q1s8);
+
+    q1s8 = vrshrq_n_s8(q1s8, 1);
+    q1s8 = vbicq_s8(q1s8, vreinterpretq_s8_u8(q14u8));
+
+    q13s8 = vqaddq_s8(vreinterpretq_s8_u8(q5), q1s8);
+    q12s8 = vqsubq_s8(vreinterpretq_s8_u8(q8), q1s8);
+
+    q0u8 = vdupq_n_u8(0x80);
+    *q8r = veorq_u8(vreinterpretq_u8_s8(q12s8), q0u8);
+    *q7r = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
+    *q6r = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
+    *q5r = veorq_u8(vreinterpretq_u8_s8(q13s8), q0u8);
+    return;
+}
+
+void vp8_loop_filter_horizontal_edge_y_neon(
+        unsigned char *src,
+        int pitch,
+        unsigned char blimit,
+        unsigned char limit,
+        unsigned char thresh) {
+    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
+    uint8x16_t q5, q6, q7, q8, q9, q10;
+
+    qblimit = vdupq_n_u8(blimit);
+    qlimit  = vdupq_n_u8(limit);
+    qthresh = vdupq_n_u8(thresh);
+    src -= (pitch << 2);
+
+    q3 = vld1q_u8(src);
+    src += pitch;
+    q4 = vld1q_u8(src);
+    src += pitch;
+    q5 = vld1q_u8(src);
+    src += pitch;
+    q6 = vld1q_u8(src);
+    src += pitch;
+    q7 = vld1q_u8(src);
+    src += pitch;
+    q8 = vld1q_u8(src);
+    src += pitch;
+    q9 = vld1q_u8(src);
+    src += pitch;
+    q10 = vld1q_u8(src);
+
+    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
+                         q5, q6, q7, q8, q9, q10,
+                         &q5, &q6, &q7, &q8);
+
+    src -= (pitch * 5);
+    vst1q_u8(src, q5);
+    src += pitch;
+    vst1q_u8(src, q6);
+    src += pitch;
+    vst1q_u8(src, q7);
+    src += pitch;
+    vst1q_u8(src, q8);
+    return;
+}
+
+void vp8_loop_filter_horizontal_edge_uv_neon(
+        unsigned char *u,
+        int pitch,
+        unsigned char blimit,
+        unsigned char limit,
+        unsigned char thresh,
+        unsigned char *v) {
+    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
+    uint8x16_t q5, q6, q7, q8, q9, q10;
+    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
+    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
+
+    qblimit = vdupq_n_u8(blimit);
+    qlimit  = vdupq_n_u8(limit);
+    qthresh = vdupq_n_u8(thresh);
+
+    u -= (pitch << 2);
+    v -= (pitch << 2);
+
+    d6  = vld1_u8(u);
+    u += pitch;
+    d7  = vld1_u8(v);
+    v += pitch;
+    d8  = vld1_u8(u);
+    u += pitch;
+    d9  = vld1_u8(v);
+    v += pitch;
+    d10 = vld1_u8(u);
+    u += pitch;
+    d11 = vld1_u8(v);
+    v += pitch;
+    d12 = vld1_u8(u);
+    u += pitch;
+    d13 = vld1_u8(v);
+    v += pitch;
+    d14 = vld1_u8(u);
+    u += pitch;
+    d15 = vld1_u8(v);
+    v += pitch;
+    d16 = vld1_u8(u);
+    u += pitch;
+    d17 = vld1_u8(v);
+    v += pitch;
+    d18 = vld1_u8(u);
+    u += pitch;
+    d19 = vld1_u8(v);
+    v += pitch;
+    d20 = vld1_u8(u);
+    d21 = vld1_u8(v);
+
+    q3 = vcombine_u8(d6, d7);
+    q4 = vcombine_u8(d8, d9);
+    q5 = vcombine_u8(d10, d11);
+    q6 = vcombine_u8(d12, d13);
+    q7 = vcombine_u8(d14, d15);
+    q8 = vcombine_u8(d16, d17);
+    q9 = vcombine_u8(d18, d19);
+    q10 = vcombine_u8(d20, d21);
+
+    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
+                         q5, q6, q7, q8, q9, q10,
+                         &q5, &q6, &q7, &q8);
+
+    u -= (pitch * 5);
+    vst1_u8(u, vget_low_u8(q5));
+    u += pitch;
+    vst1_u8(u, vget_low_u8(q6));
+    u += pitch;
+    vst1_u8(u, vget_low_u8(q7));
+    u += pitch;
+    vst1_u8(u, vget_low_u8(q8));
+
+    v -= (pitch * 5);
+    vst1_u8(v, vget_high_u8(q5));
+    v += pitch;
+    vst1_u8(v, vget_high_u8(q6));
+    v += pitch;
+    vst1_u8(v, vget_high_u8(q7));
+    v += pitch;
+    vst1_u8(v, vget_high_u8(q8));
+    return;
+}
+
+static INLINE void write_4x8(unsigned char *dst, int pitch,
+                             const uint8x8x4_t result) {
+#ifdef VPX_INCOMPATIBLE_GCC
+    /*
+     * uint8x8x4_t result
+    00 01 02 03 | 04 05 06 07
+    10 11 12 13 | 14 15 16 17
+    20 21 22 23 | 24 25 26 27
+    30 31 32 33 | 34 35 36 37
+    ---
+    * after vtrn_u16
+    00 01 20 21 | 04 05 24 25
+    02 03 22 23 | 06 07 26 27
+    10 11 30 31 | 14 15 34 35
+    12 13 32 33 | 16 17 36 37
+    ---
+    * after vtrn_u8
+    00 10 20 30 | 04 14 24 34
+    01 11 21 31 | 05 15 25 35
+    02 12 22 32 | 06 16 26 36
+    03 13 23 33 | 07 17 27 37
+    */
+    const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u8(result.val[0]),
+                                          vreinterpret_u16_u8(result.val[2]));
+    const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u8(result.val[1]),
+                                          vreinterpret_u16_u8(result.val[3]));
+    const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
+                                       vreinterpret_u8_u16(r13_u16.val[0]));
+    const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
+                                       vreinterpret_u8_u16(r13_u16.val[1]));
+    const uint32x2_t x_0_4 = vreinterpret_u32_u8(r01_u8.val[0]);
+    const uint32x2_t x_1_5 = vreinterpret_u32_u8(r01_u8.val[1]);
+    const uint32x2_t x_2_6 = vreinterpret_u32_u8(r23_u8.val[0]);
+    const uint32x2_t x_3_7 = vreinterpret_u32_u8(r23_u8.val[1]);
+    vst1_lane_u32((uint32_t *)dst, x_0_4, 0);
+    dst += pitch;
+    vst1_lane_u32((uint32_t *)dst, x_1_5, 0);
+    dst += pitch;
+    vst1_lane_u32((uint32_t *)dst, x_2_6, 0);
+    dst += pitch;
+    vst1_lane_u32((uint32_t *)dst, x_3_7, 0);
+    dst += pitch;
+    vst1_lane_u32((uint32_t *)dst, x_0_4, 1);
+    dst += pitch;
+    vst1_lane_u32((uint32_t *)dst, x_1_5, 1);
+    dst += pitch;
+    vst1_lane_u32((uint32_t *)dst, x_2_6, 1);
+    dst += pitch;
+    vst1_lane_u32((uint32_t *)dst, x_3_7, 1);
+#else
+    vst4_lane_u8(dst, result, 0);
+    dst += pitch;
+    vst4_lane_u8(dst, result, 1);
+    dst += pitch;
+    vst4_lane_u8(dst, result, 2);
+    dst += pitch;
+    vst4_lane_u8(dst, result, 3);
+    dst += pitch;
+    vst4_lane_u8(dst, result, 4);
+    dst += pitch;
+    vst4_lane_u8(dst, result, 5);
+    dst += pitch;
+    vst4_lane_u8(dst, result, 6);
+    dst += pitch;
+    vst4_lane_u8(dst, result, 7);
+#endif  // VPX_INCOMPATIBLE_GCC
+}
+
+/* Filter one vertical edge in the Y plane with the normal loop filter.
+ * Sixteen 8-byte rows are loaded starting 4 columns left of the edge,
+ * transposed so each pixel column becomes a vector register, filtered,
+ * and the 4 modified columns are written back via write_4x8.
+ * src    - points at the edge column
+ * pitch  - row stride in bytes
+ * blimit, limit, thresh - filter strength thresholds (broadcast below)
+ */
+void vp8_loop_filter_vertical_edge_y_neon(
+        unsigned char *src,
+        int pitch,
+        unsigned char blimit,
+        unsigned char limit,
+        unsigned char thresh) {
+    unsigned char *s, *d;
+    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
+    uint8x16_t q5, q6, q7, q8, q9, q10;
+    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
+    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
+    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
+    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
+    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
+    uint8x8x4_t q4ResultH, q4ResultL;
+
+    /* Broadcast the scalar thresholds to all 16 lanes. */
+    qblimit = vdupq_n_u8(blimit);
+    qlimit  = vdupq_n_u8(limit);
+    qthresh = vdupq_n_u8(thresh);
+
+    /* Load rows 0-15, 8 bytes each, starting 4 pixels left of the edge.
+     * Rows 0-7 land in the even-numbered d-registers and rows 8-15 in
+     * the odd ones, so vcombine below pairs row r with row r+8. */
+    s = src - 4;
+    d6  = vld1_u8(s);
+    s += pitch;
+    d8  = vld1_u8(s);
+    s += pitch;
+    d10 = vld1_u8(s);
+    s += pitch;
+    d12 = vld1_u8(s);
+    s += pitch;
+    d14 = vld1_u8(s);
+    s += pitch;
+    d16 = vld1_u8(s);
+    s += pitch;
+    d18 = vld1_u8(s);
+    s += pitch;
+    d20 = vld1_u8(s);
+    s += pitch;
+    d7  = vld1_u8(s);
+    s += pitch;
+    d9  = vld1_u8(s);
+    s += pitch;
+    d11 = vld1_u8(s);
+    s += pitch;
+    d13 = vld1_u8(s);
+    s += pitch;
+    d15 = vld1_u8(s);
+    s += pitch;
+    d17 = vld1_u8(s);
+    s += pitch;
+    d19 = vld1_u8(s);
+    s += pitch;
+    d21 = vld1_u8(s);
+
+    q3 = vcombine_u8(d6, d7);
+    q4 = vcombine_u8(d8, d9);
+    q5 = vcombine_u8(d10, d11);
+    q6 = vcombine_u8(d12, d13);
+    q7 = vcombine_u8(d14, d15);
+    q8 = vcombine_u8(d16, d17);
+    q9 = vcombine_u8(d18, d19);
+    q10 = vcombine_u8(d20, d21);
+
+    /* 16x8 transpose: vtrn at 32-, then 16-, then 8-bit granularity
+     * converts the row vectors into column vectors. */
+    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
+    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
+    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
+    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
+
+    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
+                       vreinterpretq_u16_u32(q2tmp2.val[0]));
+    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
+                       vreinterpretq_u16_u32(q2tmp3.val[0]));
+    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
+                       vreinterpretq_u16_u32(q2tmp2.val[1]));
+    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
+                       vreinterpretq_u16_u32(q2tmp3.val[1]));
+
+    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
+                       vreinterpretq_u8_u16(q2tmp5.val[0]));
+    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
+                       vreinterpretq_u8_u16(q2tmp5.val[1]));
+    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
+                       vreinterpretq_u8_u16(q2tmp7.val[0]));
+    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
+                       vreinterpretq_u8_u16(q2tmp7.val[1]));
+
+    /* q3..q10 now hold the 8 pixel columns around the edge
+     * (p3 p2 p1 p0 q0 q1 q2 q3 in VP8 naming). */
+    q3 = q2tmp8.val[0];
+    q4 = q2tmp8.val[1];
+    q5 = q2tmp9.val[0];
+    q6 = q2tmp9.val[1];
+    q7 = q2tmp10.val[0];
+    q8 = q2tmp10.val[1];
+    q9 = q2tmp11.val[0];
+    q10 = q2tmp11.val[1];
+
+    /* Filter; the four modified inner columns come back in q5..q8. */
+    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
+                         q5, q6, q7, q8, q9, q10,
+                         &q5, &q6, &q7, &q8);
+
+    q4ResultL.val[0] = vget_low_u8(q5);   // d10
+    q4ResultL.val[1] = vget_low_u8(q6);   // d12
+    q4ResultL.val[2] = vget_low_u8(q7);   // d14
+    q4ResultL.val[3] = vget_low_u8(q8);   // d16
+    q4ResultH.val[0] = vget_high_u8(q5);  // d11
+    q4ResultH.val[1] = vget_high_u8(q6);  // d13
+    q4ResultH.val[2] = vget_high_u8(q7);  // d15
+    q4ResultH.val[3] = vget_high_u8(q8);  // d17
+
+    /* Store 4 columns (2 either side of the edge): rows 0-7 from the
+     * low halves, then rows 8-15 from the high halves. */
+    d = src - 2;
+    write_4x8(d, pitch, q4ResultL);
+    d += pitch * 8;
+    write_4x8(d, pitch, q4ResultH);
+}
+
+/* Filter one vertical edge in both chroma planes with the normal loop
+ * filter.  Eight 8-byte rows are read from each of the U and V planes
+ * (U rows in the low vector halves, V rows in the high halves) so both
+ * planes are filtered in a single pass, then the 4 modified columns of
+ * each plane are written back via write_4x8.
+ * u, v   - point at the edge column in each chroma plane
+ * pitch  - row stride in bytes (shared by both planes)
+ * blimit, limit, thresh - filter strength thresholds (broadcast below)
+ */
+void vp8_loop_filter_vertical_edge_uv_neon(
+        unsigned char *u,
+        int pitch,
+        unsigned char blimit,
+        unsigned char limit,
+        unsigned char thresh,
+        unsigned char *v) {
+    unsigned char *us, *ud;
+    unsigned char *vs, *vd;
+    uint8x16_t qblimit, qlimit, qthresh, q3, q4;
+    uint8x16_t q5, q6, q7, q8, q9, q10;
+    uint8x8_t d6, d7, d8, d9, d10, d11, d12, d13, d14;
+    uint8x8_t d15, d16, d17, d18, d19, d20, d21;
+    uint32x4x2_t q2tmp0, q2tmp1, q2tmp2, q2tmp3;
+    uint16x8x2_t q2tmp4, q2tmp5, q2tmp6, q2tmp7;
+    uint8x16x2_t q2tmp8, q2tmp9, q2tmp10, q2tmp11;
+    uint8x8x4_t q4ResultH, q4ResultL;
+
+    /* Broadcast the scalar thresholds to all 16 lanes. */
+    qblimit = vdupq_n_u8(blimit);
+    qlimit  = vdupq_n_u8(limit);
+    qthresh = vdupq_n_u8(thresh);
+
+    /* U plane: 8 rows starting 4 pixels left of the edge (even dregs). */
+    us = u - 4;
+    d6 = vld1_u8(us);
+    us += pitch;
+    d8 = vld1_u8(us);
+    us += pitch;
+    d10 = vld1_u8(us);
+    us += pitch;
+    d12 = vld1_u8(us);
+    us += pitch;
+    d14 = vld1_u8(us);
+    us += pitch;
+    d16 = vld1_u8(us);
+    us += pitch;
+    d18 = vld1_u8(us);
+    us += pitch;
+    d20 = vld1_u8(us);
+
+    /* V plane: same 8 rows (odd dregs, i.e. the high vector halves). */
+    vs = v - 4;
+    d7 = vld1_u8(vs);
+    vs += pitch;
+    d9 = vld1_u8(vs);
+    vs += pitch;
+    d11 = vld1_u8(vs);
+    vs += pitch;
+    d13 = vld1_u8(vs);
+    vs += pitch;
+    d15 = vld1_u8(vs);
+    vs += pitch;
+    d17 = vld1_u8(vs);
+    vs += pitch;
+    d19 = vld1_u8(vs);
+    vs += pitch;
+    d21 = vld1_u8(vs);
+
+    q3 = vcombine_u8(d6, d7);
+    q4 = vcombine_u8(d8, d9);
+    q5 = vcombine_u8(d10, d11);
+    q6 = vcombine_u8(d12, d13);
+    q7 = vcombine_u8(d14, d15);
+    q8 = vcombine_u8(d16, d17);
+    q9 = vcombine_u8(d18, d19);
+    q10 = vcombine_u8(d20, d21);
+
+    /* 16x8 transpose: vtrn at 32-, then 16-, then 8-bit granularity
+     * converts the row vectors into column vectors. */
+    q2tmp0 = vtrnq_u32(vreinterpretq_u32_u8(q3), vreinterpretq_u32_u8(q7));
+    q2tmp1 = vtrnq_u32(vreinterpretq_u32_u8(q4), vreinterpretq_u32_u8(q8));
+    q2tmp2 = vtrnq_u32(vreinterpretq_u32_u8(q5), vreinterpretq_u32_u8(q9));
+    q2tmp3 = vtrnq_u32(vreinterpretq_u32_u8(q6), vreinterpretq_u32_u8(q10));
+
+    q2tmp4 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[0]),
+                       vreinterpretq_u16_u32(q2tmp2.val[0]));
+    q2tmp5 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[0]),
+                       vreinterpretq_u16_u32(q2tmp3.val[0]));
+    q2tmp6 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp0.val[1]),
+                       vreinterpretq_u16_u32(q2tmp2.val[1]));
+    q2tmp7 = vtrnq_u16(vreinterpretq_u16_u32(q2tmp1.val[1]),
+                       vreinterpretq_u16_u32(q2tmp3.val[1]));
+
+    q2tmp8  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[0]),
+                       vreinterpretq_u8_u16(q2tmp5.val[0]));
+    q2tmp9  = vtrnq_u8(vreinterpretq_u8_u16(q2tmp4.val[1]),
+                       vreinterpretq_u8_u16(q2tmp5.val[1]));
+    q2tmp10 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[0]),
+                       vreinterpretq_u8_u16(q2tmp7.val[0]));
+    q2tmp11 = vtrnq_u8(vreinterpretq_u8_u16(q2tmp6.val[1]),
+                       vreinterpretq_u8_u16(q2tmp7.val[1]));
+
+    /* q3..q10 now hold the 8 pixel columns around the edge
+     * (p3 p2 p1 p0 q0 q1 q2 q3; low half = U, high half = V). */
+    q3 = q2tmp8.val[0];
+    q4 = q2tmp8.val[1];
+    q5 = q2tmp9.val[0];
+    q6 = q2tmp9.val[1];
+    q7 = q2tmp10.val[0];
+    q8 = q2tmp10.val[1];
+    q9 = q2tmp11.val[0];
+    q10 = q2tmp11.val[1];
+
+    /* Filter; the four modified inner columns come back in q5..q8. */
+    vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4,
+                         q5, q6, q7, q8, q9, q10,
+                         &q5, &q6, &q7, &q8);
+
+    /* Low halves are the U-plane columns. */
+    q4ResultL.val[0] = vget_low_u8(q5);   // d10
+    q4ResultL.val[1] = vget_low_u8(q6);   // d12
+    q4ResultL.val[2] = vget_low_u8(q7);   // d14
+    q4ResultL.val[3] = vget_low_u8(q8);   // d16
+    ud = u - 2;
+    write_4x8(ud, pitch, q4ResultL);
+
+    /* High halves are the V-plane columns. */
+    q4ResultH.val[0] = vget_high_u8(q5);  // d11
+    q4ResultH.val[1] = vget_high_u8(q6);  // d13
+    q4ResultH.val[2] = vget_high_u8(q7);  // d15
+    q4ResultH.val[3] = vget_high_u8(q8);  // d17
+    vd = v - 2;
+    write_4x8(vd, pitch, q4ResultH);
+}
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.asm
+++ /dev/null
@@ -1,117 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-    ;EXPORT  |vp8_loop_filter_simple_horizontal_edge_neon|
-    EXPORT  |vp8_loop_filter_bhs_neon|
-    EXPORT  |vp8_loop_filter_mbhs_neon|
-    ARM
-    PRESERVE8
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-
-; r0    unsigned char *s, PRESERVE
-; r1    int p, PRESERVE
-; q1    limit, PRESERVE
-
-|vp8_loop_filter_simple_horizontal_edge_neon| PROC
-
-    sub         r3, r0, r1, lsl #1          ; move src pointer down by 2 lines
-
-    vld1.u8     {q7}, [r0@128], r1          ; q0
-    vld1.u8     {q5}, [r3@128], r1          ; p0
-    vld1.u8     {q8}, [r0@128]              ; q1
-    vld1.u8     {q6}, [r3@128]              ; p1
-
-    vabd.u8     q15, q6, q7                 ; abs(p0 - q0)
-    vabd.u8     q14, q5, q8                 ; abs(p1 - q1)
-
-    vqadd.u8    q15, q15, q15               ; abs(p0 - q0) * 2
-    vshr.u8     q14, q14, #1                ; abs(p1 - q1) / 2
-    vmov.u8     q0, #0x80                   ; 0x80
-    vmov.s16    q13, #3
-    vqadd.u8    q15, q15, q14               ; abs(p0 - q0) * 2 + abs(p1 - q1) / 2
-
-    veor        q7, q7, q0                  ; qs0: q0 offset to convert to a signed value
-    veor        q6, q6, q0                  ; ps0: p0 offset to convert to a signed value
-    veor        q5, q5, q0                  ; ps1: p1 offset to convert to a signed value
-    veor        q8, q8, q0                  ; qs1: q1 offset to convert to a signed value
-
-    vcge.u8     q15, q1, q15                ; (abs(p0 - q0)*2 + abs(p1-q1)/2 > limit)*-1
-
-    vsubl.s8    q2, d14, d12                ; ( qs0 - ps0)
-    vsubl.s8    q3, d15, d13
-
-    vqsub.s8    q4, q5, q8                  ; q4: vp8_filter = vp8_signed_char_clamp(ps1-qs1)
-
-    vmul.s16    q2, q2, q13                 ;  3 * ( qs0 - ps0)
-    vmul.s16    q3, q3, q13
-
-    vmov.u8     q10, #0x03                  ; 0x03
-    vmov.u8     q9, #0x04                   ; 0x04
-
-    vaddw.s8    q2, q2, d8                  ; vp8_filter + 3 * ( qs0 - ps0)
-    vaddw.s8    q3, q3, d9
-
-    vqmovn.s16  d8, q2                      ; vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
-    vqmovn.s16  d9, q3
-
-    vand        q14, q4, q15                ; vp8_filter &= mask
-
-    vqadd.s8    q2, q14, q10                ; Filter2 = vp8_signed_char_clamp(vp8_filter+3)
-    vqadd.s8    q3, q14, q9                 ; Filter1 = vp8_signed_char_clamp(vp8_filter+4)
-    vshr.s8     q2, q2, #3                  ; Filter2 >>= 3
-    vshr.s8     q4, q3, #3                  ; Filter1 >>= 3
-
-    sub         r0, r0, r1
-
-    ;calculate output
-    vqadd.s8    q11, q6, q2                 ; u = vp8_signed_char_clamp(ps0 + Filter2)
-    vqsub.s8    q10, q7, q4                 ; u = vp8_signed_char_clamp(qs0 - Filter1)
-
-    veor        q6, q11, q0                 ; *op0 = u^0x80
-    veor        q7, q10, q0                 ; *oq0 = u^0x80
-
-    vst1.u8     {q6}, [r3@128]              ; store op0
-    vst1.u8     {q7}, [r0@128]              ; store oq0
-
-    bx          lr
-    ENDP        ; |vp8_loop_filter_simple_horizontal_edge_neon|
-
-; r0    unsigned char *y
-; r1    int ystride
-; r2    const unsigned char *blimit
-
-|vp8_loop_filter_bhs_neon| PROC
-    push        {r4, lr}
-    ldrb        r3, [r2]                    ; load blim from mem
-    vdup.s8     q1, r3                      ; duplicate blim
-
-    add         r0, r0, r1, lsl #2          ; src = y_ptr + 4 * y_stride
-    bl          vp8_loop_filter_simple_horizontal_edge_neon
-    ; vp8_loop_filter_simple_horizontal_edge_neon preserves r0, r1 and q1
-    add         r0, r0, r1, lsl #2          ; src = y_ptr + 8* y_stride
-    bl          vp8_loop_filter_simple_horizontal_edge_neon
-    add         r0, r0, r1, lsl #2          ; src = y_ptr + 12 * y_stride
-    pop         {r4, lr}
-    b           vp8_loop_filter_simple_horizontal_edge_neon
-    ENDP        ;|vp8_loop_filter_bhs_neon|
-
-; r0    unsigned char *y
-; r1    int ystride
-; r2    const unsigned char *blimit
-
-|vp8_loop_filter_mbhs_neon| PROC
-    ldrb        r3, [r2]                   ; load blim from mem
-    vdup.s8     q1, r3                     ; duplicate mblim
-    b           vp8_loop_filter_simple_horizontal_edge_neon
-    ENDP        ;|vp8_loop_filter_bhs_neon|
-
-    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c
@@ -0,0 +1,111 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include "./vpx_config.h"
+
+/* VP8 "simple" loop filter applied to one 16-pixel-wide horizontal edge.
+ * Reads the two rows above the edge (p1, p0) and the two below (q0, q1),
+ * and writes back modified p0/q0 only.
+ * s      - points at the q0 row (first row below the edge)
+ * p      - row stride in bytes
+ * blimit - edge-strength threshold, broadcast to all lanes
+ */
+static INLINE void vp8_loop_filter_simple_horizontal_edge_neon(
+        unsigned char *s,
+        int p,
+        const unsigned char *blimit) {
+    uint8_t *sp;
+    uint8x16_t qblimit, q0u8;
+    uint8x16_t q5u8, q6u8, q7u8, q8u8, q9u8, q10u8, q14u8, q15u8;
+    int16x8_t q2s16, q3s16, q13s16;
+    int8x8_t d8s8, d9s8;
+    int8x16_t q2s8, q3s8, q4s8, q10s8, q11s8, q14s8;
+
+    qblimit = vdupq_n_u8(*blimit);
+
+    /* Load the 4 rows straddling the edge: p1, p0, q0, q1. */
+    sp = s - (p << 1);
+    q5u8 = vld1q_u8(sp);       /* p1 */
+    sp += p;
+    q6u8 = vld1q_u8(sp);       /* p0 */
+    sp += p;
+    q7u8 = vld1q_u8(sp);       /* q0 */
+    sp += p;
+    q8u8 = vld1q_u8(sp);       /* q1 */
+
+    q15u8 = vabdq_u8(q6u8, q7u8);              /* |p0 - q0| */
+    q14u8 = vabdq_u8(q5u8, q8u8);              /* |p1 - q1| */
+
+    q15u8 = vqaddq_u8(q15u8, q15u8);           /* |p0 - q0| * 2 */
+    q14u8 = vshrq_n_u8(q14u8, 1);              /* |p1 - q1| / 2 */
+    q0u8 = vdupq_n_u8(0x80);
+    q13s16 = vdupq_n_s16(3);
+    q15u8 = vqaddq_u8(q15u8, q14u8);           /* |p0-q0|*2 + |p1-q1|/2 */
+
+    /* Flip the sign bit (x ^ 0x80) to work in signed arithmetic. */
+    q5u8 = veorq_u8(q5u8, q0u8);
+    q6u8 = veorq_u8(q6u8, q0u8);
+    q7u8 = veorq_u8(q7u8, q0u8);
+    q8u8 = veorq_u8(q8u8, q0u8);
+
+    /* Lane mask: filter only where the activity measure <= blimit. */
+    q15u8 = vcgeq_u8(qblimit, q15u8);
+
+    /* (q0 - p0), widened to 16 bits to avoid overflow. */
+    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q7u8)),
+                     vget_low_s8(vreinterpretq_s8_u8(q6u8)));
+    q3s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q7u8)),
+                     vget_high_s8(vreinterpretq_s8_u8(q6u8)));
+
+    /* filter = clamp(p1 - q1) */
+    q4s8 = vqsubq_s8(vreinterpretq_s8_u8(q5u8),
+                     vreinterpretq_s8_u8(q8u8));
+
+    q2s16 = vmulq_s16(q2s16, q13s16);          /* 3 * (q0 - p0) */
+    q3s16 = vmulq_s16(q3s16, q13s16);
+
+    q10u8 = vdupq_n_u8(3);
+    q9u8 = vdupq_n_u8(4);
+
+    /* filter += 3 * (q0 - p0) */
+    q2s16 = vaddw_s8(q2s16, vget_low_s8(q4s8));
+    q3s16 = vaddw_s8(q3s16, vget_high_s8(q4s8));
+
+    /* Saturate back to 8 bits. */
+    d8s8 = vqmovn_s16(q2s16);
+    d9s8 = vqmovn_s16(q3s16);
+    q4s8 = vcombine_s8(d8s8, d9s8);
+
+    q14s8 = vandq_s8(q4s8, vreinterpretq_s8_u8(q15u8));  /* filter &= mask */
+
+    /* Filter2 = clamp(filter + 3) >> 3;  Filter1 = clamp(filter + 4) >> 3 */
+    q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q10u8));
+    q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q9u8));
+    q2s8 = vshrq_n_s8(q2s8, 3);
+    q3s8 = vshrq_n_s8(q3s8, 3);
+
+    /* p0 += Filter2;  q0 -= Filter1 */
+    q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q6u8), q2s8);
+    q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q7u8), q3s8);
+
+    /* Undo the sign-bit bias and store q0, then p0 one row above. */
+    q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
+    q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
+
+    vst1q_u8(s, q7u8);
+    s -= p;
+    vst1q_u8(s, q6u8);
+    return;
+}
+
+/* Simple-filter the three inner horizontal block edges of a macroblock
+ * (rows 4, 8 and 12 of the 16x16 luma block). */
+void vp8_loop_filter_bhs_neon(
+        unsigned char *y_ptr,
+        int y_stride,
+        const unsigned char *blimit) {
+    y_ptr += y_stride * 4;
+    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
+    y_ptr += y_stride * 4;
+    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
+    y_ptr += y_stride * 4;
+    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
+    return;
+}
+
+/* Simple-filter the top (macroblock) horizontal edge at y_ptr. */
+void vp8_loop_filter_mbhs_neon(
+        unsigned char *y_ptr,
+        int y_stride,
+        const unsigned char *blimit) {
+    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, blimit);
+    return;
+}
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.asm
+++ /dev/null
@@ -1,154 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-    ;EXPORT  |vp8_loop_filter_simple_vertical_edge_neon|
-    EXPORT |vp8_loop_filter_bvs_neon|
-    EXPORT |vp8_loop_filter_mbvs_neon|
-    ARM
-    PRESERVE8
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-
-; r0    unsigned char *s, PRESERVE
-; r1    int p, PRESERVE
-; q1    limit, PRESERVE
-
-|vp8_loop_filter_simple_vertical_edge_neon| PROC
-    sub         r0, r0, #2                  ; move src pointer down by 2 columns
-    add         r12, r1, r1
-    add         r3, r0, r1
-
-    vld4.8      {d6[0], d7[0], d8[0], d9[0]}, [r0], r12
-    vld4.8      {d6[1], d7[1], d8[1], d9[1]}, [r3], r12
-    vld4.8      {d6[2], d7[2], d8[2], d9[2]}, [r0], r12
-    vld4.8      {d6[3], d7[3], d8[3], d9[3]}, [r3], r12
-    vld4.8      {d6[4], d7[4], d8[4], d9[4]}, [r0], r12
-    vld4.8      {d6[5], d7[5], d8[5], d9[5]}, [r3], r12
-    vld4.8      {d6[6], d7[6], d8[6], d9[6]}, [r0], r12
-    vld4.8      {d6[7], d7[7], d8[7], d9[7]}, [r3], r12
-
-    vld4.8      {d10[0], d11[0], d12[0], d13[0]}, [r0], r12
-    vld4.8      {d10[1], d11[1], d12[1], d13[1]}, [r3], r12
-    vld4.8      {d10[2], d11[2], d12[2], d13[2]}, [r0], r12
-    vld4.8      {d10[3], d11[3], d12[3], d13[3]}, [r3], r12
-    vld4.8      {d10[4], d11[4], d12[4], d13[4]}, [r0], r12
-    vld4.8      {d10[5], d11[5], d12[5], d13[5]}, [r3], r12
-    vld4.8      {d10[6], d11[6], d12[6], d13[6]}, [r0], r12
-    vld4.8      {d10[7], d11[7], d12[7], d13[7]}, [r3]
-
-    vswp        d7, d10
-    vswp        d12, d9
-
-    ;vp8_filter_mask() function
-    ;vp8_hevmask() function
-    sub         r0, r0, r1, lsl #4
-    vabd.u8     q15, q5, q4                 ; abs(p0 - q0)
-    vabd.u8     q14, q3, q6                 ; abs(p1 - q1)
-
-    vqadd.u8    q15, q15, q15               ; abs(p0 - q0) * 2
-    vshr.u8     q14, q14, #1                ; abs(p1 - q1) / 2
-    vmov.u8     q0, #0x80                   ; 0x80
-    vmov.s16    q11, #3
-    vqadd.u8    q15, q15, q14               ; abs(p0 - q0) * 2 + abs(p1 - q1) / 2
-
-    veor        q4, q4, q0                  ; qs0: q0 offset to convert to a signed value
-    veor        q5, q5, q0                  ; ps0: p0 offset to convert to a signed value
-    veor        q3, q3, q0                  ; ps1: p1 offset to convert to a signed value
-    veor        q6, q6, q0                  ; qs1: q1 offset to convert to a signed value
-
-    vcge.u8     q15, q1, q15                ; abs(p0 - q0)*2 + abs(p1-q1)/2 > flimit*2 + limit)*-1
-
-    vsubl.s8    q2, d8, d10                 ; ( qs0 - ps0)
-    vsubl.s8    q13, d9, d11
-
-    vqsub.s8    q14, q3, q6                  ; vp8_filter = vp8_signed_char_clamp(ps1-qs1)
-
-    vmul.s16    q2, q2, q11                 ;  3 * ( qs0 - ps0)
-    vmul.s16    q13, q13, q11
-
-    vmov.u8     q11, #0x03                  ; 0x03
-    vmov.u8     q12, #0x04                  ; 0x04
-
-    vaddw.s8    q2, q2, d28                  ; vp8_filter + 3 * ( qs0 - ps0)
-    vaddw.s8    q13, q13, d29
-
-    vqmovn.s16  d28, q2                      ; vp8_filter = vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
-    vqmovn.s16  d29, q13
-
-    add         r0, r0, #1
-    add         r3, r0, r1
-
-    vand        q14, q14, q15                 ; vp8_filter &= mask
-
-    vqadd.s8    q2, q14, q11                 ; Filter2 = vp8_signed_char_clamp(vp8_filter+3)
-    vqadd.s8    q3, q14, q12                 ; Filter1 = vp8_signed_char_clamp(vp8_filter+4)
-    vshr.s8     q2, q2, #3                  ; Filter2 >>= 3
-    vshr.s8     q14, q3, #3                  ; Filter1 >>= 3
-
-    ;calculate output
-    vqadd.s8    q11, q5, q2                 ; u = vp8_signed_char_clamp(ps0 + Filter2)
-    vqsub.s8    q10, q4, q14                 ; u = vp8_signed_char_clamp(qs0 - Filter1)
-
-    veor        q6, q11, q0                 ; *op0 = u^0x80
-    veor        q7, q10, q0                 ; *oq0 = u^0x80
-    add         r12, r1, r1
-    vswp        d13, d14
-
-    ;store op1, op0, oq0, oq1
-    vst2.8      {d12[0], d13[0]}, [r0], r12
-    vst2.8      {d12[1], d13[1]}, [r3], r12
-    vst2.8      {d12[2], d13[2]}, [r0], r12
-    vst2.8      {d12[3], d13[3]}, [r3], r12
-    vst2.8      {d12[4], d13[4]}, [r0], r12
-    vst2.8      {d12[5], d13[5]}, [r3], r12
-    vst2.8      {d12[6], d13[6]}, [r0], r12
-    vst2.8      {d12[7], d13[7]}, [r3], r12
-    vst2.8      {d14[0], d15[0]}, [r0], r12
-    vst2.8      {d14[1], d15[1]}, [r3], r12
-    vst2.8      {d14[2], d15[2]}, [r0], r12
-    vst2.8      {d14[3], d15[3]}, [r3], r12
-    vst2.8      {d14[4], d15[4]}, [r0], r12
-    vst2.8      {d14[5], d15[5]}, [r3], r12
-    vst2.8      {d14[6], d15[6]}, [r0], r12
-    vst2.8      {d14[7], d15[7]}, [r3]
-
-    bx          lr
-    ENDP        ; |vp8_loop_filter_simple_vertical_edge_neon|
-
-; r0    unsigned char *y
-; r1    int ystride
-; r2    const unsigned char *blimit
-
-|vp8_loop_filter_bvs_neon| PROC
-    push        {r4, lr}
-    ldrb        r3, [r2]                   ; load blim from mem
-    mov         r4, r0
-    add         r0, r0, #4
-    vdup.s8     q1, r3                     ; duplicate blim
-    bl          vp8_loop_filter_simple_vertical_edge_neon
-    ; vp8_loop_filter_simple_vertical_edge_neon preserves  r1 and q1
-    add         r0, r4, #8
-    bl          vp8_loop_filter_simple_vertical_edge_neon
-    add         r0, r4, #12
-    pop         {r4, lr}
-    b           vp8_loop_filter_simple_vertical_edge_neon
-    ENDP        ;|vp8_loop_filter_bvs_neon|
-
-; r0    unsigned char *y
-; r1    int ystride
-; r2    const unsigned char *blimit
-
-|vp8_loop_filter_mbvs_neon| PROC
-    ldrb        r3, [r2]                   ; load mblim from mem
-    vdup.s8     q1, r3                     ; duplicate mblim
-    b           vp8_loop_filter_simple_vertical_edge_neon
-    ENDP        ;|vp8_loop_filter_bvs_neon|
-    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c
@@ -0,0 +1,280 @@
+/*
+ *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <arm_neon.h>
+#include "./vpx_config.h"
+#include "vpx_ports/arm.h"
+
+#ifdef VPX_INCOMPATIBLE_GCC
+/* Scatter a pair of 8-pixel result vectors as 2-byte column pairs
+ * across 8 destination rows.  Fallback used when the compiler cannot
+ * handle vst2_lane_u8 (VPX_INCOMPATIBLE_GCC): interleave the two
+ * vectors with vtrn_u8, then store one 16-bit lane per row. */
+static INLINE void write_2x4(unsigned char *dst, int pitch,
+                             const uint8x8x2_t result) {
+    /*
+     * uint8x8x2_t result
+    00 01 02 03 | 04 05 06 07
+    10 11 12 13 | 14 15 16 17
+    ---
+    * after vtrn_u8
+    00 10 02 12 | 04 14 06 16
+    01 11 03 13 | 05 15 07 17
+    */
+    const uint8x8x2_t r01_u8 = vtrn_u8(result.val[0],
+                                       result.val[1]);
+    const uint16x4_t x_0_4 = vreinterpret_u16_u8(r01_u8.val[0]);
+    const uint16x4_t x_1_5 = vreinterpret_u16_u8(r01_u8.val[1]);
+    vst1_lane_u16((uint16_t *)dst, x_0_4, 0);
+    dst += pitch;
+    vst1_lane_u16((uint16_t *)dst, x_1_5, 0);
+    dst += pitch;
+    vst1_lane_u16((uint16_t *)dst, x_0_4, 1);
+    dst += pitch;
+    vst1_lane_u16((uint16_t *)dst, x_1_5, 1);
+    dst += pitch;
+    vst1_lane_u16((uint16_t *)dst, x_0_4, 2);
+    dst += pitch;
+    vst1_lane_u16((uint16_t *)dst, x_1_5, 2);
+    dst += pitch;
+    vst1_lane_u16((uint16_t *)dst, x_0_4, 3);
+    dst += pitch;
+    vst1_lane_u16((uint16_t *)dst, x_1_5, 3);
+}
+
+/* Store two 2-pixel-wide column pairs over 16 rows: `result` covers
+ * rows 0-7 and `result2` rows 8-15 (VPX_INCOMPATIBLE_GCC fallback). */
+static INLINE void write_2x8(unsigned char *dst, int pitch,
+                             const uint8x8x2_t result,
+                             const uint8x8x2_t result2) {
+  write_2x4(dst, pitch, result);
+  dst += pitch * 8;
+  write_2x4(dst, pitch, result2);
+}
+#else
+/* Store two 2-pixel-wide column pairs over 16 rows using vst2_lane_u8,
+ * one interleaved lane per row: `result` covers rows 0-7, `result2`
+ * rows 8-15. */
+static INLINE void write_2x8(unsigned char *dst, int pitch,
+                             const uint8x8x2_t result,
+                             const uint8x8x2_t result2) {
+  vst2_lane_u8(dst, result, 0);
+  dst += pitch;
+  vst2_lane_u8(dst, result, 1);
+  dst += pitch;
+  vst2_lane_u8(dst, result, 2);
+  dst += pitch;
+  vst2_lane_u8(dst, result, 3);
+  dst += pitch;
+  vst2_lane_u8(dst, result, 4);
+  dst += pitch;
+  vst2_lane_u8(dst, result, 5);
+  dst += pitch;
+  vst2_lane_u8(dst, result, 6);
+  dst += pitch;
+  vst2_lane_u8(dst, result, 7);
+  dst += pitch;
+
+  vst2_lane_u8(dst, result2, 0);
+  dst += pitch;
+  vst2_lane_u8(dst, result2, 1);
+  dst += pitch;
+  vst2_lane_u8(dst, result2, 2);
+  dst += pitch;
+  vst2_lane_u8(dst, result2, 3);
+  dst += pitch;
+  vst2_lane_u8(dst, result2, 4);
+  dst += pitch;
+  vst2_lane_u8(dst, result2, 5);
+  dst += pitch;
+  vst2_lane_u8(dst, result2, 6);
+  dst += pitch;
+  vst2_lane_u8(dst, result2, 7);
+}
+#endif  // VPX_INCOMPATIBLE_GCC
+
+
+#ifdef VPX_INCOMPATIBLE_GCC
+/* Gather a 4-pixel-wide, 8-row patch into 4 column vectors without
+ * vld4_lane_u8 (VPX_INCOMPATIBLE_GCC fallback): load the 8 rows whole,
+ * then transpose with vtrn at 32-, 16-, and 8-bit granularity.
+ * Returns x with x.val[i] holding pixel column i of the patch. */
+static INLINE
+uint8x8x4_t read_4x8(unsigned char *src, int pitch, uint8x8x4_t x) {
+    const uint8x8_t a = vld1_u8(src);
+    const uint8x8_t b = vld1_u8(src + pitch * 1);
+    const uint8x8_t c = vld1_u8(src + pitch * 2);
+    const uint8x8_t d = vld1_u8(src + pitch * 3);
+    const uint8x8_t e = vld1_u8(src + pitch * 4);
+    const uint8x8_t f = vld1_u8(src + pitch * 5);
+    const uint8x8_t g = vld1_u8(src + pitch * 6);
+    const uint8x8_t h = vld1_u8(src + pitch * 7);
+    const uint32x2x2_t r04_u32 = vtrn_u32(vreinterpret_u32_u8(a),
+                                          vreinterpret_u32_u8(e));
+    const uint32x2x2_t r15_u32 = vtrn_u32(vreinterpret_u32_u8(b),
+                                          vreinterpret_u32_u8(f));
+    const uint32x2x2_t r26_u32 = vtrn_u32(vreinterpret_u32_u8(c),
+                                          vreinterpret_u32_u8(g));
+    const uint32x2x2_t r37_u32 = vtrn_u32(vreinterpret_u32_u8(d),
+                                          vreinterpret_u32_u8(h));
+    const uint16x4x2_t r02_u16 = vtrn_u16(vreinterpret_u16_u32(r04_u32.val[0]),
+                                          vreinterpret_u16_u32(r26_u32.val[0]));
+    const uint16x4x2_t r13_u16 = vtrn_u16(vreinterpret_u16_u32(r15_u32.val[0]),
+                                          vreinterpret_u16_u32(r37_u32.val[0]));
+    const uint8x8x2_t r01_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[0]),
+                                       vreinterpret_u8_u16(r13_u16.val[0]));
+    const uint8x8x2_t r23_u8 = vtrn_u8(vreinterpret_u8_u16(r02_u16.val[1]),
+                                       vreinterpret_u8_u16(r13_u16.val[1]));
+    /*
+     * after vtrn_u32
+    00 01 02 03 | 40 41 42 43
+    10 11 12 13 | 50 51 52 53
+    20 21 22 23 | 60 61 62 63
+    30 31 32 33 | 70 71 72 73
+    ---
+    * after vtrn_u16
+    00 01 20 21 | 40 41 60 61
+    02 03 22 23 | 42 43 62 63
+    10 11 30 31 | 50 51 70 71
+    12 13 32 33 | 52 53 72 73
+
+    00 01 20 21 | 40 41 60 61
+    10 11 30 31 | 50 51 70 71
+    02 03 22 23 | 42 43 62 63
+    12 13 32 33 | 52 53 72 73
+    ---
+    * after vtrn_u8
+    00 10 20 30 | 40 50 60 70
+    01 11 21 31 | 41 51 61 71
+    02 12 22 32 | 42 52 62 72
+    03 13 23 33 | 43 53 63 73
+    */
+    x.val[0] = r01_u8.val[0];
+    x.val[1] = r01_u8.val[1];
+    x.val[2] = r23_u8.val[0];
+    x.val[3] = r23_u8.val[1];
+
+    return x;
+}
+#else
+/* Gather a 4-pixel-wide, 8-row patch into 4 column vectors, one
+ * vld4_lane_u8 per row; x.val[i] holds pixel column i of the patch. */
+static INLINE
+uint8x8x4_t read_4x8(unsigned char *src, int pitch, uint8x8x4_t x) {
+    x = vld4_lane_u8(src, x, 0);
+    src += pitch;
+    x = vld4_lane_u8(src, x, 1);
+    src += pitch;
+    x = vld4_lane_u8(src, x, 2);
+    src += pitch;
+    x = vld4_lane_u8(src, x, 3);
+    src += pitch;
+    x = vld4_lane_u8(src, x, 4);
+    src += pitch;
+    x = vld4_lane_u8(src, x, 5);
+    src += pitch;
+    x = vld4_lane_u8(src, x, 6);
+    src += pitch;
+    x = vld4_lane_u8(src, x, 7);
+    return x;
+}
+#endif  // VPX_INCOMPATIBLE_GCC
+
+/* VP8 "simple" loop filter applied to one 16-row vertical edge.
+ * Gathers the 4 pixel columns straddling the edge (p1 p0 | q0 q1) from
+ * 16 rows with read_4x8, applies the same arithmetic as the horizontal
+ * simple filter, and scatters the modified p0/q0 columns back with
+ * write_2x8.
+ * s      - points at the q0 column (first column right of the edge)
+ * p      - row stride in bytes
+ * blimit - edge-strength threshold, broadcast to all lanes
+ */
+static INLINE void vp8_loop_filter_simple_vertical_edge_neon(
+        unsigned char *s,
+        int p,
+        const unsigned char *blimit) {
+    unsigned char *src1;
+    uint8x16_t qblimit, q0u8;
+    uint8x16_t q3u8, q4u8, q5u8, q6u8, q7u8, q11u8, q12u8, q14u8, q15u8;
+    int16x8_t q2s16, q13s16, q11s16;
+    int8x8_t d28s8, d29s8;
+    int8x16_t q2s8, q3s8, q10s8, q11s8, q14s8;
+    uint8x8x4_t d0u8x4;  // d6, d7, d8, d9
+    uint8x8x4_t d1u8x4;  // d10, d11, d12, d13
+    uint8x8x2_t d2u8x2;  // d12, d13
+    uint8x8x2_t d3u8x2;  // d14, d15
+
+    qblimit = vdupq_n_u8(*blimit);
+
+    /* Read rows 0-7 and 8-15, each as 4 columns starting 2 pixels left
+     * of the edge. */
+    src1 = s - 2;
+    d0u8x4 = read_4x8(src1, p, d0u8x4);
+    src1 += p * 8;
+    d1u8x4 = read_4x8(src1, p, d1u8x4);
+
+    /* Combine into full 16-lane columns: q3=p1, q5=p0, q4=q0, q6=q1. */
+    q3u8 = vcombine_u8(d0u8x4.val[0], d1u8x4.val[0]);  // d6 d10
+    q4u8 = vcombine_u8(d0u8x4.val[2], d1u8x4.val[2]);  // d8 d12
+    q5u8 = vcombine_u8(d0u8x4.val[1], d1u8x4.val[1]);  // d7 d11
+    q6u8 = vcombine_u8(d0u8x4.val[3], d1u8x4.val[3]);  // d9 d13
+
+    q15u8 = vabdq_u8(q5u8, q4u8);              /* |p0 - q0| */
+    q14u8 = vabdq_u8(q3u8, q6u8);              /* |p1 - q1| */
+
+    q15u8 = vqaddq_u8(q15u8, q15u8);           /* |p0 - q0| * 2 */
+    q14u8 = vshrq_n_u8(q14u8, 1);              /* |p1 - q1| / 2 */
+    q0u8 = vdupq_n_u8(0x80);
+    q11s16 = vdupq_n_s16(3);
+    q15u8 = vqaddq_u8(q15u8, q14u8);           /* |p0-q0|*2 + |p1-q1|/2 */
+
+    /* Flip the sign bit (x ^ 0x80) to work in signed arithmetic. */
+    q3u8 = veorq_u8(q3u8, q0u8);
+    q4u8 = veorq_u8(q4u8, q0u8);
+    q5u8 = veorq_u8(q5u8, q0u8);
+    q6u8 = veorq_u8(q6u8, q0u8);
+
+    /* Lane mask: filter only where the activity measure <= blimit. */
+    q15u8 = vcgeq_u8(qblimit, q15u8);
+
+    /* (q0 - p0), widened to 16 bits to avoid overflow. */
+    q2s16 = vsubl_s8(vget_low_s8(vreinterpretq_s8_u8(q4u8)),
+                     vget_low_s8(vreinterpretq_s8_u8(q5u8)));
+    q13s16 = vsubl_s8(vget_high_s8(vreinterpretq_s8_u8(q4u8)),
+                      vget_high_s8(vreinterpretq_s8_u8(q5u8)));
+
+    /* filter = clamp(p1 - q1) */
+    q14s8 = vqsubq_s8(vreinterpretq_s8_u8(q3u8),
+                      vreinterpretq_s8_u8(q6u8));
+
+    q2s16 = vmulq_s16(q2s16, q11s16);          /* 3 * (q0 - p0) */
+    q13s16 = vmulq_s16(q13s16, q11s16);
+
+    q11u8 = vdupq_n_u8(3);
+    q12u8 = vdupq_n_u8(4);
+
+    /* filter += 3 * (q0 - p0) */
+    q2s16 = vaddw_s8(q2s16, vget_low_s8(q14s8));
+    q13s16 = vaddw_s8(q13s16, vget_high_s8(q14s8));
+
+    /* Saturate back to 8 bits. */
+    d28s8 = vqmovn_s16(q2s16);
+    d29s8 = vqmovn_s16(q13s16);
+    q14s8 = vcombine_s8(d28s8, d29s8);
+
+    q14s8 = vandq_s8(q14s8, vreinterpretq_s8_u8(q15u8));  /* filter &= mask */
+
+    /* Filter2 = clamp(filter + 3) >> 3;  Filter1 = clamp(filter + 4) >> 3 */
+    q2s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q11u8));
+    q3s8 = vqaddq_s8(q14s8, vreinterpretq_s8_u8(q12u8));
+    q2s8 = vshrq_n_s8(q2s8, 3);
+    q14s8 = vshrq_n_s8(q3s8, 3);
+
+    /* p0 += Filter2;  q0 -= Filter1 */
+    q11s8 = vqaddq_s8(vreinterpretq_s8_u8(q5u8), q2s8);
+    q10s8 = vqsubq_s8(vreinterpretq_s8_u8(q4u8), q14s8);
+
+    /* Undo the sign-bit bias: q6u8 = new p0, q7u8 = new q0. */
+    q6u8 = veorq_u8(vreinterpretq_u8_s8(q11s8), q0u8);
+    q7u8 = veorq_u8(vreinterpretq_u8_s8(q10s8), q0u8);
+
+    d2u8x2.val[0] = vget_low_u8(q6u8);   // d12
+    d2u8x2.val[1] = vget_low_u8(q7u8);   // d14
+    d3u8x2.val[0] = vget_high_u8(q6u8);  // d13
+    d3u8x2.val[1] = vget_high_u8(q7u8);  // d15
+
+    /* Scatter the modified p0/q0 column pair back, one pixel left of s. */
+    src1 = s - 1;
+    write_2x8(src1, p, d2u8x2, d3u8x2);
+}
+
+/* Simple-filter the three inner vertical block edges of a macroblock
+ * (columns 4, 8 and 12 of the 16x16 luma block). */
+void vp8_loop_filter_bvs_neon(
+        unsigned char *y_ptr,
+        int y_stride,
+        const unsigned char *blimit) {
+    y_ptr += 4;
+    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
+    y_ptr += 4;
+    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
+    y_ptr += 4;
+    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
+    return;
+}
+
+/* Simple-filter the left (macroblock) vertical edge at y_ptr. */
+void vp8_loop_filter_mbvs_neon(
+        unsigned char *y_ptr,
+        int y_stride,
+        const unsigned char *blimit) {
+    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, blimit);
+    return;
+}
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/mbloopfilter_neon.asm
+++ /dev/null
@@ -1,469 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-    EXPORT  |vp8_mbloop_filter_horizontal_edge_y_neon|
-    EXPORT  |vp8_mbloop_filter_horizontal_edge_uv_neon|
-    EXPORT  |vp8_mbloop_filter_vertical_edge_y_neon|
-    EXPORT  |vp8_mbloop_filter_vertical_edge_uv_neon|
-    ARM
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-
-; void vp8_mbloop_filter_horizontal_edge_y_neon(unsigned char *src, int pitch,
-;                                               const unsigned char *blimit,
-;                                               const unsigned char *limit,
-;                                               const unsigned char *thresh)
-; r0    unsigned char *src,
-; r1    int pitch,
-; r2    unsigned char blimit
-; r3    unsigned char limit
-; sp    unsigned char thresh,
-|vp8_mbloop_filter_horizontal_edge_y_neon| PROC
-    push        {lr}
-    add         r1, r1, r1                  ; double stride
-    ldr         r12, [sp, #4]               ; load thresh
-    sub         r0, r0, r1, lsl #1          ; move src pointer down by 4 lines
-    vdup.u8     q2, r12                     ; thresh
-    add         r12, r0, r1,  lsr #1        ; move src pointer up by 1 line
-
-    vld1.u8     {q3}, [r0@128], r1              ; p3
-    vld1.u8     {q4}, [r12@128], r1             ; p2
-    vld1.u8     {q5}, [r0@128], r1              ; p1
-    vld1.u8     {q6}, [r12@128], r1             ; p0
-    vld1.u8     {q7}, [r0@128], r1              ; q0
-    vld1.u8     {q8}, [r12@128], r1             ; q1
-    vld1.u8     {q9}, [r0@128], r1              ; q2
-    vld1.u8     {q10}, [r12@128], r1            ; q3
-
-    bl          vp8_mbloop_filter_neon
-
-    sub         r12, r12, r1, lsl #2
-    add         r0, r12, r1, lsr #1
-
-    vst1.u8     {q4}, [r12@128],r1         ; store op2
-    vst1.u8     {q5}, [r0@128],r1          ; store op1
-    vst1.u8     {q6}, [r12@128], r1        ; store op0
-    vst1.u8     {q7}, [r0@128],r1          ; store oq0
-    vst1.u8     {q8}, [r12@128]            ; store oq1</