Bug 1178215 - Update libvpx. r=rillian, a=lmandel
author Jan Gerber <j@mailb.org>
date Mon, 29 Jun 2015 23:07:20 +0200
changeset 281416 f49571596d6f0c25c20e848c47f1d267ec503227
parent 281415 c3b64ca7c3c1706036ec5e0b97b74d3378e09062
child 281417 c73b2abc0b01b64a679b929bc6899c1937785e18
push id 4932
push user jlund@mozilla.com
push date Mon, 10 Aug 2015 18:23:06 +0000
treeherder mozilla-beta@6dd5a4f5f745
reviewers rillian, lmandel
bugs 1178215
milestone 41.0a2
Bug 1178215 - Update libvpx. r=rillian, a=lmandel

Update libvpx to e67d45d4ce92468ba193288b59093fef0a502662
CLOBBER
media/libvpx/PATENTS
media/libvpx/README_MOZILLA
media/libvpx/sources.mozbuild
media/libvpx/third_party/x86inc/x86inc.asm
media/libvpx/vp8/common/alloccommon.c
media/libvpx/vp8/common/arm/armv6/dequant_idct_v6.asm
media/libvpx/vp8/common/arm/armv6/vp8_sad16x16_armv6.asm
media/libvpx/vp8/common/arm/armv6/vp8_variance16x16_armv6.asm
media/libvpx/vp8/common/arm/armv6/vp8_variance8x8_armv6.asm
media/libvpx/vp8/common/arm/filter_arm.c
media/libvpx/vp8/common/arm/neon/sad_neon.c
media/libvpx/vp8/common/arm/neon/variance_neon.c
media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance_neon.c
media/libvpx/vp8/common/arm/variance_arm.c
media/libvpx/vp8/common/common.h
media/libvpx/vp8/common/copy_c.c
media/libvpx/vp8/common/debugmodes.c
media/libvpx/vp8/common/dequantize.c
media/libvpx/vp8/common/entropy.c
media/libvpx/vp8/common/entropymode.c
media/libvpx/vp8/common/extend.c
media/libvpx/vp8/common/filter.c
media/libvpx/vp8/common/generic/systemdependent.c
media/libvpx/vp8/common/idct_blk.c
media/libvpx/vp8/common/idctllm.c
media/libvpx/vp8/common/loopfilter.c
media/libvpx/vp8/common/mfqe.c
media/libvpx/vp8/common/postproc.c
media/libvpx/vp8/common/reconinter.c
media/libvpx/vp8/common/reconintra.c
media/libvpx/vp8/common/rtcd.c
media/libvpx/vp8/common/sad_c.c
media/libvpx/vp8/common/setupintrarecon.c
media/libvpx/vp8/common/variance.h
media/libvpx/vp8/common/variance_c.c
media/libvpx/vp8/common/x86/copy_sse2.asm
media/libvpx/vp8/common/x86/copy_sse3.asm
media/libvpx/vp8/common/x86/idct_blk_mmx.c
media/libvpx/vp8/common/x86/sad_mmx.asm
media/libvpx/vp8/common/x86/sad_sse2.asm
media/libvpx/vp8/common/x86/sad_sse3.asm
media/libvpx/vp8/common/x86/sad_sse4.asm
media/libvpx/vp8/common/x86/sad_ssse3.asm
media/libvpx/vp8/common/x86/variance_impl_mmx.asm
media/libvpx/vp8/common/x86/variance_impl_sse2.asm
media/libvpx/vp8/common/x86/variance_mmx.c
media/libvpx/vp8/common/x86/variance_sse2.c
media/libvpx/vp8/common/x86/variance_ssse3.c
media/libvpx/vp8/common/x86/vp8_asm_stubs.c
media/libvpx/vp8/common/x86/vp8_variance_impl_mmx.asm
media/libvpx/vp8/common/x86/vp8_variance_mmx.c
media/libvpx/vp8/common/x86/vp8_variance_sse2.c
media/libvpx/vp8/decoder/decodeframe.c
media/libvpx/vp8/decoder/detokenize.c
media/libvpx/vp8/decoder/error_concealment.c
media/libvpx/vp8/decoder/onyxd_if.c
media/libvpx/vp8/decoder/threading.c
media/libvpx/vp8/encoder/arm/armv6/vp8_mse16x16_armv6.asm
media/libvpx/vp8/encoder/arm/neon/vp8_mse16x16_neon.c
media/libvpx/vp8/encoder/bitstream.c
media/libvpx/vp8/encoder/dct.c
media/libvpx/vp8/encoder/denoising.c
media/libvpx/vp8/encoder/encodeframe.c
media/libvpx/vp8/encoder/encodeintra.c
media/libvpx/vp8/encoder/encodemb.c
media/libvpx/vp8/encoder/ethreading.c
media/libvpx/vp8/encoder/firstpass.c
media/libvpx/vp8/encoder/mcomp.c
media/libvpx/vp8/encoder/modecosts.c
media/libvpx/vp8/encoder/modecosts.h
media/libvpx/vp8/encoder/onyx_if.c
media/libvpx/vp8/encoder/onyx_int.h
media/libvpx/vp8/encoder/pickinter.c
media/libvpx/vp8/encoder/picklpf.c
media/libvpx/vp8/encoder/quantize.c
media/libvpx/vp8/encoder/ratectrl.c
media/libvpx/vp8/encoder/ratectrl.h
media/libvpx/vp8/encoder/rdopt.c
media/libvpx/vp8/encoder/rdopt.h
media/libvpx/vp8/encoder/segmentation.c
media/libvpx/vp8/encoder/temporal_filter.c
media/libvpx/vp8/encoder/tokenize.c
media/libvpx/vp8/encoder/x86/quantize_sse2.c
media/libvpx/vp8/vp8_cx_iface.c
media/libvpx/vp8/vp8_dx_iface.c
media/libvpx/vp8_rtcd_armv7-android-gcc.h
media/libvpx/vp8_rtcd_generic-gnu.h
media/libvpx/vp8_rtcd_x86-darwin9-gcc.h
media/libvpx/vp8_rtcd_x86-linux-gcc.h
media/libvpx/vp8_rtcd_x86-win32-gcc.h
media/libvpx/vp8_rtcd_x86-win32-vs12.h
media/libvpx/vp8_rtcd_x86_64-darwin9-gcc.h
media/libvpx/vp8_rtcd_x86_64-linux-gcc.h
media/libvpx/vp8_rtcd_x86_64-win64-gcc.h
media/libvpx/vp8_rtcd_x86_64-win64-vs12.h
media/libvpx/vp9/common/arm/neon/vp9_avg_neon.c
media/libvpx/vp9/common/arm/neon/vp9_avg_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_convolve_avg_neon.c
media/libvpx/vp9/common/arm/neon/vp9_convolve_avg_neon_asm.asm
media/libvpx/vp9/common/arm/neon/vp9_convolve_neon.c
media/libvpx/vp9/common/arm/neon/vp9_idct16x16_1_add_neon.c
media/libvpx/vp9/common/arm/neon/vp9_idct32x32_1_add_neon.c
media/libvpx/vp9/common/arm/neon/vp9_idct4x4_1_add_neon.c
media/libvpx/vp9/common/arm/neon/vp9_idct8x8_1_add_neon.c
media/libvpx/vp9/common/arm/neon/vp9_reconintra_neon.c
media/libvpx/vp9/common/arm/neon/vp9_reconintra_neon_asm.asm
media/libvpx/vp9/common/vp9_alloccommon.c
media/libvpx/vp9/common/vp9_alloccommon.h
media/libvpx/vp9/common/vp9_blockd.c
media/libvpx/vp9/common/vp9_blockd.h
media/libvpx/vp9/common/vp9_common.h
media/libvpx/vp9/common/vp9_convolve.c
media/libvpx/vp9/common/vp9_debugmodes.c
media/libvpx/vp9/common/vp9_entropy.c
media/libvpx/vp9/common/vp9_entropy.h
media/libvpx/vp9/common/vp9_entropymode.c
media/libvpx/vp9/common/vp9_entropymode.h
media/libvpx/vp9/common/vp9_enums.h
media/libvpx/vp9/common/vp9_filter.c
media/libvpx/vp9/common/vp9_filter.h
media/libvpx/vp9/common/vp9_frame_buffers.c
media/libvpx/vp9/common/vp9_idct.c
media/libvpx/vp9/common/vp9_idct.h
media/libvpx/vp9/common/vp9_loopfilter.c
media/libvpx/vp9/common/vp9_loopfilter.h
media/libvpx/vp9/common/vp9_loopfilter_filters.c
media/libvpx/vp9/common/vp9_mfqe.c
media/libvpx/vp9/common/vp9_mvref_common.c
media/libvpx/vp9/common/vp9_onyxc_int.h
media/libvpx/vp9/common/vp9_postproc.c
media/libvpx/vp9/common/vp9_pred_common.c
media/libvpx/vp9/common/vp9_reconinter.c
media/libvpx/vp9/common/vp9_reconintra.c
media/libvpx/vp9/common/vp9_reconintra.h
media/libvpx/vp9/common/vp9_rtcd.c
media/libvpx/vp9/common/vp9_scan.h
media/libvpx/vp9/common/vp9_systemdependent.h
media/libvpx/vp9/common/vp9_thread_common.c
media/libvpx/vp9/common/x86/convolve.h
media/libvpx/vp9/common/x86/vp9_asm_stubs.c
media/libvpx/vp9/common/x86/vp9_high_loopfilter_intrin_sse2.c
media/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.c
media/libvpx/vp9/common/x86/vp9_idct_intrin_sse2.h
media/libvpx/vp9/common/x86/vp9_idct_intrin_ssse3.c
media/libvpx/vp9/common/x86/vp9_intrapred_sse2.asm
media/libvpx/vp9/common/x86/vp9_loopfilter_intrin_avx2.c
media/libvpx/vp9/common/x86/vp9_loopfilter_intrin_sse2.c
media/libvpx/vp9/common/x86/vp9_subpixel_8t_intrin_avx2.c
media/libvpx/vp9/common/x86/vp9_subpixel_8t_intrin_ssse3.c
media/libvpx/vp9/decoder/vp9_decodeframe.c
media/libvpx/vp9/decoder/vp9_decodemv.c
media/libvpx/vp9/decoder/vp9_decodemv.h
media/libvpx/vp9/decoder/vp9_decoder.c
media/libvpx/vp9/decoder/vp9_detokenize.c
media/libvpx/vp9/decoder/vp9_detokenize.h
media/libvpx/vp9/decoder/vp9_dthread.c
media/libvpx/vp9/encoder/arm/neon/vp9_quantize_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_sad4d_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_sad_neon.c
media/libvpx/vp9/encoder/arm/neon/vp9_variance_neon.c
media/libvpx/vp9/encoder/vp9_aq_complexity.c
media/libvpx/vp9/encoder/vp9_aq_complexity.h
media/libvpx/vp9/encoder/vp9_aq_cyclicrefresh.c
media/libvpx/vp9/encoder/vp9_aq_variance.c
media/libvpx/vp9/encoder/vp9_avg.c
media/libvpx/vp9/encoder/vp9_bitstream.c
media/libvpx/vp9/encoder/vp9_blockiness.c
media/libvpx/vp9/encoder/vp9_dct.c
media/libvpx/vp9/encoder/vp9_denoiser.c
media/libvpx/vp9/encoder/vp9_encodeframe.c
media/libvpx/vp9/encoder/vp9_encodeframe.h
media/libvpx/vp9/encoder/vp9_encodemb.c
media/libvpx/vp9/encoder/vp9_encodemv.c
media/libvpx/vp9/encoder/vp9_encodemv.h
media/libvpx/vp9/encoder/vp9_encoder.c
media/libvpx/vp9/encoder/vp9_encoder.h
media/libvpx/vp9/encoder/vp9_ethread.c
media/libvpx/vp9/encoder/vp9_extend.c
media/libvpx/vp9/encoder/vp9_fastssim.c
media/libvpx/vp9/encoder/vp9_firstpass.c
media/libvpx/vp9/encoder/vp9_firstpass.h
media/libvpx/vp9/encoder/vp9_mbgraph.c
media/libvpx/vp9/encoder/vp9_mcomp.c
media/libvpx/vp9/encoder/vp9_mcomp.h
media/libvpx/vp9/encoder/vp9_picklpf.c
media/libvpx/vp9/encoder/vp9_pickmode.c
media/libvpx/vp9/encoder/vp9_psnrhvs.c
media/libvpx/vp9/encoder/vp9_quantize.c
media/libvpx/vp9/encoder/vp9_ratectrl.c
media/libvpx/vp9/encoder/vp9_ratectrl.h
media/libvpx/vp9/encoder/vp9_rd.c
media/libvpx/vp9/encoder/vp9_rd.h
media/libvpx/vp9/encoder/vp9_rdopt.c
media/libvpx/vp9/encoder/vp9_rdopt.h
media/libvpx/vp9/encoder/vp9_resize.c
media/libvpx/vp9/encoder/vp9_sad.c
media/libvpx/vp9/encoder/vp9_segmentation.c
media/libvpx/vp9/encoder/vp9_skin_detection.c
media/libvpx/vp9/encoder/vp9_speed_features.c
media/libvpx/vp9/encoder/vp9_speed_features.h
media/libvpx/vp9/encoder/vp9_ssim.h
media/libvpx/vp9/encoder/vp9_subexp.c
media/libvpx/vp9/encoder/vp9_subexp.h
media/libvpx/vp9/encoder/vp9_svc_layercontext.c
media/libvpx/vp9/encoder/vp9_svc_layercontext.h
media/libvpx/vp9/encoder/vp9_temporal_filter.c
media/libvpx/vp9/encoder/vp9_temporal_filter.h
media/libvpx/vp9/encoder/vp9_tokenize.c
media/libvpx/vp9/encoder/vp9_variance.c
media/libvpx/vp9/encoder/vp9_variance.h
media/libvpx/vp9/encoder/vp9_writer.h
media/libvpx/vp9/encoder/x86/vp9_avg_intrin_sse2.c
media/libvpx/vp9/encoder/x86/vp9_dct32x32_avx2.c
media/libvpx/vp9/encoder/x86/vp9_dct32x32_avx2_impl.h
media/libvpx/vp9/encoder/x86/vp9_dct32x32_sse2.c
media/libvpx/vp9/encoder/x86/vp9_dct32x32_sse2_impl.h
media/libvpx/vp9/encoder/x86/vp9_dct_avx2.c
media/libvpx/vp9/encoder/x86/vp9_dct_impl_sse2.c
media/libvpx/vp9/encoder/x86/vp9_dct_sse2.c
media/libvpx/vp9/encoder/x86/vp9_dct_sse2_impl.h
media/libvpx/vp9/encoder/x86/vp9_dct_ssse3.c
media/libvpx/vp9/encoder/x86/vp9_dct_ssse3_x86_64.asm
media/libvpx/vp9/encoder/x86/vp9_denoiser_sse2.c
media/libvpx/vp9/encoder/x86/vp9_error_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_error_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_highbd_quantize_intrin_sse2.c
media/libvpx/vp9/encoder/x86/vp9_highbd_sad4d_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_highbd_sad_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_highbd_subpel_variance.asm
media/libvpx/vp9/encoder/x86/vp9_highbd_variance_impl_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_highbd_variance_sse2.c
media/libvpx/vp9/encoder/x86/vp9_quantize_sse2.c
media/libvpx/vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm
media/libvpx/vp9/encoder/x86/vp9_sad4d_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_sad4d_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_sad_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_sad_sse2.asm
media/libvpx/vp9/encoder/x86/vp9_sad_sse3.asm
media/libvpx/vp9/encoder/x86/vp9_sad_sse4.asm
media/libvpx/vp9/encoder/x86/vp9_sad_ssse3.asm
media/libvpx/vp9/encoder/x86/vp9_subpel_variance.asm
media/libvpx/vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_variance_avx2.c
media/libvpx/vp9/encoder/x86/vp9_variance_impl_intrin_avx2.c
media/libvpx/vp9/encoder/x86/vp9_variance_sse2.c
media/libvpx/vp9/vp9_cx_iface.c
media/libvpx/vp9/vp9_dx_iface.c
media/libvpx/vp9/vp9_iface_common.h
media/libvpx/vp9_rtcd_armv7-android-gcc.h
media/libvpx/vp9_rtcd_generic-gnu.h
media/libvpx/vp9_rtcd_x86-darwin9-gcc.h
media/libvpx/vp9_rtcd_x86-linux-gcc.h
media/libvpx/vp9_rtcd_x86-win32-gcc.h
media/libvpx/vp9_rtcd_x86-win32-vs12.h
media/libvpx/vp9_rtcd_x86_64-darwin9-gcc.h
media/libvpx/vp9_rtcd_x86_64-linux-gcc.h
media/libvpx/vp9_rtcd_x86_64-win64-gcc.h
media/libvpx/vp9_rtcd_x86_64-win64-vs12.h
media/libvpx/vpx/internal/vpx_codec_internal.h
media/libvpx/vpx/src/svc_encodeframe.c
media/libvpx/vpx/svc_context.h
media/libvpx/vpx/vp8cx.h
media/libvpx/vpx/vp8dx.h
media/libvpx/vpx/vpx_encoder.h
media/libvpx/vpx_config_armv7-android-gcc.asm
media/libvpx/vpx_config_armv7-android-gcc.h
media/libvpx/vpx_config_generic-gnu.asm
media/libvpx/vpx_config_generic-gnu.h
media/libvpx/vpx_config_x86-darwin9-gcc.asm
media/libvpx/vpx_config_x86-darwin9-gcc.h
media/libvpx/vpx_config_x86-linux-gcc.asm
media/libvpx/vpx_config_x86-linux-gcc.h
media/libvpx/vpx_config_x86-win32-gcc.asm
media/libvpx/vpx_config_x86-win32-gcc.h
media/libvpx/vpx_config_x86-win32-vs12.asm
media/libvpx/vpx_config_x86-win32-vs12.h
media/libvpx/vpx_config_x86_64-darwin9-gcc.asm
media/libvpx/vpx_config_x86_64-darwin9-gcc.h
media/libvpx/vpx_config_x86_64-linux-gcc.asm
media/libvpx/vpx_config_x86_64-linux-gcc.h
media/libvpx/vpx_config_x86_64-win64-gcc.asm
media/libvpx/vpx_config_x86_64-win64-gcc.h
media/libvpx/vpx_config_x86_64-win64-vs12.asm
media/libvpx/vpx_config_x86_64-win64-vs12.h
media/libvpx/vpx_dsp/arm/sad4d_neon.c
media/libvpx/vpx_dsp/arm/sad_media.asm
media/libvpx/vpx_dsp/arm/sad_neon.c
media/libvpx/vpx_dsp/arm/variance_media.asm
media/libvpx/vpx_dsp/arm/variance_neon.c
media/libvpx/vpx_dsp/sad.c
media/libvpx/vpx_dsp/variance.c
media/libvpx/vpx_dsp/vpx_dsp_rtcd.c
media/libvpx/vpx_dsp/x86/highbd_sad4d_sse2.asm
media/libvpx/vpx_dsp/x86/highbd_sad_sse2.asm
media/libvpx/vpx_dsp/x86/highbd_variance_impl_sse2.asm
media/libvpx/vpx_dsp/x86/highbd_variance_sse2.c
media/libvpx/vpx_dsp/x86/sad4d_avx2.c
media/libvpx/vpx_dsp/x86/sad4d_sse2.asm
media/libvpx/vpx_dsp/x86/sad_avx2.c
media/libvpx/vpx_dsp/x86/sad_mmx.asm
media/libvpx/vpx_dsp/x86/sad_sse2.asm
media/libvpx/vpx_dsp/x86/sad_sse3.asm
media/libvpx/vpx_dsp/x86/sad_sse4.asm
media/libvpx/vpx_dsp/x86/sad_ssse3.asm
media/libvpx/vpx_dsp/x86/variance_avx2.c
media/libvpx/vpx_dsp/x86/variance_impl_avx2.c
media/libvpx/vpx_dsp/x86/variance_impl_mmx.asm
media/libvpx/vpx_dsp/x86/variance_mmx.c
media/libvpx/vpx_dsp/x86/variance_sse2.c
media/libvpx/vpx_dsp_rtcd_armv7-android-gcc.h
media/libvpx/vpx_dsp_rtcd_generic-gnu.h
media/libvpx/vpx_dsp_rtcd_x86-darwin9-gcc.h
media/libvpx/vpx_dsp_rtcd_x86-linux-gcc.h
media/libvpx/vpx_dsp_rtcd_x86-win32-gcc.h
media/libvpx/vpx_dsp_rtcd_x86-win32-vs12.h
media/libvpx/vpx_dsp_rtcd_x86_64-darwin9-gcc.h
media/libvpx/vpx_dsp_rtcd_x86_64-linux-gcc.h
media/libvpx/vpx_dsp_rtcd_x86_64-win64-gcc.h
media/libvpx/vpx_dsp_rtcd_x86_64-win64-vs12.h
media/libvpx/vpx_mem/include/vpx_mem_intrnl.h
media/libvpx/vpx_mem/include/vpx_mem_tracker.h
media/libvpx/vpx_mem/vpx_mem.c
media/libvpx/vpx_mem/vpx_mem.h
media/libvpx/vpx_ports/mem.h
media/libvpx/vpx_ports/msvc.h
media/libvpx/vpx_ports/vpx_once.h
media/libvpx/vpx_ports/x86.h
media/libvpx/vpx_scale/generic/gen_scalers.c
media/libvpx/vpx_scale/generic/vpx_scale.c
media/libvpx/vpx_scale/generic/yv12config.c
media/libvpx/vpx_scale/generic/yv12extend.c
media/libvpx/vpx_scale/vpx_scale_rtcd.c
media/libvpx/vpx_version.h
--- a/CLOBBER
+++ b/CLOBBER
@@ -17,9 +17,9 @@
 #
 # Modifying this file will now automatically clobber the buildbot machines \o/
 #
 
 # Are you updating CLOBBER because you think it's needed for your WebIDL
 # changes to stick? As of bug 928195, this shouldn't be necessary! Please
 # don't change CLOBBER for WebIDL changes any more.
 
-Merge day clobber
\ No newline at end of file
+Bug 1178215 requires clobber for libvpx file moves.
--- a/media/libvpx/PATENTS
+++ b/media/libvpx/PATENTS
@@ -12,12 +12,12 @@ such license applies only to those paten
 Google and acquired in the future, licensable by Google that are necessarily
 infringed by these implementations of WebM. This grant does not include claims
 that would be infringed only as a consequence of further modification of these
 implementations. If you or your agent or exclusive licensee institute or order
 or agree to the institution of patent litigation or any other patent
 enforcement activity against any entity (including a cross-claim or
 counterclaim in a lawsuit) alleging that any of these implementations of WebM
 or any code incorporated within any of these implementations of WebM
-constitutes direct or contributory patent infringement, or inducement of
+constitute direct or contributory patent infringement, or inducement of
 patent infringement, then any patent rights granted to you under this License
 for these implementations of WebM shall terminate as of the date such
 litigation is filed.
--- a/media/libvpx/README_MOZILLA
+++ b/media/libvpx/README_MOZILLA
@@ -3,9 +3,9 @@ git repository using the update.py scrip
 made were those applied by update.py and the addition of
 moz.build and Makefile.in build files for the
 Mozilla build system.
 
 The libvpx git repository is:
 
     https://chromium.googlesource.com/webm/libvpx
 
-The git commit ID used was c74bf6d889992c3cabe017ec353ca85c323107cd
+The git commit ID used was e67d45d4ce92468ba193288b59093fef0a502662
--- a/media/libvpx/sources.mozbuild
+++ b/media/libvpx/sources.mozbuild
@@ -9,19 +9,16 @@ files = {
              'vp8/common/arm/armv6/filter_v6.asm',
              'vp8/common/arm/armv6/idct_blk_v6.c',
              'vp8/common/arm/armv6/idct_v6.asm',
              'vp8/common/arm/armv6/intra4x4_predict_v6.asm',
              'vp8/common/arm/armv6/iwalsh_v6.asm',
              'vp8/common/arm/armv6/loopfilter_v6.asm',
              'vp8/common/arm/armv6/simpleloopfilter_v6.asm',
              'vp8/common/arm/armv6/sixtappredict8x4_v6.asm',
-             'vp8/common/arm/armv6/vp8_sad16x16_armv6.asm',
-             'vp8/common/arm/armv6/vp8_variance16x16_armv6.asm',
-             'vp8/common/arm/armv6/vp8_variance8x8_armv6.asm',
              'vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_h_armv6.asm',
              'vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_hv_armv6.asm',
              'vp8/common/arm/armv6/vp8_variance_halfpixvar16x16_v_armv6.asm',
              'vp8/common/arm/bilinearfilter_arm.c',
              'vp8/common/arm/dequantize_arm.c',
              'vp8/common/arm/filter_arm.c',
              'vp8/common/arm/loopfilter_arm.c',
              'vp8/common/arm/neon/bilinearpredict_neon.c',
@@ -33,35 +30,31 @@ files = {
              'vp8/common/arm/neon/idct_dequant_0_2x_neon.c',
              'vp8/common/arm/neon/idct_dequant_full_2x_neon.c',
              'vp8/common/arm/neon/iwalsh_neon.c',
              'vp8/common/arm/neon/loopfilter_neon.c',
              'vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.c',
              'vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.c',
              'vp8/common/arm/neon/mbloopfilter_neon.c',
              'vp8/common/arm/neon/reconintra_neon.c',
-             'vp8/common/arm/neon/sad_neon.c',
              'vp8/common/arm/neon/shortidct4x4llm_neon.c',
              'vp8/common/arm/neon/sixtappredict_neon.c',
-             'vp8/common/arm/neon/variance_neon.c',
              'vp8/common/arm/neon/vp8_subpixelvariance_neon.c',
              'vp8/common/arm/variance_arm.c',
-             'vp8/encoder/arm/armv6/vp8_mse16x16_armv6.asm',
              'vp8/encoder/arm/armv6/vp8_short_fdct4x4_armv6.asm',
              'vp8/encoder/arm/armv6/walsh_v6.asm',
              'vp8/encoder/arm/dct_arm.c',
              'vp8/encoder/arm/neon/denoising_neon.c',
              'vp8/encoder/arm/neon/fastquantizeb_neon.c',
              'vp8/encoder/arm/neon/shortfdct_neon.c',
              'vp8/encoder/arm/neon/subtract_neon.c',
-             'vp8/encoder/arm/neon/vp8_mse16x16_neon.c',
              'vp8/encoder/arm/neon/vp8_shortwalsh4x4_neon.c',
-             'vp9/common/arm/neon/vp9_avg_neon_asm.asm',
              'vp9/common/arm/neon/vp9_convolve8_avg_neon_asm.asm',
              'vp9/common/arm/neon/vp9_convolve8_neon_asm.asm',
+             'vp9/common/arm/neon/vp9_convolve_avg_neon_asm.asm',
              'vp9/common/arm/neon/vp9_convolve_neon.c',
              'vp9/common/arm/neon/vp9_copy_neon_asm.asm',
              'vp9/common/arm/neon/vp9_idct16x16_1_add_neon_asm.asm',
              'vp9/common/arm/neon/vp9_idct16x16_add_neon_asm.asm',
              'vp9/common/arm/neon/vp9_idct16x16_neon.c',
              'vp9/common/arm/neon/vp9_idct32x32_1_add_neon_asm.asm',
              'vp9/common/arm/neon/vp9_idct32x32_add_neon_asm.asm',
              'vp9/common/arm/neon/vp9_idct4x4_1_add_neon_asm.asm',
@@ -70,36 +63,40 @@ files = {
              'vp9/common/arm/neon/vp9_idct8x8_add_neon_asm.asm',
              'vp9/common/arm/neon/vp9_iht4x4_add_neon.c',
              'vp9/common/arm/neon/vp9_iht8x8_add_neon.c',
              'vp9/common/arm/neon/vp9_loopfilter_16_neon_asm.asm',
              'vp9/common/arm/neon/vp9_loopfilter_4_neon_asm.asm',
              'vp9/common/arm/neon/vp9_loopfilter_8_neon_asm.asm',
              'vp9/common/arm/neon/vp9_loopfilter_neon.c',
              'vp9/common/arm/neon/vp9_mb_lpf_neon.asm',
+             'vp9/common/arm/neon/vp9_reconintra_neon.c',
              'vp9/common/arm/neon/vp9_reconintra_neon_asm.asm',
              'vp9/common/arm/neon/vp9_save_reg_neon.asm',
              'vp9/encoder/arm/neon/vp9_dct_neon.c',
              'vp9/encoder/arm/neon/vp9_quantize_neon.c',
-             'vp9/encoder/arm/neon/vp9_sad4d_neon.c',
-             'vp9/encoder/arm/neon/vp9_sad_neon.c',
              'vp9/encoder/arm/neon/vp9_subtract_neon.c',
              'vp9/encoder/arm/neon/vp9_variance_neon.c',
              'vp9/encoder/arm/neon/vp9enc_avg_neon.c',
+             'vpx_dsp/arm/sad4d_neon.c',
+             'vpx_dsp/arm/sad_media.asm',
+             'vpx_dsp/arm/sad_neon.c',
+             'vpx_dsp/arm/variance_media.asm',
+             'vpx_dsp/arm/variance_neon.c',
              'vpx_ports/arm_cpudetect.c'],
  'AVX2': ['vp9/common/x86/vp9_loopfilter_intrin_avx2.c',
           'vp9/common/x86/vp9_subpixel_8t_intrin_avx2.c',
-          'vp9/encoder/x86/vp9_dct32x32_avx2.c',
           'vp9/encoder/x86/vp9_dct_avx2.c',
           'vp9/encoder/x86/vp9_error_intrin_avx2.c',
-          'vp9/encoder/x86/vp9_sad4d_intrin_avx2.c',
-          'vp9/encoder/x86/vp9_sad_intrin_avx2.c',
           'vp9/encoder/x86/vp9_subpel_variance_impl_intrin_avx2.c',
           'vp9/encoder/x86/vp9_variance_avx2.c',
-          'vp9/encoder/x86/vp9_variance_impl_intrin_avx2.c'],
+          'vpx_dsp/x86/sad4d_avx2.c',
+          'vpx_dsp/x86/sad_avx2.c',
+          'vpx_dsp/x86/variance_avx2.c',
+          'vpx_dsp/x86/variance_impl_avx2.c'],
  'ERROR_CONCEALMENT': ['vp8/decoder/error_concealment.c'],
  'EXPORTS': ['vpx/vp8.h',
              'vpx/vp8cx.h',
              'vpx/vp8dx.h',
              'vpx/vpx_codec.h',
              'vpx/vpx_decoder.h',
              'vpx/vpx_encoder.h',
              'vpx/vpx_frame_buffer.h',
@@ -110,16 +107,17 @@ files = {
              'vpx_ports/arm.h',
              'vpx_ports/mem.h',
              'vpx_ports/vpx_timer.h',
              'vpx_ports/x86.h',
              'vpx_scale/vpx_scale.h',
              'vpx_scale/yv12config.h'],
  'SOURCES': ['vp8/common/alloccommon.c',
              'vp8/common/blockd.c',
+             'vp8/common/copy_c.c',
              'vp8/common/debugmodes.c',
              'vp8/common/dequantize.c',
              'vp8/common/entropy.c',
              'vp8/common/entropymode.c',
              'vp8/common/entropymv.c',
              'vp8/common/extend.c',
              'vp8/common/filter.c',
              'vp8/common/findnearmv.c',
@@ -130,17 +128,16 @@ files = {
              'vp8/common/loopfilter_filters.c',
              'vp8/common/mbpitch.c',
              'vp8/common/modecont.c',
              'vp8/common/quant_common.c',
              'vp8/common/reconinter.c',
              'vp8/common/reconintra.c',
              'vp8/common/reconintra4x4.c',
              'vp8/common/rtcd.c',
-             'vp8/common/sad_c.c',
              'vp8/common/setupintrarecon.c',
              'vp8/common/swapyv12buffer.c',
              'vp8/common/treecoder.c',
              'vp8/common/variance_c.c',
              'vp8/decoder/dboolhuff.c',
              'vp8/decoder/decodeframe.c',
              'vp8/decoder/decodemv.c',
              'vp8/decoder/detokenize.c',
@@ -225,17 +222,16 @@ files = {
              'vp9/encoder/vp9_mcomp.c',
              'vp9/encoder/vp9_picklpf.c',
              'vp9/encoder/vp9_pickmode.c',
              'vp9/encoder/vp9_quantize.c',
              'vp9/encoder/vp9_ratectrl.c',
              'vp9/encoder/vp9_rd.c',
              'vp9/encoder/vp9_rdopt.c',
              'vp9/encoder/vp9_resize.c',
-             'vp9/encoder/vp9_sad.c',
              'vp9/encoder/vp9_segmentation.c',
              'vp9/encoder/vp9_skin_detection.c',
              'vp9/encoder/vp9_speed_features.c',
              'vp9/encoder/vp9_subexp.c',
              'vp9/encoder/vp9_svc_layercontext.c',
              'vp9/encoder/vp9_temporal_filter.c',
              'vp9/encoder/vp9_tokenize.c',
              'vp9/encoder/vp9_treewriter.c',
@@ -244,16 +240,19 @@ files = {
              'vp9/encoder/vp9_writer.c',
              'vp9/vp9_cx_iface.c',
              'vp9/vp9_dx_iface.c',
              'vpx/src/vpx_codec.c',
              'vpx/src/vpx_decoder.c',
              'vpx/src/vpx_encoder.c',
              'vpx/src/vpx_image.c',
              'vpx/src/vpx_psnr.c',
+             'vpx_dsp/sad.c',
+             'vpx_dsp/variance.c',
+             'vpx_dsp/vpx_dsp_rtcd.c',
              'vpx_mem/vpx_mem.c',
              'vpx_scale/generic/gen_scalers.c',
              'vpx_scale/generic/vpx_scale.c',
              'vpx_scale/generic/yv12config.c',
              'vpx_scale/generic/yv12extend.c',
              'vpx_scale/vpx_scale_rtcd.c'],
  'VP8_POSTPROC': ['vp8/common/mfqe.c', 'vp8/common/postproc.c'],
  'VP9_POSTPROC': ['vp9/common/vp9_mfqe.c',
@@ -261,48 +260,45 @@ files = {
                   'vp9/common/x86/vp9_mfqe_sse2.asm'],
  'X86-64_ASM': ['third_party/x86inc/x86inc.asm',
                 'vp8/common/x86/loopfilter_block_sse2_x86_64.asm',
                 'vp8/encoder/x86/ssim_opt_x86_64.asm',
                 'vp9/common/x86/vp9_idct_ssse3_x86_64.asm',
                 'vp9/encoder/x86/vp9_dct_ssse3_x86_64.asm',
                 'vp9/encoder/x86/vp9_quantize_ssse3_x86_64.asm',
                 'vp9/encoder/x86/vp9_ssim_opt_x86_64.asm'],
- 'X86_ASM': ['vp8/common/x86/dequantize_mmx.asm',
+ 'X86_ASM': ['vp8/common/x86/copy_sse2.asm',
+             'vp8/common/x86/copy_sse3.asm',
+             'vp8/common/x86/dequantize_mmx.asm',
              'vp8/common/x86/filter_x86.c',
              'vp8/common/x86/idct_blk_mmx.c',
              'vp8/common/x86/idct_blk_sse2.c',
              'vp8/common/x86/idctllm_mmx.asm',
              'vp8/common/x86/idctllm_sse2.asm',
              'vp8/common/x86/iwalsh_mmx.asm',
              'vp8/common/x86/iwalsh_sse2.asm',
              'vp8/common/x86/loopfilter_mmx.asm',
              'vp8/common/x86/loopfilter_sse2.asm',
              'vp8/common/x86/loopfilter_x86.c',
              'vp8/common/x86/mfqe_sse2.asm',
              'vp8/common/x86/postproc_mmx.asm',
              'vp8/common/x86/postproc_sse2.asm',
              'vp8/common/x86/recon_mmx.asm',
              'vp8/common/x86/recon_sse2.asm',
              'vp8/common/x86/recon_wrapper_sse2.c',
-             'vp8/common/x86/sad_mmx.asm',
-             'vp8/common/x86/sad_sse2.asm',
-             'vp8/common/x86/sad_sse3.asm',
-             'vp8/common/x86/sad_sse4.asm',
-             'vp8/common/x86/sad_ssse3.asm',
              'vp8/common/x86/subpixel_mmx.asm',
              'vp8/common/x86/subpixel_sse2.asm',
              'vp8/common/x86/subpixel_ssse3.asm',
-             'vp8/common/x86/variance_impl_mmx.asm',
              'vp8/common/x86/variance_impl_sse2.asm',
              'vp8/common/x86/variance_impl_ssse3.asm',
-             'vp8/common/x86/variance_mmx.c',
-             'vp8/common/x86/variance_sse2.c',
              'vp8/common/x86/variance_ssse3.c',
              'vp8/common/x86/vp8_asm_stubs.c',
+             'vp8/common/x86/vp8_variance_impl_mmx.asm',
+             'vp8/common/x86/vp8_variance_mmx.c',
+             'vp8/common/x86/vp8_variance_sse2.c',
              'vp8/encoder/x86/dct_mmx.asm',
              'vp8/encoder/x86/dct_sse2.asm',
              'vp8/encoder/x86/denoising_sse2.c',
              'vp8/encoder/x86/encodeopt.asm',
              'vp8/encoder/x86/fwalsh_sse2.asm',
              'vp8/encoder/x86/quantize_mmx.asm',
              'vp8/encoder/x86/quantize_sse2.c',
              'vp8/encoder/x86/quantize_sse4.c',
@@ -310,37 +306,38 @@ files = {
              'vp8/encoder/x86/subtract_mmx.asm',
              'vp8/encoder/x86/subtract_sse2.asm',
              'vp8/encoder/x86/temporal_filter_apply_sse2.asm',
              'vp8/encoder/x86/vp8_enc_stubs_mmx.c',
              'vp8/encoder/x86/vp8_enc_stubs_sse2.c',
              'vp9/common/x86/vp9_asm_stubs.c',
              'vp9/common/x86/vp9_copy_sse2.asm',
              'vp9/common/x86/vp9_idct_intrin_sse2.c',
-             'vp9/common/x86/vp9_idct_intrin_ssse3.c',
              'vp9/common/x86/vp9_intrapred_sse2.asm',
              'vp9/common/x86/vp9_intrapred_ssse3.asm',
              'vp9/common/x86/vp9_loopfilter_intrin_sse2.c',
              'vp9/common/x86/vp9_loopfilter_mmx.asm',
              'vp9/common/x86/vp9_subpixel_8t_intrin_ssse3.c',
              'vp9/common/x86/vp9_subpixel_8t_sse2.asm',
              'vp9/common/x86/vp9_subpixel_8t_ssse3.asm',
              'vp9/common/x86/vp9_subpixel_bilinear_sse2.asm',
              'vp9/common/x86/vp9_subpixel_bilinear_ssse3.asm',
              'vp9/encoder/x86/vp9_avg_intrin_sse2.c',
-             'vp9/encoder/x86/vp9_dct32x32_sse2.c',
-             'vp9/encoder/x86/vp9_dct_impl_sse2.c',
              'vp9/encoder/x86/vp9_dct_mmx.asm',
              'vp9/encoder/x86/vp9_dct_sse2.c',
              'vp9/encoder/x86/vp9_dct_ssse3.c',
              'vp9/encoder/x86/vp9_error_sse2.asm',
              'vp9/encoder/x86/vp9_quantize_sse2.c',
-             'vp9/encoder/x86/vp9_sad4d_sse2.asm',
-             'vp9/encoder/x86/vp9_sad_sse2.asm',
-             'vp9/encoder/x86/vp9_sad_sse3.asm',
-             'vp9/encoder/x86/vp9_sad_sse4.asm',
-             'vp9/encoder/x86/vp9_sad_ssse3.asm',
              'vp9/encoder/x86/vp9_subpel_variance.asm',
              'vp9/encoder/x86/vp9_subtract_sse2.asm',
              'vp9/encoder/x86/vp9_temporal_filter_apply_sse2.asm',
              'vp9/encoder/x86/vp9_variance_sse2.c',
+             'vpx_dsp/x86/sad4d_sse2.asm',
+             'vpx_dsp/x86/sad_mmx.asm',
+             'vpx_dsp/x86/sad_sse2.asm',
+             'vpx_dsp/x86/sad_sse3.asm',
+             'vpx_dsp/x86/sad_sse4.asm',
+             'vpx_dsp/x86/sad_ssse3.asm',
+             'vpx_dsp/x86/variance_impl_mmx.asm',
+             'vpx_dsp/x86/variance_mmx.c',
+             'vpx_dsp/x86/variance_sse2.c',
              'vpx_ports/emms.asm']
 }
--- a/media/libvpx/third_party/x86inc/x86inc.asm
+++ b/media/libvpx/third_party/x86inc/x86inc.asm
@@ -31,17 +31,19 @@
 ; has significant usefulness outside of x264 and we want it to be available
 ; to the largest audience possible.  Of course, if you modify it for your own
 ; purposes to add a new feature, we strongly encourage contributing a patch
 ; as this feature might be useful for others as well.  Send patches or ideas
 ; to x264-devel@videolan.org .
 
 %include "vpx_config.asm"
 
+%ifndef program_name
 %define program_name vp9
+%endif
 
 
 %define UNIX64 0
 %define WIN64  0
 %if ARCH_X86_64
     %ifidn __OUTPUT_FORMAT__,win32
         %define WIN64  1
     %elifidn __OUTPUT_FORMAT__,win64
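
The hunk above wraps the default %define of program_name in an %ifndef so that a build including x86inc.asm can predefine its own symbol prefix instead of always getting vp9. A minimal C sketch of the same overridable-default pattern (PROGRAM_NAME is an illustrative macro, not a libvpx identifier):

#include <stdio.h>

/* Overridable default: a build may predefine the prefix, e.g. with
 * -DPROGRAM_NAME='"vp8"'; only when nothing was predefined does the
 * fallback apply, mirroring the %ifndef guard added to x86inc.asm. */
#ifndef PROGRAM_NAME
#define PROGRAM_NAME "vp9"
#endif

int main(void) {
    printf("symbol prefix: %s\n", PROGRAM_NAME);
    return 0;
}
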
--- a/media/libvpx/vp8/common/alloccommon.c
+++ b/media/libvpx/vp8/common/alloccommon.c
@@ -5,16 +5,17 @@
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 
 #include "vpx_config.h"
+#include "alloccommon.h"
 #include "blockd.h"
 #include "vpx_mem/vpx_mem.h"
 #include "onyxc_int.h"
 #include "findnearmv.h"
 #include "entropymode.h"
 #include "systemdependent.h"
 
 void vp8_de_alloc_frame_buffers(VP8_COMMON *oci)
@@ -98,19 +99,19 @@ int vp8_alloc_frame_buffers(VP8_COMMON *
     if (!oci->above_context)
         goto allocation_fail;
 
 #if CONFIG_POSTPROC
     if (vp8_yv12_alloc_frame_buffer(&oci->post_proc_buffer, width, height, VP8BORDERINPIXELS) < 0)
         goto allocation_fail;
 
     oci->post_proc_buffer_int_used = 0;
-    vpx_memset(&oci->postproc_state, 0, sizeof(oci->postproc_state));
-    vpx_memset(oci->post_proc_buffer.buffer_alloc, 128,
-               oci->post_proc_buffer.frame_size);
+    memset(&oci->postproc_state, 0, sizeof(oci->postproc_state));
+    memset(oci->post_proc_buffer.buffer_alloc, 128,
+           oci->post_proc_buffer.frame_size);
 
     /* Allocate buffer to store post-processing filter coefficients.
      *
      * Note: Round up mb_cols to support SIMD reads
      */
     oci->pp_limits_buffer = vpx_memalign(16, 24 * ((oci->mb_cols + 1) & ~1));
     if (!oci->pp_limits_buffer)
         goto allocation_fail;
@@ -171,17 +172,17 @@ void vp8_create_common(VP8_COMMON *oci)
     oci->no_lpf = 0;
     oci->filter_type = NORMAL_LOOPFILTER;
     oci->use_bilinear_mc_filter = 0;
     oci->full_pixel = 0;
     oci->multi_token_partition = ONE_PARTITION;
     oci->clamp_type = RECON_CLAMP_REQUIRED;
 
     /* Initialize reference frame sign bias structure to defaults */
-    vpx_memset(oci->ref_frame_sign_bias, 0, sizeof(oci->ref_frame_sign_bias));
+    memset(oci->ref_frame_sign_bias, 0, sizeof(oci->ref_frame_sign_bias));
 
     /* Default disable buffer to buffer copying */
     oci->copy_buffer_to_gf = 0;
     oci->copy_buffer_to_arf = 0;
 }
 
 void vp8_remove_common(VP8_COMMON *oci)
 {
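
The memset edits in this file are part of a tree-wide replacement of the vpx_memset wrapper with the standard library call. A rough reconstruction of why the swap is behavior-preserving, assuming the old shim reduced to libc in default (non-tracking) builds; this is a sketch, not the exact upstream definition:

#include <string.h>

/* Reconstruction of the old vpx_mem shim: unless a memory tracker was
 * compiled in, vpx_memset forwarded straight to memset, so call sites
 * can use memset directly with identical results. */
#ifndef CONFIG_MEM_TRACKER
#define vpx_memset(dest, val, size) memset((dest), (val), (size))
#endif

static char buf[16];

int main(void) {
    vpx_memset(buf, 0, sizeof(buf)); /* same as memset(buf, 0, 16) */
    return 0;
}
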
--- a/media/libvpx/vp8/common/arm/armv6/dequant_idct_v6.asm
+++ b/media/libvpx/vp8/common/arm/armv6/dequant_idct_v6.asm
@@ -160,17 +160,17 @@ vp8_dequant_idct_loop2_v6
     qadd16  r1, r1, r8
     usat16  r7, #8, r7
     usat16  r1, #8, r1
     orr     r1, r1, r7, lsl #8
     str     r9, [r2], r12           ; store output to dst
     str     r1, [r2], r12           ; store output to dst
     bne     vp8_dequant_idct_loop2_v6
 
-; vpx_memset
+; memset
     sub     r0, r0, #32
     add     sp, sp, #4
 
     mov     r12, #0
     str     r12, [r0]
     str     r12, [r0, #4]
     str     r12, [r0, #8]
     str     r12, [r0, #12]
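
The only change in this hunk renames the "; vpx_memset" comment to "; memset", matching the wrapper removal above. The stores that follow it zero the coefficient block after the sub r0, r0, #32 rewind; a C equivalent of that clearing step, with a hypothetical helper name:

#include <stdint.h>
#include <string.h>

/* What the zeroing stores after the "; memset" label amount to: clear
 * the 4x4 block of 16-bit coefficients (32 bytes) once the dequant/IDCT
 * loop has consumed them. */
static void clear_dqcoeff(int16_t *input) {
    memset(input, 0, 16 * sizeof(*input));
}
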
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/armv6/vp8_variance8x8_armv6.asm
+++ /dev/null
@@ -1,101 +0,0 @@
-;
-;  Copyright (c) 2011 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-    EXPORT  |vp8_variance8x8_armv6|
-
-    ARM
-
-    AREA ||.text||, CODE, READONLY, ALIGN=2
-
-; r0    unsigned char *src_ptr
-; r1    int source_stride
-; r2    unsigned char *ref_ptr
-; r3    int  recon_stride
-; stack unsigned int *sse
-|vp8_variance8x8_armv6| PROC
-
-    push    {r4-r10, lr}
-
-    pld     [r0, r1, lsl #0]
-    pld     [r2, r3, lsl #0]
-
-    mov     r12, #8             ; set loop counter to 8 (=block height)
-    mov     r4, #0              ; initialize sum = 0
-    mov     r5, #0              ; initialize sse = 0
-
-loop
-    ; 1st 4 pixels
-    ldr     r6, [r0, #0x0]      ; load 4 src pixels
-    ldr     r7, [r2, #0x0]      ; load 4 ref pixels
-
-    mov     lr, #0              ; constant zero
-
-    usub8   r8, r6, r7          ; calculate difference
-    pld     [r0, r1, lsl #1]
-    sel     r10, r8, lr         ; select bytes with positive difference
-    usub8   r9, r7, r6          ; calculate difference with reversed operands
-    pld     [r2, r3, lsl #1]
-    sel     r8, r9, lr          ; select bytes with negative difference
-
-    ; calculate partial sums
-    usad8   r6, r10, lr         ; calculate sum of positive differences
-    usad8   r7, r8, lr          ; calculate sum of negative differences
-    orr     r8, r8, r10         ; differences of all 4 pixels
-    ; calculate total sum
-    add    r4, r4, r6           ; add positive differences to sum
-    sub    r4, r4, r7           ; subtract negative differences from sum
-
-    ; calculate sse
-    uxtb16  r7, r8              ; byte (two pixels) to halfwords
-    uxtb16  r10, r8, ror #8     ; another two pixels to halfwords
-    smlad   r5, r7, r7, r5      ; dual signed multiply, add and accumulate (1)
-
-    ; 2nd 4 pixels
-    ldr     r6, [r0, #0x4]      ; load 4 src pixels
-    ldr     r7, [r2, #0x4]      ; load 4 ref pixels
-    smlad   r5, r10, r10, r5    ; dual signed multiply, add and accumulate (2)
-
-    usub8   r8, r6, r7          ; calculate difference
-    add     r0, r0, r1          ; set src_ptr to next row
-    sel     r10, r8, lr         ; select bytes with positive difference
-    usub8   r9, r7, r6          ; calculate difference with reversed operands
-    add     r2, r2, r3          ; set dst_ptr to next row
-    sel     r8, r9, lr          ; select bytes with negative difference
-
-    ; calculate partial sums
-    usad8   r6, r10, lr         ; calculate sum of positive differences
-    usad8   r7, r8, lr          ; calculate sum of negative differences
-    orr     r8, r8, r10         ; differences of all 4 pixels
-
-    ; calculate total sum
-    add     r4, r4, r6          ; add positive differences to sum
-    sub     r4, r4, r7          ; subtract negative differences from sum
-
-    ; calculate sse
-    uxtb16  r7, r8              ; byte (two pixels) to halfwords
-    uxtb16  r10, r8, ror #8     ; another two pixels to halfwords
-    smlad   r5, r7, r7, r5      ; dual signed multiply, add and accumulate (1)
-    subs    r12, r12, #1        ; next row
-    smlad   r5, r10, r10, r5    ; dual signed multiply, add and accumulate (2)
-
-    bne     loop
-
-    ; return stuff
-    ldr     r8, [sp, #32]       ; get address of sse
-    mul     r1, r4, r4          ; sum * sum
-    str     r5, [r8]            ; store sse
-    sub     r0, r5, r1, ASR #6  ; return (sse - ((sum * sum) >> 6))
-
-    pop     {r4-r10, pc}
-
-    ENDP
-
-    END
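
The deleted ARMv6 routine implements the standard block variance: accumulate the pixel-difference sum and sum of squares, then return sse - (sum*sum)/64, where the ASR #6 is the divide by the 64 pixels of an 8x8 block. A scalar C reference of the same computation, with an illustrative function name:

#include <stdint.h>

/* Reference 8x8 variance matching the deleted vp8_variance8x8_armv6:
 * writes the raw SSE through *sse and returns sse - ((sum * sum) >> 6). */
static unsigned int variance8x8_ref(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    unsigned int *sse) {
    int sum = 0;
    unsigned int sq = 0;
    int i, j;

    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            const int d = src[j] - ref[j];
            sum += d;
            sq += (unsigned int)(d * d);
        }
        src += src_stride;
        ref += ref_stride;
    }
    *sse = sq;
    return sq - (unsigned int)(((int64_t)sum * sum) >> 6);
}
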
--- a/media/libvpx/vp8/common/arm/filter_arm.c
+++ b/media/libvpx/vp8/common/arm/filter_arm.c
@@ -94,17 +94,17 @@ void vp8_sixtap_predict4x4_armv6
     int  xoffset,
     int  yoffset,
     unsigned char *dst_ptr,
     int  dst_pitch
 )
 {
     const short  *HFilter;
     const short  *VFilter;
-    DECLARE_ALIGNED_ARRAY(4, short, FData, 12*4); /* Temp data buffer used in filtering */
+    DECLARE_ALIGNED(4, short, FData[12*4]); /* Temp data buffer used in filtering */
 
 
     HFilter = vp8_sub_pel_filters[xoffset];   /* 6 tap */
     VFilter = vp8_sub_pel_filters[yoffset];   /* 6 tap */
 
     /* Vfilter is null. First pass only */
     if (xoffset && !yoffset)
     {
@@ -142,17 +142,17 @@ void vp8_sixtap_predict8x8_armv6
     int  xoffset,
     int  yoffset,
     unsigned char *dst_ptr,
     int  dst_pitch
 )
 {
     const short  *HFilter;
     const short  *VFilter;
-    DECLARE_ALIGNED_ARRAY(4, short, FData, 16*8); /* Temp data buffer used in filtering */
+    DECLARE_ALIGNED(4, short, FData[16*8]); /* Temp data buffer used in filtering */
 
     HFilter = vp8_sub_pel_filters[xoffset];   /* 6 tap */
     VFilter = vp8_sub_pel_filters[yoffset];   /* 6 tap */
 
     if (xoffset && !yoffset)
     {
         vp8_filter_block2d_first_pass_only_armv6(src_ptr, dst_ptr, src_pixels_per_line, 8, dst_pitch, HFilter);
     }
@@ -184,17 +184,17 @@ void vp8_sixtap_predict16x16_armv6
     int  xoffset,
     int  yoffset,
     unsigned char *dst_ptr,
     int  dst_pitch
 )
 {
     const short  *HFilter;
     const short  *VFilter;
-    DECLARE_ALIGNED_ARRAY(4, short, FData, 24*16);    /* Temp data buffer used in filtering */
+    DECLARE_ALIGNED(4, short, FData[24*16]);    /* Temp data buffer used in filtering */
 
     HFilter = vp8_sub_pel_filters[xoffset];   /* 6 tap */
     VFilter = vp8_sub_pel_filters[yoffset];   /* 6 tap */
 
     if (xoffset && !yoffset)
     {
         vp8_filter_block2d_first_pass_only_armv6(src_ptr, dst_ptr, src_pixels_per_line, 16, dst_pitch, HFilter);
     }
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/sad_neon.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-
-unsigned int vp8_sad8x8_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x8_t d0, d8;
-    uint16x8_t q12;
-    uint32x4_t q1;
-    uint64x2_t q3;
-    uint32x2_t d5;
-    int i;
-
-    d0 = vld1_u8(src_ptr);
-    src_ptr += src_stride;
-    d8 = vld1_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(d0, d8);
-
-    for (i = 0; i < 7; i++) {
-        d0 = vld1_u8(src_ptr);
-        src_ptr += src_stride;
-        d8 = vld1_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, d0, d8);
-    }
-
-    q1 = vpaddlq_u16(q12);
-    q3 = vpaddlq_u32(q1);
-    d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
-                  vreinterpret_u32_u64(vget_high_u64(q3)));
-
-    return vget_lane_u32(d5, 0);
-}
-
-unsigned int vp8_sad8x16_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x8_t d0, d8;
-    uint16x8_t q12;
-    uint32x4_t q1;
-    uint64x2_t q3;
-    uint32x2_t d5;
-    int i;
-
-    d0 = vld1_u8(src_ptr);
-    src_ptr += src_stride;
-    d8 = vld1_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(d0, d8);
-
-    for (i = 0; i < 15; i++) {
-        d0 = vld1_u8(src_ptr);
-        src_ptr += src_stride;
-        d8 = vld1_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, d0, d8);
-    }
-
-    q1 = vpaddlq_u16(q12);
-    q3 = vpaddlq_u32(q1);
-    d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
-                  vreinterpret_u32_u64(vget_high_u64(q3)));
-
-    return vget_lane_u32(d5, 0);
-}
-
-unsigned int vp8_sad4x4_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x8_t d0, d8;
-    uint16x8_t q12;
-    uint32x2_t d1;
-    uint64x1_t d3;
-    int i;
-
-    d0 = vld1_u8(src_ptr);
-    src_ptr += src_stride;
-    d8 = vld1_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(d0, d8);
-
-    for (i = 0; i < 3; i++) {
-        d0 = vld1_u8(src_ptr);
-        src_ptr += src_stride;
-        d8 = vld1_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, d0, d8);
-    }
-
-    d1 = vpaddl_u16(vget_low_u16(q12));
-    d3 = vpaddl_u32(d1);
-
-    return vget_lane_u32(vreinterpret_u32_u64(d3), 0);
-}
-
-unsigned int vp8_sad16x16_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x16_t q0, q4;
-    uint16x8_t q12, q13;
-    uint32x4_t q1;
-    uint64x2_t q3;
-    uint32x2_t d5;
-    int i;
-
-    q0 = vld1q_u8(src_ptr);
-    src_ptr += src_stride;
-    q4 = vld1q_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(vget_low_u8(q0), vget_low_u8(q4));
-    q13 = vabdl_u8(vget_high_u8(q0), vget_high_u8(q4));
-
-    for (i = 0; i < 15; i++) {
-        q0 = vld1q_u8(src_ptr);
-        src_ptr += src_stride;
-        q4 = vld1q_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, vget_low_u8(q0), vget_low_u8(q4));
-        q13 = vabal_u8(q13, vget_high_u8(q0), vget_high_u8(q4));
-    }
-
-    q12 = vaddq_u16(q12, q13);
-    q1 = vpaddlq_u16(q12);
-    q3 = vpaddlq_u32(q1);
-    d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
-                  vreinterpret_u32_u64(vget_high_u64(q3)));
-
-    return vget_lane_u32(d5, 0);
-}
-
-unsigned int vp8_sad16x8_neon(
-        unsigned char *src_ptr,
-        int src_stride,
-        unsigned char *ref_ptr,
-        int ref_stride) {
-    uint8x16_t q0, q4;
-    uint16x8_t q12, q13;
-    uint32x4_t q1;
-    uint64x2_t q3;
-    uint32x2_t d5;
-    int i;
-
-    q0 = vld1q_u8(src_ptr);
-    src_ptr += src_stride;
-    q4 = vld1q_u8(ref_ptr);
-    ref_ptr += ref_stride;
-    q12 = vabdl_u8(vget_low_u8(q0), vget_low_u8(q4));
-    q13 = vabdl_u8(vget_high_u8(q0), vget_high_u8(q4));
-
-    for (i = 0; i < 7; i++) {
-        q0 = vld1q_u8(src_ptr);
-        src_ptr += src_stride;
-        q4 = vld1q_u8(ref_ptr);
-        ref_ptr += ref_stride;
-        q12 = vabal_u8(q12, vget_low_u8(q0), vget_low_u8(q4));
-        q13 = vabal_u8(q13, vget_high_u8(q0), vget_high_u8(q4));
-    }
-
-    q12 = vaddq_u16(q12, q13);
-    q1 = vpaddlq_u16(q12);
-    q3 = vpaddlq_u32(q1);
-    d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
-                  vreinterpret_u32_u64(vget_high_u64(q3)));
-
-    return vget_lane_u32(d5, 0);
-}
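
All of the deleted NEON SAD kernels (their replacements now live in vpx_dsp/arm/sad_neon.c, added elsewhere in this patch) compute the same scalar reduction: the sum of absolute pixel differences over the block. A reference sketch for the 16x16 case, with an illustrative function name:

#include <stdint.h>
#include <stdlib.h>

/* Scalar equivalent of the deleted vp8_sad16x16_neon: the vabdl/vabal
 * chains accumulate exactly this sum of absolute differences. */
static unsigned int sad16x16_ref(const uint8_t *src, int src_stride,
                                 const uint8_t *ref, int ref_stride) {
    unsigned int sad = 0;
    int i, j;

    for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j++)
            sad += (unsigned int)abs(src[j] - ref[j]);
        src += src_stride;
        ref += ref_stride;
    }
    return sad;
}
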
deleted file mode 100644
--- a/media/libvpx/vp8/common/arm/neon/variance_neon.c
+++ /dev/null
@@ -1,320 +0,0 @@
-/*
- *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <arm_neon.h>
-#include "vpx_ports/mem.h"
-
-unsigned int vp8_variance16x16_neon(
-        const unsigned char *src_ptr,
-        int source_stride,
-        const unsigned char *ref_ptr,
-        int recon_stride,
-        unsigned int *sse) {
-    int i;
-    int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
-    uint32x2_t d0u32, d10u32;
-    int64x1_t d0s64, d1s64;
-    uint8x16_t q0u8, q1u8, q2u8, q3u8;
-    uint16x8_t q11u16, q12u16, q13u16, q14u16;
-    int32x4_t q8s32, q9s32, q10s32;
-    int64x2_t q0s64, q1s64, q5s64;
-
-    q8s32 = vdupq_n_s32(0);
-    q9s32 = vdupq_n_s32(0);
-    q10s32 = vdupq_n_s32(0);
-
-    for (i = 0; i < 8; i++) {
-        q0u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        q1u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        __builtin_prefetch(src_ptr);
-
-        q2u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q3u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        __builtin_prefetch(ref_ptr);
-
-        q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
-        q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
-        q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8));
-        q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8));
-
-        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
-        q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
-        q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
-
-        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
-        q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
-        q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
-
-        d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
-        d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16));
-        q9s32 = vmlal_s16(q9s32, d26s16, d26s16);
-        q10s32 = vmlal_s16(q10s32, d27s16, d27s16);
-
-        d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
-        d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16));
-        q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
-        q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
-    }
-
-    q10s32 = vaddq_s32(q10s32, q9s32);
-    q0s64 = vpaddlq_s32(q8s32);
-    q1s64 = vpaddlq_s32(q10s32);
-
-    d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
-    d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
-
-    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
-                      vreinterpret_s32_s64(d0s64));
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
-
-    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 8);
-    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
-
-    return vget_lane_u32(d0u32, 0);
-}
-
-unsigned int vp8_variance16x8_neon(
-        const unsigned char *src_ptr,
-        int source_stride,
-        const unsigned char *ref_ptr,
-        int recon_stride,
-        unsigned int *sse) {
-    int i;
-    int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
-    uint32x2_t d0u32, d10u32;
-    int64x1_t d0s64, d1s64;
-    uint8x16_t q0u8, q1u8, q2u8, q3u8;
-    uint16x8_t q11u16, q12u16, q13u16, q14u16;
-    int32x4_t q8s32, q9s32, q10s32;
-    int64x2_t q0s64, q1s64, q5s64;
-
-    q8s32 = vdupq_n_s32(0);
-    q9s32 = vdupq_n_s32(0);
-    q10s32 = vdupq_n_s32(0);
-
-    for (i = 0; i < 4; i++) {  // variance16x8_neon_loop
-        q0u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        q1u8 = vld1q_u8(src_ptr);
-        src_ptr += source_stride;
-        __builtin_prefetch(src_ptr);
-
-        q2u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        q3u8 = vld1q_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        __builtin_prefetch(ref_ptr);
-
-        q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
-        q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
-        q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8));
-        q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8));
-
-        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
-        q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
-        q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
-
-        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
-        q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
-        q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
-
-        d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
-        d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16));
-        q9s32 = vmlal_s16(q9s32, d26s16, d26s16);
-        q10s32 = vmlal_s16(q10s32, d27s16, d27s16);
-
-        d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
-        d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16));
-        q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
-        q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
-    }
-
-    q10s32 = vaddq_s32(q10s32, q9s32);
-    q0s64 = vpaddlq_s32(q8s32);
-    q1s64 = vpaddlq_s32(q10s32);
-
-    d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
-    d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
-
-    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
-                      vreinterpret_s32_s64(d0s64));
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
-
-    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7);
-    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
-
-    return vget_lane_u32(d0u32, 0);
-}
-
-unsigned int vp8_variance8x16_neon(
-        const unsigned char *src_ptr,
-        int source_stride,
-        const unsigned char *ref_ptr,
-        int recon_stride,
-        unsigned int *sse) {
-    int i;
-    uint8x8_t d0u8, d2u8, d4u8, d6u8;
-    int16x4_t d22s16, d23s16, d24s16, d25s16;
-    uint32x2_t d0u32, d10u32;
-    int64x1_t d0s64, d1s64;
-    uint16x8_t q11u16, q12u16;
-    int32x4_t q8s32, q9s32, q10s32;
-    int64x2_t q0s64, q1s64, q5s64;
-
-    q8s32 = vdupq_n_s32(0);
-    q9s32 = vdupq_n_s32(0);
-    q10s32 = vdupq_n_s32(0);
-
-    for (i = 0; i < 8; i++) {  // variance8x16_neon_loop
-        d0u8 = vld1_u8(src_ptr);
-        src_ptr += source_stride;
-        d2u8 = vld1_u8(src_ptr);
-        src_ptr += source_stride;
-        __builtin_prefetch(src_ptr);
-
-        d4u8 = vld1_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        d6u8 = vld1_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        __builtin_prefetch(ref_ptr);
-
-        q11u16 = vsubl_u8(d0u8, d4u8);
-        q12u16 = vsubl_u8(d2u8, d6u8);
-
-        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
-        q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
-        q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
-
-        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
-        q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
-        q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
-    }
-
-    q10s32 = vaddq_s32(q10s32, q9s32);
-    q0s64 = vpaddlq_s32(q8s32);
-    q1s64 = vpaddlq_s32(q10s32);
-
-    d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
-    d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
-
-    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
-                      vreinterpret_s32_s64(d0s64));
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
-
-    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7);
-    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
-
-    return vget_lane_u32(d0u32, 0);
-}
-
-unsigned int vp8_variance8x8_neon(
-        const unsigned char *src_ptr,
-        int source_stride,
-        const unsigned char *ref_ptr,
-        int recon_stride,
-        unsigned int *sse) {
-    int i;
-    uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
-    int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
-    uint32x2_t d0u32, d10u32;
-    int64x1_t d0s64, d1s64;
-    uint16x8_t q11u16, q12u16, q13u16, q14u16;
-    int32x4_t q8s32, q9s32, q10s32;
-    int64x2_t q0s64, q1s64, q5s64;
-
-    q8s32 = vdupq_n_s32(0);
-    q9s32 = vdupq_n_s32(0);
-    q10s32 = vdupq_n_s32(0);
-
-    for (i = 0; i < 2; i++) {  // variance8x8_neon_loop
-        d0u8 = vld1_u8(src_ptr);
-        src_ptr += source_stride;
-        d1u8 = vld1_u8(src_ptr);
-        src_ptr += source_stride;
-        d2u8 = vld1_u8(src_ptr);
-        src_ptr += source_stride;
-        d3u8 = vld1_u8(src_ptr);
-        src_ptr += source_stride;
-
-        d4u8 = vld1_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        d5u8 = vld1_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        d6u8 = vld1_u8(ref_ptr);
-        ref_ptr += recon_stride;
-        d7u8 = vld1_u8(ref_ptr);
-        ref_ptr += recon_stride;
-
-        q11u16 = vsubl_u8(d0u8, d4u8);
-        q12u16 = vsubl_u8(d1u8, d5u8);
-        q13u16 = vsubl_u8(d2u8, d6u8);
-        q14u16 = vsubl_u8(d3u8, d7u8);
-
-        d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-        d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
-        q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
-        q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
-
-        d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-        d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
-        q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
-        q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
-
-        d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
-        d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16));
-        q9s32 = vmlal_s16(q9s32, d26s16, d26s16);
-        q10s32 = vmlal_s16(q10s32, d27s16, d27s16);
-
-        d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
-        d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
-        q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16));
-        q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
-        q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
-    }
-
-    q10s32 = vaddq_s32(q10s32, q9s32);
-    q0s64 = vpaddlq_s32(q8s32);
-    q1s64 = vpaddlq_s32(q10s32);
-
-    d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
-    d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
-
-    q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64),
-                      vreinterpret_s32_s64(d0s64));
-    vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
-
-    d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 6);
-    d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
-
-    return vget_lane_u32(d0u32, 0);
-}
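
The kernels deleted above all compute the same reduction the vpx_dsp replacements use: accumulate the sum and the sum of squares of (src - ref), then variance = SSE - sum^2/N, with the divide folded into a shift because N is a power of two (the final vshr_n_u32 by 7 or 6 above divides by 128 or 64). A scalar sketch of that identity, for illustration only and not part of the patch:

#include <stdint.h>

/* Scalar equivalent of the deleted NEON reductions (sketch): sum and
 * sum-of-squares of the differences, then var = sse - sum*sum/(w*h). */
static unsigned int variance_wxh(const uint8_t *src, int src_stride,
                                 const uint8_t *ref, int ref_stride,
                                 int w, int h, unsigned int *sse)
{
    int r, c, sum = 0;
    unsigned int sq = 0;
    for (r = 0; r < h; r++) {
        for (c = 0; c < w; c++) {
            const int d = src[c] - ref[c];
            sum += d;
            sq += (unsigned int)(d * d);
        }
        src += src_stride;
        ref += ref_stride;
    }
    *sse = sq;
    return sq - (unsigned int)(((int64_t)sum * sum) / (w * h));
}
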
--- a/media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance_neon.c
+++ b/media/libvpx/vp8/common/arm/neon/vp8_subpixelvariance_neon.c
@@ -27,17 +27,17 @@ unsigned int vp8_sub_pixel_variance16x16
         const unsigned char *src_ptr,
         int src_pixels_per_line,
         int xoffset,
         int yoffset,
         const unsigned char *dst_ptr,
         int dst_pixels_per_line,
         unsigned int *sse) {
     int i;
-    DECLARE_ALIGNED_ARRAY(16, unsigned char, tmp, 528);
+    DECLARE_ALIGNED(16, unsigned char, tmp[528]);
     unsigned char *tmpp;
     unsigned char *tmpp2;
     uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8, d8u8, d9u8;
     uint8x8_t d10u8, d11u8, d12u8, d13u8, d14u8, d15u8, d16u8, d17u8, d18u8;
     uint8x8_t d19u8, d20u8, d21u8;
     int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
     uint32x2_t d0u32, d10u32;
     int64x1_t d0s64, d1s64, d2s64, d3s64;
@@ -906,22 +906,16 @@ unsigned int vp8_variance_halfpixvar16x1
     vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
 
     d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 8);
     d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
 
     return vget_lane_u32(d0u32, 0);
 }
 
-enum { kWidth8 = 8 };
-enum { kHeight8 = 8 };
-enum { kHeight8PlusOne = 9 };
-enum { kPixelStepOne = 1 };
-enum { kAlign16 = 16 };
-
 #define FILTER_BITS 7
 
 static INLINE int horizontal_add_s16x8(const int16x8_t v_16x8) {
   const int32x4_t a = vpaddlq_s16(v_16x8);
   const int64x2_t b = vpaddlq_s32(a);
   const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
                                vreinterpret_s32_s64(vget_high_s64(b)));
   return vget_lane_s32(c, 0);
@@ -963,18 +957,18 @@ static void variance_neon_w8(const uint8
   *sum = horizontal_add_s16x8(v_sum);
   *sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(v_sse_lo, v_sse_hi));
 }
 
 static unsigned int variance8x8_neon(const uint8_t *a, int a_stride,
                                      const uint8_t *b, int b_stride,
                                      unsigned int *sse) {
   int sum;
-  variance_neon_w8(a, a_stride, b, b_stride, kWidth8, kHeight8, sse, &sum);
-  return *sse - (((int64_t)sum * sum) / (kWidth8 * kHeight8));
+  variance_neon_w8(a, a_stride, b, b_stride, 8, 8, sse, &sum);
+  return *sse - (((int64_t)sum * sum) / (8 * 8));
 }
 
 static void var_filter_block2d_bil_w8(const uint8_t *src_ptr,
                                       uint8_t *output_ptr,
                                       unsigned int src_pixels_per_line,
                                       int pixel_step,
                                       unsigned int output_height,
                                       unsigned int output_width,
@@ -998,26 +992,26 @@ static void var_filter_block2d_bil_w8(co
 unsigned int vp8_sub_pixel_variance8x8_neon(
         const unsigned char *src,
         int src_stride,
         int xoffset,
         int yoffset,
         const unsigned char *dst,
         int dst_stride,
         unsigned int *sse) {
-  DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, temp2, kHeight8PlusOne * kWidth8);
-  DECLARE_ALIGNED_ARRAY(kAlign16, uint8_t, fdata3, kHeight8PlusOne * kWidth8);
+  DECLARE_ALIGNED(16, uint8_t, temp2[9 * 8]);
+  DECLARE_ALIGNED(16, uint8_t, fdata3[9 * 8]);
   if (xoffset == 0) {
-    var_filter_block2d_bil_w8(src, temp2, src_stride, kWidth8, kHeight8,
-                              kWidth8, bilinear_taps_coeff[yoffset]);
+    var_filter_block2d_bil_w8(src, temp2, src_stride, 8, 8,
+                              8, bilinear_taps_coeff[yoffset]);
   } else if (yoffset == 0) {
-    var_filter_block2d_bil_w8(src, temp2, src_stride, kPixelStepOne,
-                              kHeight8PlusOne, kWidth8,
+    var_filter_block2d_bil_w8(src, temp2, src_stride, 1,
+                              9, 8,
                               bilinear_taps_coeff[xoffset]);
   } else {
-    var_filter_block2d_bil_w8(src, fdata3, src_stride, kPixelStepOne,
-                              kHeight8PlusOne, kWidth8,
+    var_filter_block2d_bil_w8(src, fdata3, src_stride, 1,
+                              9, 8,
                               bilinear_taps_coeff[xoffset]);
-    var_filter_block2d_bil_w8(fdata3, temp2, kWidth8, kWidth8, kHeight8,
-                              kWidth8, bilinear_taps_coeff[yoffset]);
+    var_filter_block2d_bil_w8(fdata3, temp2, 8, 8, 8,
+                              8, bilinear_taps_coeff[yoffset]);
   }
-  return variance8x8_neon(temp2, kWidth8, dst, dst_stride, sse);
+  return variance8x8_neon(temp2, 8, dst, dst_stride, sse);
 }
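
In the 8x8 sub-pixel path above, each bilinear pass is a two-tap weighted average whose taps sum to 128 (FILTER_BITS == 7): an optional horizontal pass over 9 rows feeds the vertical pass, and the filtered block goes to variance8x8_neon. A scalar sketch of one pass, assuming round-to-nearest as in the NEON rounding shift:

/* One bilinear pass (scalar sketch of var_filter_block2d_bil_w8's
 * arithmetic; the rounding constant is assumed from FILTER_BITS == 7). */
static void bil_pass_sketch(const unsigned char *src, unsigned char *dst,
                            unsigned int src_stride, int pixel_step,
                            unsigned int out_h, unsigned int out_w,
                            const unsigned short *filter)
{
    unsigned int i, j;
    for (i = 0; i < out_h; i++) {
        for (j = 0; j < out_w; j++)
            dst[j] = (unsigned char)((src[j] * filter[0] +
                                      src[j + pixel_step] * filter[1] +
                                      (1 << 6)) >> 7);
        src += src_stride;
        dst += out_w;
    }
}
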
--- a/media/libvpx/vp8/common/arm/variance_arm.c
+++ b/media/libvpx/vp8/common/arm/variance_arm.c
@@ -4,20 +4,24 @@
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 #include "vpx_config.h"
-#include "vp8_rtcd.h"
+#include "./vp8_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
 #include "vp8/common/variance.h"
 #include "vp8/common/filter.h"
 
+// TODO(johannkoenig): Move this to vpx_dsp or vp8/encoder
+#if CONFIG_VP8_ENCODER
+
 #if HAVE_MEDIA
 #include "vp8/common/arm/bilinearfilter_arm.h"
 
 unsigned int vp8_sub_pixel_variance8x8_armv6
 (
     const unsigned char  *src_ptr,
     int  src_pixels_per_line,
     int  xoffset,
@@ -35,18 +39,18 @@ unsigned int vp8_sub_pixel_variance8x8_a
     VFilter = vp8_bilinear_filters[yoffset];
 
     vp8_filter_block2d_bil_first_pass_armv6(src_ptr, first_pass,
                                             src_pixels_per_line,
                                             9, 8, HFilter);
     vp8_filter_block2d_bil_second_pass_armv6(first_pass, second_pass,
                                              8, 8, 8, VFilter);
 
-    return vp8_variance8x8_armv6(second_pass, 8, dst_ptr,
-                                   dst_pixels_per_line, sse);
+    return vpx_variance8x8_media(second_pass, 8, dst_ptr,
+                                 dst_pixels_per_line, sse);
 }
 
 unsigned int vp8_sub_pixel_variance16x16_armv6
 (
     const unsigned char  *src_ptr,
     int  src_pixels_per_line,
     int  xoffset,
     int  yoffset,
@@ -81,23 +85,23 @@ unsigned int vp8_sub_pixel_variance16x16
         VFilter = vp8_bilinear_filters[yoffset];
 
         vp8_filter_block2d_bil_first_pass_armv6(src_ptr, first_pass,
                                                 src_pixels_per_line,
                                                 17, 16, HFilter);
         vp8_filter_block2d_bil_second_pass_armv6(first_pass, second_pass,
                                                  16, 16, 16, VFilter);
 
-        var = vp8_variance16x16_armv6(second_pass, 16, dst_ptr,
-                                       dst_pixels_per_line, sse);
+        var = vpx_variance16x16_media(second_pass, 16, dst_ptr,
+                                      dst_pixels_per_line, sse);
     }
     return var;
 }
 
-#endif /* HAVE_MEDIA */
+#endif  // HAVE_MEDIA
 
 
 #if HAVE_NEON
 
 extern unsigned int vp8_sub_pixel_variance16x16_neon_func
 (
     const unsigned char  *src_ptr,
     int  src_pixels_per_line,
@@ -124,9 +128,10 @@ unsigned int vp8_sub_pixel_variance16x16
   else if (xoffset == 0 && yoffset == 4)
     return vp8_variance_halfpixvar16x16_v_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
   else if (xoffset == 4 && yoffset == 4)
     return vp8_variance_halfpixvar16x16_hv_neon(src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse);
   else
     return vp8_sub_pixel_variance16x16_neon_func(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
 }
 
-#endif
+#endif  // HAVE_NEON
+#endif  // CONFIG_VP8_ENCODER
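
xoffset and yoffset here are sub-pel positions in 1/8-pel units, so a value of 4 is exactly half a pixel; that is why the NEON wrapper above routes (4,0), (0,4) and (4,4) to dedicated half-pel kernels and only falls back to the general filter otherwise. A hypothetical predicate capturing that routing:

/* Hypothetical helper, not in the patch: true when the wrapper above has
 * a dedicated half-pel kernel for this (xoffset, yoffset) pair. */
static int has_halfpel_kernel(int xoffset, int yoffset)
{
    return (xoffset == 4 && yoffset == 0) ||   /* horizontal */
           (xoffset == 0 && yoffset == 4) ||   /* vertical */
           (xoffset == 4 && yoffset == 4);     /* diagonal */
}
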
--- a/media/libvpx/vp8/common/common.h
+++ b/media/libvpx/vp8/common/common.h
@@ -24,28 +24,28 @@ extern "C" {
 
 #define MIN(x, y) (((x) < (y)) ? (x) : (y))
 #define MAX(x, y) (((x) > (y)) ? (x) : (y))
 
 /* Only need this for fixed-size arrays, for structs just assign. */
 
 #define vp8_copy( Dest, Src) { \
         assert( sizeof( Dest) == sizeof( Src)); \
-        vpx_memcpy( Dest, Src, sizeof( Src)); \
+        memcpy( Dest, Src, sizeof( Src)); \
     }
 
 /* Use this for variably-sized arrays. */
 
 #define vp8_copy_array( Dest, Src, N) { \
         assert( sizeof( *Dest) == sizeof( *Src)); \
-        vpx_memcpy( Dest, Src, N * sizeof( *Src)); \
+        memcpy( Dest, Src, N * sizeof( *Src)); \
     }
 
-#define vp8_zero( Dest)  vpx_memset( &Dest, 0, sizeof( Dest));
+#define vp8_zero( Dest)  memset( &Dest, 0, sizeof( Dest));
 
-#define vp8_zero_array( Dest, N)  vpx_memset( Dest, 0, N * sizeof( *Dest));
+#define vp8_zero_array( Dest, N)  memset( Dest, 0, N * sizeof( *Dest));
 
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
 #endif  // VP8_COMMON_COMMON_H_
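
With the vpx_mem wrappers gone, these helpers are plain assert-plus-memcpy/memset. A hypothetical usage sketch, assuming this header is included:

/* Hypothetical caller of the helpers above. */
static void copy_example(void)
{
    unsigned char dst[4][4];
    static const unsigned char src[4][4] = { { 1 } };
    vp8_copy(dst, src);  /* asserts sizeof(dst) == sizeof(src), then memcpy */
    vp8_zero(dst);       /* memset(&dst, 0, sizeof(dst)) */
}
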
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/copy_c.c
@@ -0,0 +1,32 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <string.h>
+
+#include "./vp8_rtcd.h"
+#include "vpx/vpx_integer.h"
+
+/* Copy 2 macroblocks to a buffer */
+void vp8_copy32xn_c(const unsigned char *src_ptr, int src_stride,
+                    unsigned char *dst_ptr, int dst_stride,
+                    int height)
+{
+    int r;
+
+    for (r = 0; r < height; r++)
+    {
+        memcpy(dst_ptr, src_ptr, 32);
+
+        src_ptr += src_stride;
+        dst_ptr += dst_stride;
+
+    }
+}
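
The new file keeps only the memcpy form: a 32-byte-wide strip (two 16-pixel macroblocks side by side) copied row by row. For a constant 32-byte length, compilers lower memcpy to wide moves, which is what the deleted word-copy variant in sad_c.c (further down) did by hand. A hypothetical call:

/* Hypothetical usage: copy a 32x16 region between two buffers with
 * stride 32. */
static void copy_two_mbs(const unsigned char *src, unsigned char *dst)
{
    vp8_copy32xn_c(src, 32, dst, 32, 16);
}
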
--- a/media/libvpx/vp8/common/debugmodes.c
+++ b/media/libvpx/vp8/common/debugmodes.c
@@ -76,17 +76,16 @@ void vp8_print_modes_and_motion_vectors(
 
         mb_index++;
         fprintf(mvs, "\n");
     }
 
     fprintf(mvs, "\n");
 
     /* print out the block modes */
-    mb_index = 0;
     fprintf(mvs, "Mbs for Frame %d\n", frame);
     {
         int b_row;
 
         for (b_row = 0; b_row < 4 * rows; b_row++)
         {
             int b_col;
             int bindex;
@@ -124,17 +123,16 @@ void vp8_print_modes_and_motion_vectors(
         mb_index++;
         fprintf(mvs, "\n");
     }
 
     fprintf(mvs, "\n");
 
 
     /* print out the block modes */
-    mb_index = 0;
     fprintf(mvs, "MVs for Frame %d\n", frame);
     {
         int b_row;
 
         for (b_row = 0; b_row < 4 * rows; b_row++)
         {
             int b_col;
             int bindex;
--- a/media/libvpx/vp8/common/dequantize.c
+++ b/media/libvpx/vp8/common/dequantize.c
@@ -33,11 +33,11 @@ void vp8_dequant_idct_add_c(short *input
 
     for (i = 0; i < 16; i++)
     {
         input[i] = dq[i] * input[i];
     }
 
     vp8_short_idct4x4llm_c(input, dest, stride, dest, stride);
 
-    vpx_memset(input, 0, 32);
+    memset(input, 0, 32);
 
 }
--- a/media/libvpx/vp8/common/entropy.c
+++ b/media/libvpx/vp8/common/entropy.c
@@ -178,12 +178,11 @@ const vp8_extra_bit_struct vp8_extra_bit
     { cat6, Pcat6, 11, 67},
     { 0, 0, 0, 0}
 };
 
 #include "default_coef_probs.h"
 
 void vp8_default_coef_probs(VP8_COMMON *pc)
 {
-    vpx_memcpy(pc->fc.coef_probs, default_coef_probs,
-                   sizeof(default_coef_probs));
+    memcpy(pc->fc.coef_probs, default_coef_probs, sizeof(default_coef_probs));
 }
 
--- a/media/libvpx/vp8/common/entropymode.c
+++ b/media/libvpx/vp8/common/entropymode.c
@@ -154,18 +154,18 @@ const vp8_tree_index vp8_small_mvtree [1
     -2, -3,
     10, 12,
     -4, -5,
     -6, -7
 };
 
 void vp8_init_mbmode_probs(VP8_COMMON *x)
 {
-    vpx_memcpy(x->fc.ymode_prob, vp8_ymode_prob, sizeof(vp8_ymode_prob));
-    vpx_memcpy(x->fc.uv_mode_prob, vp8_uv_mode_prob, sizeof(vp8_uv_mode_prob));
-    vpx_memcpy(x->fc.sub_mv_ref_prob, sub_mv_ref_prob, sizeof(sub_mv_ref_prob));
+    memcpy(x->fc.ymode_prob, vp8_ymode_prob, sizeof(vp8_ymode_prob));
+    memcpy(x->fc.uv_mode_prob, vp8_uv_mode_prob, sizeof(vp8_uv_mode_prob));
+    memcpy(x->fc.sub_mv_ref_prob, sub_mv_ref_prob, sizeof(sub_mv_ref_prob));
 }
 
 void vp8_default_bmode_probs(vp8_prob p [VP8_BINTRAMODES-1])
 {
-    vpx_memcpy(p, vp8_bmode_prob, sizeof(vp8_bmode_prob));
+    memcpy(p, vp8_bmode_prob, sizeof(vp8_bmode_prob));
 }
 
--- a/media/libvpx/vp8/common/extend.c
+++ b/media/libvpx/vp8/common/extend.c
@@ -35,19 +35,19 @@ static void copy_and_extend_plane
     /* copy the left and right most columns out */
     src_ptr1 = s;
     src_ptr2 = s + w - 1;
     dest_ptr1 = d - el;
     dest_ptr2 = d + w;
 
     for (i = 0; i < h; i++)
     {
-        vpx_memset(dest_ptr1, src_ptr1[0], el);
-        vpx_memcpy(dest_ptr1 + el, src_ptr1, w);
-        vpx_memset(dest_ptr2, src_ptr2[0], er);
+        memset(dest_ptr1, src_ptr1[0], el);
+        memcpy(dest_ptr1 + el, src_ptr1, w);
+        memset(dest_ptr2, src_ptr2[0], er);
         src_ptr1  += sp;
         src_ptr2  += sp;
         dest_ptr1 += dp;
         dest_ptr2 += dp;
     }
 
     /* Now copy the top and bottom lines into each line of the respective
      * borders
@@ -55,23 +55,23 @@ static void copy_and_extend_plane
     src_ptr1 = d - el;
     src_ptr2 = d + dp * (h - 1) - el;
     dest_ptr1 = d + dp * (-et) - el;
     dest_ptr2 = d + dp * (h) - el;
     linesize = el + er + w;
 
     for (i = 0; i < et; i++)
     {
-        vpx_memcpy(dest_ptr1, src_ptr1, linesize);
+        memcpy(dest_ptr1, src_ptr1, linesize);
         dest_ptr1 += dp;
     }
 
     for (i = 0; i < eb; i++)
     {
-        vpx_memcpy(dest_ptr2, src_ptr2, linesize);
+        memcpy(dest_ptr2, src_ptr2, linesize);
         dest_ptr2 += dp;
     }
 }
 
 
 void vp8_copy_and_extend_frame(YV12_BUFFER_CONFIG *src,
                                YV12_BUFFER_CONFIG *dst)
 {
--- a/media/libvpx/vp8/common/filter.c
+++ b/media/libvpx/vp8/common/filter.c
@@ -5,16 +5,17 @@
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 
 #include "filter.h"
+#include "./vp8_rtcd.h"
 
 DECLARE_ALIGNED(16, const short, vp8_bilinear_filters[8][2]) =
 {
     { 128,   0 },
     { 112,  16 },
     {  96,  32 },
     {  80,  48 },
     {  64,  64 },
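
Every row of vp8_bilinear_filters sums to 128 (1 << 7), i.e. unity gain at the codec's 7-bit filter precision; the row index is the 1/8-pel offset, so { 128, 0 } is a plain copy and { 64, 64 } the exact half-pel average. A hypothetical sanity check:

/* Hypothetical check that the table above has unity gain. */
static int bilinear_taps_ok(void)
{
    int i;
    for (i = 0; i < 8; i++)
        if (vp8_bilinear_filters[i][0] + vp8_bilinear_filters[i][1] != 128)
            return 0;
    return 1;
}
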
--- a/media/libvpx/vp8/common/generic/systemdependent.c
+++ b/media/libvpx/vp8/common/generic/systemdependent.c
@@ -12,16 +12,17 @@
 #include "vpx_config.h"
 #include "vp8_rtcd.h"
 #if ARCH_ARM
 #include "vpx_ports/arm.h"
 #elif ARCH_X86 || ARCH_X86_64
 #include "vpx_ports/x86.h"
 #endif
 #include "vp8/common/onyxc_int.h"
+#include "vp8/common/systemdependent.h"
 
 #if CONFIG_MULTITHREAD
 #if HAVE_UNISTD_H && !defined(__OS2__)
 #include <unistd.h>
 #elif defined(_WIN32)
 #include <windows.h>
 typedef void (WINAPI *PGNSI)(LPSYSTEM_INFO);
 #elif defined(__OS2__)
--- a/media/libvpx/vp8/common/idct_blk.c
+++ b/media/libvpx/vp8/common/idct_blk.c
@@ -28,17 +28,17 @@ void vp8_dequant_idct_add_y_block_c
     {
         for (j = 0; j < 4; j++)
         {
             if (*eobs++ > 1)
                 vp8_dequant_idct_add_c (q, dq, dst, stride);
             else
             {
                 vp8_dc_only_idct_add_c (q[0]*dq[0], dst, stride, dst, stride);
-                vpx_memset(q, 0, 2 * sizeof(q[0]));
+                memset(q, 0, 2 * sizeof(q[0]));
             }
 
             q   += 16;
             dst += 4;
         }
 
         dst += 4*stride - 16;
     }
@@ -54,17 +54,17 @@ void vp8_dequant_idct_add_uv_block_c
     {
         for (j = 0; j < 2; j++)
         {
             if (*eobs++ > 1)
                 vp8_dequant_idct_add_c (q, dq, dstu, stride);
             else
             {
                 vp8_dc_only_idct_add_c (q[0]*dq[0], dstu, stride, dstu, stride);
-                vpx_memset(q, 0, 2 * sizeof(q[0]));
+                memset(q, 0, 2 * sizeof(q[0]));
             }
 
             q    += 16;
             dstu += 4;
         }
 
         dstu += 4*stride - 8;
     }
@@ -73,17 +73,17 @@ void vp8_dequant_idct_add_uv_block_c
     {
         for (j = 0; j < 2; j++)
         {
             if (*eobs++ > 1)
                 vp8_dequant_idct_add_c (q, dq, dstv, stride);
             else
             {
                 vp8_dc_only_idct_add_c (q[0]*dq[0], dstv, stride, dstv, stride);
-                vpx_memset(q, 0, 2 * sizeof(q[0]));
+                memset(q, 0, 2 * sizeof(q[0]));
             }
 
             q    += 16;
             dstv += 4;
         }
 
         dstv += 4*stride - 8;
     }
--- a/media/libvpx/vp8/common/idctllm.c
+++ b/media/libvpx/vp8/common/idctllm.c
@@ -3,16 +3,17 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include "./vp8_rtcd.h"
 
 /****************************************************************************
  * Notes:
  *
  * This implementation makes use of a 16 bit fixed point version of two multiply
  * constants:
  *         1.   sqrt(2) * cos (pi/8)
  *         2.   sqrt(2) * sin (pi/8)
--- a/media/libvpx/vp8/common/loopfilter.c
+++ b/media/libvpx/vp8/common/loopfilter.c
@@ -77,21 +77,20 @@ void vp8_loop_filter_update_sharpness(lo
         {
             if (block_inside_limit > (9 - sharpness_lvl))
                 block_inside_limit = (9 - sharpness_lvl);
         }
 
         if (block_inside_limit < 1)
             block_inside_limit = 1;
 
-        vpx_memset(lfi->lim[i], block_inside_limit, SIMD_WIDTH);
-        vpx_memset(lfi->blim[i], (2 * filt_lvl + block_inside_limit),
-                SIMD_WIDTH);
-        vpx_memset(lfi->mblim[i], (2 * (filt_lvl + 2) + block_inside_limit),
-                SIMD_WIDTH);
+        memset(lfi->lim[i], block_inside_limit, SIMD_WIDTH);
+        memset(lfi->blim[i], (2 * filt_lvl + block_inside_limit), SIMD_WIDTH);
+        memset(lfi->mblim[i], (2 * (filt_lvl + 2) + block_inside_limit),
+               SIMD_WIDTH);
     }
 }
 
 void vp8_loop_filter_init(VP8_COMMON *cm)
 {
     loop_filter_info_n *lfi = &cm->lf_info;
     int i;
 
@@ -100,17 +99,17 @@ void vp8_loop_filter_init(VP8_COMMON *cm
     cm->last_sharpness_level = cm->sharpness_level;
 
     /* init LUT for lvl  and hev thr picking */
     lf_init_lut(lfi);
 
     /* init hev threshold const vectors */
     for(i = 0; i < 4 ; i++)
     {
-        vpx_memset(lfi->hev_thr[i], i, SIMD_WIDTH);
+        memset(lfi->hev_thr[i], i, SIMD_WIDTH);
     }
 }
 
 void vp8_loop_filter_frame_init(VP8_COMMON *cm,
                                 MACROBLOCKD *mbd,
                                 int default_filt_lvl)
 {
     int seg,  /* segment number */
@@ -146,17 +145,17 @@ void vp8_loop_filter_frame_init(VP8_COMM
             }
         }
 
         if (!mbd->mode_ref_lf_delta_enabled)
         {
             /* we could get rid of this if we assume that deltas are set to
              * zero when not in use; encoder always uses deltas
              */
-            vpx_memset(lfi->lvl[seg][0], lvl_seg, 4 * 4 );
+            memset(lfi->lvl[seg][0], lvl_seg, 4 * 4 );
             continue;
         }
 
         /* INTRA_FRAME */
         ref = INTRA_FRAME;
 
         /* Apply delta for reference frame */
         lvl_ref = lvl_seg + mbd->ref_lf_deltas[ref];
--- a/media/libvpx/vp8/common/mfqe.c
+++ b/media/libvpx/vp8/common/mfqe.c
@@ -12,20 +12,21 @@
 /* MFQE: Multiframe Quality Enhancement
  * In rate limited situations keyframes may cause significant visual artifacts
  * commonly referred to as "popping." This file implements a postprocessing
  * algorithm which blends data from the preceding frame when there is no

  * motion and the q from the previous frame is lower which indicates that it is
  * higher quality.
  */
 
-#include "postproc.h"
-#include "variance.h"
+#include "./vp8_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
+#include "vp8/common/postproc.h"
+#include "vp8/common/variance.h"
 #include "vpx_mem/vpx_mem.h"
-#include "vp8_rtcd.h"
 #include "vpx_scale/yv12config.h"
 
 #include <limits.h>
 #include <stdlib.h>
 
 static void filter_by_weight(unsigned char *src, int src_stride,
                              unsigned char *dst, int dst_stride,
                              int block_size, int src_weight)
@@ -145,46 +146,46 @@ static void multiframe_quality_enhance_b
     unsigned char *udp;
     unsigned char *vp;
     unsigned char *vdp;
 
     unsigned int act, actd, sad, usad, vsad, sse, thr, thrsq, actrisk;
 
     if (blksize == 16)
     {
-        actd = (vp8_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8;
-        act = (vp8_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse)+128)>>8;
+        actd = (vpx_variance16x16(yd, yd_stride, VP8_ZEROS, 0, &sse)+128)>>8;
+        act = (vpx_variance16x16(y, y_stride, VP8_ZEROS, 0, &sse)+128)>>8;
 #ifdef USE_SSD
-        sad = (vp8_variance16x16(y, y_stride, yd, yd_stride, &sse));
+        vpx_variance16x16(y, y_stride, yd, yd_stride, &sse);
         sad = (sse + 128)>>8;
-        usad = (vp8_variance8x8(u, uv_stride, ud, uvd_stride, &sse));
+        vpx_variance8x8(u, uv_stride, ud, uvd_stride, &sse);
         usad = (sse + 32)>>6;
-        vsad = (vp8_variance8x8(v, uv_stride, vd, uvd_stride, &sse));
+        vpx_variance8x8(v, uv_stride, vd, uvd_stride, &sse);
         vsad = (sse + 32)>>6;
 #else
-        sad = (vp8_sad16x16(y, y_stride, yd, yd_stride, UINT_MAX) + 128) >> 8;
-        usad = (vp8_sad8x8(u, uv_stride, ud, uvd_stride, UINT_MAX) + 32) >> 6;
-        vsad = (vp8_sad8x8(v, uv_stride, vd, uvd_stride, UINT_MAX)+ 32) >> 6;
+        sad = (vpx_sad16x16(y, y_stride, yd, yd_stride) + 128) >> 8;
+        usad = (vpx_sad8x8(u, uv_stride, ud, uvd_stride) + 32) >> 6;
+        vsad = (vpx_sad8x8(v, uv_stride, vd, uvd_stride)+ 32) >> 6;
 #endif
     }
     else /* if (blksize == 8) */
     {
-        actd = (vp8_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6;
-        act = (vp8_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse)+32)>>6;
+        actd = (vpx_variance8x8(yd, yd_stride, VP8_ZEROS, 0, &sse)+32)>>6;
+        act = (vpx_variance8x8(y, y_stride, VP8_ZEROS, 0, &sse)+32)>>6;
 #ifdef USE_SSD
-        sad = (vp8_variance8x8(y, y_stride, yd, yd_stride, &sse));
+        vpx_variance8x8(y, y_stride, yd, yd_stride, &sse);
         sad = (sse + 32)>>6;
-        usad = (vp8_variance4x4(u, uv_stride, ud, uvd_stride, &sse));
+        vpx_variance4x4(u, uv_stride, ud, uvd_stride, &sse);
         usad = (sse + 8)>>4;
-        vsad = (vp8_variance4x4(v, uv_stride, vd, uvd_stride, &sse));
+        vpx_variance4x4(v, uv_stride, vd, uvd_stride, &sse);
         vsad = (sse + 8)>>4;
 #else
-        sad = (vp8_sad8x8(y, y_stride, yd, yd_stride, UINT_MAX) + 32) >> 6;
-        usad = (vp8_sad4x4(u, uv_stride, ud, uvd_stride, UINT_MAX) + 8) >> 4;
-        vsad = (vp8_sad4x4(v, uv_stride, vd, uvd_stride, UINT_MAX) + 8) >> 4;
+        sad = (vpx_sad8x8(y, y_stride, yd, yd_stride) + 32) >> 6;
+        usad = (vpx_sad4x4(u, uv_stride, ud, uvd_stride) + 8) >> 4;
+        vsad = (vpx_sad4x4(v, uv_stride, vd, uvd_stride) + 8) >> 4;
 #endif
     }
 
     actrisk = (actd > act * 5);
 
     /* thr = qdiff/16 + log2(act) + log4(qprev) */
     thr = (qdiff >> 4);
     while (actd >>= 1) thr++;
@@ -226,19 +227,19 @@ static void multiframe_quality_enhance_b
             vp8_copy_mem16x16(y, y_stride, yd, yd_stride);
             vp8_copy_mem8x8(u, uv_stride, ud, uvd_stride);
             vp8_copy_mem8x8(v, uv_stride, vd, uvd_stride);
         }
         else  /* if (blksize == 8) */
         {
             vp8_copy_mem8x8(y, y_stride, yd, yd_stride);
             for (up = u, udp = ud, i = 0; i < uvblksize; ++i, up += uv_stride, udp += uvd_stride)
-                vpx_memcpy(udp, up, uvblksize);
+                memcpy(udp, up, uvblksize);
             for (vp = v, vdp = vd, i = 0; i < uvblksize; ++i, vp += uv_stride, vdp += uvd_stride)
-                vpx_memcpy(vdp, vp, uvblksize);
+                memcpy(vdp, vp, uvblksize);
         }
     }
 }
 
 static int qualify_inter_mb(const MODE_INFO *mode_info_context, int *map)
 {
     if (mode_info_context->mbmi.mb_skip_coeff)
         map[0] = map[1] = map[2] = map[3] = 1;
@@ -336,18 +337,18 @@ void vp8_multiframe_quality_enhance
                                 unsigned char *udp = ud_ptr + 4*(i*dest->uv_stride+j);
                                 unsigned char *vp = v_ptr + 4*(i*show->uv_stride+j);
                                 unsigned char *vdp = vd_ptr + 4*(i*dest->uv_stride+j);
                                 vp8_copy_mem8x8(y_ptr + 8*(i*show->y_stride+j), show->y_stride,
                                                 yd_ptr + 8*(i*dest->y_stride+j), dest->y_stride);
                                 for (k = 0; k < 4; ++k, up += show->uv_stride, udp += dest->uv_stride,
                                                         vp += show->uv_stride, vdp += dest->uv_stride)
                                 {
-                                    vpx_memcpy(udp, up, 4);
-                                    vpx_memcpy(vdp, vp, 4);
+                                    memcpy(udp, up, 4);
+                                    memcpy(vdp, vp, 4);
                                 }
                             }
                         }
                 }
                 else /* totmap = 4 */
                 {
                     multiframe_quality_enhance_block(16, qcurr, qprev, y_ptr,
                                                      u_ptr, v_ptr,
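
The threshold in multiframe_quality_enhance_block above is built as thr = qdiff/16 + log2(actd) + log4(qprev), with the logarithm computed by the shift-and-count idiom "while (actd >>= 1) thr++". The same idiom in isolation:

/* Integer floor(log2(v)) via the shift-and-count idiom used above
 * (illustration only). */
static int ilog2(unsigned int v)
{
    int n = 0;
    while (v >>= 1)
        n++;
    return n;
}
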
--- a/media/libvpx/vp8/common/postproc.c
+++ b/media/libvpx/vp8/common/postproc.c
@@ -350,18 +350,18 @@ void vp8_deblock(VP8_COMMON             
             {
                 unsigned char mb_ppl;
 
                 if (mode_info_context->mbmi.mb_skip_coeff)
                     mb_ppl = (unsigned char)ppl >> 1;
                 else
                     mb_ppl = (unsigned char)ppl;
 
-                vpx_memset(ylptr, mb_ppl, 16);
-                vpx_memset(uvlptr, mb_ppl, 8);
+                memset(ylptr, mb_ppl, 16);
+                memset(uvlptr, mb_ppl, 8);
 
                 ylptr += 16;
                 uvlptr += 8;
                 mode_info_context++;
             }
             mode_info_context++;
 
             vp8_post_proc_down_and_across_mb_row(
@@ -398,17 +398,17 @@ void vp8_de_noise(VP8_COMMON            
     int ppl = (int)(level + .5);
     int mb_rows = cm->mb_rows;
     int mb_cols = cm->mb_cols;
     unsigned char *limits = cm->pp_limits_buffer;
     (void) post;
     (void) low_var_thresh;
     (void) flag;
 
-    vpx_memset(limits, (unsigned char)ppl, 16 * mb_cols);
+    memset(limits, (unsigned char)ppl, 16 * mb_cols);
 
     /* TODO: The original code doesn't filter the 2 outer rows and columns. */
     for (mbr = 0; mbr < mb_rows; mbr++)
     {
         vp8_post_proc_down_and_across_mb_row(
             source->y_buffer + 16 * mbr * source->y_stride,
             source->y_buffer + 16 * mbr * source->y_stride,
             source->y_stride, source->y_stride, source->y_width, limits, 16);
@@ -422,17 +422,17 @@ void vp8_de_noise(VP8_COMMON            
               source->v_buffer + 8 * mbr * source->uv_stride,
               source->v_buffer + 8 * mbr * source->uv_stride,
               source->uv_stride, source->uv_stride, source->uv_width, limits,
               8);
         }
     }
 }
 
-double vp8_gaussian(double sigma, double mu, double x)
+static double gaussian(double sigma, double mu, double x)
 {
     return 1 / (sigma * sqrt(2.0 * 3.14159265)) *
            (exp(-(x - mu) * (x - mu) / (2 * sigma * sigma)));
 }
 
 static void fillrd(struct postproc_state *state, int q, int a)
 {
     char char_dist[300];
@@ -450,17 +450,17 @@ static void fillrd(struct postproc_state
      */
     {
         int next, j;
 
         next = 0;
 
         for (i = -32; i < 32; i++)
         {
-            const int v = (int)(.5 + 256 * vp8_gaussian(sigma, 0, i));
+            const int v = (int)(.5 + 256 * gaussian(sigma, 0, i));
 
             if (v)
             {
                 for (j = 0; j < v; j++)
                 {
                     char_dist[next+j] = (char) i;
                 }
 
@@ -758,17 +758,17 @@ int vp8_post_proc_frame(VP8_COMMON *oci,
                 vpx_internal_error(&oci->error, VPX_CODEC_MEM_ERROR,
                                    "Failed to allocate MFQE framebuffer");
 
             oci->post_proc_buffer_int_used = 1;
 
             /* ensure the postproc buffer is fully initialized so that post proc
              * doesn't pull random data in from edge
              */
-            vpx_memset((&oci->post_proc_buffer_int)->buffer_alloc,128,(&oci->post_proc_buffer)->frame_size);
+            memset((&oci->post_proc_buffer_int)->buffer_alloc,128,(&oci->post_proc_buffer)->frame_size);
 
         }
     }
 
     vp8_clear_system_state();
 
     if ((flags & VP8D_MFQE) &&
          oci->postproc_state.last_frame_valid &&
--- a/media/libvpx/vp8/common/reconinter.c
+++ b/media/libvpx/vp8/common/reconinter.c
@@ -5,16 +5,18 @@
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
 
 #include <limits.h>
+#include <string.h>
+
 #include "vpx_config.h"
 #include "vp8_rtcd.h"
 #include "vpx/vpx_integer.h"
 #include "blockd.h"
 #include "reconinter.h"
 #if CONFIG_RUNTIME_CPU_DETECT
 #include "onyxc_int.h"
 #endif
@@ -25,41 +27,18 @@ void vp8_copy_mem16x16_c(
     unsigned char *dst,
     int dst_stride)
 {
 
     int r;
 
     for (r = 0; r < 16; r++)
     {
-#if !(CONFIG_FAST_UNALIGNED)
-        dst[0] = src[0];
-        dst[1] = src[1];
-        dst[2] = src[2];
-        dst[3] = src[3];
-        dst[4] = src[4];
-        dst[5] = src[5];
-        dst[6] = src[6];
-        dst[7] = src[7];
-        dst[8] = src[8];
-        dst[9] = src[9];
-        dst[10] = src[10];
-        dst[11] = src[11];
-        dst[12] = src[12];
-        dst[13] = src[13];
-        dst[14] = src[14];
-        dst[15] = src[15];
+        memcpy(dst, src, 16);
 
-#else
-        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0] ;
-        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1] ;
-        ((uint32_t *)dst)[2] = ((uint32_t *)src)[2] ;
-        ((uint32_t *)dst)[3] = ((uint32_t *)src)[3] ;
-
-#endif
         src += src_stride;
         dst += dst_stride;
 
     }
 
 }
 
 void vp8_copy_mem8x8_c(
@@ -67,29 +46,18 @@ void vp8_copy_mem8x8_c(
     int src_stride,
     unsigned char *dst,
     int dst_stride)
 {
     int r;
 
     for (r = 0; r < 8; r++)
     {
-#if !(CONFIG_FAST_UNALIGNED)
-        dst[0] = src[0];
-        dst[1] = src[1];
-        dst[2] = src[2];
-        dst[3] = src[3];
-        dst[4] = src[4];
-        dst[5] = src[5];
-        dst[6] = src[6];
-        dst[7] = src[7];
-#else
-        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0] ;
-        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1] ;
-#endif
+        memcpy(dst, src, 8);
+
         src += src_stride;
         dst += dst_stride;
 
     }
 
 }
 
 void vp8_copy_mem8x4_c(
@@ -97,29 +65,18 @@ void vp8_copy_mem8x4_c(
     int src_stride,
     unsigned char *dst,
     int dst_stride)
 {
     int r;
 
     for (r = 0; r < 4; r++)
     {
-#if !(CONFIG_FAST_UNALIGNED)
-        dst[0] = src[0];
-        dst[1] = src[1];
-        dst[2] = src[2];
-        dst[3] = src[3];
-        dst[4] = src[4];
-        dst[5] = src[5];
-        dst[6] = src[6];
-        dst[7] = src[7];
-#else
-        ((uint32_t *)dst)[0] = ((uint32_t *)src)[0] ;
-        ((uint32_t *)dst)[1] = ((uint32_t *)src)[1] ;
-#endif
+        memcpy(dst, src, 8);
+
         src += src_stride;
         dst += dst_stride;
 
     }
 
 }
 
 
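
All three copy kernels above collapse to a per-row memcpy of fixed width; the CONFIG_FAST_UNALIGNED word-store variant is gone because compilers already emit wide moves for small constant-size memcpy. The shared shape, as a sketch (not part of the patch):

#include <string.h>

/* Sketch of the shape shared by vp8_copy_mem16x16/8x8/8x4 after this
 * change. */
static void copy_mem_wxh(const unsigned char *src, int src_stride,
                         unsigned char *dst, int dst_stride, int w, int h)
{
    int r;
    for (r = 0; r < h; r++) {
        memcpy(dst, src, (size_t)w);
        src += src_stride;
        dst += dst_stride;
    }
}
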
--- a/media/libvpx/vp8/common/reconintra.c
+++ b/media/libvpx/vp8/common/reconintra.c
@@ -65,20 +65,20 @@ void vp8_build_intra_predictors_mby_s_c(
             shift = 3 + x->up_available + x->left_available;
             expected_dc = (average + (1 << (shift - 1))) >> shift;
         }
         else
         {
             expected_dc = 128;
         }
 
-        /*vpx_memset(ypred_ptr, expected_dc, 256);*/
+        /*memset(ypred_ptr, expected_dc, 256);*/
         for (r = 0; r < 16; r++)
         {
-            vpx_memset(ypred_ptr, expected_dc, 16);
+            memset(ypred_ptr, expected_dc, 16);
             ypred_ptr += y_stride;
         }
     }
     break;
     case V_PRED:
     {
 
         for (r = 0; r < 16; r++)
@@ -93,17 +93,17 @@ void vp8_build_intra_predictors_mby_s_c(
     }
     break;
     case H_PRED:
     {
 
         for (r = 0; r < 16; r++)
         {
 
-            vpx_memset(ypred_ptr, yleft_col[r], 16);
+            memset(ypred_ptr, yleft_col[r], 16);
             ypred_ptr += y_stride;
         }
 
     }
     break;
     case TM_PRED:
     {
 
@@ -197,45 +197,45 @@ void vp8_build_intra_predictors_mbuv_s_c
         else
         {
             shift = 2 + x->up_available + x->left_available;
             expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
             expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
         }
 
 
-        /*vpx_memset(upred_ptr,expected_udc,64);*/
-        /*vpx_memset(vpred_ptr,expected_vdc,64);*/
+        /*memset(upred_ptr,expected_udc,64);*/
+        /*memset(vpred_ptr,expected_vdc,64);*/
         for (i = 0; i < 8; i++)
         {
-            vpx_memset(upred_ptr, expected_udc, 8);
-            vpx_memset(vpred_ptr, expected_vdc, 8);
+            memset(upred_ptr, expected_udc, 8);
+            memset(vpred_ptr, expected_vdc, 8);
             upred_ptr += pred_stride;
             vpred_ptr += pred_stride;
         }
     }
     break;
     case V_PRED:
     {
         for (i = 0; i < 8; i++)
         {
-            vpx_memcpy(upred_ptr, uabove_row, 8);
-            vpx_memcpy(vpred_ptr, vabove_row, 8);
+            memcpy(upred_ptr, uabove_row, 8);
+            memcpy(vpred_ptr, vabove_row, 8);
             upred_ptr += pred_stride;
             vpred_ptr += pred_stride;
         }
 
     }
     break;
     case H_PRED:
     {
         for (i = 0; i < 8; i++)
         {
-            vpx_memset(upred_ptr, uleft_col[i], 8);
-            vpx_memset(vpred_ptr, vleft_col[i], 8);
+            memset(upred_ptr, uleft_col[i], 8);
+            memset(vpred_ptr, vleft_col[i], 8);
             upred_ptr += pred_stride;
             vpred_ptr += pred_stride;
         }
     }
 
     break;
     case TM_PRED:
     {
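
The DC predictor above divides the border sum by the number of contributing pixels with round-to-nearest; the shift encodes the count (for luma, 3 + up_available + left_available, i.e. 16 or 32 pixels). The rounding divide in isolation:

/* Round-to-nearest power-of-two divide used by the DC predictors above
 * (sketch; shift >= 1 assumed). */
static int round_shift_div(int sum, int shift)
{
    return (sum + (1 << (shift - 1))) >> shift;
}
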
--- a/media/libvpx/vp8/common/rtcd.c
+++ b/media/libvpx/vp8/common/rtcd.c
@@ -2,20 +2,18 @@
  *  Copyright (c) 2011 The WebM project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
-#include "vpx_config.h"
+#include "./vpx_config.h"
 #define RTCD_C
-#include "vp8_rtcd.h"
+#include "./vp8_rtcd.h"
 #include "vpx_ports/vpx_once.h"
 
-extern void vpx_scale_rtcd(void);
 
 void vp8_rtcd()
 {
-    vpx_scale_rtcd();
     once(setup_rtcd_internal);
 }
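
vp8_rtcd() now does nothing but funnel through once(), which guarantees setup_rtcd_internal() runs a single time even if several codec instances initialize concurrently; the explicit vpx_scale_rtcd() call moves out of this path. A single-threaded sketch of the once() contract (the real implementation is thread-safe):

/* Sketch of the once() contract, single-threaded for brevity. */
static void once_sketch(void (*func)(void))
{
    static int done;
    if (!done) {
        func();
        done = 1;
    }
}
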
deleted file mode 100644
--- a/media/libvpx/vp8/common/sad_c.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-
-#include <limits.h>
-#include <stdlib.h>
-#include "vpx_config.h"
-#include "vpx/vpx_integer.h"
-
-static unsigned int sad_mx_n_c(const unsigned char *src_ptr, int src_stride,
-                               const unsigned char *ref_ptr, int ref_stride,
-                               unsigned int max_sad, int m, int n)
-{
-    int r, c;
-    unsigned int sad = 0;
-
-    for (r = 0; r < n; r++)
-    {
-        for (c = 0; c < m; c++)
-        {
-            sad += abs(src_ptr[c] - ref_ptr[c]);
-        }
-
-        if (sad > max_sad)
-          break;
-
-        src_ptr += src_stride;
-        ref_ptr += ref_stride;
-    }
-
-    return sad;
-}
-
-/* max_sad is provided as an optional optimization point. Alternative
- * implementations of these functions are not required to check it.
- */
-
-unsigned int vp8_sad16x16_c(const unsigned char *src_ptr, int src_stride,
-                            const unsigned char *ref_ptr, int ref_stride,
-                            unsigned int max_sad)
-{
-    return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, max_sad, 16, 16);
-}
-
-unsigned int vp8_sad8x8_c(const unsigned char *src_ptr, int src_stride,
-                          const unsigned char *ref_ptr, int ref_stride,
-                          unsigned int max_sad)
-{
-    return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, max_sad, 8, 8);
-}
-
-unsigned int vp8_sad16x8_c(const unsigned char *src_ptr, int src_stride,
-                           const unsigned char *ref_ptr, int ref_stride,
-                           unsigned int max_sad)
-{
-    return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, max_sad, 16, 8);
-
-}
-
-unsigned int vp8_sad8x16_c(const unsigned char *src_ptr, int src_stride,
-                           const unsigned char *ref_ptr, int ref_stride,
-                           unsigned int max_sad)
-{
-    return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, max_sad, 8, 16);
-}
-
-unsigned int vp8_sad4x4_c(const unsigned char *src_ptr, int src_stride,
-                          const unsigned char *ref_ptr, int ref_stride,
-                          unsigned int max_sad)
-{
-    return sad_mx_n_c(src_ptr, src_stride, ref_ptr, ref_stride, max_sad, 4, 4);
-}
-
-void vp8_sad16x16x3_c(const unsigned char *src_ptr, int src_stride,
-                      const unsigned char *ref_ptr, int ref_stride,
-                      unsigned int *sad_array)
-{
-    sad_array[0] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 0, ref_stride, UINT_MAX);
-    sad_array[1] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, UINT_MAX);
-    sad_array[2] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, UINT_MAX);
-}
-
-void vp8_sad16x16x8_c(const unsigned char *src_ptr, int src_stride,
-                      const unsigned char *ref_ptr, int ref_stride,
-                      unsigned short *sad_array)
-{
-    sad_array[0] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 0, ref_stride, UINT_MAX);
-    sad_array[1] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, UINT_MAX);
-    sad_array[2] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, UINT_MAX);
-    sad_array[3] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, UINT_MAX);
-    sad_array[4] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, UINT_MAX);
-    sad_array[5] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, UINT_MAX);
-    sad_array[6] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, UINT_MAX);
-    sad_array[7] = (unsigned short)vp8_sad16x16_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, UINT_MAX);
-}
-
-void vp8_sad16x8x3_c(const unsigned char *src_ptr, int src_stride,
-                     const unsigned char *ref_ptr, int ref_stride,
-                     unsigned int *sad_array)
-{
-    sad_array[0] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 0, ref_stride, UINT_MAX);
-    sad_array[1] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, UINT_MAX);
-    sad_array[2] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, UINT_MAX);
-}
-
-void vp8_sad16x8x8_c(const unsigned char *src_ptr, int src_stride,
-                     const unsigned char *ref_ptr, int ref_stride,
-                     unsigned short *sad_array)
-{
-    sad_array[0] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 0, ref_stride, UINT_MAX);
-    sad_array[1] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, UINT_MAX);
-    sad_array[2] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, UINT_MAX);
-    sad_array[3] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, UINT_MAX);
-    sad_array[4] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, UINT_MAX);
-    sad_array[5] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, UINT_MAX);
-    sad_array[6] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, UINT_MAX);
-    sad_array[7] = (unsigned short)vp8_sad16x8_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, UINT_MAX);
-}
-
-void vp8_sad8x8x3_c(const unsigned char *src_ptr, int src_stride,
-                    const unsigned char *ref_ptr, int ref_stride,
-                    unsigned int *sad_array)
-{
-    sad_array[0] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 0, ref_stride, UINT_MAX);
-    sad_array[1] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, UINT_MAX);
-    sad_array[2] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, UINT_MAX);
-}
-
-void vp8_sad8x8x8_c(const unsigned char *src_ptr, int src_stride,
-                    const unsigned char *ref_ptr, int ref_stride,
-                    unsigned short *sad_array)
-{
-    sad_array[0] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 0, ref_stride, UINT_MAX);
-    sad_array[1] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, UINT_MAX);
-    sad_array[2] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, UINT_MAX);
-    sad_array[3] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, UINT_MAX);
-    sad_array[4] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, UINT_MAX);
-    sad_array[5] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, UINT_MAX);
-    sad_array[6] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, UINT_MAX);
-    sad_array[7] = (unsigned short)vp8_sad8x8_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, UINT_MAX);
-}
-
-void vp8_sad8x16x3_c(const unsigned char *src_ptr, int src_stride,
-                     const unsigned char *ref_ptr, int ref_stride,
-                     unsigned int *sad_array)
-{
-    sad_array[0] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 0, ref_stride, UINT_MAX);
-    sad_array[1] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, UINT_MAX);
-    sad_array[2] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, UINT_MAX);
-}
-
-void vp8_sad8x16x8_c(const unsigned char *src_ptr, int src_stride,
-                     const unsigned char *ref_ptr, int ref_stride,
-                     unsigned short *sad_array)
-{
-    sad_array[0] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 0, ref_stride, UINT_MAX);
-    sad_array[1] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, UINT_MAX);
-    sad_array[2] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, UINT_MAX);
-    sad_array[3] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, UINT_MAX);
-    sad_array[4] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, UINT_MAX);
-    sad_array[5] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, UINT_MAX);
-    sad_array[6] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, UINT_MAX);
-    sad_array[7] = (unsigned short)vp8_sad8x16_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, UINT_MAX);
-}
-
-void vp8_sad4x4x3_c(const unsigned char *src_ptr, int src_stride,
-                    const unsigned char *ref_ptr, int ref_stride,
-                    unsigned int *sad_array)
-{
-    sad_array[0] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 0, ref_stride, UINT_MAX);
-    sad_array[1] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, UINT_MAX);
-    sad_array[2] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, UINT_MAX);
-}
-
-void vp8_sad4x4x8_c(const unsigned char *src_ptr, int src_stride,
-                    const unsigned char *ref_ptr, int ref_stride,
-                    unsigned short *sad_array)
-{
-    sad_array[0] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 0, ref_stride, UINT_MAX);
-    sad_array[1] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 1, ref_stride, UINT_MAX);
-    sad_array[2] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 2, ref_stride, UINT_MAX);
-    sad_array[3] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 3, ref_stride, UINT_MAX);
-    sad_array[4] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 4, ref_stride, UINT_MAX);
-    sad_array[5] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 5, ref_stride, UINT_MAX);
-    sad_array[6] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 6, ref_stride, UINT_MAX);
-    sad_array[7] = (unsigned short)vp8_sad4x4_c(src_ptr, src_stride, ref_ptr + 7, ref_stride, UINT_MAX);
-}
-
-void vp8_sad16x16x4d_c(const unsigned char *src_ptr, int src_stride,
-                       const unsigned char * const ref_ptr[], int ref_stride,
-                       unsigned int *sad_array)
-{
-    sad_array[0] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[0], ref_stride, UINT_MAX);
-    sad_array[1] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[1], ref_stride, UINT_MAX);
-    sad_array[2] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[2], ref_stride, UINT_MAX);
-    sad_array[3] = vp8_sad16x16_c(src_ptr, src_stride, ref_ptr[3], ref_stride, UINT_MAX);
-}
-
-void vp8_sad16x8x4d_c(const unsigned char *src_ptr, int src_stride,
-                      const unsigned char * const ref_ptr[], int ref_stride,
-                      unsigned int *sad_array)
-{
-    sad_array[0] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[0], ref_stride, UINT_MAX);
-    sad_array[1] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[1], ref_stride, UINT_MAX);
-    sad_array[2] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[2], ref_stride, UINT_MAX);
-    sad_array[3] = vp8_sad16x8_c(src_ptr, src_stride, ref_ptr[3], ref_stride, UINT_MAX);
-}
-
-void vp8_sad8x8x4d_c(const unsigned char *src_ptr, int src_stride,
-                     const unsigned char * const ref_ptr[], int ref_stride,
-                     unsigned int *sad_array)
-{
-    sad_array[0] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[0], ref_stride, UINT_MAX);
-    sad_array[1] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[1], ref_stride, UINT_MAX);
-    sad_array[2] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[2], ref_stride, UINT_MAX);
-    sad_array[3] = vp8_sad8x8_c(src_ptr, src_stride, ref_ptr[3], ref_stride, UINT_MAX);
-}
-
-void vp8_sad8x16x4d_c(const unsigned char *src_ptr, int src_stride,
-                      const unsigned char * const ref_ptr[], int ref_stride,
-                      unsigned int *sad_array)
-{
-    sad_array[0] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[0], ref_stride, UINT_MAX);
-    sad_array[1] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[1], ref_stride, UINT_MAX);
-    sad_array[2] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[2], ref_stride, UINT_MAX);
-    sad_array[3] = vp8_sad8x16_c(src_ptr, src_stride, ref_ptr[3], ref_stride, UINT_MAX);
-}
-
-void vp8_sad4x4x4d_c(const unsigned char *src_ptr, int src_stride,
-                     const unsigned char * const ref_ptr[], int  ref_stride,
-                     unsigned int *sad_array)
-{
-    sad_array[0] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[0], ref_stride, UINT_MAX);
-    sad_array[1] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[1], ref_stride, UINT_MAX);
-    sad_array[2] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[2], ref_stride, UINT_MAX);
-    sad_array[3] = vp8_sad4x4_c(src_ptr, src_stride, ref_ptr[3], ref_stride, UINT_MAX);
-}
-
-/* Copy 2 macroblocks to a buffer */
-void vp8_copy32xn_c(unsigned char *src_ptr, int src_stride,
-                    unsigned char *dst_ptr, int dst_stride,
-                    int height)
-{
-    int r;
-
-    for (r = 0; r < height; r++)
-    {
-#if !(CONFIG_FAST_UNALIGNED)
-        dst_ptr[0] = src_ptr[0];
-        dst_ptr[1] = src_ptr[1];
-        dst_ptr[2] = src_ptr[2];
-        dst_ptr[3] = src_ptr[3];
-        dst_ptr[4] = src_ptr[4];
-        dst_ptr[5] = src_ptr[5];
-        dst_ptr[6] = src_ptr[6];
-        dst_ptr[7] = src_ptr[7];
-        dst_ptr[8] = src_ptr[8];
-        dst_ptr[9] = src_ptr[9];
-        dst_ptr[10] = src_ptr[10];
-        dst_ptr[11] = src_ptr[11];
-        dst_ptr[12] = src_ptr[12];
-        dst_ptr[13] = src_ptr[13];
-        dst_ptr[14] = src_ptr[14];
-        dst_ptr[15] = src_ptr[15];
-        dst_ptr[16] = src_ptr[16];
-        dst_ptr[17] = src_ptr[17];
-        dst_ptr[18] = src_ptr[18];
-        dst_ptr[19] = src_ptr[19];
-        dst_ptr[20] = src_ptr[20];
-        dst_ptr[21] = src_ptr[21];
-        dst_ptr[22] = src_ptr[22];
-        dst_ptr[23] = src_ptr[23];
-        dst_ptr[24] = src_ptr[24];
-        dst_ptr[25] = src_ptr[25];
-        dst_ptr[26] = src_ptr[26];
-        dst_ptr[27] = src_ptr[27];
-        dst_ptr[28] = src_ptr[28];
-        dst_ptr[29] = src_ptr[29];
-        dst_ptr[30] = src_ptr[30];
-        dst_ptr[31] = src_ptr[31];
-#else
-        ((uint32_t *)dst_ptr)[0] = ((uint32_t *)src_ptr)[0] ;
-        ((uint32_t *)dst_ptr)[1] = ((uint32_t *)src_ptr)[1] ;
-        ((uint32_t *)dst_ptr)[2] = ((uint32_t *)src_ptr)[2] ;
-        ((uint32_t *)dst_ptr)[3] = ((uint32_t *)src_ptr)[3] ;
-        ((uint32_t *)dst_ptr)[4] = ((uint32_t *)src_ptr)[4] ;
-        ((uint32_t *)dst_ptr)[5] = ((uint32_t *)src_ptr)[5] ;
-        ((uint32_t *)dst_ptr)[6] = ((uint32_t *)src_ptr)[6] ;
-        ((uint32_t *)dst_ptr)[7] = ((uint32_t *)src_ptr)[7] ;
-#endif
-        src_ptr += src_stride;
-        dst_ptr += dst_stride;
-
-    }
-}
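
The whole SAD family deleted above now lives in vpx_dsp; as the mfqe.c hunk earlier shows, the vpx_sad* replacements drop the max_sad early-exit parameter. The core loop, minus that parameter, as a sketch:

#include <stdlib.h>

/* Core of the deleted sad_mx_n_c without the max_sad early exit
 * (sketch of what the vpx_dsp replacements compute). */
static unsigned int sad_wxh(const unsigned char *src, int src_stride,
                            const unsigned char *ref, int ref_stride,
                            int w, int h)
{
    int r, c;
    unsigned int sad = 0;
    for (r = 0; r < h; r++) {
        for (c = 0; c < w; c++)
            sad += abs(src[c] - ref[c]);
        src += src_stride;
        ref += ref_stride;
    }
    return sad;
}
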
--- a/media/libvpx/vp8/common/setupintrarecon.c
+++ b/media/libvpx/vp8/common/setupintrarecon.c
@@ -12,28 +12,28 @@
 #include "setupintrarecon.h"
 #include "vpx_mem/vpx_mem.h"
 
 void vp8_setup_intra_recon(YV12_BUFFER_CONFIG *ybf)
 {
     int i;
 
     /* set up new frame for intra coded blocks */
-    vpx_memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5);
+    memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5);
     for (i = 0; i < ybf->y_height; i++)
         ybf->y_buffer[ybf->y_stride *i - 1] = (unsigned char) 129;
 
-    vpx_memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
+    memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
     for (i = 0; i < ybf->uv_height; i++)
         ybf->u_buffer[ybf->uv_stride *i - 1] = (unsigned char) 129;
 
-    vpx_memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
+    memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
     for (i = 0; i < ybf->uv_height; i++)
         ybf->v_buffer[ybf->uv_stride *i - 1] = (unsigned char) 129;
 
 }
 
 void vp8_setup_intra_recon_top_line(YV12_BUFFER_CONFIG *ybf)
 {
-    vpx_memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5);
-    vpx_memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
-    vpx_memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
+    memset(ybf->y_buffer - 1 - ybf->y_stride, 127, ybf->y_width + 5);
+    memset(ybf->u_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
+    memset(ybf->v_buffer - 1 - ybf->uv_stride, 127, ybf->uv_width + 5);
 }
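
The constants are the synthetic borders VP8 defines for intra prediction when real neighbours are missing: 127 across the row above (plus the corner) and 129 down the left column. A per-plane sketch of what the two functions above do:

#include <string.h>

/* Per-plane version of the border setup above (sketch; assumes the
 * border memory outside buf exists, as in the real frame buffers). */
static void init_plane_border(unsigned char *buf, int stride,
                              int width, int height)
{
    int i;
    memset(buf - 1 - stride, 127, width + 5);  /* row above + corner */
    for (i = 0; i < height; i++)
        buf[stride * i - 1] = 129;             /* left column */
}
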
--- a/media/libvpx/vp8/common/variance.h
+++ b/media/libvpx/vp8/common/variance.h
@@ -9,60 +9,52 @@
  */
 
 
 #ifndef VP8_COMMON_VARIANCE_H_
 #define VP8_COMMON_VARIANCE_H_
 
 #include "vpx_config.h"
 
+#include "vpx/vpx_integer.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-typedef unsigned int(*vp8_sad_fn_t)(
-    const unsigned char *src_ptr,
+typedef unsigned int(*vpx_sad_fn_t)(
+    const uint8_t *src_ptr,
     int source_stride,
-    const unsigned char *ref_ptr,
-    int ref_stride,
-    unsigned int max_sad);
+    const uint8_t *ref_ptr,
+    int ref_stride);
 
 typedef void (*vp8_copy32xn_fn_t)(
     const unsigned char *src_ptr,
     int source_stride,
-    const unsigned char *ref_ptr,
+    unsigned char *ref_ptr,
     int ref_stride,
     int n);
 
-typedef void (*vp8_sad_multi_fn_t)(
+typedef void (*vpx_sad_multi_fn_t)(
     const unsigned char *src_ptr,
     int source_stride,
-    const unsigned char *ref_ptr,
+    const unsigned char *ref_array,
     int  ref_stride,
     unsigned int *sad_array);
 
-typedef void (*vp8_sad_multi1_fn_t)
+typedef void (*vpx_sad_multi_d_fn_t)
     (
      const unsigned char *src_ptr,
      int source_stride,
-     const unsigned char *ref_ptr,
-     int  ref_stride,
-     unsigned short *sad_array
-    );
-
-typedef void (*vp8_sad_multi_d_fn_t)
-    (
-     const unsigned char *src_ptr,
-     int source_stride,
-     const unsigned char * const ref_ptr[],
+     const unsigned char * const ref_array[],
      int  ref_stride,
      unsigned int *sad_array
     );
 
-typedef unsigned int (*vp8_variance_fn_t)
+typedef unsigned int (*vpx_variance_fn_t)
     (
      const unsigned char *src_ptr,
      int source_stride,
      const unsigned char *ref_ptr,
      int  ref_stride,
      unsigned int *sse
     );
 
@@ -72,50 +64,27 @@ typedef unsigned int (*vp8_subpixvarianc
       int  source_stride,
       int  xoffset,
       int  yoffset,
       const unsigned char *ref_ptr,
       int Refstride,
       unsigned int *sse
     );
 
-typedef void (*vp8_ssimpf_fn_t)
-      (
-        unsigned char *s,
-        int sp,
-        unsigned char *r,
-        int rp,
-        unsigned long *sum_s,
-        unsigned long *sum_r,
-        unsigned long *sum_sq_s,
-        unsigned long *sum_sq_r,
-        unsigned long *sum_sxr
-      );
-
-typedef unsigned int (*vp8_getmbss_fn_t)(const short *);
-
-typedef unsigned int (*vp8_get16x16prederror_fn_t)
-    (
-     const unsigned char *src_ptr,
-     int source_stride,
-     const unsigned char *ref_ptr,
-     int  ref_stride
-    );
-
 typedef struct variance_vtable
 {
-    vp8_sad_fn_t            sdf;
-    vp8_variance_fn_t       vf;
+    vpx_sad_fn_t            sdf;
+    vpx_variance_fn_t       vf;
     vp8_subpixvariance_fn_t svf;
-    vp8_variance_fn_t       svf_halfpix_h;
-    vp8_variance_fn_t       svf_halfpix_v;
-    vp8_variance_fn_t       svf_halfpix_hv;
-    vp8_sad_multi_fn_t      sdx3f;
-    vp8_sad_multi1_fn_t     sdx8f;
-    vp8_sad_multi_d_fn_t    sdx4df;
+    vpx_variance_fn_t       svf_halfpix_h;
+    vpx_variance_fn_t       svf_halfpix_v;
+    vpx_variance_fn_t       svf_halfpix_hv;
+    vpx_sad_multi_fn_t      sdx3f;
+    vpx_sad_multi_fn_t      sdx8f;
+    vpx_sad_multi_d_fn_t    sdx4df;
 #if ARCH_X86 || ARCH_X86_64
     vp8_copy32xn_fn_t       copymem;
 #endif
 } vp8_variance_fn_ptr_t;
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
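
The vtable keeps its layout but now points at vpx_dsp types: sdf loses the max_sad argument and sdx8f shares vpx_sad_multi_fn_t (unsigned int results) instead of the removed unsigned short variant. Hypothetical wiring, assuming the vpx_dsp RTCD symbols are visible:

/* Hypothetical vtable wiring under the renamed typedefs (the exact
 * symbols are assumptions, not part of this header). */
static void wire_16x16(vp8_variance_fn_ptr_t *fn)
{
    fn->sdf = vpx_sad16x16;        /* vpx_sad_fn_t: no max_sad */
    fn->vf  = vpx_variance16x16;   /* vpx_variance_fn_t */
}
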
--- a/media/libvpx/vp8/common/variance_c.c
+++ b/media/libvpx/vp8/common/variance_c.c
@@ -3,168 +3,62 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-
+#include "./vp8_rtcd.h"
+#include "filter.h"
 #include "variance.h"
-#include "filter.h"
-
 
-unsigned int vp8_get_mb_ss_c
-(
-    const short *src_ptr
-)
-{
-    unsigned int i = 0, sum = 0;
-
-    do
-    {
-        sum += (src_ptr[i] * src_ptr[i]);
-        i++;
-    }
-    while (i < 256);
-
-    return sum;
+/* ctz = count trailing zeros. The block sizes used here are
+ * powers of two, so this doubles as an integer log2. */
+static int ctz(int a) {
+  int b = 0;
+  while (a != 1) {
+    a >>= 1;
+    b++;
+  }
+  return b;
 }
 
-
-static void variance(
+static unsigned int variance(
     const unsigned char *src_ptr,
     int  source_stride,
     const unsigned char *ref_ptr,
     int  recon_stride,
     int  w,
     int  h,
-    unsigned int *sse,
-    int *sum)
+    unsigned int *sse)
 {
     int i, j;
-    int diff;
+    int diff, sum;
 
-    *sum = 0;
+    sum = 0;
     *sse = 0;
 
     for (i = 0; i < h; i++)
     {
         for (j = 0; j < w; j++)
         {
             diff = src_ptr[j] - ref_ptr[j];
-            *sum += diff;
+            sum += diff;
             *sse += diff * diff;
         }
 
         src_ptr += source_stride;
         ref_ptr += recon_stride;
     }
-}
 
-
-unsigned int vp8_variance16x16_c(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int var;
-    int avg;
-
-
-    variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 8));
-}
-
-unsigned int vp8_variance8x16_c(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int var;
-    int avg;
-
-
-    variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 16, &var, &avg);
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 7));
-}
-
-unsigned int vp8_variance16x8_c(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int var;
-    int avg;
-
-
-    variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 8, &var, &avg);
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 7));
+    return (*sse - (((unsigned int)sum * sum) >> (int)((ctz(w) + ctz(h)))));
 }
 
-
-unsigned int vp8_variance8x8_c(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int var;
-    int avg;
-
-
-    variance(src_ptr, source_stride, ref_ptr, recon_stride, 8, 8, &var, &avg);
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 6));
-}
-
-unsigned int vp8_variance4x4_c(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int var;
-    int avg;
-
-
-    variance(src_ptr, source_stride, ref_ptr, recon_stride, 4, 4, &var, &avg);
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 4));
-}
-
-
-unsigned int vp8_mse16x16_c(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int var;
-    int avg;
-
-    variance(src_ptr, source_stride, ref_ptr, recon_stride, 16, 16, &var, &avg);
-    *sse = var;
-    return var;
-}
-
-
 /****************************************************************************
  *
  *  ROUTINE       : filter_block2d_bil_first_pass
  *
  *  INPUTS        : UINT8  *src_ptr          : Pointer to source block.
  *                  UINT32 src_pixels_per_line : Stride of input block.
  *                  UINT32 pixel_step        : Offset between filter input samples (see notes).
  *                  UINT32 output_height     : Input block height.
@@ -298,17 +192,17 @@ unsigned int vp8_sub_pixel_variance4x4_c
     VFilter = vp8_bilinear_filters[yoffset];
 
     /* First filter 1d Horizontal */
     var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 5, 4, HFilter);
 
     /* Now filter Vertically */
     var_filter_block2d_bil_second_pass(FData3, temp2, 4,  4,  4,  4, VFilter);
 
-    return vp8_variance4x4_c(temp2, 4, dst_ptr, dst_pixels_per_line, sse);
+    return variance(temp2, 4, dst_ptr, dst_pixels_per_line, 4, 4, sse);
 }
 
 
 unsigned int vp8_sub_pixel_variance8x8_c
 (
     const unsigned char  *src_ptr,
     int  src_pixels_per_line,
     int  xoffset,
@@ -323,17 +217,17 @@ unsigned int vp8_sub_pixel_variance8x8_c
     const short *HFilter, *VFilter;
 
     HFilter = vp8_bilinear_filters[xoffset];
     VFilter = vp8_bilinear_filters[yoffset];
 
     var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 8, HFilter);
     var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 8, 8, VFilter);
 
-    return vp8_variance8x8_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
+    return variance(temp2, 8, dst_ptr, dst_pixels_per_line, 8, 8, sse);
 }
 
 unsigned int vp8_sub_pixel_variance16x16_c
 (
     const unsigned char  *src_ptr,
     int  src_pixels_per_line,
     int  xoffset,
     int  yoffset,
@@ -347,17 +241,17 @@ unsigned int vp8_sub_pixel_variance16x16
     const short *HFilter, *VFilter;
 
     HFilter = vp8_bilinear_filters[xoffset];
     VFilter = vp8_bilinear_filters[yoffset];
 
     var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 16, HFilter);
     var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 16, 16, VFilter);
 
-    return vp8_variance16x16_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
+    return variance(temp2, 16, dst_ptr, dst_pixels_per_line, 16, 16, sse);
 }
 
 
 unsigned int vp8_variance_halfpixvar16x16_h_c(
     const unsigned char *src_ptr,
     int  source_stride,
     const unsigned char *ref_ptr,
     int  recon_stride,
@@ -387,31 +281,16 @@ unsigned int vp8_variance_halfpixvar16x1
     int  recon_stride,
     unsigned int *sse)
 {
     return vp8_sub_pixel_variance16x16_c(src_ptr, source_stride, 4, 4,
                                          ref_ptr, recon_stride, sse);
 }
 
 
-unsigned int vp8_sub_pixel_mse16x16_c
-(
-    const unsigned char  *src_ptr,
-    int  src_pixels_per_line,
-    int  xoffset,
-    int  yoffset,
-    const unsigned char *dst_ptr,
-    int dst_pixels_per_line,
-    unsigned int *sse
-)
-{
-    vp8_sub_pixel_variance16x16_c(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
-    return *sse;
-}
-
 unsigned int vp8_sub_pixel_variance16x8_c
 (
     const unsigned char  *src_ptr,
     int  src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     const unsigned char *dst_ptr,
     int dst_pixels_per_line,
@@ -423,17 +302,17 @@ unsigned int vp8_sub_pixel_variance16x8_
     const short *HFilter, *VFilter;
 
     HFilter = vp8_bilinear_filters[xoffset];
     VFilter = vp8_bilinear_filters[yoffset];
 
     var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 9, 16, HFilter);
     var_filter_block2d_bil_second_pass(FData3, temp2, 16, 16, 8, 16, VFilter);
 
-    return vp8_variance16x8_c(temp2, 16, dst_ptr, dst_pixels_per_line, sse);
+    return variance(temp2, 16, dst_ptr, dst_pixels_per_line, 16, 8, sse);
 }
 
 unsigned int vp8_sub_pixel_variance8x16_c
 (
     const unsigned char  *src_ptr,
     int  src_pixels_per_line,
     int  xoffset,
     int  yoffset,
@@ -449,10 +328,10 @@ unsigned int vp8_sub_pixel_variance8x16_
 
     HFilter = vp8_bilinear_filters[xoffset];
     VFilter = vp8_bilinear_filters[yoffset];
 
 
     var_filter_block2d_bil_first_pass(src_ptr, FData3, src_pixels_per_line, 1, 17, 8, HFilter);
     var_filter_block2d_bil_second_pass(FData3, temp2, 8, 8, 16, 8, VFilter);
 
-    return vp8_variance8x16_c(temp2, 8, dst_ptr, dst_pixels_per_line, sse);
+    return variance(temp2, 8, dst_ptr, dst_pixels_per_line, 8, 16, sse);
 }
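
The per-size wrappers (vp8_variance16x16_c and friends) collapse into one static variance() helper: it returns SSE - sum^2 / (w*h), and because every block dimension here is a power of two, ctz(w) + ctz(h) equals log2(w*h), so the division becomes the shift seen above. A self-contained check of that identity, with made-up numbers:

    #include <assert.h>
    #include <stdio.h>

    static int ctz(int a) {            /* same helper as in the diff */
        int b = 0;
        while (a != 1) { a >>= 1; b++; }
        return b;
    }

    int main(void) {
        unsigned int sse = 5000;       /* illustrative values only */
        int sum = -120, w = 16, h = 8;

        unsigned int by_shift =
            sse - (((unsigned int)(sum * sum)) >> (ctz(w) + ctz(h)));
        unsigned int by_div = sse - (unsigned int)((sum * sum) / (w * h));

        assert(by_shift == by_div);    /* holds because w*h is a power of two */
        printf("variance = %u\n", by_shift);
        return 0;
    }
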
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/x86/copy_sse2.asm
@@ -0,0 +1,93 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+%include "vpx_ports/x86_abi_support.asm"
+
+
+;void vp8_copy32xn_sse2(
+;    unsigned char *src_ptr,
+;    int  src_stride,
+;    unsigned char *dst_ptr,
+;    int  dst_stride,
+;    int height);
+global sym(vp8_copy32xn_sse2) PRIVATE
+sym(vp8_copy32xn_sse2):
+    push        rbp
+    mov         rbp, rsp
+    SHADOW_ARGS_TO_STACK 5
+    SAVE_XMM 7
+    push        rsi
+    push        rdi
+    ; end prolog
+
+        mov             rsi,        arg(0) ;src_ptr
+        mov             rdi,        arg(2) ;dst_ptr
+
+        movsxd          rax,        dword ptr arg(1) ;src_stride
+        movsxd          rdx,        dword ptr arg(3) ;dst_stride
+        movsxd          rcx,        dword ptr arg(4) ;height
+
+.block_copy_sse2_loopx4:
+        movdqu          xmm0,       XMMWORD PTR [rsi]
+        movdqu          xmm1,       XMMWORD PTR [rsi + 16]
+        movdqu          xmm2,       XMMWORD PTR [rsi + rax]
+        movdqu          xmm3,       XMMWORD PTR [rsi + rax + 16]
+
+        lea             rsi,        [rsi+rax*2]
+
+        movdqu          xmm4,       XMMWORD PTR [rsi]
+        movdqu          xmm5,       XMMWORD PTR [rsi + 16]
+        movdqu          xmm6,       XMMWORD PTR [rsi + rax]
+        movdqu          xmm7,       XMMWORD PTR [rsi + rax + 16]
+
+        lea             rsi,    [rsi+rax*2]
+
+        movdqa          XMMWORD PTR [rdi], xmm0
+        movdqa          XMMWORD PTR [rdi + 16], xmm1
+        movdqa          XMMWORD PTR [rdi + rdx], xmm2
+        movdqa          XMMWORD PTR [rdi + rdx + 16], xmm3
+
+        lea             rdi,    [rdi+rdx*2]
+
+        movdqa          XMMWORD PTR [rdi], xmm4
+        movdqa          XMMWORD PTR [rdi + 16], xmm5
+        movdqa          XMMWORD PTR [rdi + rdx], xmm6
+        movdqa          XMMWORD PTR [rdi + rdx + 16], xmm7
+
+        lea             rdi,    [rdi+rdx*2]
+
+        sub             rcx,     4
+        cmp             rcx,     4
+        jge             .block_copy_sse2_loopx4
+
+        cmp             rcx, 0
+        je              .copy_is_done
+
+.block_copy_sse2_loop:
+        movdqu          xmm0,       XMMWORD PTR [rsi]
+        movdqu          xmm1,       XMMWORD PTR [rsi + 16]
+        lea             rsi,    [rsi+rax]
+
+        movdqa          XMMWORD PTR [rdi], xmm0
+        movdqa          XMMWORD PTR [rdi + 16], xmm1
+        lea             rdi,    [rdi+rdx]
+
+        sub             rcx,     1
+        jne             .block_copy_sse2_loop
+
+.copy_is_done:
+    ; begin epilog
+    pop rdi
+    pop rsi
+    RESTORE_XMM
+    UNSHADOW_ARGS
+    pop         rbp
+    ret
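
vp8_copy32xn_sse2 copies a 32-byte-wide block, height rows tall: the main loop moves four rows per iteration with unaligned loads (movdqu) from the source and aligned stores (movdqa) to the destination, then a one-row tail loop handles the remainder. A C sketch of the same control flow, assuming the 16-byte destination alignment the movdqa stores require; copy32xn_c is a hypothetical name:

    #include <string.h>

    static void copy32xn_c(const unsigned char *src, int src_stride,
                           unsigned char *dst, int dst_stride, int height)
    {
        while (height >= 4) {          /* .block_copy_sse2_loopx4 */
            int r;
            for (r = 0; r < 4; r++) {
                memcpy(dst, src, 32);
                src += src_stride;
                dst += dst_stride;
            }
            height -= 4;
        }
        while (height-- > 0) {         /* .block_copy_sse2_loop */
            memcpy(dst, src, 32);
            src += src_stride;
            dst += dst_stride;
        }
    }
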
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/x86/copy_sse3.asm
@@ -0,0 +1,146 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+%include "vpx_ports/x86_abi_support.asm"
+
+%macro STACK_FRAME_CREATE_X3 0
+%if ABI_IS_32BIT
+  %define     src_ptr       rsi
+  %define     src_stride    rax
+  %define     ref_ptr       rdi
+  %define     ref_stride    rdx
+  %define     end_ptr       rcx
+  %define     ret_var       rbx
+  %define     result_ptr    arg(4)
+  %define     max_sad       arg(4)
+  %define     height        dword ptr arg(4)
+    push        rbp
+    mov         rbp,        rsp
+    push        rsi
+    push        rdi
+    push        rbx
+
+    mov         rsi,        arg(0)              ; src_ptr
+    mov         rdi,        arg(2)              ; ref_ptr
+
+    movsxd      rax,        dword ptr arg(1)    ; src_stride
+    movsxd      rdx,        dword ptr arg(3)    ; ref_stride
+%else
+  %if LIBVPX_YASM_WIN64
+    SAVE_XMM 7, u
+    %define     src_ptr     rcx
+    %define     src_stride  rdx
+    %define     ref_ptr     r8
+    %define     ref_stride  r9
+    %define     end_ptr     r10
+    %define     ret_var     r11
+    %define     result_ptr  [rsp+xmm_stack_space+8+4*8]
+    %define     max_sad     [rsp+xmm_stack_space+8+4*8]
+    %define     height      dword ptr [rsp+xmm_stack_space+8+4*8]
+  %else
+    %define     src_ptr     rdi
+    %define     src_stride  rsi
+    %define     ref_ptr     rdx
+    %define     ref_stride  rcx
+    %define     end_ptr     r9
+    %define     ret_var     r10
+    %define     result_ptr  r8
+    %define     max_sad     r8
+    %define     height      r8
+  %endif
+%endif
+
+%endmacro
+
+%macro STACK_FRAME_DESTROY_X3 0
+  %define     src_ptr
+  %define     src_stride
+  %define     ref_ptr
+  %define     ref_stride
+  %define     end_ptr
+  %define     ret_var
+  %define     result_ptr
+  %define     max_sad
+  %define     height
+
+%if ABI_IS_32BIT
+    pop         rbx
+    pop         rdi
+    pop         rsi
+    pop         rbp
+%else
+  %if LIBVPX_YASM_WIN64
+    RESTORE_XMM
+  %endif
+%endif
+    ret
+%endmacro
+
+
+;void vp8_copy32xn_sse3(
+;    unsigned char *src_ptr,
+;    int  src_stride,
+;    unsigned char *dst_ptr,
+;    int  dst_stride,
+;    int height);
+global sym(vp8_copy32xn_sse3) PRIVATE
+sym(vp8_copy32xn_sse3):
+
+    STACK_FRAME_CREATE_X3
+
+.block_copy_sse3_loopx4:
+        lea             end_ptr,    [src_ptr+src_stride*2]
+
+        movdqu          xmm0,       XMMWORD PTR [src_ptr]
+        movdqu          xmm1,       XMMWORD PTR [src_ptr + 16]
+        movdqu          xmm2,       XMMWORD PTR [src_ptr + src_stride]
+        movdqu          xmm3,       XMMWORD PTR [src_ptr + src_stride + 16]
+        movdqu          xmm4,       XMMWORD PTR [end_ptr]
+        movdqu          xmm5,       XMMWORD PTR [end_ptr + 16]
+        movdqu          xmm6,       XMMWORD PTR [end_ptr + src_stride]
+        movdqu          xmm7,       XMMWORD PTR [end_ptr + src_stride + 16]
+
+        lea             src_ptr,    [src_ptr+src_stride*4]
+
+        lea             end_ptr,    [ref_ptr+ref_stride*2]
+
+        movdqa          XMMWORD PTR [ref_ptr], xmm0
+        movdqa          XMMWORD PTR [ref_ptr + 16], xmm1
+        movdqa          XMMWORD PTR [ref_ptr + ref_stride], xmm2
+        movdqa          XMMWORD PTR [ref_ptr + ref_stride + 16], xmm3
+        movdqa          XMMWORD PTR [end_ptr], xmm4
+        movdqa          XMMWORD PTR [end_ptr + 16], xmm5
+        movdqa          XMMWORD PTR [end_ptr + ref_stride], xmm6
+        movdqa          XMMWORD PTR [end_ptr + ref_stride + 16], xmm7
+
+        lea             ref_ptr,    [ref_ptr+ref_stride*4]
+
+        sub             height,     4
+        cmp             height,     4
+        jge             .block_copy_sse3_loopx4
+
+        ;Check whether there are more rows that need to be copied.
+        cmp             height, 0
+        je              .copy_is_done
+
+.block_copy_sse3_loop:
+        movdqu          xmm0,       XMMWORD PTR [src_ptr]
+        movdqu          xmm1,       XMMWORD PTR [src_ptr + 16]
+        lea             src_ptr,    [src_ptr+src_stride]
+
+        movdqa          XMMWORD PTR [ref_ptr], xmm0
+        movdqa          XMMWORD PTR [ref_ptr + 16], xmm1
+        lea             ref_ptr,    [ref_ptr+ref_stride]
+
+        sub             height,     1
+        jne             .block_copy_sse3_loop
+
+.copy_is_done:
+    STACK_FRAME_DESTROY_X3
--- a/media/libvpx/vp8/common/x86/idct_blk_mmx.c
+++ b/media/libvpx/vp8/common/x86/idct_blk_mmx.c
@@ -31,44 +31,44 @@ void vp8_dequant_idct_add_y_block_mmx
 
     for (i = 0; i < 4; i++)
     {
         if (eobs[0] > 1)
             vp8_dequant_idct_add_mmx (q, dq, dst, stride);
         else if (eobs[0] == 1)
         {
             vp8_dc_only_idct_add_mmx (q[0]*dq[0], dst, stride, dst, stride);
-            vpx_memset(q, 0, 2 * sizeof(q[0]));
+            memset(q, 0, 2 * sizeof(q[0]));
         }
 
         if (eobs[1] > 1)
             vp8_dequant_idct_add_mmx (q+16, dq, dst+4, stride);
         else if (eobs[1] == 1)
         {
             vp8_dc_only_idct_add_mmx (q[16]*dq[0], dst+4, stride,
                                       dst+4, stride);
-            vpx_memset(q + 16, 0, 2 * sizeof(q[0]));
+            memset(q + 16, 0, 2 * sizeof(q[0]));
         }
 
         if (eobs[2] > 1)
             vp8_dequant_idct_add_mmx (q+32, dq, dst+8, stride);
         else if (eobs[2] == 1)
         {
             vp8_dc_only_idct_add_mmx (q[32]*dq[0], dst+8, stride,
                                       dst+8, stride);
-            vpx_memset(q + 32, 0, 2 * sizeof(q[0]));
+            memset(q + 32, 0, 2 * sizeof(q[0]));
         }
 
         if (eobs[3] > 1)
             vp8_dequant_idct_add_mmx (q+48, dq, dst+12, stride);
         else if (eobs[3] == 1)
         {
             vp8_dc_only_idct_add_mmx (q[48]*dq[0], dst+12, stride,
                                       dst+12, stride);
-            vpx_memset(q + 48, 0, 2 * sizeof(q[0]));
+            memset(q + 48, 0, 2 * sizeof(q[0]));
         }
 
         q    += 64;
         dst  += 4*stride;
         eobs += 4;
     }
 }
 
@@ -80,49 +80,49 @@ void vp8_dequant_idct_add_uv_block_mmx
 
     for (i = 0; i < 2; i++)
     {
         if (eobs[0] > 1)
             vp8_dequant_idct_add_mmx (q, dq, dstu, stride);
         else if (eobs[0] == 1)
         {
             vp8_dc_only_idct_add_mmx (q[0]*dq[0], dstu, stride, dstu, stride);
-            vpx_memset(q, 0, 2 * sizeof(q[0]));
+            memset(q, 0, 2 * sizeof(q[0]));
         }
 
         if (eobs[1] > 1)
             vp8_dequant_idct_add_mmx (q+16, dq, dstu+4, stride);
         else if (eobs[1] == 1)
         {
             vp8_dc_only_idct_add_mmx (q[16]*dq[0], dstu+4, stride,
                                       dstu+4, stride);
-            vpx_memset(q + 16, 0, 2 * sizeof(q[0]));
+            memset(q + 16, 0, 2 * sizeof(q[0]));
         }
 
         q    += 32;
         dstu += 4*stride;
         eobs += 2;
     }
 
     for (i = 0; i < 2; i++)
     {
         if (eobs[0] > 1)
             vp8_dequant_idct_add_mmx (q, dq, dstv, stride);
         else if (eobs[0] == 1)
         {
             vp8_dc_only_idct_add_mmx (q[0]*dq[0], dstv, stride, dstv, stride);
-            vpx_memset(q, 0, 2 * sizeof(q[0]));
+            memset(q, 0, 2 * sizeof(q[0]));
         }
 
         if (eobs[1] > 1)
             vp8_dequant_idct_add_mmx (q+16, dq, dstv+4, stride);
         else if (eobs[1] == 1)
         {
             vp8_dc_only_idct_add_mmx (q[16]*dq[0], dstv+4, stride,
                                       dstv+4, stride);
-            vpx_memset(q + 16, 0, 2 * sizeof(q[0]));
+            memset(q + 16, 0, 2 * sizeof(q[0]));
         }
 
         q    += 32;
         dstv += 4*stride;
         eobs += 2;
     }
 }
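
Aside from swapping vpx_memset for plain memset, the logic above is unchanged: each 4x4 block dispatches on its end-of-block count. Restated as a sketch (names taken from the surrounding diff; i indexes the block within a row):

    if (eobs[i] > 1) {
        /* more than the DC coefficient is present:
         * full dequantize + inverse DCT */
        vp8_dequant_idct_add_mmx(q + 16 * i, dq, dst + 4 * i, stride);
    } else if (eobs[i] == 1) {
        /* DC-only block: add the scaled DC term directly, then
         * clear the coefficients the fast path leaves stale */
        vp8_dc_only_idct_add_mmx(q[16 * i] * dq[0], dst + 4 * i, stride,
                                 dst + 4 * i, stride);
        memset(q + 16 * i, 0, 2 * sizeof(q[0]));
    }
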
deleted file mode 100644
--- a/media/libvpx/vp8/common/x86/sad_sse2.asm
+++ /dev/null
@@ -1,410 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-%include "vpx_ports/x86_abi_support.asm"
-
-;unsigned int vp8_sad16x16_wmt(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride)
-global sym(vp8_sad16x16_wmt) PRIVATE
-sym(vp8_sad16x16_wmt):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 4
-    SAVE_XMM 6
-    push        rsi
-    push        rdi
-    ; end prolog
-
-        mov             rsi,        arg(0) ;src_ptr
-        mov             rdi,        arg(2) ;ref_ptr
-
-        movsxd          rax,        dword ptr arg(1) ;src_stride
-        movsxd          rdx,        dword ptr arg(3) ;ref_stride
-
-        lea             rcx,        [rsi+rax*8]
-
-        lea             rcx,        [rcx+rax*8]
-        pxor            xmm6,       xmm6
-
-.x16x16sad_wmt_loop:
-
-        movq            xmm0,       QWORD PTR [rsi]
-        movq            xmm2,       QWORD PTR [rsi+8]
-
-        movq            xmm1,       QWORD PTR [rdi]
-        movq            xmm3,       QWORD PTR [rdi+8]
-
-        movq            xmm4,       QWORD PTR [rsi+rax]
-        movq            xmm5,       QWORD PTR [rdi+rdx]
-
-
-        punpcklbw       xmm0,       xmm2
-        punpcklbw       xmm1,       xmm3
-
-        psadbw          xmm0,       xmm1
-        movq            xmm2,       QWORD PTR [rsi+rax+8]
-
-        movq            xmm3,       QWORD PTR [rdi+rdx+8]
-        lea             rsi,        [rsi+rax*2]
-
-        lea             rdi,        [rdi+rdx*2]
-        punpcklbw       xmm4,       xmm2
-
-        punpcklbw       xmm5,       xmm3
-        psadbw          xmm4,       xmm5
-
-        paddw           xmm6,       xmm0
-        paddw           xmm6,       xmm4
-
-        cmp             rsi,        rcx
-        jne             .x16x16sad_wmt_loop
-
-        movq            xmm0,       xmm6
-        psrldq          xmm6,       8
-
-        paddw           xmm0,       xmm6
-        movq            rax,        xmm0
-
-    ; begin epilog
-    pop rdi
-    pop rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-;unsigned int vp8_sad8x16_wmt(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride,
-;    int  max_sad)
-global sym(vp8_sad8x16_wmt) PRIVATE
-sym(vp8_sad8x16_wmt):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 5
-    push        rbx
-    push        rsi
-    push        rdi
-    ; end prolog
-
-        mov             rsi,        arg(0) ;src_ptr
-        mov             rdi,        arg(2) ;ref_ptr
-
-        movsxd          rbx,        dword ptr arg(1) ;src_stride
-        movsxd          rdx,        dword ptr arg(3) ;ref_stride
-
-        lea             rcx,        [rsi+rbx*8]
-
-        lea             rcx,        [rcx+rbx*8]
-        pxor            mm7,        mm7
-
-.x8x16sad_wmt_loop:
-
-        movq            rax,        mm7
-        cmp             eax,        arg(4)
-        ja              .x8x16sad_wmt_early_exit
-
-        movq            mm0,        QWORD PTR [rsi]
-        movq            mm1,        QWORD PTR [rdi]
-
-        movq            mm2,        QWORD PTR [rsi+rbx]
-        movq            mm3,        QWORD PTR [rdi+rdx]
-
-        psadbw          mm0,        mm1
-        psadbw          mm2,        mm3
-
-        lea             rsi,        [rsi+rbx*2]
-        lea             rdi,        [rdi+rdx*2]
-
-        paddw           mm7,        mm0
-        paddw           mm7,        mm2
-
-        cmp             rsi,        rcx
-        jne             .x8x16sad_wmt_loop
-
-        movq            rax,        mm7
-
-.x8x16sad_wmt_early_exit:
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    pop         rbx
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-
-;unsigned int vp8_sad8x8_wmt(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride)
-global sym(vp8_sad8x8_wmt) PRIVATE
-sym(vp8_sad8x8_wmt):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 5
-    push        rbx
-    push        rsi
-    push        rdi
-    ; end prolog
-
-        mov             rsi,        arg(0) ;src_ptr
-        mov             rdi,        arg(2) ;ref_ptr
-
-        movsxd          rbx,        dword ptr arg(1) ;src_stride
-        movsxd          rdx,        dword ptr arg(3) ;ref_stride
-
-        lea             rcx,        [rsi+rbx*8]
-        pxor            mm7,        mm7
-
-.x8x8sad_wmt_loop:
-
-        movq            rax,        mm7
-        cmp             eax,        arg(4)
-        ja              .x8x8sad_wmt_early_exit
-
-        movq            mm0,        QWORD PTR [rsi]
-        movq            mm1,        QWORD PTR [rdi]
-
-        psadbw          mm0,        mm1
-        lea             rsi,        [rsi+rbx]
-
-        add             rdi,        rdx
-        paddw           mm7,        mm0
-
-        cmp             rsi,        rcx
-        jne             .x8x8sad_wmt_loop
-
-        movq            rax,        mm7
-.x8x8sad_wmt_early_exit:
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    pop         rbx
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-;unsigned int vp8_sad4x4_wmt(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride)
-global sym(vp8_sad4x4_wmt) PRIVATE
-sym(vp8_sad4x4_wmt):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 4
-    push        rsi
-    push        rdi
-    ; end prolog
-
-        mov             rsi,        arg(0) ;src_ptr
-        mov             rdi,        arg(2) ;ref_ptr
-
-        movsxd          rax,        dword ptr arg(1) ;src_stride
-        movsxd          rdx,        dword ptr arg(3) ;ref_stride
-
-        movd            mm0,        DWORD PTR [rsi]
-        movd            mm1,        DWORD PTR [rdi]
-
-        movd            mm2,        DWORD PTR [rsi+rax]
-        movd            mm3,        DWORD PTR [rdi+rdx]
-
-        punpcklbw       mm0,        mm2
-        punpcklbw       mm1,        mm3
-
-        psadbw          mm0,        mm1
-        lea             rsi,        [rsi+rax*2]
-
-        lea             rdi,        [rdi+rdx*2]
-        movd            mm4,        DWORD PTR [rsi]
-
-        movd            mm5,        DWORD PTR [rdi]
-        movd            mm6,        DWORD PTR [rsi+rax]
-
-        movd            mm7,        DWORD PTR [rdi+rdx]
-        punpcklbw       mm4,        mm6
-
-        punpcklbw       mm5,        mm7
-        psadbw          mm4,        mm5
-
-        paddw           mm0,        mm4
-        movq            rax,        mm0
-
-    ; begin epilog
-    pop rdi
-    pop rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-
-;unsigned int vp8_sad16x8_wmt(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride)
-global sym(vp8_sad16x8_wmt) PRIVATE
-sym(vp8_sad16x8_wmt):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 5
-    push        rbx
-    push        rsi
-    push        rdi
-    ; end prolog
-
-
-        mov             rsi,        arg(0) ;src_ptr
-        mov             rdi,        arg(2) ;ref_ptr
-
-        movsxd          rbx,        dword ptr arg(1) ;src_stride
-        movsxd          rdx,        dword ptr arg(3) ;ref_stride
-
-        lea             rcx,        [rsi+rbx*8]
-        pxor            mm7,        mm7
-
-.x16x8sad_wmt_loop:
-
-        movq            rax,        mm7
-        cmp             eax,        arg(4)
-        ja              .x16x8sad_wmt_early_exit
-
-        movq            mm0,        QWORD PTR [rsi]
-        movq            mm2,        QWORD PTR [rsi+8]
-
-        movq            mm1,        QWORD PTR [rdi]
-        movq            mm3,        QWORD PTR [rdi+8]
-
-        movq            mm4,        QWORD PTR [rsi+rbx]
-        movq            mm5,        QWORD PTR [rdi+rdx]
-
-        psadbw          mm0,        mm1
-        psadbw          mm2,        mm3
-
-        movq            mm1,        QWORD PTR [rsi+rbx+8]
-        movq            mm3,        QWORD PTR [rdi+rdx+8]
-
-        psadbw          mm4,        mm5
-        psadbw          mm1,        mm3
-
-        lea             rsi,        [rsi+rbx*2]
-        lea             rdi,        [rdi+rdx*2]
-
-        paddw           mm0,        mm2
-        paddw           mm4,        mm1
-
-        paddw           mm7,        mm0
-        paddw           mm7,        mm4
-
-        cmp             rsi,        rcx
-        jne             .x16x8sad_wmt_loop
-
-        movq            rax,        mm7
-
-.x16x8sad_wmt_early_exit:
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    pop         rbx
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-;void vp8_copy32xn_sse2(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *dst_ptr,
-;    int  dst_stride,
-;    int height);
-global sym(vp8_copy32xn_sse2) PRIVATE
-sym(vp8_copy32xn_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 5
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    ; end prolog
-
-        mov             rsi,        arg(0) ;src_ptr
-        mov             rdi,        arg(2) ;dst_ptr
-
-        movsxd          rax,        dword ptr arg(1) ;src_stride
-        movsxd          rdx,        dword ptr arg(3) ;dst_stride
-        movsxd          rcx,        dword ptr arg(4) ;height
-
-.block_copy_sse2_loopx4:
-        movdqu          xmm0,       XMMWORD PTR [rsi]
-        movdqu          xmm1,       XMMWORD PTR [rsi + 16]
-        movdqu          xmm2,       XMMWORD PTR [rsi + rax]
-        movdqu          xmm3,       XMMWORD PTR [rsi + rax + 16]
-
-        lea             rsi,        [rsi+rax*2]
-
-        movdqu          xmm4,       XMMWORD PTR [rsi]
-        movdqu          xmm5,       XMMWORD PTR [rsi + 16]
-        movdqu          xmm6,       XMMWORD PTR [rsi + rax]
-        movdqu          xmm7,       XMMWORD PTR [rsi + rax + 16]
-
-        lea             rsi,    [rsi+rax*2]
-
-        movdqa          XMMWORD PTR [rdi], xmm0
-        movdqa          XMMWORD PTR [rdi + 16], xmm1
-        movdqa          XMMWORD PTR [rdi + rdx], xmm2
-        movdqa          XMMWORD PTR [rdi + rdx + 16], xmm3
-
-        lea             rdi,    [rdi+rdx*2]
-
-        movdqa          XMMWORD PTR [rdi], xmm4
-        movdqa          XMMWORD PTR [rdi + 16], xmm5
-        movdqa          XMMWORD PTR [rdi + rdx], xmm6
-        movdqa          XMMWORD PTR [rdi + rdx + 16], xmm7
-
-        lea             rdi,    [rdi+rdx*2]
-
-        sub             rcx,     4
-        cmp             rcx,     4
-        jge             .block_copy_sse2_loopx4
-
-        cmp             rcx, 0
-        je              .copy_is_done
-
-.block_copy_sse2_loop:
-        movdqu          xmm0,       XMMWORD PTR [rsi]
-        movdqu          xmm1,       XMMWORD PTR [rsi + 16]
-        lea             rsi,    [rsi+rax]
-
-        movdqa          XMMWORD PTR [rdi], xmm0
-        movdqa          XMMWORD PTR [rdi + 16], xmm1
-        lea             rdi,    [rdi+rdx]
-
-        sub             rcx,     1
-        jne             .block_copy_sse2_loop
-
-.copy_is_done:
-    ; begin epilog
-    pop rdi
-    pop rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
deleted file mode 100644
--- a/media/libvpx/vp8/common/x86/sad_sse3.asm
+++ /dev/null
@@ -1,960 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-%include "vpx_ports/x86_abi_support.asm"
-
-%macro STACK_FRAME_CREATE_X3 0
-%if ABI_IS_32BIT
-  %define     src_ptr       rsi
-  %define     src_stride    rax
-  %define     ref_ptr       rdi
-  %define     ref_stride    rdx
-  %define     end_ptr       rcx
-  %define     ret_var       rbx
-  %define     result_ptr    arg(4)
-  %define     max_sad       arg(4)
-  %define     height        dword ptr arg(4)
-    push        rbp
-    mov         rbp,        rsp
-    push        rsi
-    push        rdi
-    push        rbx
-
-    mov         rsi,        arg(0)              ; src_ptr
-    mov         rdi,        arg(2)              ; ref_ptr
-
-    movsxd      rax,        dword ptr arg(1)    ; src_stride
-    movsxd      rdx,        dword ptr arg(3)    ; ref_stride
-%else
-  %if LIBVPX_YASM_WIN64
-    SAVE_XMM 7, u
-    %define     src_ptr     rcx
-    %define     src_stride  rdx
-    %define     ref_ptr     r8
-    %define     ref_stride  r9
-    %define     end_ptr     r10
-    %define     ret_var     r11
-    %define     result_ptr  [rsp+xmm_stack_space+8+4*8]
-    %define     max_sad     [rsp+xmm_stack_space+8+4*8]
-    %define     height      dword ptr [rsp+xmm_stack_space+8+4*8]
-  %else
-    %define     src_ptr     rdi
-    %define     src_stride  rsi
-    %define     ref_ptr     rdx
-    %define     ref_stride  rcx
-    %define     end_ptr     r9
-    %define     ret_var     r10
-    %define     result_ptr  r8
-    %define     max_sad     r8
-    %define     height      r8
-  %endif
-%endif
-
-%endmacro
-
-%macro STACK_FRAME_DESTROY_X3 0
-  %define     src_ptr
-  %define     src_stride
-  %define     ref_ptr
-  %define     ref_stride
-  %define     end_ptr
-  %define     ret_var
-  %define     result_ptr
-  %define     max_sad
-  %define     height
-
-%if ABI_IS_32BIT
-    pop         rbx
-    pop         rdi
-    pop         rsi
-    pop         rbp
-%else
-  %if LIBVPX_YASM_WIN64
-    RESTORE_XMM
-  %endif
-%endif
-    ret
-%endmacro
-
-%macro STACK_FRAME_CREATE_X4 0
-%if ABI_IS_32BIT
-  %define     src_ptr       rsi
-  %define     src_stride    rax
-  %define     r0_ptr        rcx
-  %define     r1_ptr        rdx
-  %define     r2_ptr        rbx
-  %define     r3_ptr        rdi
-  %define     ref_stride    rbp
-  %define     result_ptr    arg(4)
-    push        rbp
-    mov         rbp,        rsp
-    push        rsi
-    push        rdi
-    push        rbx
-
-    push        rbp
-    mov         rdi,        arg(2)              ; ref_ptr_base
-
-    LOAD_X4_ADDRESSES rdi, rcx, rdx, rax, rdi
-
-    mov         rsi,        arg(0)              ; src_ptr
-
-    movsxd      rbx,        dword ptr arg(1)    ; src_stride
-    movsxd      rbp,        dword ptr arg(3)    ; ref_stride
-
-    xchg        rbx,        rax
-%else
-  %if LIBVPX_YASM_WIN64
-    SAVE_XMM 7, u
-    %define     src_ptr     rcx
-    %define     src_stride  rdx
-    %define     r0_ptr      rsi
-    %define     r1_ptr      r10
-    %define     r2_ptr      r11
-    %define     r3_ptr      r8
-    %define     ref_stride  r9
-    %define     result_ptr  [rsp+xmm_stack_space+16+4*8]
-    push        rsi
-
-    LOAD_X4_ADDRESSES r8, r0_ptr, r1_ptr, r2_ptr, r3_ptr
-  %else
-    %define     src_ptr     rdi
-    %define     src_stride  rsi
-    %define     r0_ptr      r9
-    %define     r1_ptr      r10
-    %define     r2_ptr      r11
-    %define     r3_ptr      rdx
-    %define     ref_stride  rcx
-    %define     result_ptr  r8
-
-    LOAD_X4_ADDRESSES rdx, r0_ptr, r1_ptr, r2_ptr, r3_ptr
-
-  %endif
-%endif
-%endmacro
-
-%macro STACK_FRAME_DESTROY_X4 0
-  %define     src_ptr
-  %define     src_stride
-  %define     r0_ptr
-  %define     r1_ptr
-  %define     r2_ptr
-  %define     r3_ptr
-  %define     ref_stride
-  %define     result_ptr
-
-%if ABI_IS_32BIT
-    pop         rbx
-    pop         rdi
-    pop         rsi
-    pop         rbp
-%else
-  %if LIBVPX_YASM_WIN64
-    pop         rsi
-    RESTORE_XMM
-  %endif
-%endif
-    ret
-%endmacro
-
-%macro PROCESS_16X2X3 5
-%if %1==0
-        movdqa          xmm0,       XMMWORD PTR [%2]
-        lddqu           xmm5,       XMMWORD PTR [%3]
-        lddqu           xmm6,       XMMWORD PTR [%3+1]
-        lddqu           xmm7,       XMMWORD PTR [%3+2]
-
-        psadbw          xmm5,       xmm0
-        psadbw          xmm6,       xmm0
-        psadbw          xmm7,       xmm0
-%else
-        movdqa          xmm0,       XMMWORD PTR [%2]
-        lddqu           xmm1,       XMMWORD PTR [%3]
-        lddqu           xmm2,       XMMWORD PTR [%3+1]
-        lddqu           xmm3,       XMMWORD PTR [%3+2]
-
-        psadbw          xmm1,       xmm0
-        psadbw          xmm2,       xmm0
-        psadbw          xmm3,       xmm0
-
-        paddw           xmm5,       xmm1
-        paddw           xmm6,       xmm2
-        paddw           xmm7,       xmm3
-%endif
-        movdqa          xmm0,       XMMWORD PTR [%2+%4]
-        lddqu           xmm1,       XMMWORD PTR [%3+%5]
-        lddqu           xmm2,       XMMWORD PTR [%3+%5+1]
-        lddqu           xmm3,       XMMWORD PTR [%3+%5+2]
-
-%if %1==0 || %1==1
-        lea             %2,         [%2+%4*2]
-        lea             %3,         [%3+%5*2]
-%endif
-
-        psadbw          xmm1,       xmm0
-        psadbw          xmm2,       xmm0
-        psadbw          xmm3,       xmm0
-
-        paddw           xmm5,       xmm1
-        paddw           xmm6,       xmm2
-        paddw           xmm7,       xmm3
-%endmacro
-
-%macro PROCESS_8X2X3 5
-%if %1==0
-        movq            mm0,       QWORD PTR [%2]
-        movq            mm5,       QWORD PTR [%3]
-        movq            mm6,       QWORD PTR [%3+1]
-        movq            mm7,       QWORD PTR [%3+2]
-
-        psadbw          mm5,       mm0
-        psadbw          mm6,       mm0
-        psadbw          mm7,       mm0
-%else
-        movq            mm0,       QWORD PTR [%2]
-        movq            mm1,       QWORD PTR [%3]
-        movq            mm2,       QWORD PTR [%3+1]
-        movq            mm3,       QWORD PTR [%3+2]
-
-        psadbw          mm1,       mm0
-        psadbw          mm2,       mm0
-        psadbw          mm3,       mm0
-
-        paddw           mm5,       mm1
-        paddw           mm6,       mm2
-        paddw           mm7,       mm3
-%endif
-        movq            mm0,       QWORD PTR [%2+%4]
-        movq            mm1,       QWORD PTR [%3+%5]
-        movq            mm2,       QWORD PTR [%3+%5+1]
-        movq            mm3,       QWORD PTR [%3+%5+2]
-
-%if %1==0 || %1==1
-        lea             %2,        [%2+%4*2]
-        lea             %3,        [%3+%5*2]
-%endif
-
-        psadbw          mm1,       mm0
-        psadbw          mm2,       mm0
-        psadbw          mm3,       mm0
-
-        paddw           mm5,       mm1
-        paddw           mm6,       mm2
-        paddw           mm7,       mm3
-%endmacro
-
-%macro LOAD_X4_ADDRESSES 5
-        mov             %2,         [%1+REG_SZ_BYTES*0]
-        mov             %3,         [%1+REG_SZ_BYTES*1]
-
-        mov             %4,         [%1+REG_SZ_BYTES*2]
-        mov             %5,         [%1+REG_SZ_BYTES*3]
-%endmacro
-
-%macro PROCESS_16X2X4 8
-%if %1==0
-        movdqa          xmm0,       XMMWORD PTR [%2]
-        lddqu           xmm4,       XMMWORD PTR [%3]
-        lddqu           xmm5,       XMMWORD PTR [%4]
-        lddqu           xmm6,       XMMWORD PTR [%5]
-        lddqu           xmm7,       XMMWORD PTR [%6]
-
-        psadbw          xmm4,       xmm0
-        psadbw          xmm5,       xmm0
-        psadbw          xmm6,       xmm0
-        psadbw          xmm7,       xmm0
-%else
-        movdqa          xmm0,       XMMWORD PTR [%2]
-        lddqu           xmm1,       XMMWORD PTR [%3]
-        lddqu           xmm2,       XMMWORD PTR [%4]
-        lddqu           xmm3,       XMMWORD PTR [%5]
-
-        psadbw          xmm1,       xmm0
-        psadbw          xmm2,       xmm0
-        psadbw          xmm3,       xmm0
-
-        paddw           xmm4,       xmm1
-        lddqu           xmm1,       XMMWORD PTR [%6]
-        paddw           xmm5,       xmm2
-        paddw           xmm6,       xmm3
-
-        psadbw          xmm1,       xmm0
-        paddw           xmm7,       xmm1
-%endif
-        movdqa          xmm0,       XMMWORD PTR [%2+%7]
-        lddqu           xmm1,       XMMWORD PTR [%3+%8]
-        lddqu           xmm2,       XMMWORD PTR [%4+%8]
-        lddqu           xmm3,       XMMWORD PTR [%5+%8]
-
-        psadbw          xmm1,       xmm0
-        psadbw          xmm2,       xmm0
-        psadbw          xmm3,       xmm0
-
-        paddw           xmm4,       xmm1
-        lddqu           xmm1,       XMMWORD PTR [%6+%8]
-        paddw           xmm5,       xmm2
-        paddw           xmm6,       xmm3
-
-%if %1==0 || %1==1
-        lea             %2,         [%2+%7*2]
-        lea             %3,         [%3+%8*2]
-
-        lea             %4,         [%4+%8*2]
-        lea             %5,         [%5+%8*2]
-
-        lea             %6,         [%6+%8*2]
-%endif
-        psadbw          xmm1,       xmm0
-        paddw           xmm7,       xmm1
-
-%endmacro
-
-%macro PROCESS_8X2X4 8
-%if %1==0
-        movq            mm0,        QWORD PTR [%2]
-        movq            mm4,        QWORD PTR [%3]
-        movq            mm5,        QWORD PTR [%4]
-        movq            mm6,        QWORD PTR [%5]
-        movq            mm7,        QWORD PTR [%6]
-
-        psadbw          mm4,        mm0
-        psadbw          mm5,        mm0
-        psadbw          mm6,        mm0
-        psadbw          mm7,        mm0
-%else
-        movq            mm0,        QWORD PTR [%2]
-        movq            mm1,        QWORD PTR [%3]
-        movq            mm2,        QWORD PTR [%4]
-        movq            mm3,        QWORD PTR [%5]
-
-        psadbw          mm1,        mm0
-        psadbw          mm2,        mm0
-        psadbw          mm3,        mm0
-
-        paddw           mm4,        mm1
-        movq            mm1,        QWORD PTR [%6]
-        paddw           mm5,        mm2
-        paddw           mm6,        mm3
-
-        psadbw          mm1,        mm0
-        paddw           mm7,        mm1
-%endif
-        movq            mm0,        QWORD PTR [%2+%7]
-        movq            mm1,        QWORD PTR [%3+%8]
-        movq            mm2,        QWORD PTR [%4+%8]
-        movq            mm3,        QWORD PTR [%5+%8]
-
-        psadbw          mm1,        mm0
-        psadbw          mm2,        mm0
-        psadbw          mm3,        mm0
-
-        paddw           mm4,        mm1
-        movq            mm1,        QWORD PTR [%6+%8]
-        paddw           mm5,        mm2
-        paddw           mm6,        mm3
-
-%if %1==0 || %1==1
-        lea             %2,         [%2+%7*2]
-        lea             %3,         [%3+%8*2]
-
-        lea             %4,         [%4+%8*2]
-        lea             %5,         [%5+%8*2]
-
-        lea             %6,         [%6+%8*2]
-%endif
-        psadbw          mm1,        mm0
-        paddw           mm7,        mm1
-
-%endmacro
-
-;void int vp8_sad16x16x3_sse3(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride,
-;    int  *results)
-global sym(vp8_sad16x16x3_sse3) PRIVATE
-sym(vp8_sad16x16x3_sse3):
-
-    STACK_FRAME_CREATE_X3
-
-        PROCESS_16X2X3 0, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_16X2X3 2, src_ptr, ref_ptr, src_stride, ref_stride
-
-        mov             rcx,        result_ptr
-
-        movq            xmm0,       xmm5
-        psrldq          xmm5,       8
-
-        paddw           xmm0,       xmm5
-        movd            [rcx],      xmm0
-;-
-        movq            xmm0,       xmm6
-        psrldq          xmm6,       8
-
-        paddw           xmm0,       xmm6
-        movd            [rcx+4],    xmm0
-;-
-        movq            xmm0,       xmm7
-        psrldq          xmm7,       8
-
-        paddw           xmm0,       xmm7
-        movd            [rcx+8],    xmm0
-
-    STACK_FRAME_DESTROY_X3
-
-;void int vp8_sad16x8x3_sse3(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride,
-;    int  *results)
-global sym(vp8_sad16x8x3_sse3) PRIVATE
-sym(vp8_sad16x8x3_sse3):
-
-    STACK_FRAME_CREATE_X3
-
-        PROCESS_16X2X3 0, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_16X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_16X2X3 2, src_ptr, ref_ptr, src_stride, ref_stride
-
-        mov             rcx,        result_ptr
-
-        movq            xmm0,       xmm5
-        psrldq          xmm5,       8
-
-        paddw           xmm0,       xmm5
-        movd            [rcx],      xmm0
-;-
-        movq            xmm0,       xmm6
-        psrldq          xmm6,       8
-
-        paddw           xmm0,       xmm6
-        movd            [rcx+4],    xmm0
-;-
-        movq            xmm0,       xmm7
-        psrldq          xmm7,       8
-
-        paddw           xmm0,       xmm7
-        movd            [rcx+8],    xmm0
-
-    STACK_FRAME_DESTROY_X3
-
-;void int vp8_sad8x16x3_sse3(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride,
-;    int  *results)
-global sym(vp8_sad8x16x3_sse3) PRIVATE
-sym(vp8_sad8x16x3_sse3):
-
-    STACK_FRAME_CREATE_X3
-
-        PROCESS_8X2X3 0, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_8X2X3 2, src_ptr, ref_ptr, src_stride, ref_stride
-
-        mov             rcx,        result_ptr
-
-        punpckldq       mm5,        mm6
-
-        movq            [rcx],      mm5
-        movd            [rcx+8],    mm7
-
-    STACK_FRAME_DESTROY_X3
-
-;void int vp8_sad8x8x3_sse3(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride,
-;    int  *results)
-global sym(vp8_sad8x8x3_sse3) PRIVATE
-sym(vp8_sad8x8x3_sse3):
-
-    STACK_FRAME_CREATE_X3
-
-        PROCESS_8X2X3 0, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_8X2X3 1, src_ptr, ref_ptr, src_stride, ref_stride
-        PROCESS_8X2X3 2, src_ptr, ref_ptr, src_stride, ref_stride
-
-        mov             rcx,        result_ptr
-
-        punpckldq       mm5,        mm6
-
-        movq            [rcx],      mm5
-        movd            [rcx+8],    mm7
-
-    STACK_FRAME_DESTROY_X3
-
-;void int vp8_sad4x4x3_sse3(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride,
-;    int  *results)
-global sym(vp8_sad4x4x3_sse3) PRIVATE
-sym(vp8_sad4x4x3_sse3):
-
-    STACK_FRAME_CREATE_X3
-
-        movd            mm0,        DWORD PTR [src_ptr]
-        movd            mm1,        DWORD PTR [ref_ptr]
-
-        movd            mm2,        DWORD PTR [src_ptr+src_stride]
-        movd            mm3,        DWORD PTR [ref_ptr+ref_stride]
-
-        punpcklbw       mm0,        mm2
-        punpcklbw       mm1,        mm3
-
-        movd            mm4,        DWORD PTR [ref_ptr+1]
-        movd            mm5,        DWORD PTR [ref_ptr+2]
-
-        movd            mm2,        DWORD PTR [ref_ptr+ref_stride+1]
-        movd            mm3,        DWORD PTR [ref_ptr+ref_stride+2]
-
-        psadbw          mm1,        mm0
-
-        punpcklbw       mm4,        mm2
-        punpcklbw       mm5,        mm3
-
-        psadbw          mm4,        mm0
-        psadbw          mm5,        mm0
-
-        lea             src_ptr,    [src_ptr+src_stride*2]
-        lea             ref_ptr,    [ref_ptr+ref_stride*2]
-
-        movd            mm0,        DWORD PTR [src_ptr]
-        movd            mm2,        DWORD PTR [ref_ptr]
-
-        movd            mm3,        DWORD PTR [src_ptr+src_stride]
-        movd            mm6,        DWORD PTR [ref_ptr+ref_stride]
-
-        punpcklbw       mm0,        mm3
-        punpcklbw       mm2,        mm6
-
-        movd            mm3,        DWORD PTR [ref_ptr+1]
-        movd            mm7,        DWORD PTR [ref_ptr+2]
-
-        psadbw          mm2,        mm0
-
-        paddw           mm1,        mm2
-
-        movd            mm2,        DWORD PTR [ref_ptr+ref_stride+1]
-        movd            mm6,        DWORD PTR [ref_ptr+ref_stride+2]
-
-        punpcklbw       mm3,        mm2
-        punpcklbw       mm7,        mm6
-
-        psadbw          mm3,        mm0
-        psadbw          mm7,        mm0
-
-        paddw           mm3,        mm4
-        paddw           mm7,        mm5
-
-        mov             rcx,        result_ptr
-
-        punpckldq       mm1,        mm3
-
-        movq            [rcx],      mm1
-        movd            [rcx+8],    mm7
-
-    STACK_FRAME_DESTROY_X3
-
-;unsigned int vp8_sad16x16_sse3(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride,
-;    int  max_sad)
-;%define lddqu movdqu
-global sym(vp8_sad16x16_sse3) PRIVATE
-sym(vp8_sad16x16_sse3):
-
-    STACK_FRAME_CREATE_X3
-
-        mov             end_ptr,    4
-        pxor            xmm7,        xmm7
-
-.vp8_sad16x16_sse3_loop:
-        movdqa          xmm0,       XMMWORD PTR [src_ptr]
-        movdqu          xmm1,       XMMWORD PTR [ref_ptr]
-        movdqa          xmm2,       XMMWORD PTR [src_ptr+src_stride]
-        movdqu          xmm3,       XMMWORD PTR [ref_ptr+ref_stride]
-
-        lea             src_ptr,    [src_ptr+src_stride*2]
-        lea             ref_ptr,    [ref_ptr+ref_stride*2]
-
-        movdqa          xmm4,       XMMWORD PTR [src_ptr]
-        movdqu          xmm5,       XMMWORD PTR [ref_ptr]
-        movdqa          xmm6,       XMMWORD PTR [src_ptr+src_stride]
-
-        psadbw          xmm0,       xmm1
-
-        movdqu          xmm1,       XMMWORD PTR [ref_ptr+ref_stride]
-
-        psadbw          xmm2,       xmm3
-        psadbw          xmm4,       xmm5
-        psadbw          xmm6,       xmm1
-
-        lea             src_ptr,    [src_ptr+src_stride*2]
-        lea             ref_ptr,    [ref_ptr+ref_stride*2]
-
-        paddw           xmm7,        xmm0
-        paddw           xmm7,        xmm2
-        paddw           xmm7,        xmm4
-        paddw           xmm7,        xmm6
-
-        sub             end_ptr,     1
-        jne             .vp8_sad16x16_sse3_loop
-
-        movq            xmm0,       xmm7
-        psrldq          xmm7,       8
-        paddw           xmm0,       xmm7
-        movq            rax,        xmm0
-
-    STACK_FRAME_DESTROY_X3
-
-;void vp8_copy32xn_sse3(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *dst_ptr,
-;    int  dst_stride,
-;    int height);
-global sym(vp8_copy32xn_sse3) PRIVATE
-sym(vp8_copy32xn_sse3):
-
-    STACK_FRAME_CREATE_X3
-
-.block_copy_sse3_loopx4:
-        lea             end_ptr,    [src_ptr+src_stride*2]
-
-        movdqu          xmm0,       XMMWORD PTR [src_ptr]
-        movdqu          xmm1,       XMMWORD PTR [src_ptr + 16]
-        movdqu          xmm2,       XMMWORD PTR [src_ptr + src_stride]
-        movdqu          xmm3,       XMMWORD PTR [src_ptr + src_stride + 16]
-        movdqu          xmm4,       XMMWORD PTR [end_ptr]
-        movdqu          xmm5,       XMMWORD PTR [end_ptr + 16]
-        movdqu          xmm6,       XMMWORD PTR [end_ptr + src_stride]
-        movdqu          xmm7,       XMMWORD PTR [end_ptr + src_stride + 16]
-
-        lea             src_ptr,    [src_ptr+src_stride*4]
-
-        lea             end_ptr,    [ref_ptr+ref_stride*2]
-
-        movdqa          XMMWORD PTR [ref_ptr], xmm0
-        movdqa          XMMWORD PTR [ref_ptr + 16], xmm1
-        movdqa          XMMWORD PTR [ref_ptr + ref_stride], xmm2
-        movdqa          XMMWORD PTR [ref_ptr + ref_stride + 16], xmm3
-        movdqa          XMMWORD PTR [end_ptr], xmm4
-        movdqa          XMMWORD PTR [end_ptr + 16], xmm5
-        movdqa          XMMWORD PTR [end_ptr + ref_stride], xmm6
-        movdqa          XMMWORD PTR [end_ptr + ref_stride + 16], xmm7
-
-        lea             ref_ptr,    [ref_ptr+ref_stride*4]
-
-        sub             height,     4
-        cmp             height,     4
-        jge             .block_copy_sse3_loopx4
-
-        ;Check to see if there is more rows need to be copied.
-        cmp             height, 0
-        je              .copy_is_done
-
-.block_copy_sse3_loop:
-        movdqu          xmm0,       XMMWORD PTR [src_ptr]
-        movdqu          xmm1,       XMMWORD PTR [src_ptr + 16]
-        lea             src_ptr,    [src_ptr+src_stride]
-
-        movdqa          XMMWORD PTR [ref_ptr], xmm0
-        movdqa          XMMWORD PTR [ref_ptr + 16], xmm1
-        lea             ref_ptr,    [ref_ptr+ref_stride]
-
-        sub             height,     1
-        jne             .block_copy_sse3_loop
-
-.copy_is_done:
-    STACK_FRAME_DESTROY_X3
-
-;void vp8_sad16x16x4d_sse3(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr_base,
-;    int  ref_stride,
-;    int  *results)
-global sym(vp8_sad16x16x4d_sse3) PRIVATE
-sym(vp8_sad16x16x4d_sse3):
-
-    STACK_FRAME_CREATE_X4
-
-        PROCESS_16X2X4 0, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_16X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_16X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_16X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_16X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_16X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_16X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_16X2X4 2, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-
-%if ABI_IS_32BIT
-        pop             rbp
-%endif
-        mov             rcx,        result_ptr
-
-        movq            xmm0,       xmm4
-        psrldq          xmm4,       8
-
-        paddw           xmm0,       xmm4
-        movd            [rcx],      xmm0
-;-
-        movq            xmm0,       xmm5
-        psrldq          xmm5,       8
-
-        paddw           xmm0,       xmm5
-        movd            [rcx+4],    xmm0
-;-
-        movq            xmm0,       xmm6
-        psrldq          xmm6,       8
-
-        paddw           xmm0,       xmm6
-        movd            [rcx+8],    xmm0
-;-
-        movq            xmm0,       xmm7
-        psrldq          xmm7,       8
-
-        paddw           xmm0,       xmm7
-        movd            [rcx+12],   xmm0
-
-    STACK_FRAME_DESTROY_X4
-
-;void vp8_sad16x8x4d_sse3(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr_base,
-;    int  ref_stride,
-;    int  *results)
-global sym(vp8_sad16x8x4d_sse3) PRIVATE
-sym(vp8_sad16x8x4d_sse3):
-
-    STACK_FRAME_CREATE_X4
-
-        PROCESS_16X2X4 0, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_16X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_16X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_16X2X4 2, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-
-%if ABI_IS_32BIT
-        pop             rbp
-%endif
-        mov             rcx,        result_ptr
-
-        movq            xmm0,       xmm4
-        psrldq          xmm4,       8
-
-        paddw           xmm0,       xmm4
-        movd            [rcx],      xmm0
-;-
-        movq            xmm0,       xmm5
-        psrldq          xmm5,       8
-
-        paddw           xmm0,       xmm5
-        movd            [rcx+4],    xmm0
-;-
-        movq            xmm0,       xmm6
-        psrldq          xmm6,       8
-
-        paddw           xmm0,       xmm6
-        movd            [rcx+8],    xmm0
-;-
-        movq            xmm0,       xmm7
-        psrldq          xmm7,       8
-
-        paddw           xmm0,       xmm7
-        movd            [rcx+12],   xmm0
-
-    STACK_FRAME_DESTROY_X4
-
-;void vp8_sad8x16x4d_sse3(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride,
-;    int  *results)
-global sym(vp8_sad8x16x4d_sse3) PRIVATE
-sym(vp8_sad8x16x4d_sse3):
-
-    STACK_FRAME_CREATE_X4
-
-        PROCESS_8X2X4 0, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_8X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_8X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_8X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_8X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_8X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_8X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_8X2X4 2, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-
-%if ABI_IS_32BIT
-        pop             rbp
-%endif
-        mov             rcx,        result_ptr
-
-        punpckldq       mm4,        mm5
-        punpckldq       mm6,        mm7
-
-        movq            [rcx],      mm4
-        movq            [rcx+8],    mm6
-
-    STACK_FRAME_DESTROY_X4
-
-;void vp8_sad8x8x4d_sse3(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride,
-;    int  *results)
-global sym(vp8_sad8x8x4d_sse3) PRIVATE
-sym(vp8_sad8x8x4d_sse3):
-
-    STACK_FRAME_CREATE_X4
-
-        PROCESS_8X2X4 0, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_8X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_8X2X4 1, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-        PROCESS_8X2X4 2, src_ptr, r0_ptr, r1_ptr, r2_ptr, r3_ptr, src_stride, ref_stride
-
-%if ABI_IS_32BIT
-        pop             rbp
-%endif
-        mov             rcx,        result_ptr
-
-        punpckldq       mm4,        mm5
-        punpckldq       mm6,        mm7
-
-        movq            [rcx],      mm4
-        movq            [rcx+8],    mm6
-
-    STACK_FRAME_DESTROY_X4
-
-;void vp8_sad4x4x4d_sse3(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride,
-;    int  *results)
-global sym(vp8_sad4x4x4d_sse3) PRIVATE
-sym(vp8_sad4x4x4d_sse3):
-
-    STACK_FRAME_CREATE_X4
-
-        movd            mm0,        DWORD PTR [src_ptr]
-        movd            mm1,        DWORD PTR [r0_ptr]
-
-        movd            mm2,        DWORD PTR [src_ptr+src_stride]
-        movd            mm3,        DWORD PTR [r0_ptr+ref_stride]
-
-        punpcklbw       mm0,        mm2
-        punpcklbw       mm1,        mm3
-
-        movd            mm4,        DWORD PTR [r1_ptr]
-        movd            mm5,        DWORD PTR [r2_ptr]
-
-        movd            mm6,        DWORD PTR [r3_ptr]
-        movd            mm2,        DWORD PTR [r1_ptr+ref_stride]
-
-        movd            mm3,        DWORD PTR [r2_ptr+ref_stride]
-        movd            mm7,        DWORD PTR [r3_ptr+ref_stride]
-
-        psadbw          mm1,        mm0
-
-        punpcklbw       mm4,        mm2
-        punpcklbw       mm5,        mm3
-
-        punpcklbw       mm6,        mm7
-        psadbw          mm4,        mm0
-
-        psadbw          mm5,        mm0
-        psadbw          mm6,        mm0
-
-
-
-        lea             src_ptr,    [src_ptr+src_stride*2]
-        lea             r0_ptr,     [r0_ptr+ref_stride*2]
-
-        lea             r1_ptr,     [r1_ptr+ref_stride*2]
-        lea             r2_ptr,     [r2_ptr+ref_stride*2]
-
-        lea             r3_ptr,     [r3_ptr+ref_stride*2]
-
-        movd            mm0,        DWORD PTR [src_ptr]
-        movd            mm2,        DWORD PTR [r0_ptr]
-
-        movd            mm3,        DWORD PTR [src_ptr+src_stride]
-        movd            mm7,        DWORD PTR [r0_ptr+ref_stride]
-
-        punpcklbw       mm0,        mm3
-        punpcklbw       mm2,        mm7
-
-        movd            mm3,        DWORD PTR [r1_ptr]
-        movd            mm7,        DWORD PTR [r2_ptr]
-
-        psadbw          mm2,        mm0
-%if ABI_IS_32BIT
-        mov             rax,        rbp
-
-        pop             rbp
-%define     ref_stride    rax
-%endif
-        mov             rsi,        result_ptr
-
-        paddw           mm1,        mm2
-        movd            [rsi],      mm1
-
-        movd            mm2,        DWORD PTR [r1_ptr+ref_stride]
-        movd            mm1,        DWORD PTR [r2_ptr+ref_stride]
-
-        punpcklbw       mm3,        mm2
-        punpcklbw       mm7,        mm1
-
-        psadbw          mm3,        mm0
-        psadbw          mm7,        mm0
-
-        movd            mm2,        DWORD PTR [r3_ptr]
-        movd            mm1,        DWORD PTR [r3_ptr+ref_stride]
-
-        paddw           mm3,        mm4
-        paddw           mm7,        mm5
-
-        movd            [rsi+4],    mm3
-        punpcklbw       mm2,        mm1
-
-        movd            [rsi+8],    mm7
-        psadbw          mm2,        mm0
-
-        paddw           mm2,        mm6
-        movd            [rsi+12],   mm2
-
-
-    STACK_FRAME_DESTROY_X4
-
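The 4x4 kernel above avoids wasting PSADBW on 4-byte rows by interleaving two consecutive rows (punpcklbw) into one 8-byte operand, so each PSADBW absorbs two rows at once. A scalar model of one such paired step (illustrative only):

    #include <stdlib.h>

    /* Two 4-wide rows folded into one SAD step, mirroring the
     * punpcklbw + psadbw pairing above. */
    static unsigned int sad_4x2(const unsigned char *src, int src_stride,
                                const unsigned char *ref, int ref_stride)
    {
        unsigned int s = 0;
        for (int r = 0; r < 2; ++r)
            for (int c = 0; c < 4; ++c)
                s += (unsigned int)abs(src[r * src_stride + c] -
                                       ref[r * ref_stride + c]);
        return s;   /* two of these cover a 4x4 block */
    }
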
deleted file mode 100644
--- a/media/libvpx/vp8/common/x86/sad_sse4.asm
+++ /dev/null
@@ -1,353 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-%include "vpx_ports/x86_abi_support.asm"
-
-%macro PROCESS_16X2X8 1
-%if %1
-        movdqa          xmm0,       XMMWORD PTR [rsi]
-        movq            xmm1,       MMWORD PTR [rdi]
-        movq            xmm3,       MMWORD PTR [rdi+8]
-        movq            xmm2,       MMWORD PTR [rdi+16]
-        punpcklqdq      xmm1,       xmm3
-        punpcklqdq      xmm3,       xmm2
-
-        movdqa          xmm2,       xmm1
-        mpsadbw         xmm1,       xmm0,  0x0
-        mpsadbw         xmm2,       xmm0,  0x5
-
-        psrldq          xmm0,       8
-
-        movdqa          xmm4,       xmm3
-        mpsadbw         xmm3,       xmm0,  0x0
-        mpsadbw         xmm4,       xmm0,  0x5
-
-        paddw           xmm1,       xmm2
-        paddw           xmm1,       xmm3
-        paddw           xmm1,       xmm4
-%else
-        movdqa          xmm0,       XMMWORD PTR [rsi]
-        movq            xmm5,       MMWORD PTR [rdi]
-        movq            xmm3,       MMWORD PTR [rdi+8]
-        movq            xmm2,       MMWORD PTR [rdi+16]
-        punpcklqdq      xmm5,       xmm3
-        punpcklqdq      xmm3,       xmm2
-
-        movdqa          xmm2,       xmm5
-        mpsadbw         xmm5,       xmm0,  0x0
-        mpsadbw         xmm2,       xmm0,  0x5
-
-        psrldq          xmm0,       8
-
-        movdqa          xmm4,       xmm3
-        mpsadbw         xmm3,       xmm0,  0x0
-        mpsadbw         xmm4,       xmm0,  0x5
-
-        paddw           xmm5,       xmm2
-        paddw           xmm5,       xmm3
-        paddw           xmm5,       xmm4
-
-        paddw           xmm1,       xmm5
-%endif
-        movdqa          xmm0,       XMMWORD PTR [rsi + rax]
-        movq            xmm5,       MMWORD PTR [rdi+ rdx]
-        movq            xmm3,       MMWORD PTR [rdi+ rdx+8]
-        movq            xmm2,       MMWORD PTR [rdi+ rdx+16]
-        punpcklqdq      xmm5,       xmm3
-        punpcklqdq      xmm3,       xmm2
-
-        lea             rsi,        [rsi+rax*2]
-        lea             rdi,        [rdi+rdx*2]
-
-        movdqa          xmm2,       xmm5
-        mpsadbw         xmm5,       xmm0,  0x0
-        mpsadbw         xmm2,       xmm0,  0x5
-
-        psrldq          xmm0,       8
-        movdqa          xmm4,       xmm3
-        mpsadbw         xmm3,       xmm0,  0x0
-        mpsadbw         xmm4,       xmm0,  0x5
-
-        paddw           xmm5,       xmm2
-        paddw           xmm5,       xmm3
-        paddw           xmm5,       xmm4
-
-        paddw           xmm1,       xmm5
-%endmacro
-
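This macro leans on SSE4.1 MPSADBW, which produces eight 4-byte SADs of one fixed source group against eight sliding reference windows; pairing immediates 0x0 and 0x5 extends that to 8-byte groups, and the paddw chain accumulates a full 16-byte row into xmm1. A scalar model of a single MPSADBW, with offsets decoded from the immediate as I read the instruction's definition:

    #include <stdlib.h>

    /* out[w] = SAD of ref[ref_off+w .. +3] vs src[src_off .. +3], w = 0..7;
     * ref_off = ((imm >> 2) & 1) * 4, src_off = (imm & 3) * 4. */
    static void mpsadbw_model(const unsigned char ref[16],
                              const unsigned char src[16],
                              int imm, unsigned short out[8])
    {
        const int ref_off = ((imm >> 2) & 1) * 4;
        const int src_off = (imm & 3) * 4;
        for (int w = 0; w < 8; ++w) {
            unsigned short s = 0;
            for (int b = 0; b < 4; ++b)
                s += (unsigned short)abs(ref[ref_off + w + b] -
                                         src[src_off + b]);
            out[w] = s;
        }
    }
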
-%macro PROCESS_8X2X8 1
-%if %1
-        movq            xmm0,       MMWORD PTR [rsi]
-        movq            xmm1,       MMWORD PTR [rdi]
-        movq            xmm3,       MMWORD PTR [rdi+8]
-        punpcklqdq      xmm1,       xmm3
-
-        movdqa          xmm2,       xmm1
-        mpsadbw         xmm1,       xmm0,  0x0
-        mpsadbw         xmm2,       xmm0,  0x5
-        paddw           xmm1,       xmm2
-%else
-        movq            xmm0,       MMWORD PTR [rsi]
-        movq            xmm5,       MMWORD PTR [rdi]
-        movq            xmm3,       MMWORD PTR [rdi+8]
-        punpcklqdq      xmm5,       xmm3
-
-        movdqa          xmm2,       xmm5
-        mpsadbw         xmm5,       xmm0,  0x0
-        mpsadbw         xmm2,       xmm0,  0x5
-        paddw           xmm5,       xmm2
-
-        paddw           xmm1,       xmm5
-%endif
-        movq            xmm0,       MMWORD PTR [rsi + rax]
-        movq            xmm5,       MMWORD PTR [rdi+ rdx]
-        movq            xmm3,       MMWORD PTR [rdi+ rdx+8]
-        punpcklqdq      xmm5,       xmm3
-
-        lea             rsi,        [rsi+rax*2]
-        lea             rdi,        [rdi+rdx*2]
-
-        movdqa          xmm2,       xmm5
-        mpsadbw         xmm5,       xmm0,  0x0
-        mpsadbw         xmm2,       xmm0,  0x5
-        paddw           xmm5,       xmm2
-
-        paddw           xmm1,       xmm5
-%endmacro
-
-%macro PROCESS_4X2X8 1
-%if %1
-        movd            xmm0,       [rsi]
-        movq            xmm1,       MMWORD PTR [rdi]
-        movq            xmm3,       MMWORD PTR [rdi+8]
-        punpcklqdq      xmm1,       xmm3
-
-        mpsadbw         xmm1,       xmm0,  0x0
-%else
-        movd            xmm0,       [rsi]
-        movq            xmm5,       MMWORD PTR [rdi]
-        movq            xmm3,       MMWORD PTR [rdi+8]
-        punpcklqdq      xmm5,       xmm3
-
-        mpsadbw         xmm5,       xmm0,  0x0
-
-        paddw           xmm1,       xmm5
-%endif
-        movd            xmm0,       [rsi + rax]
-        movq            xmm5,       MMWORD PTR [rdi+ rdx]
-        movq            xmm3,       MMWORD PTR [rdi+ rdx+8]
-        punpcklqdq      xmm5,       xmm3
-
-        lea             rsi,        [rsi+rax*2]
-        lea             rdi,        [rdi+rdx*2]
-
-        mpsadbw         xmm5,       xmm0,  0x0
-
-        paddw           xmm1,       xmm5
-%endmacro
-
-
-;void vp8_sad16x16x8_sse4(
-;    const unsigned char *src_ptr,
-;    int  src_stride,
-;    const unsigned char *ref_ptr,
-;    int  ref_stride,
-;    unsigned short *sad_array);
-global sym(vp8_sad16x16x8_sse4) PRIVATE
-sym(vp8_sad16x16x8_sse4):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 5
-    push        rsi
-    push        rdi
-    ; end prolog
-
-        mov             rsi,        arg(0)           ;src_ptr
-        mov             rdi,        arg(2)           ;ref_ptr
-
-        movsxd          rax,        dword ptr arg(1) ;src_stride
-        movsxd          rdx,        dword ptr arg(3) ;ref_stride
-
-        PROCESS_16X2X8 1
-        PROCESS_16X2X8 0
-        PROCESS_16X2X8 0
-        PROCESS_16X2X8 0
-        PROCESS_16X2X8 0
-        PROCESS_16X2X8 0
-        PROCESS_16X2X8 0
-        PROCESS_16X2X8 0
-
-        mov             rdi,        arg(4)           ;Results
-        movdqa          XMMWORD PTR [rdi],    xmm1
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-
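A hypothetical caller, to show how the eight-wide result is consumed during motion search. The extern matches the comment block above; best_of_eight is illustrative, not part of libvpx:

    extern void vp8_sad16x16x8_sse4(const unsigned char *src_ptr, int src_stride,
                                    const unsigned char *ref_ptr, int ref_stride,
                                    unsigned short *sad_array);

    /* Scan eight horizontally adjacent candidates in one call; returns the
     * winning offset (0..7), or -1 if none beats *best_sad. */
    static int best_of_eight(const unsigned char *src, int src_stride,
                             const unsigned char *ref, int ref_stride,
                             unsigned int *best_sad)
    {
        unsigned short sads[8];
        int best = -1;
        vp8_sad16x16x8_sse4(src, src_stride, ref, ref_stride, sads);
        for (int i = 0; i < 8; ++i)
            if (sads[i] < *best_sad) { *best_sad = sads[i]; best = i; }
        return best;
    }
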
-;void vp8_sad16x8x8_sse4(
-;    const unsigned char *src_ptr,
-;    int  src_stride,
-;    const unsigned char *ref_ptr,
-;    int  ref_stride,
-;    unsigned short *sad_array
-;);
-global sym(vp8_sad16x8x8_sse4) PRIVATE
-sym(vp8_sad16x8x8_sse4):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 5
-    push        rsi
-    push        rdi
-    ; end prolog
-
-        mov             rsi,        arg(0)           ;src_ptr
-        mov             rdi,        arg(2)           ;ref_ptr
-
-        movsxd          rax,        dword ptr arg(1) ;src_stride
-        movsxd          rdx,        dword ptr arg(3) ;ref_stride
-
-        PROCESS_16X2X8 1
-        PROCESS_16X2X8 0
-        PROCESS_16X2X8 0
-        PROCESS_16X2X8 0
-
-        mov             rdi,        arg(4)           ;Results
-        movdqa          XMMWORD PTR [rdi],    xmm1
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-
-;void vp8_sad8x8x8_sse4(
-;    const unsigned char *src_ptr,
-;    int  src_stride,
-;    const unsigned char *ref_ptr,
-;    int  ref_stride,
-;    unsigned short *sad_array
-;);
-global sym(vp8_sad8x8x8_sse4) PRIVATE
-sym(vp8_sad8x8x8_sse4):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 5
-    push        rsi
-    push        rdi
-    ; end prolog
-
-        mov             rsi,        arg(0)           ;src_ptr
-        mov             rdi,        arg(2)           ;ref_ptr
-
-        movsxd          rax,        dword ptr arg(1) ;src_stride
-        movsxd          rdx,        dword ptr arg(3) ;ref_stride
-
-        PROCESS_8X2X8 1
-        PROCESS_8X2X8 0
-        PROCESS_8X2X8 0
-        PROCESS_8X2X8 0
-
-        mov             rdi,        arg(4)           ;Results
-        movdqa          XMMWORD PTR [rdi],    xmm1
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-
-;void vp8_sad8x16x8_sse4(
-;    const unsigned char *src_ptr,
-;    int  src_stride,
-;    const unsigned char *ref_ptr,
-;    int  ref_stride,
-;    unsigned short *sad_array
-;);
-global sym(vp8_sad8x16x8_sse4) PRIVATE
-sym(vp8_sad8x16x8_sse4):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 5
-    push        rsi
-    push        rdi
-    ; end prolog
-
-        mov             rsi,        arg(0)           ;src_ptr
-        mov             rdi,        arg(2)           ;ref_ptr
-
-        movsxd          rax,        dword ptr arg(1) ;src_stride
-        movsxd          rdx,        dword ptr arg(3) ;ref_stride
-
-        PROCESS_8X2X8 1
-        PROCESS_8X2X8 0
-        PROCESS_8X2X8 0
-        PROCESS_8X2X8 0
-        PROCESS_8X2X8 0
-        PROCESS_8X2X8 0
-        PROCESS_8X2X8 0
-        PROCESS_8X2X8 0
-        mov             rdi,        arg(4)           ;Results
-        movdqa          XMMWORD PTR [rdi],    xmm1
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-
-;void vp8_sad4x4x8_sse4(
-;    const unsigned char *src_ptr,
-;    int  src_stride,
-;    const unsigned char *ref_ptr,
-;    int  ref_stride,
-;    unsigned short *sad_array
-;);
-global sym(vp8_sad4x4x8_sse4) PRIVATE
-sym(vp8_sad4x4x8_sse4):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 5
-    push        rsi
-    push        rdi
-    ; end prolog
-
-        mov             rsi,        arg(0)           ;src_ptr
-        mov             rdi,        arg(2)           ;ref_ptr
-
-        movsxd          rax,        dword ptr arg(1) ;src_stride
-        movsxd          rdx,        dword ptr arg(3) ;ref_stride
-
-        PROCESS_4X2X8 1
-        PROCESS_4X2X8 0
-
-        mov             rdi,        arg(4)           ;Results
-        movdqa          XMMWORD PTR [rdi],    xmm1
-
-    ; begin epilog
-    pop         rdi
-    pop         rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-
-
-
deleted file mode 100644
--- a/media/libvpx/vp8/common/x86/sad_ssse3.asm
+++ /dev/null
@@ -1,370 +0,0 @@
-;
-;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
-;
-;  Use of this source code is governed by a BSD-style license
-;  that can be found in the LICENSE file in the root of the source
-;  tree. An additional intellectual property rights grant can be found
-;  in the file PATENTS.  All contributing project authors may
-;  be found in the AUTHORS file in the root of the source tree.
-;
-
-
-%include "vpx_ports/x86_abi_support.asm"
-
-%macro PROCESS_16X2X3 1
-%if %1
-        movdqa          xmm0,       XMMWORD PTR [rsi]
-        lddqu           xmm5,       XMMWORD PTR [rdi]
-        lddqu           xmm6,       XMMWORD PTR [rdi+1]
-        lddqu           xmm7,       XMMWORD PTR [rdi+2]
-
-        psadbw          xmm5,       xmm0
-        psadbw          xmm6,       xmm0
-        psadbw          xmm7,       xmm0
-%else
-        movdqa          xmm0,       XMMWORD PTR [rsi]
-        lddqu           xmm1,       XMMWORD PTR [rdi]
-        lddqu           xmm2,       XMMWORD PTR [rdi+1]
-        lddqu           xmm3,       XMMWORD PTR [rdi+2]
-
-        psadbw          xmm1,       xmm0
-        psadbw          xmm2,       xmm0
-        psadbw          xmm3,       xmm0
-
-        paddw           xmm5,       xmm1
-        paddw           xmm6,       xmm2
-        paddw           xmm7,       xmm3
-%endif
-        movdqa          xmm0,       XMMWORD PTR [rsi+rax]
-        lddqu           xmm1,       XMMWORD PTR [rdi+rdx]
-        lddqu           xmm2,       XMMWORD PTR [rdi+rdx+1]
-        lddqu           xmm3,       XMMWORD PTR [rdi+rdx+2]
-
-        lea             rsi,        [rsi+rax*2]
-        lea             rdi,        [rdi+rdx*2]
-
-        psadbw          xmm1,       xmm0
-        psadbw          xmm2,       xmm0
-        psadbw          xmm3,       xmm0
-
-        paddw           xmm5,       xmm1
-        paddw           xmm6,       xmm2
-        paddw           xmm7,       xmm3
-%endmacro
-
-%macro PROCESS_16X2X3_OFFSET 2
-%if %1
-        movdqa          xmm0,       XMMWORD PTR [rsi]
-        movdqa          xmm4,       XMMWORD PTR [rdi]
-        movdqa          xmm7,       XMMWORD PTR [rdi+16]
-
-        movdqa          xmm5,       xmm7
-        palignr         xmm5,       xmm4,       %2
-
-        movdqa          xmm6,       xmm7
-        palignr         xmm6,       xmm4,       (%2+1)
-
-        palignr         xmm7,       xmm4,       (%2+2)
-
-        psadbw          xmm5,       xmm0
-        psadbw          xmm6,       xmm0
-        psadbw          xmm7,       xmm0
-%else
-        movdqa          xmm0,       XMMWORD PTR [rsi]
-        movdqa          xmm4,       XMMWORD PTR [rdi]
-        movdqa          xmm3,       XMMWORD PTR [rdi+16]
-
-        movdqa          xmm1,       xmm3
-        palignr         xmm1,       xmm4,       %2
-
-        movdqa          xmm2,       xmm3
-        palignr         xmm2,       xmm4,       (%2+1)
-
-        palignr         xmm3,       xmm4,       (%2+2)
-
-        psadbw          xmm1,       xmm0
-        psadbw          xmm2,       xmm0
-        psadbw          xmm3,       xmm0
-
-        paddw           xmm5,       xmm1
-        paddw           xmm6,       xmm2
-        paddw           xmm7,       xmm3
-%endif
-        movdqa          xmm0,       XMMWORD PTR [rsi+rax]
-        movdqa          xmm4,       XMMWORD PTR [rdi+rdx]
-        movdqa          xmm3,       XMMWORD PTR [rdi+rdx+16]
-
-        movdqa          xmm1,       xmm3
-        palignr         xmm1,       xmm4,       %2
-
-        movdqa          xmm2,       xmm3
-        palignr         xmm2,       xmm4,       (%2+1)
-
-        palignr         xmm3,       xmm4,       (%2+2)
-
-        lea             rsi,        [rsi+rax*2]
-        lea             rdi,        [rdi+rdx*2]
-
-        psadbw          xmm1,       xmm0
-        psadbw          xmm2,       xmm0
-        psadbw          xmm3,       xmm0
-
-        paddw           xmm5,       xmm1
-        paddw           xmm6,       xmm2
-        paddw           xmm7,       xmm3
-%endmacro
-
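Instead of unaligned loads, this path does two aligned loads and synthesizes the shifted data with PALIGNR. A scalar model of that extraction (assumed semantics: concatenate high:low and take 16 bytes starting at the given byte shift):

    /* dst = bytes [shift .. shift+15] of the 32-byte concatenation hi:lo,
     * where lo supplies the low 16 bytes; valid for shift in 0..16 here. */
    static void palignr_model(unsigned char dst[16],
                              const unsigned char lo[16],
                              const unsigned char hi[16], int shift)
    {
        unsigned char cat[32];
        for (int i = 0; i < 16; ++i) { cat[i] = lo[i]; cat[16 + i] = hi[i]; }
        for (int i = 0; i < 16; ++i) dst[i] = cat[shift + i];
    }
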
-%macro PROCESS_16X16X3_OFFSET 2
-%2_aligned_by_%1:
-
-        sub             rdi,        %1
-
-        PROCESS_16X2X3_OFFSET 1, %1
-        PROCESS_16X2X3_OFFSET 0, %1
-        PROCESS_16X2X3_OFFSET 0, %1
-        PROCESS_16X2X3_OFFSET 0, %1
-        PROCESS_16X2X3_OFFSET 0, %1
-        PROCESS_16X2X3_OFFSET 0, %1
-        PROCESS_16X2X3_OFFSET 0, %1
-        PROCESS_16X2X3_OFFSET 0, %1
-
-        jmp             %2_store_off
-
-%endmacro
-
-%macro PROCESS_16X8X3_OFFSET 2
-%2_aligned_by_%1:
-
-        sub             rdi,        %1
-
-        PROCESS_16X2X3_OFFSET 1, %1
-        PROCESS_16X2X3_OFFSET 0, %1
-        PROCESS_16X2X3_OFFSET 0, %1
-        PROCESS_16X2X3_OFFSET 0, %1
-
-        jmp             %2_store_off
-
-%endmacro
-
-;void vp8_sad16x16x3_ssse3(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride,
-;    int  *results)
-global sym(vp8_sad16x16x3_ssse3) PRIVATE
-sym(vp8_sad16x16x3_ssse3):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 5
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    push        rcx
-    ; end prolog
-
-        mov             rsi,        arg(0) ;src_ptr
-        mov             rdi,        arg(2) ;ref_ptr
-
-        mov             rdx,        0xf
-        and             rdx,        rdi
-
-        jmp .vp8_sad16x16x3_ssse3_skiptable
-.vp8_sad16x16x3_ssse3_jumptable:
-        dd .vp8_sad16x16x3_ssse3_aligned_by_0  - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_1  - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_2  - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_3  - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_4  - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_5  - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_6  - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_7  - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_8  - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_9  - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_10 - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_11 - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_12 - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_13 - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_14 - .vp8_sad16x16x3_ssse3_do_jump
-        dd .vp8_sad16x16x3_ssse3_aligned_by_15 - .vp8_sad16x16x3_ssse3_do_jump
-.vp8_sad16x16x3_ssse3_skiptable:
-
-        call .vp8_sad16x16x3_ssse3_do_jump
-.vp8_sad16x16x3_ssse3_do_jump:
-        pop             rcx                         ; get the address of do_jump
-        mov             rax,  .vp8_sad16x16x3_ssse3_jumptable - .vp8_sad16x16x3_ssse3_do_jump
-        add             rax,  rcx  ; get the absolute address of vp8_sad16x16x3_ssse3_jumptable
-
-        movsxd          rax,  dword [rax + 4*rdx]   ; get the 32 bit offset from the jumptable
-        add             rcx,        rax
-
-        movsxd          rax,        dword ptr arg(1) ;src_stride
-        movsxd          rdx,        dword ptr arg(3) ;ref_stride
-
-        jmp             rcx
-
-        PROCESS_16X16X3_OFFSET 0,  .vp8_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 1,  .vp8_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 2,  .vp8_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 3,  .vp8_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 4,  .vp8_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 5,  .vp8_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 6,  .vp8_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 7,  .vp8_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 8,  .vp8_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 9,  .vp8_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 10, .vp8_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 11, .vp8_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 12, .vp8_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 13, .vp8_sad16x16x3_ssse3
-        PROCESS_16X16X3_OFFSET 14, .vp8_sad16x16x3_ssse3
-
-.vp8_sad16x16x3_ssse3_aligned_by_15:
-        PROCESS_16X2X3 1
-        PROCESS_16X2X3 0
-        PROCESS_16X2X3 0
-        PROCESS_16X2X3 0
-        PROCESS_16X2X3 0
-        PROCESS_16X2X3 0
-        PROCESS_16X2X3 0
-        PROCESS_16X2X3 0
-
-.vp8_sad16x16x3_ssse3_store_off:
-        mov             rdi,        arg(4) ;Results
-
-        movq            xmm0,       xmm5
-        psrldq          xmm5,       8
-
-        paddw           xmm0,       xmm5
-        movd            [rdi],      xmm0
-;-
-        movq            xmm0,       xmm6
-        psrldq          xmm6,       8
-
-        paddw           xmm0,       xmm6
-        movd            [rdi+4],    xmm0
-;-
-        movq            xmm0,       xmm7
-        psrldq          xmm7,       8
-
-        paddw           xmm0,       xmm7
-        movd            [rdi+8],    xmm0
-
-    ; begin epilog
-    pop         rcx
-    pop         rdi
-    pop         rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
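The call/pop sequence above recovers the current instruction pointer so the jump table can hold position-independent 32-bit offsets; dispatch then indexes the table by (ref_ptr & 15) to reach an alignment-specialized loop. In C the same selection is just a table of functions. A minimal sketch, with one scalar body standing in for all sixteen specializations:

    #include <stdint.h>
    #include <stdlib.h>

    static void sad16x16x3_scalar(const unsigned char *src, int src_stride,
                                  const unsigned char *ref, int ref_stride,
                                  int results[3])
    {
        for (int off = 0; off < 3; ++off) {
            int s = 0;
            const unsigned char *sp = src, *rp = ref + off;
            for (int r = 0; r < 16; ++r, sp += src_stride, rp += ref_stride)
                for (int c = 0; c < 16; ++c)
                    s += abs(sp[c] - rp[c]);
            results[off] = s;
        }
    }

    typedef void (*sad_x3_fn)(const unsigned char *, int,
                              const unsigned char *, int, int *);

    static void sad16x16x3_dispatch(const unsigned char *src, int src_stride,
                                    const unsigned char *ref, int ref_stride,
                                    int results[3])
    {
        sad_x3_fn table[16];
        for (int i = 0; i < 16; ++i)   /* asm: 16 alignment-tuned bodies */
            table[i] = sad16x16x3_scalar;
        table[(uintptr_t)ref & 15](src, src_stride, ref, ref_stride, results);
    }
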
-;void vp8_sad16x8x3_ssse3(
-;    unsigned char *src_ptr,
-;    int  src_stride,
-;    unsigned char *ref_ptr,
-;    int  ref_stride,
-;    int  *results)
-global sym(vp8_sad16x8x3_ssse3) PRIVATE
-sym(vp8_sad16x8x3_ssse3):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 5
-    SAVE_XMM 7
-    push        rsi
-    push        rdi
-    push        rcx
-    ; end prolog
-
-        mov             rsi,        arg(0) ;src_ptr
-        mov             rdi,        arg(2) ;ref_ptr
-
-        mov             rdx,        0xf
-        and             rdx,        rdi
-
-        jmp .vp8_sad16x8x3_ssse3_skiptable
-.vp8_sad16x8x3_ssse3_jumptable:
-        dd .vp8_sad16x8x3_ssse3_aligned_by_0  - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_1  - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_2  - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_3  - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_4  - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_5  - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_6  - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_7  - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_8  - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_9  - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_10 - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_11 - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_12 - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_13 - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_14 - .vp8_sad16x8x3_ssse3_do_jump
-        dd .vp8_sad16x8x3_ssse3_aligned_by_15 - .vp8_sad16x8x3_ssse3_do_jump
-.vp8_sad16x8x3_ssse3_skiptable:
-
-        call .vp8_sad16x8x3_ssse3_do_jump
-.vp8_sad16x8x3_ssse3_do_jump:
-        pop             rcx                         ; get the address of do_jump
-        mov             rax,  .vp8_sad16x8x3_ssse3_jumptable - .vp8_sad16x8x3_ssse3_do_jump
-        add             rax,  rcx  ; get the absolute address of vp8_sad16x8x3_ssse3_jumptable
-
-        movsxd          rax,  dword [rax + 4*rdx]   ; get the 32 bit offset from the jumptable
-        add             rcx,        rax
-
-        movsxd          rax,        dword ptr arg(1) ;src_stride
-        movsxd          rdx,        dword ptr arg(3) ;ref_stride
-
-        jmp             rcx
-
-        PROCESS_16X8X3_OFFSET 0,  .vp8_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 1,  .vp8_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 2,  .vp8_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 3,  .vp8_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 4,  .vp8_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 5,  .vp8_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 6,  .vp8_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 7,  .vp8_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 8,  .vp8_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 9,  .vp8_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 10, .vp8_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 11, .vp8_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 12, .vp8_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 13, .vp8_sad16x8x3_ssse3
-        PROCESS_16X8X3_OFFSET 14, .vp8_sad16x8x3_ssse3
-
-.vp8_sad16x8x3_ssse3_aligned_by_15:
-
-        PROCESS_16X2X3 1
-        PROCESS_16X2X3 0
-        PROCESS_16X2X3 0
-        PROCESS_16X2X3 0
-
-.vp8_sad16x8x3_ssse3_store_off:
-        mov             rdi,        arg(4) ;Results
-
-        movq            xmm0,       xmm5
-        psrldq          xmm5,       8
-
-        paddw           xmm0,       xmm5
-        movd            [rdi],      xmm0
-;-
-        movq            xmm0,       xmm6
-        psrldq          xmm6,       8
-
-        paddw           xmm0,       xmm6
-        movd            [rdi+4],    xmm0
-;-
-        movq            xmm0,       xmm7
-        psrldq          xmm7,       8
-
-        paddw           xmm0,       xmm7
-        movd            [rdi+8],    xmm0
-
-    ; begin epilog
-    pop         rcx
-    pop         rdi
-    pop         rsi
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
--- a/media/libvpx/vp8/common/x86/variance_impl_sse2.asm
+++ b/media/libvpx/vp8/common/x86/variance_impl_sse2.asm
@@ -8,403 +8,16 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
 
 %include "vpx_ports/x86_abi_support.asm"
 
 %define xmm_filter_shift            7
 
-;unsigned int vp8_get_mb_ss_sse2
-;(
-;    short *src_ptr
-;)
-global sym(vp8_get_mb_ss_sse2) PRIVATE
-sym(vp8_get_mb_ss_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 1
-    GET_GOT     rbx
-    push rsi
-    push rdi
-    sub         rsp, 16
-    ; end prolog
-
-
-        mov         rax, arg(0) ;[src_ptr]
-        mov         rcx, 8
-        pxor        xmm4, xmm4
-
-.NEXTROW:
-        movdqa      xmm0, [rax]
-        movdqa      xmm1, [rax+16]
-        movdqa      xmm2, [rax+32]
-        movdqa      xmm3, [rax+48]
-        pmaddwd     xmm0, xmm0
-        pmaddwd     xmm1, xmm1
-        pmaddwd     xmm2, xmm2
-        pmaddwd     xmm3, xmm3
-
-        paddd       xmm0, xmm1
-        paddd       xmm2, xmm3
-        paddd       xmm4, xmm0
-        paddd       xmm4, xmm2
-
-        add         rax, 0x40
-        dec         rcx
-        ja          .NEXTROW
-
-        movdqa      xmm3,xmm4
-        psrldq      xmm4,8
-        paddd       xmm4,xmm3
-        movdqa      xmm3,xmm4
-        psrldq      xmm4,4
-        paddd       xmm4,xmm3
-        movq        rax,xmm4
-
-
-    ; begin epilog
-    add rsp, 16
-    pop rdi
-    pop rsi
-    RESTORE_GOT
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-
-;unsigned int vp8_get16x16var_sse2
-;(
-;    unsigned char   *  src_ptr,
-;    int             source_stride,
-;    unsigned char   *  ref_ptr,
-;    int             recon_stride,
-;    unsigned int    *  SSE,
-;    int             *  Sum
-;)
-global sym(vp8_get16x16var_sse2) PRIVATE
-sym(vp8_get16x16var_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    push rbx
-    push rsi
-    push rdi
-    ; end prolog
-
-        mov         rsi,            arg(0) ;[src_ptr]
-        mov         rdi,            arg(2) ;[ref_ptr]
-
-        movsxd      rax,            DWORD PTR arg(1) ;[source_stride]
-        movsxd      rdx,            DWORD PTR arg(3) ;[recon_stride]
-
-        ; Prefetch data
-        lea             rcx,    [rax+rax*2]
-        prefetcht0      [rsi]
-        prefetcht0      [rsi+rax]
-        prefetcht0      [rsi+rax*2]
-        prefetcht0      [rsi+rcx]
-        lea             rbx,    [rsi+rax*4]
-        prefetcht0      [rbx]
-        prefetcht0      [rbx+rax]
-        prefetcht0      [rbx+rax*2]
-        prefetcht0      [rbx+rcx]
-
-        lea             rcx,    [rdx+rdx*2]
-        prefetcht0      [rdi]
-        prefetcht0      [rdi+rdx]
-        prefetcht0      [rdi+rdx*2]
-        prefetcht0      [rdi+rcx]
-        lea             rbx,    [rdi+rdx*4]
-        prefetcht0      [rbx]
-        prefetcht0      [rbx+rdx]
-        prefetcht0      [rbx+rdx*2]
-        prefetcht0      [rbx+rcx]
-
-        pxor        xmm0,           xmm0                        ; clear xmm0 for unpack
-        pxor        xmm7,           xmm7                        ; clear xmm7 for accumulating diffs
-
-        pxor        xmm6,           xmm6                        ; clear xmm6 for accumulating sse
-        mov         rcx,            16
-
-.var16loop:
-        movdqu      xmm1,           XMMWORD PTR [rsi]
-        movdqu      xmm2,           XMMWORD PTR [rdi]
-
-        prefetcht0      [rsi+rax*8]
-        prefetcht0      [rdi+rdx*8]
-
-        movdqa      xmm3,           xmm1
-        movdqa      xmm4,           xmm2
-
-
-        punpcklbw   xmm1,           xmm0
-        punpckhbw   xmm3,           xmm0
-
-        punpcklbw   xmm2,           xmm0
-        punpckhbw   xmm4,           xmm0
-
-
-        psubw       xmm1,           xmm2
-        psubw       xmm3,           xmm4
-
-        paddw       xmm7,           xmm1
-        pmaddwd     xmm1,           xmm1
-
-        paddw       xmm7,           xmm3
-        pmaddwd     xmm3,           xmm3
-
-        paddd       xmm6,           xmm1
-        paddd       xmm6,           xmm3
-
-        add         rsi,            rax
-        add         rdi,            rdx
-
-        sub         rcx,            1
-        jnz         .var16loop
-
-
-        movdqa      xmm1,           xmm6
-        pxor        xmm6,           xmm6
-
-        pxor        xmm5,           xmm5
-        punpcklwd   xmm6,           xmm7
-
-        punpckhwd   xmm5,           xmm7
-        psrad       xmm5,           16
-
-        psrad       xmm6,           16
-        paddd       xmm6,           xmm5
-
-        movdqa      xmm2,           xmm1
-        punpckldq   xmm1,           xmm0
-
-        punpckhdq   xmm2,           xmm0
-        movdqa      xmm7,           xmm6
-
-        paddd       xmm1,           xmm2
-        punpckldq   xmm6,           xmm0
-
-        punpckhdq   xmm7,           xmm0
-        paddd       xmm6,           xmm7
-
-        movdqa      xmm2,           xmm1
-        movdqa      xmm7,           xmm6
-
-        psrldq      xmm1,           8
-        psrldq      xmm6,           8
-
-        paddd       xmm7,           xmm6
-        paddd       xmm1,           xmm2
-
-        mov         rax,            arg(5) ;[Sum]
-        mov         rdi,            arg(4) ;[SSE]
-
-        movd DWORD PTR [rax],       xmm7
-        movd DWORD PTR [rdi],       xmm1
-
-
-    ; begin epilog
-    pop rdi
-    pop rsi
-    pop rbx
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-
-
-
-;unsigned int vp8_get8x8var_sse2
-;(
-;    unsigned char   *  src_ptr,
-;    int             source_stride,
-;    unsigned char   *  ref_ptr,
-;    int             recon_stride,
-;    unsigned int    *  SSE,
-;    int             *  Sum
-;)
-global sym(vp8_get8x8var_sse2) PRIVATE
-sym(vp8_get8x8var_sse2):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    SAVE_XMM 7
-    GET_GOT     rbx
-    push rsi
-    push rdi
-    sub         rsp, 16
-    ; end prolog
-
-        mov         rsi,            arg(0) ;[src_ptr]
-        mov         rdi,            arg(2) ;[ref_ptr]
-
-        movsxd      rax,            DWORD PTR arg(1) ;[source_stride]
-        movsxd      rdx,            DWORD PTR arg(3) ;[recon_stride]
-
-        pxor        xmm0,           xmm0                        ; clear xmm0 for unpack
-        pxor        xmm7,           xmm7                        ; clear xmm7 for accumulating diffs
-
-        movq        xmm1,           QWORD PTR [rsi]
-        movq        xmm2,           QWORD PTR [rdi]
-
-        punpcklbw   xmm1,           xmm0
-        punpcklbw   xmm2,           xmm0
-
-        psubsw      xmm1,           xmm2
-        paddw       xmm7,           xmm1
-
-        pmaddwd     xmm1,           xmm1
-
-        movq        xmm2,           QWORD PTR[rsi + rax]
-        movq        xmm3,           QWORD PTR[rdi + rdx]
-
-        punpcklbw   xmm2,           xmm0
-        punpcklbw   xmm3,           xmm0
-
-        psubsw      xmm2,           xmm3
-        paddw       xmm7,           xmm2
-
-        pmaddwd     xmm2,           xmm2
-        paddd       xmm1,           xmm2
-
-
-        movq        xmm2,           QWORD PTR[rsi + rax * 2]
-        movq        xmm3,           QWORD PTR[rdi + rdx * 2]
-
-        punpcklbw   xmm2,           xmm0
-        punpcklbw   xmm3,           xmm0
-
-        psubsw      xmm2,           xmm3
-        paddw       xmm7,           xmm2
-
-        pmaddwd     xmm2,           xmm2
-        paddd       xmm1,           xmm2
-
-
-        lea         rsi,            [rsi + rax * 2]
-        lea         rdi,            [rdi + rdx * 2]
-        movq        xmm2,           QWORD PTR[rsi + rax]
-        movq        xmm3,           QWORD PTR[rdi + rdx]
-
-        punpcklbw   xmm2,           xmm0
-        punpcklbw   xmm3,           xmm0
-
-        psubsw      xmm2,           xmm3
-        paddw       xmm7,           xmm2
-
-        pmaddwd     xmm2,           xmm2
-        paddd       xmm1,           xmm2
-
-        movq        xmm2,           QWORD PTR[rsi + rax *2]
-        movq        xmm3,           QWORD PTR[rdi + rdx *2]
-
-        punpcklbw   xmm2,           xmm0
-        punpcklbw   xmm3,           xmm0
-
-        psubsw      xmm2,           xmm3
-        paddw       xmm7,           xmm2
-
-        pmaddwd     xmm2,           xmm2
-        paddd       xmm1,           xmm2
-
-
-        lea         rsi,            [rsi + rax * 2]
-        lea         rdi,            [rdi + rdx * 2]
-
-
-        movq        xmm2,           QWORD PTR[rsi + rax]
-        movq        xmm3,           QWORD PTR[rdi + rdx]
-
-        punpcklbw   xmm2,           xmm0
-        punpcklbw   xmm3,           xmm0
-
-        psubsw      xmm2,           xmm3
-        paddw       xmm7,           xmm2
-
-        pmaddwd     xmm2,           xmm2
-        paddd       xmm1,           xmm2
-
-        movq        xmm2,           QWORD PTR[rsi + rax *2]
-        movq        xmm3,           QWORD PTR[rdi + rdx *2]
-
-        punpcklbw   xmm2,           xmm0
-        punpcklbw   xmm3,           xmm0
-
-        psubsw      xmm2,           xmm3
-        paddw       xmm7,           xmm2
-
-        pmaddwd     xmm2,           xmm2
-        paddd       xmm1,           xmm2
-
-
-        lea         rsi,            [rsi + rax * 2]
-        lea         rdi,            [rdi + rdx * 2]
-
-        movq        xmm2,           QWORD PTR[rsi + rax]
-        movq        xmm3,           QWORD PTR[rdi + rdx]
-
-        punpcklbw   xmm2,           xmm0
-        punpcklbw   xmm3,           xmm0
-
-        psubsw      xmm2,           xmm3
-        paddw       xmm7,           xmm2
-
-        pmaddwd     xmm2,           xmm2
-        paddd       xmm1,           xmm2
-
-
-        movdqa      xmm6,           xmm7
-        punpcklwd   xmm6,           xmm0
-
-        punpckhwd   xmm7,           xmm0
-        movdqa      xmm2,           xmm1
-
-        paddw       xmm6,           xmm7
-        punpckldq   xmm1,           xmm0
-
-        punpckhdq   xmm2,           xmm0
-        movdqa      xmm7,           xmm6
-
-        paddd       xmm1,           xmm2
-        punpckldq   xmm6,           xmm0
-
-        punpckhdq   xmm7,           xmm0
-        paddw       xmm6,           xmm7
-
-        movdqa      xmm2,           xmm1
-        movdqa      xmm7,           xmm6
-
-        psrldq      xmm1,           8
-        psrldq      xmm6,           8
-
-        paddw       xmm7,           xmm6
-        paddd       xmm1,           xmm2
-
-        mov         rax,            arg(5) ;[Sum]
-        mov         rdi,            arg(4) ;[SSE]
-
-        movq        rdx,            xmm7
-        movsx       rcx,            dx
-
-        mov  dword ptr [rax],       ecx
-        movd DWORD PTR [rdi],       xmm1
-
-    ; begin epilog
-    add rsp, 16
-    pop rdi
-    pop rsi
-    RESTORE_GOT
-    RESTORE_XMM
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
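The removed helper reports both the sum of differences and the sum of squared differences; the variance wrappers then derive variance as SSE - Sum^2/256 for a 16x16 block. A scalar model (the return value shows that final step; the asm's callers read SSE and Sum through the pointers):

    #include <stdint.h>

    /* Scalar model of the deleted 16x16 variance helper. */
    static unsigned int get16x16var_model(const unsigned char *src, int src_stride,
                                          const unsigned char *ref, int ref_stride,
                                          unsigned int *sse, int *sum)
    {
        int s = 0;
        unsigned int ss = 0;
        for (int r = 0; r < 16; ++r) {
            for (int c = 0; c < 16; ++c) {
                const int d = src[c] - ref[c];
                s += d;
                ss += (unsigned int)(d * d);
            }
            src += src_stride;
            ref += ref_stride;
        }
        *sum = s;
        *sse = ss;
        return ss - (unsigned int)(((int64_t)s * s) >> 8);  /* variance */
    }
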
 ;void vp8_filter_block2d_bil_var_sse2
 ;(
 ;    unsigned char *ref_ptr,
 ;    int ref_pixels_per_line,
 ;    unsigned char *src_ptr,
 ;    int src_pixels_per_line,
 ;    unsigned int Height,
 ;    int  xoffset,
--- a/media/libvpx/vp8/common/x86/variance_ssse3.c
+++ b/media/libvpx/vp8/common/x86/variance_ssse3.c
@@ -3,29 +3,21 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include "./vp8_rtcd.h"
 #include "vpx_config.h"
 #include "vp8/common/variance.h"
 #include "vpx_ports/mem.h"
 
-extern unsigned int vp8_get16x16var_sse2
-(
-    const unsigned char *src_ptr,
-    int source_stride,
-    const unsigned char *ref_ptr,
-    int recon_stride,
-    unsigned int *SSE,
-    int *Sum
-);
 extern void vp8_half_horiz_vert_variance16x_h_sse2
 (
     const unsigned char *ref_ptr,
     int ref_pixels_per_line,
     const unsigned char *src_ptr,
     int src_pixels_per_line,
     unsigned int Height,
     int *sum,
--- a/media/libvpx/vp8/common/x86/vp8_asm_stubs.c
+++ b/media/libvpx/vp8/common/x86/vp8_asm_stubs.c
@@ -122,17 +122,17 @@ void vp8_sixtap_predict4x4_mmx
     unsigned char  *src_ptr,
     int   src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     unsigned char *dst_ptr,
     int dst_pitch
 )
 {
-    DECLARE_ALIGNED_ARRAY(16, unsigned short, FData2, 16*16);  /* Temp data buffer used in filtering */
+    DECLARE_ALIGNED(16, unsigned short, FData2[16*16]);  /* Temp data buffer used in filtering */
     const short *HFilter, *VFilter;
     HFilter = vp8_six_tap_mmx[xoffset];
     vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line), FData2, src_pixels_per_line, 1, 9, 8, HFilter);
     VFilter = vp8_six_tap_mmx[yoffset];
     vp8_filter_block1dc_v6_mmx(FData2 + 8, dst_ptr, dst_pitch, 8, 4 , 4, 4, VFilter);
 
 }
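These stubs all follow the same two-pass shape: filter (block height + 5) source rows horizontally into the aligned FData2 scratch buffer, then filter the columns vertically into the destination. A schematic C version for the 4x4 case, rounding with the filter shift of 7 seen elsewhere in this patch (xmm_filter_shift) and with intermediate precision simplified; it is not the exact fixed-point pipeline of the mmx/sse2 kernels:

    static unsigned char clamp255(int v)
    {
        return (unsigned char)(v < 0 ? 0 : v > 255 ? 255 : v);
    }

    static void sixtap_predict4x4_model(const unsigned char *src, int src_stride,
                                        const short h[6], const short v[6],
                                        unsigned char *dst, int dst_pitch)
    {
        int tmp[9][4];                              /* 4 + 5 extra rows */
        const unsigned char *s = src - 2 * src_stride - 2;
        for (int r = 0; r < 9; ++r)                 /* pass 1: horizontal */
            for (int c = 0; c < 4; ++c) {
                int sum = 64;                       /* rounding: +(1 << 6) */
                for (int t = 0; t < 6; ++t)
                    sum += s[r * src_stride + c + t] * h[t];
                tmp[r][c] = clamp255(sum >> 7);
            }
        for (int r = 0; r < 4; ++r)                 /* pass 2: vertical */
            for (int c = 0; c < 4; ++c) {
                int sum = 64;
                for (int t = 0; t < 6; ++t)
                    sum += tmp[r + t][c] * v[t];
                dst[r * dst_pitch + c] = clamp255(sum >> 7);
            }
    }
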
 
@@ -143,17 +143,17 @@ void vp8_sixtap_predict16x16_mmx
     int   src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     unsigned char *dst_ptr,
     int dst_pitch
 )
 {
 
-    DECLARE_ALIGNED_ARRAY(16, unsigned short, FData2, 24*24);  /* Temp data buffer used in filtering */
+    DECLARE_ALIGNED(16, unsigned short, FData2[24*24]);  /* Temp data buffer used in filtering */
 
     const short *HFilter, *VFilter;
 
 
     HFilter = vp8_six_tap_mmx[xoffset];
 
     vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line),    FData2,   src_pixels_per_line, 1, 21, 32, HFilter);
     vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line) + 4,  FData2 + 4, src_pixels_per_line, 1, 21, 32, HFilter);
@@ -175,17 +175,17 @@ void vp8_sixtap_predict8x8_mmx
     int   src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     unsigned char *dst_ptr,
     int dst_pitch
 )
 {
 
-    DECLARE_ALIGNED_ARRAY(16, unsigned short, FData2, 256);    /* Temp data buffer used in filtering */
+    DECLARE_ALIGNED(16, unsigned short, FData2[256]);    /* Temp data buffer used in filtering */
 
     const short *HFilter, *VFilter;
 
     HFilter = vp8_six_tap_mmx[xoffset];
     vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line),    FData2,   src_pixels_per_line, 1, 13, 16, HFilter);
     vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line) + 4,  FData2 + 4, src_pixels_per_line, 1, 13, 16, HFilter);
 
     VFilter = vp8_six_tap_mmx[yoffset];
@@ -201,17 +201,17 @@ void vp8_sixtap_predict8x4_mmx
     int   src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     unsigned char *dst_ptr,
     int dst_pitch
 )
 {
 
-    DECLARE_ALIGNED_ARRAY(16, unsigned short, FData2, 256);    /* Temp data buffer used in filtering */
+    DECLARE_ALIGNED(16, unsigned short, FData2[256]);    /* Temp data buffer used in filtering */
 
     const short *HFilter, *VFilter;
 
     HFilter = vp8_six_tap_mmx[xoffset];
     vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line),    FData2,   src_pixels_per_line, 1, 9, 16, HFilter);
     vp8_filter_block1d_h6_mmx(src_ptr - (2 * src_pixels_per_line) + 4,  FData2 + 4, src_pixels_per_line, 1, 9, 16, HFilter);
 
     VFilter = vp8_six_tap_mmx[yoffset];
@@ -247,17 +247,17 @@ void vp8_sixtap_predict16x16_sse2
     int   src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     unsigned char *dst_ptr,
     int dst_pitch
 
 )
 {
-    DECLARE_ALIGNED_ARRAY(16, unsigned short, FData2, 24*24);    /* Temp data buffer used in filtering */
+    DECLARE_ALIGNED(16, unsigned short, FData2[24*24]);    /* Temp data buffer used in filtering */
 
     const short *HFilter, *VFilter;
 
     if (xoffset)
     {
         if (yoffset)
         {
             HFilter = vp8_six_tap_mmx[xoffset];
@@ -287,17 +287,17 @@ void vp8_sixtap_predict8x8_sse2
     unsigned char  *src_ptr,
     int   src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     unsigned char *dst_ptr,
     int dst_pitch
 )
 {
-    DECLARE_ALIGNED_ARRAY(16, unsigned short, FData2, 256);  /* Temp data buffer used in filtering */
+    DECLARE_ALIGNED(16, unsigned short, FData2[256]);  /* Temp data buffer used in filtering */
     const short *HFilter, *VFilter;
 
     if (xoffset)
     {
         if (yoffset)
         {
             HFilter = vp8_six_tap_mmx[xoffset];
             vp8_filter_block1d8_h6_sse2(src_ptr - (2 * src_pixels_per_line), FData2,   src_pixels_per_line, 1, 13, 16, HFilter);
@@ -325,17 +325,17 @@ void vp8_sixtap_predict8x4_sse2
     unsigned char  *src_ptr,
     int   src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     unsigned char *dst_ptr,
     int dst_pitch
 )
 {
-    DECLARE_ALIGNED_ARRAY(16, unsigned short, FData2, 256);  /* Temp data buffer used in filtering */
+    DECLARE_ALIGNED(16, unsigned short, FData2[256]);  /* Temp data buffer used in filtering */
     const short *HFilter, *VFilter;
 
     if (xoffset)
     {
         if (yoffset)
         {
             HFilter = vp8_six_tap_mmx[xoffset];
             vp8_filter_block1d8_h6_sse2(src_ptr - (2 * src_pixels_per_line), FData2,   src_pixels_per_line, 1, 9, 16, HFilter);
@@ -427,17 +427,17 @@ void vp8_sixtap_predict16x16_ssse3
     int   src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     unsigned char *dst_ptr,
     int dst_pitch
 
 )
 {
-    DECLARE_ALIGNED_ARRAY(16, unsigned char, FData2, 24*24);
+    DECLARE_ALIGNED(16, unsigned char, FData2[24*24]);
 
     if (xoffset)
     {
         if (yoffset)
         {
             vp8_filter_block1d16_h6_ssse3(src_ptr - (2 * src_pixels_per_line),
                                           src_pixels_per_line, FData2,
                                           16, 21, xoffset);
@@ -475,17 +475,17 @@ void vp8_sixtap_predict8x8_ssse3
     unsigned char  *src_ptr,
     int   src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     unsigned char *dst_ptr,
     int dst_pitch
 )
 {
-    DECLARE_ALIGNED_ARRAY(16, unsigned char, FData2, 256);
+    DECLARE_ALIGNED(16, unsigned char, FData2[256]);
 
     if (xoffset)
     {
         if (yoffset)
         {
             vp8_filter_block1d8_h6_ssse3(src_ptr - (2 * src_pixels_per_line),
                                          src_pixels_per_line, FData2,
                                          8, 13, xoffset);
@@ -523,17 +523,17 @@ void vp8_sixtap_predict8x4_ssse3
     unsigned char  *src_ptr,
     int   src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     unsigned char *dst_ptr,
     int dst_pitch
 )
 {
-    DECLARE_ALIGNED_ARRAY(16, unsigned char, FData2, 256);
+    DECLARE_ALIGNED(16, unsigned char, FData2[256]);
 
     if (xoffset)
     {
         if (yoffset)
         {
             vp8_filter_block1d8_h6_ssse3(src_ptr - (2 * src_pixels_per_line),
                                          src_pixels_per_line, FData2,
                                          8, 9, xoffset);
@@ -571,17 +571,17 @@ void vp8_sixtap_predict4x4_ssse3
     unsigned char  *src_ptr,
     int   src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     unsigned char *dst_ptr,
     int dst_pitch
 )
 {
-  DECLARE_ALIGNED_ARRAY(16, unsigned char, FData2, 4*9);
+  DECLARE_ALIGNED(16, unsigned char, FData2[4*9]);
 
   if (xoffset)
   {
       if (yoffset)
       {
           vp8_filter_block1d4_h6_ssse3(src_ptr - (2 * src_pixels_per_line),
                                        src_pixels_per_line,
                                        FData2, 4, 9, xoffset);
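Throughout this file the change swaps DECLARE_ALIGNED_ARRAY (a padded backing store plus a hand-aligned pointer) for the simpler DECLARE_ALIGNED attribute form. A minimal sketch of the new form, assuming a GCC/Clang-style compiler (MSVC builds would use a __declspec(align) variant; the real definitions live in vpx_ports/mem.h):

    #include <stdint.h>

    /* Illustrative only; mirrors the GCC branch of the real macro. */
    #define DECLARE_ALIGNED(n, typ, val) typ val __attribute__((aligned(n)))

    static int aligned_demo(void)
    {
        DECLARE_ALIGNED(16, unsigned short, FData2[16 * 16]);
        FData2[0] = 0;                            /* touch the buffer */
        return ((uintptr_t)FData2 & 15) == 0;     /* 16-byte aligned */
    }
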
rename from media/libvpx/vp8/common/x86/variance_impl_mmx.asm
rename to media/libvpx/vp8/common/x86/vp8_variance_impl_mmx.asm
--- a/media/libvpx/vp8/common/x86/variance_impl_mmx.asm
+++ b/media/libvpx/vp8/common/x86/vp8_variance_impl_mmx.asm
@@ -6,514 +6,16 @@
 ;  tree. An additional intellectual property rights grant can be found
 ;  in the file PATENTS.  All contributing project authors may
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
 
 %include "vpx_ports/x86_abi_support.asm"
 
-;unsigned int vp8_get_mb_ss_mmx( short *src_ptr )
-global sym(vp8_get_mb_ss_mmx) PRIVATE
-sym(vp8_get_mb_ss_mmx):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 7
-    GET_GOT     rbx
-    push rsi
-    push rdi
-    sub         rsp, 8
-    ; end prolog
-
-        mov         rax, arg(0) ;src_ptr
-        mov         rcx, 16
-        pxor        mm4, mm4
-
-.NEXTROW:
-        movq        mm0, [rax]
-        movq        mm1, [rax+8]
-        movq        mm2, [rax+16]
-        movq        mm3, [rax+24]
-        pmaddwd     mm0, mm0
-        pmaddwd     mm1, mm1
-        pmaddwd     mm2, mm2
-        pmaddwd     mm3, mm3
-
-        paddd       mm4, mm0
-        paddd       mm4, mm1
-        paddd       mm4, mm2
-        paddd       mm4, mm3
-
-        add         rax, 32
-        dec         rcx
-        ja          .NEXTROW
-        movq        QWORD PTR [rsp], mm4
-
-        ;return sum[0]+sum[1];
-        movsxd      rax, dword ptr [rsp]
-        movsxd      rcx, dword ptr [rsp+4]
-        add         rax, rcx
-
-
-    ; begin epilog
-    add rsp, 8
-    pop rdi
-    pop rsi
-    RESTORE_GOT
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-
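This helper sums the squares of a macroblock's 256 residual values (the loop above reads 16 shorts per iteration for 16 iterations). The scalar equivalent:

    /* Sum of squares over 256 (16x16) short-valued samples. */
    static unsigned int get_mb_ss_model(const short *src_ptr)
    {
        unsigned int sum = 0;
        for (int i = 0; i < 256; ++i)
            sum += (unsigned int)(src_ptr[i] * src_ptr[i]);
        return sum;
    }
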
-;unsigned int vp8_get8x8var_mmx
-;(
-;    unsigned char *src_ptr,
-;    int  source_stride,
-;    unsigned char *ref_ptr,
-;    int  recon_stride,
-;    unsigned int *SSE,
-;    int *Sum
-;)
-global sym(vp8_get8x8var_mmx) PRIVATE
-sym(vp8_get8x8var_mmx):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    push rsi
-    push rdi
-    push rbx
-    sub         rsp, 16
-    ; end prolog
-
-
-        pxor        mm5, mm5                    ; Blank mm5
-        pxor        mm6, mm6                    ; Blank mm6
-        pxor        mm7, mm7                    ; Blank mm7
-
-        mov         rax, arg(0) ;[src_ptr]  ; Load base addresses
-        mov         rbx, arg(2) ;[ref_ptr]
-        movsxd      rcx, dword ptr arg(1) ;[source_stride]
-        movsxd      rdx, dword ptr arg(3) ;[recon_stride]
-
-        ; Row 1
-        movq        mm0, [rax]                  ; Copy eight bytes to mm0
-        movq        mm1, [rbx]                  ; Copy eight bytes to mm1
-        movq        mm2, mm0                    ; Take copies
-        movq        mm3, mm1                    ; Take copies
-
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        punpckhbw   mm2, mm6                    ; unpack to higher precision
-        punpckhbw   mm3, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-        psubsw      mm2, mm3                    ; A-B (high order) to MM2
-
-        paddw       mm5, mm0                    ; accumulate differences in mm5
-        paddw       mm5, mm2                    ; accumulate differences in mm5
-
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        pmaddwd     mm2, mm2                    ; square and accumulate
-        add         rbx,rdx                     ; Inc pointer into ref data
-        add         rax,rcx                     ; Inc pointer into the new data
-        movq        mm1, [rbx]                  ; Copy eight bytes to mm1
-        paddd       mm7, mm0                    ; accumulate in mm7
-        paddd       mm7, mm2                    ; accumulate in mm7
-
-
-        ; Row 2
-        movq        mm0, [rax]                  ; Copy eight bytes to mm0
-        movq        mm2, mm0                    ; Take copies
-        movq        mm3, mm1                    ; Take copies
-
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        punpckhbw   mm2, mm6                    ; unpack to higher precision
-        punpckhbw   mm3, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-        psubsw      mm2, mm3                    ; A-B (high order) to MM2
-
-        paddw       mm5, mm0                    ; accumulate differences in mm5
-        paddw       mm5, mm2                    ; accumulate differences in mm5
-
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        pmaddwd     mm2, mm2                    ; square and accumulate
-        add         rbx,rdx                     ; Inc pointer into ref data
-        add         rax,rcx                     ; Inc pointer into the new data
-        movq        mm1, [rbx]                  ; Copy eight bytes to mm1
-        paddd       mm7, mm0                    ; accumulate in mm7
-        paddd       mm7, mm2                    ; accumulate in mm7
-
-        ; Row 3
-        movq        mm0, [rax]                  ; Copy eight bytes to mm0
-        movq        mm2, mm0                    ; Take copies
-        movq        mm3, mm1                    ; Take copies
-
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        punpckhbw   mm2, mm6                    ; unpack to higher precision
-        punpckhbw   mm3, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-        psubsw      mm2, mm3                    ; A-B (high order) to MM2
-
-        paddw       mm5, mm0                    ; accumulate differences in mm5
-        paddw       mm5, mm2                    ; accumulate differences in mm5
-
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        pmaddwd     mm2, mm2                    ; square and accumulate
-        add         rbx,rdx                     ; Inc pointer into ref data
-        add         rax,rcx                     ; Inc pointer into the new data
-        movq        mm1, [rbx]                  ; Copy eight bytes to mm1
-        paddd       mm7, mm0                    ; accumulate in mm7
-        paddd       mm7, mm2                    ; accumulate in mm7
-
-        ; Row 4
-        movq        mm0, [rax]                  ; Copy eight bytes to mm0
-        movq        mm2, mm0                    ; Take copies
-        movq        mm3, mm1                    ; Take copies
-
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        punpckhbw   mm2, mm6                    ; unpack to higher precision
-        punpckhbw   mm3, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-        psubsw      mm2, mm3                    ; A-B (high order) to MM2
-
-        paddw       mm5, mm0                    ; accumulate differences in mm5
-        paddw       mm5, mm2                    ; accumulate differences in mm5
-
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        pmaddwd     mm2, mm2                    ; square and accumulate
-        add         rbx,rdx                     ; Inc pointer into ref data
-        add         rax,rcx                     ; Inc pointer into the new data
-        movq        mm1, [rbx]                  ; Copy eight bytes to mm1
-        paddd       mm7, mm0                    ; accumulate in mm7
-        paddd       mm7, mm2                    ; accumulate in mm7
-
-        ; Row 5
-        movq        mm0, [rax]                  ; Copy eight bytes to mm0
-        movq        mm2, mm0                    ; Take copies
-        movq        mm3, mm1                    ; Take copies
-
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        punpckhbw   mm2, mm6                    ; unpack to higher precision
-        punpckhbw   mm3, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-        psubsw      mm2, mm3                    ; A-B (high order) to MM2
-
-        paddw       mm5, mm0                    ; accumulate differences in mm5
-        paddw       mm5, mm2                    ; accumulate differences in mm5
-
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        pmaddwd     mm2, mm2                    ; square and accumulate
-        add         rbx,rdx                     ; Inc pointer into ref data
-        add         rax,rcx                     ; Inc pointer into the new data
-        movq        mm1, [rbx]                  ; Copy eight bytes to mm1
-        ;              movq        mm4, [rbx + rdx]
-        paddd       mm7, mm0                    ; accumulate in mm7
-        paddd       mm7, mm2                    ; accumulate in mm7
-
-        ; Row 6
-        movq        mm0, [rax]                  ; Copy eight bytes to mm0
-        movq        mm2, mm0                    ; Take copies
-        movq        mm3, mm1                    ; Take copies
-
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        punpckhbw   mm2, mm6                    ; unpack to higher precision
-        punpckhbw   mm3, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-        psubsw      mm2, mm3                    ; A-B (high order) to MM2
-
-        paddw       mm5, mm0                    ; accumulate differences in mm5
-        paddw       mm5, mm2                    ; accumulate differences in mm5
-
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        pmaddwd     mm2, mm2                    ; square and accumulate
-        add         rbx,rdx                     ; Inc pointer into ref data
-        add         rax,rcx                     ; Inc pointer into the new data
-        movq        mm1, [rbx]                  ; Copy eight bytes to mm1
-        paddd       mm7, mm0                    ; accumulate in mm7
-        paddd       mm7, mm2                    ; accumulate in mm7
-
-        ; Row 7
-        movq        mm0, [rax]                  ; Copy eight bytes to mm0
-        movq        mm2, mm0                    ; Take copies
-        movq        mm3, mm1                    ; Take copies
-
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        punpckhbw   mm2, mm6                    ; unpack to higher precision
-        punpckhbw   mm3, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-        psubsw      mm2, mm3                    ; A-B (high order) to MM2
-
-        paddw       mm5, mm0                    ; accumulate differences in mm5
-        paddw       mm5, mm2                    ; accumulate differences in mm5
-
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        pmaddwd     mm2, mm2                    ; square and accumulate
-        add         rbx,rdx                     ; Inc pointer into ref data
-        add         rax,rcx                     ; Inc pointer into the new data
-        movq        mm1, [rbx]                  ; Copy eight bytes to mm1
-        paddd       mm7, mm0                    ; accumulate in mm7
-        paddd       mm7, mm2                    ; accumulate in mm7
-
-        ; Row 8
-        movq        mm0, [rax]                  ; Copy eight bytes to mm0
-        movq        mm2, mm0                    ; Take copies
-        movq        mm3, mm1                    ; Take copies
-
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        punpckhbw   mm2, mm6                    ; unpack to higher precision
-        punpckhbw   mm3, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-        psubsw      mm2, mm3                    ; A-B (high order) to MM2
-
-        paddw       mm5, mm0                    ; accumulate differences in mm5
-        paddw       mm5, mm2                    ; accumulate differences in mm5
-
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        pmaddwd     mm2, mm2                    ; square and accumulate
-        add         rbx,rdx                     ; Inc pointer into ref data
-        add         rax,rcx                     ; Inc pointer into the new data
-        paddd       mm7, mm0                    ; accumulate in mm7
-        paddd       mm7, mm2                    ; accumulate in mm7
-
-        ; Now accumulate the final results.
-        movq        QWORD PTR [rsp+8], mm5      ; copy back accumulated results into normal memory
-        movq        QWORD PTR [rsp], mm7        ; copy back accumulated results into normal memory
-        movsx       rdx, WORD PTR [rsp+8]
-        movsx       rcx, WORD PTR [rsp+10]
-        movsx       rbx, WORD PTR [rsp+12]
-        movsx       rax, WORD PTR [rsp+14]
-        add         rdx, rcx
-        add         rbx, rax
-        add         rdx, rbx    ;XSum
-        movsxd      rax, DWORD PTR [rsp]
-        movsxd      rcx, DWORD PTR [rsp+4]
-        add         rax, rcx    ;XXSum
-        mov         rsi, arg(4) ;SSE
-        mov         rdi, arg(5) ;Sum
-        mov         dword ptr [rsi], eax
-        mov         dword ptr [rdi], edx
-        xor         rax, rax    ; return 0
-
-
-    ; begin epilog
-    add rsp, 16
-    pop rbx
-    pop rdi
-    pop rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-
-
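For reference, the MMX routine above produces two outputs per 8x8 block: the signed sum of pixel differences (Sum, accumulated in mm5) and the sum of squared differences (SSE, accumulated in mm7). A plain-C sketch of the same computation (not the library's own C fallback; names mirror the commented prototype):

    static unsigned int get8x8var_ref(const unsigned char *src_ptr, int source_stride,
                                      const unsigned char *ref_ptr, int recon_stride,
                                      unsigned int *SSE, int *Sum)
    {
        int i, j, sum = 0;
        unsigned int sse = 0;
        for (i = 0; i < 8; i++) {
            for (j = 0; j < 8; j++) {
                int diff = src_ptr[j] - ref_ptr[j];   /* A-B, as in the asm */
                sum += diff;                          /* paddw into mm5 above */
                sse += (unsigned int)(diff * diff);   /* pmaddwd/paddd into mm7 */
            }
            src_ptr += source_stride;
            ref_ptr += recon_stride;
        }
        *SSE = sse;
        *Sum = sum;
        return 0;   /* the asm likewise returns 0 via xor rax, rax */
    }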
-;unsigned int
-;vp8_get4x4var_mmx
-;(
-;    unsigned char *src_ptr,
-;    int  source_stride,
-;    unsigned char *ref_ptr,
-;    int  recon_stride,
-;    unsigned int *SSE,
-;    int *Sum
-;)
-global sym(vp8_get4x4var_mmx) PRIVATE
-sym(vp8_get4x4var_mmx):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 6
-    push rsi
-    push rdi
-    push rbx
-    sub         rsp, 16
-    ; end prolog
-
-
-        pxor        mm5, mm5                    ; Blank mm5
-        pxor        mm6, mm6                    ; Blank mm6
-        pxor        mm7, mm7                    ; Blank mm7
-
-        mov         rax, arg(0) ;[src_ptr]  ; Load base addresses
-        mov         rbx, arg(2) ;[ref_ptr]
-        movsxd      rcx, dword ptr arg(1) ;[source_stride]
-        movsxd      rdx, dword ptr arg(3) ;[recon_stride]
-
-        ; Row 1
-        movd        mm0, [rax]                  ; Copy four bytes to mm0
-        movd        mm1, [rbx]                  ; Copy four bytes to mm1
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-        paddw       mm5, mm0                    ; accumulate differences in mm5
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        add         rbx,rdx                     ; Inc pointer into ref data
-        add         rax,rcx                     ; Inc pointer into the new data
-        movd        mm1, [rbx]                  ; Copy four bytes to mm1
-        paddd       mm7, mm0                    ; accumulate in mm7
-
-
-        ; Row 2
-        movd        mm0, [rax]                  ; Copy four bytes to mm0
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-        paddw       mm5, mm0                    ; accumulate differences in mm5
-
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        add         rbx,rdx                     ; Inc pointer into ref data
-        add         rax,rcx                     ; Inc pointer into the new data
-        movd        mm1, [rbx]                  ; Copy four bytes to mm1
-        paddd       mm7, mm0                    ; accumulate in mm7
-
-        ; Row 3
-        movd        mm0, [rax]                  ; Copy four bytes to mm0
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-        paddw       mm5, mm0                    ; accumulate differences in mm5
-
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        add         rbx,rdx                     ; Inc pointer into ref data
-        add         rax,rcx                     ; Inc pointer into the new data
-        movd        mm1, [rbx]                  ; Copy four bytes to mm1
-        paddd       mm7, mm0                    ; accumulate in mm7
-
-        ; Row 4
-        movd        mm0, [rax]                  ; Copy four bytes to mm0
-
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-
-        paddw       mm5, mm0                    ; accumulate differences in mm5
-
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        paddd       mm7, mm0                    ; accumulate in mm7
-
-
-        ; Now accumulate the final results.
-        movq        QWORD PTR [rsp+8], mm5      ; copy back accumulated results into normal memory
-        movq        QWORD PTR [rsp], mm7        ; copy back accumulated results into normal memory
-        movsx       rdx, WORD PTR [rsp+8]
-        movsx       rcx, WORD PTR [rsp+10]
-        movsx       rbx, WORD PTR [rsp+12]
-        movsx       rax, WORD PTR [rsp+14]
-        add         rdx, rcx
-        add         rbx, rax
-        add         rdx, rbx    ;XSum
-        movsxd      rax, DWORD PTR [rsp]
-        movsxd      rcx, DWORD PTR [rsp+4]
-        add         rax, rcx    ;XXSum
-        mov         rsi, arg(4) ;SSE
-        mov         rdi, arg(5) ;Sum
-        mov         dword ptr [rsi], eax
-        mov         dword ptr [rdi], edx
-        xor         rax, rax    ; return 0
-
-
-    ; begin epilog
-    add rsp, 16
-    pop rbx
-    pop rdi
-    pop rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
-
-
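Both helpers return the (SSE, Sum) pair rather than the variance itself; the C wrappers removed later in this patch apply the identity variance = SSE - Sum^2 / N, where N is the block's pixel count. N is always a power of two, so the division is the right shift seen in the removed code (>>4 for 4x4, >>6 for 8x8, >>7 for 16x8 and 8x16, >>8 for 16x16). A one-line sketch:

    /* variance from the (SSE, Sum) pair; shift = log2(pixel count) */
    static unsigned int variance_from_sums(unsigned int sse, int sum, int shift)
    {
        return sse - (((unsigned int)sum * sum) >> shift);
    }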
-;unsigned int
-;vp8_get4x4sse_cs_mmx
-;(
-;    unsigned char *src_ptr,
-;    int  source_stride,
-;    unsigned char *ref_ptr,
-;    int  recon_stride
-;)
-global sym(vp8_get4x4sse_cs_mmx) PRIVATE
-sym(vp8_get4x4sse_cs_mmx):
-    push        rbp
-    mov         rbp, rsp
-    SHADOW_ARGS_TO_STACK 4
-    push rsi
-    push rdi
-    push rbx
-    ; end prolog
-
-
-        pxor        mm6, mm6                    ; Blank mm6
-        pxor        mm7, mm7                    ; Blank mm7
-
-        mov         rax, arg(0) ;[src_ptr]  ; Load base addresses
-        mov         rbx, arg(2) ;[ref_ptr]
-        movsxd      rcx, dword ptr arg(1) ;[source_stride]
-        movsxd      rdx, dword ptr arg(3) ;[recon_stride]
-        ; Row 1
-        movd        mm0, [rax]                  ; Copy four bytes to mm0
-        movd        mm1, [rbx]                  ; Copy four bytes to mm1
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        add         rbx,rdx                     ; Inc pointer into ref data
-        add         rax,rcx                     ; Inc pointer into the new data
-        movd        mm1, [rbx]                  ; Copy four bytes to mm1
-        paddd       mm7, mm0                    ; accumulate in mm7
-
-        ; Row 2
-        movd        mm0, [rax]                  ; Copy four bytes to mm0
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        add         rbx,rdx                     ; Inc pointer into ref data
-        add         rax,rcx                     ; Inc pointer into the new data
-        movd        mm1, [rbx]                  ; Copy four bytes to mm1
-        paddd       mm7, mm0                    ; accumulate in mm7
-
-        ; Row 3
-        movd        mm0, [rax]                  ; Copy four bytes to mm0
-        punpcklbw   mm1, mm6
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        add         rbx,rdx                     ; Inc pointer into ref data
-        add         rax,rcx                     ; Inc pointer into the new data
-        movd        mm1, [rbx]                  ; Copy four bytes to mm1
-        paddd       mm7, mm0                    ; accumulate in mm7
-
-        ; Row 4
-        movd        mm0, [rax]                  ; Copy four bytes to mm0
-        punpcklbw   mm0, mm6                    ; unpack to higher precision
-        punpcklbw   mm1, mm6
-        psubsw      mm0, mm1                    ; A-B (low order) to MM0
-        pmaddwd     mm0, mm0                    ; square and accumulate
-        paddd       mm7, mm0                    ; accumulate in mm7
-
-        movq        mm0,    mm7                 ;
-        psrlq       mm7,    32
-
-        paddd       mm0,    mm7
-        movq        rax,    mm0
-
-
-    ; begin epilog
-    pop rbx
-    pop rdi
-    pop rsi
-    UNSHADOW_ARGS
-    pop         rbp
-    ret
-
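Unlike the two helpers above, vp8_get4x4sse_cs_mmx produces only the sum of squared differences; there is no Sum output, and the final psrlq/paddd pair folds the two 32-bit halves of mm7 into the return value. A plain-C sketch of the same computation:

    static unsigned int get4x4sse_cs_ref(const unsigned char *src_ptr, int source_stride,
                                         const unsigned char *ref_ptr, int recon_stride)
    {
        int i, j;
        unsigned int sse = 0;
        for (i = 0; i < 4; i++) {
            for (j = 0; j < 4; j++) {
                int diff = src_ptr[j] - ref_ptr[j];
                sse += (unsigned int)(diff * diff);
            }
            src_ptr += source_stride;
            ref_ptr += recon_stride;
        }
        return sse;
    }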
 %define mmx_filter_shift            7
 
 ;void vp8_filter_block2d_bil4x4_var_mmx
 ;(
 ;    unsigned char *ref_ptr,
 ;    int ref_pixels_per_line,
 ;    unsigned char *src_ptr,
 ;    int src_pixels_per_line,
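The %define above sets mmx_filter_shift to 7 because VP8's bilinear filter taps sum to 128 (1 << 7): each filtered pixel is rounded and renormalized by a 7-bit right shift. A sketch of the per-tap arithmetic (variable names are illustrative, not the file's own):

    /* a, b: the two source pixels; HFilter[0] + HFilter[1] == 128 */
    out = (a * HFilter[0] + b * HFilter[1] + (1 << (mmx_filter_shift - 1)))
              >> mmx_filter_shift;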
rename from media/libvpx/vp8/common/x86/variance_mmx.c
rename to media/libvpx/vp8/common/x86/vp8_variance_mmx.c
--- a/media/libvpx/vp8/common/x86/variance_mmx.c
+++ b/media/libvpx/vp8/common/x86/vp8_variance_mmx.c
@@ -3,16 +3,17 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include "./vp8_rtcd.h"
 #include "vpx_config.h"
 #include "vp8/common/variance.h"
 #include "vpx_ports/mem.h"
 #include "vp8/common/x86/filter_x86.h"
 
 extern void filter_block1d_h6_mmx
 (
     const unsigned char *src_ptr,
@@ -29,35 +30,16 @@ extern void filter_block1d_v6_mmx
     unsigned char *output_ptr,
     unsigned int pixels_per_line,
     unsigned int pixel_step,
     unsigned int output_height,
     unsigned int output_width,
     short *filter
 );
 
-extern unsigned int vp8_get_mb_ss_mmx(const short *src_ptr);
-extern unsigned int vp8_get8x8var_mmx
-(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *SSE,
-    int *Sum
-);
-extern unsigned int vp8_get4x4var_mmx
-(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *SSE,
-    int *Sum
-);
 extern void vp8_filter_block2d_bil4x4_var_mmx
 (
     const unsigned char *ref_ptr,
     int ref_pixels_per_line,
     const unsigned char *src_ptr,
     int src_pixels_per_line,
     const short *HFilter,
     const short *VFilter,
@@ -72,137 +54,16 @@ extern void vp8_filter_block2d_bil_var_m
     int src_pixels_per_line,
     unsigned int Height,
     const short *HFilter,
     const short *VFilter,
     int *sum,
     unsigned int *sumsquared
 );
 
-
-unsigned int vp8_variance4x4_mmx(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int var;
-    int avg;
-
-    vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 4));
-
-}
-
-unsigned int vp8_variance8x8_mmx(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int var;
-    int avg;
-
-    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
-    *sse = var;
-
-    return (var - (((unsigned int)avg * avg) >> 6));
-
-}
-
-unsigned int vp8_mse16x16_mmx(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int sse0, sse1, sse2, sse3, var;
-    int sum0, sum1, sum2, sum3;
-
-
-    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
-    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
-    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2) ;
-    vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
-
-    var = sse0 + sse1 + sse2 + sse3;
-    *sse = var;
-    return var;
-}
-
-
-unsigned int vp8_variance16x16_mmx(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int sse0, sse1, sse2, sse3, var;
-    int sum0, sum1, sum2, sum3, avg;
-
-
-    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
-    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
-    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2) ;
-    vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);
-
-    var = sse0 + sse1 + sse2 + sse3;
-    avg = sum0 + sum1 + sum2 + sum3;
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 8));
-}
-
-unsigned int vp8_variance16x8_mmx(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int sse0, sse1, var;
-    int sum0, sum1, avg;
-
-    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
-    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
-
-    var = sse0 + sse1;
-    avg = sum0 + sum1;
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 7));
-
-}
-
-
-unsigned int vp8_variance8x16_mmx(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int sse0, sse1, var;
-    int sum0, sum1, avg;
-
-    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
-    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1) ;
-
-    var = sse0 + sse1;
-    avg = sum0 + sum1;
-    *sse = var;
-
-    return (var - (((unsigned int)avg * avg) >> 7));
-
-}
-
-
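The removed 16x16 wrappers tiled the block into four 8x8 sub-blocks at offsets (0,0), (0,8), (8,0) and (8,8) and combined the partial results: vp8_mse16x16_mmx returned the summed SSE directly (MSE skips the mean correction), while vp8_variance16x16_mmx additionally subtracted (Sum^2 >> 8). In outline (ss/rs abbreviate the two strides):

    vp8_get8x8var_mmx(src,              ss, ref,              rs, &sse0, &sum0);
    vp8_get8x8var_mmx(src + 8,          ss, ref + 8,          rs, &sse1, &sum1);
    vp8_get8x8var_mmx(src + 8 * ss,     ss, ref + 8 * rs,     rs, &sse2, &sum2);
    vp8_get8x8var_mmx(src + 8 * ss + 8, ss, ref + 8 * rs + 8, rs, &sse3, &sum3);
    *sse = sse0 + sse1 + sse2 + sse3;                   /* the MSE path returns this */
    return *sse - (((unsigned int)(sum0 + sum1 + sum2 + sum3)
                    * (sum0 + sum1 + sum2 + sum3)) >> 8);  /* the variance path */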
 unsigned int vp8_sub_pixel_variance4x4_mmx
 (
     const unsigned char  *src_ptr,
     int  src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     const unsigned char *dst_ptr,
     int dst_pixels_per_line,
@@ -281,30 +142,16 @@ unsigned int vp8_sub_pixel_variance16x16
     xxsum0 += xxsum1;
 
     *sse = xxsum0;
     return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
 
 
 }
 
-unsigned int vp8_sub_pixel_mse16x16_mmx(
-    const unsigned char  *src_ptr,
-    int  src_pixels_per_line,
-    int  xoffset,
-    int  yoffset,
-    const unsigned char *dst_ptr,
-    int dst_pixels_per_line,
-    unsigned int *sse
-)
-{
-    vp8_sub_pixel_variance16x16_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
-    return *sse;
-}
-
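The helper removed just above was a thin adapter: sub-pixel MSE reuses the sub-pixel variance path and returns the raw SSE that call writes out, skipping the mean correction. Equivalent outline:

    unsigned int sse;
    vp8_sub_pixel_variance16x16_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset,
                                    dst_ptr, dst_pixels_per_line, &sse);
    return sse;   /* MSE == SSE; no (Sum^2 >> 8) subtraction */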
 unsigned int vp8_sub_pixel_variance16x8_mmx
 (
     const unsigned char  *src_ptr,
     int  src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     const unsigned char *dst_ptr,
     int dst_pixels_per_line,
rename from media/libvpx/vp8/common/x86/variance_sse2.c
rename to media/libvpx/vp8/common/x86/vp8_variance_sse2.c
--- a/media/libvpx/vp8/common/x86/variance_sse2.c
+++ b/media/libvpx/vp8/common/x86/vp8_variance_sse2.c
@@ -3,16 +3,17 @@
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include "./vp8_rtcd.h"
 #include "vpx_config.h"
 #include "vp8/common/variance.h"
 #include "vpx_ports/mem.h"
 #include "vp8/common/x86/filter_x86.h"
 
 extern void filter_block1d_h6_mmx(const unsigned char *src_ptr, unsigned short *output_ptr, unsigned int src_pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *filter);
 extern void filter_block1d_v6_mmx(const short *src_ptr, unsigned char *output_ptr, unsigned int pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *filter);
 extern void filter_block1d8_h6_sse2(const unsigned char *src_ptr, unsigned short *output_ptr, unsigned int src_pixels_per_line, unsigned int pixel_step, unsigned int output_height, unsigned int output_width, short *filter);
@@ -25,48 +26,16 @@ extern void vp8_filter_block2d_bil4x4_va
     const unsigned char *src_ptr,
     int src_pixels_per_line,
     const short *HFilter,
     const short *VFilter,
     int *sum,
     unsigned int *sumsquared
 );
 
-extern unsigned int vp8_get4x4var_mmx
-(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *SSE,
-    int *Sum
-);
-
-unsigned int vp8_get_mb_ss_sse2
-(
-    const short *src_ptr
-);
-unsigned int vp8_get16x16var_sse2
-(
-    const unsigned char *src_ptr,
-    int source_stride,
-    const unsigned char *ref_ptr,
-    int recon_stride,
-    unsigned int *SSE,
-    int *Sum
-);
-unsigned int vp8_get8x8var_sse2
-(
-    const unsigned char *src_ptr,
-    int source_stride,
-    const unsigned char *ref_ptr,
-    int recon_stride,
-    unsigned int *SSE,
-    int *Sum
-);
 void vp8_filter_block2d_bil_var_sse2
 (
     const unsigned char *ref_ptr,
     int ref_pixels_per_line,
     const unsigned char *src_ptr,
     int src_pixels_per_line,
     unsigned int Height,
     int  xoffset,
@@ -130,125 +99,16 @@ void vp8_half_vert_variance16x_h_sse2
     int ref_pixels_per_line,
     const unsigned char *src_ptr,
     int src_pixels_per_line,
     unsigned int Height,
     int *sum,
     unsigned int *sumsquared
 );
 
-unsigned int vp8_variance4x4_wmt(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int var;
-    int avg;
-
-    vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 4));
-
-}
-
-unsigned int vp8_variance8x8_wmt
-(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int var;
-    int avg;
-
-    vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg) ;
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 6));
-
-}
-
-
-unsigned int vp8_variance16x16_wmt
-(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int sse0;
-    int sum0;
-
-
-    vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
-    *sse = sse0;
-    return (sse0 - (((unsigned int)sum0 * sum0) >> 8));
-}
-unsigned int vp8_mse16x16_wmt(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-
-    unsigned int sse0;
-    int sum0;
-    vp8_get16x16var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
-    *sse = sse0;
-    return sse0;
-
-}
-
-
-unsigned int vp8_variance16x8_wmt
-(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int sse0, sse1, var;
-    int sum0, sum1, avg;
-
-    vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
-    vp8_get8x8var_sse2(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
-
-    var = sse0 + sse1;
-    avg = sum0 + sum1;
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 7));
-
-}
-
-unsigned int vp8_variance8x16_wmt
-(
-    const unsigned char *src_ptr,
-    int  source_stride,
-    const unsigned char *ref_ptr,
-    int  recon_stride,
-    unsigned int *sse)
-{
-    unsigned int sse0, sse1, var;
-    int sum0, sum1, avg;
-
-    vp8_get8x8var_sse2(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0) ;
-    vp8_get8x8var_sse2(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1) ;
-
-    var = sse0 + sse1;
-    avg = sum0 + sum1;
-    *sse = var;
-    return (var - (((unsigned int)avg * avg) >> 7));
-
-}
-
 unsigned int vp8_sub_pixel_variance4x4_wmt
 (
     const unsigned char  *src_ptr,
     int  src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     const unsigned char *dst_ptr,
     int dst_pixels_per_line,
@@ -373,30 +233,16 @@ unsigned int vp8_sub_pixel_variance16x16
         xsum0 += xsum1;
         xxsum0 += xxsum1;
     }
 
     *sse = xxsum0;
     return (xxsum0 - (((unsigned int)xsum0 * xsum0) >> 8));
 }
 
-unsigned int vp8_sub_pixel_mse16x16_wmt(
-    const unsigned char  *src_ptr,
-    int  src_pixels_per_line,
-    int  xoffset,
-    int  yoffset,
-    const unsigned char *dst_ptr,
-    int dst_pixels_per_line,
-    unsigned int *sse
-)
-{
-    vp8_sub_pixel_variance16x16_wmt(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
-    return *sse;
-}
-
 unsigned int vp8_sub_pixel_variance16x8_wmt
 (
     const unsigned char  *src_ptr,
     int  src_pixels_per_line,
     int  xoffset,
     int  yoffset,
     const unsigned char *dst_ptr,
     int dst_pixels_per_line,
--- a/media/libvpx/vp8/decoder/decodeframe.c
+++ b/media/libvpx/vp8/decoder/decodeframe.c
@@ -137,26 +137,26 @@ static void decode_macroblock(VP8D_COMP 
         throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));
 
         if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual))
         {
             /* MB with corrupt residuals or corrupt mode/motion vectors.
              * Better to use the predictor as reconstruction.
              */
             pbi->frame_corrupt_residual = 1;
-            vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
+            memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
             vp8_conceal_corrupt_mb(xd);
 
 
             corruption_detected = 1;
 
             /* force idct to be skipped for B_PRED and use the
              * prediction only for reconstruction
              * */
-            vpx_memset(xd->eobs, 0, 25);
+            memset(xd->eobs, 0, 25);
         }
     }
 #endif
 
     /* do prediction */
     if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
     {
         vp8_build_intra_predictors_mbuv_s(xd,
@@ -179,17 +179,17 @@ static void decode_macroblock(VP8D_COMP 
         }
         else
         {
             short *DQC = xd->dequant_y1;
             int dst_stride = xd->dst.y_stride;
 
             /* clear out residual eob info */
             if(xd->mode_info_context->mbmi.mb_skip_coeff)
-                vpx_memset(xd->eobs, 0, 25);
+                memset(xd->eobs, 0, 25);
 
             intra_prediction_down_copy(xd, xd->recon_above[0] + 16);
 
             for (i = 0; i < 16; i++)
             {
                 BLOCKD *b = &xd->block[i];
                 unsigned char *dst = xd->dst.y_buffer + b->offset;
                 B_PREDICTION_MODE b_mode =
@@ -209,17 +209,17 @@ static void decode_macroblock(VP8D_COMP 
                     vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride);
                     }
                     else
                     {
                         vp8_dc_only_idct_add
                             (b->qcoeff[0] * DQC[0],
                                 dst, dst_stride,
                                 dst, dst_stride);
-                        vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
+                        memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
                     }
                 }
             }
         }
     }
     else
     {
         vp8_build_inter_predictors_mb(xd);
@@ -246,24 +246,24 @@ static void decode_macroblock(VP8D_COMP 
 
                 /* do 2nd order transform on the dc block */
                 if (xd->eobs[24] > 1)
                 {
                     vp8_dequantize_b(b, xd->dequant_y2);
 
                     vp8_short_inv_walsh4x4(&b->dqcoeff[0],
                         xd->qcoeff);
-                    vpx_memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
+                    memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0]));
                 }
                 else
                 {
                     b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
                     vp8_short_inv_walsh4x4_1(&b->dqcoeff[0],
                         xd->qcoeff);
-                    vpx_memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
+                    memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0]));
                 }
 
                 /* override the dc dequant constant in order to preserve the
                  * dc components
                  */
                 DQC = xd->dequant_y1_dc;
             }
 
@@ -318,45 +318,45 @@ static void yv12_extend_frame_top_c(YV12
     /***********/
     Border = ybf->border;
     plane_stride = ybf->y_stride;
     src_ptr1 = ybf->y_buffer - Border;
     dest_ptr1 = src_ptr1 - (Border * plane_stride);
 
     for (i = 0; i < (int)Border; i++)
     {
-        vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
+        memcpy(dest_ptr1, src_ptr1, plane_stride);
         dest_ptr1 += plane_stride;
     }
 
 
     /***********/
     /* U Plane */
     /***********/
     plane_stride = ybf->uv_stride;
     Border /= 2;
     src_ptr1 = ybf->u_buffer - Border;
     dest_ptr1 = src_ptr1 - (Border * plane_stride);
 
     for (i = 0; i < (int)(Border); i++)
     {
-        vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
+        memcpy(dest_ptr1, src_ptr1, plane_stride);
         dest_ptr1 += plane_stride;
     }
 
     /***********/
     /* V Plane */
     /***********/
 
     src_ptr1 = ybf->v_buffer - Border;
     dest_ptr1 = src_ptr1 - (Border * plane_stride);
 
     for (i = 0; i < (int)(Border); i++)
     {
-        vpx_memcpy(dest_ptr1, src_ptr1, plane_stride);
+        memcpy(dest_ptr1, src_ptr1, plane_stride);
         dest_ptr1 += plane_stride;
     }
 }
 
 static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf)
 {
     int i;
     unsigned char *src_ptr1, *src_ptr2;
@@ -374,17 +374,17 @@ static void yv12_extend_frame_bottom_c(Y
     plane_height = ybf->y_height;
 
     src_ptr1 = ybf->y_buffer - Border;
     src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
     dest_ptr2 = src_ptr2 + plane_stride;
 
     for (i = 0; i < (int)Border; i++)
     {
-        vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
+        memcpy(dest_ptr2, src_ptr2, plane_stride);
         dest_ptr2 += plane_stride;
     }
 
 
     /***********/
     /* U Plane */
     /***********/
     plane_stride = ybf->uv_stride;
@@ -392,31 +392,31 @@ static void yv12_extend_frame_bottom_c(Y
     Border /= 2;
 
     src_ptr1 = ybf->u_buffer - Border;
     src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
     dest_ptr2 = src_ptr2 + plane_stride;
 
     for (i = 0; i < (int)(Border); i++)
     {
-        vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
+        memcpy(dest_ptr2, src_ptr2, plane_stride);
         dest_ptr2 += plane_stride;
     }
 
     /***********/
     /* V Plane */
     /***********/
 
     src_ptr1 = ybf->v_buffer - Border;
     src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride;
     dest_ptr2 = src_ptr2 + plane_stride;
 
     for (i = 0; i < (int)(Border); i++)
     {
-        vpx_memcpy(dest_ptr2, src_ptr2, plane_stride);
+        memcpy(dest_ptr2, src_ptr2, plane_stride);
         dest_ptr2 += plane_stride;
     }
 }
 
 static void yv12_extend_frame_left_right_c(YV12_BUFFER_CONFIG *ybf,
                                            unsigned char *y_src,
                                            unsigned char *u_src,
                                            unsigned char *v_src)
@@ -441,18 +441,18 @@ static void yv12_extend_frame_left_right
     /* copy the left and right most columns out */
     src_ptr1 = y_src;
     src_ptr2 = src_ptr1 + plane_width - 1;
     dest_ptr1 = src_ptr1 - Border;
     dest_ptr2 = src_ptr2 + 1;
 
     for (i = 0; i < plane_height; i++)
     {
-        vpx_memset(dest_ptr1, src_ptr1[0], Border);
-        vpx_memset(dest_ptr2, src_ptr2[0], Border);
+        memset(dest_ptr1, src_ptr1[0], Border);
+        memset(dest_ptr2, src_ptr2[0], Border);
         src_ptr1  += plane_stride;
         src_ptr2  += plane_stride;
         dest_ptr1 += plane_stride;
         dest_ptr2 += plane_stride;
     }
 
     /***********/
     /* U Plane */
@@ -465,18 +465,18 @@ static void yv12_extend_frame_left_right
     /* copy the left and right most columns out */
     src_ptr1 = u_src;
     src_ptr2 = src_ptr1 + plane_width - 1;
     dest_ptr1 = src_ptr1 - Border;
     dest_ptr2 = src_ptr2 + 1;
 
     for (i = 0; i < plane_height; i++)
     {
-        vpx_memset(dest_ptr1, src_ptr1[0], Border);
-        vpx_memset(dest_ptr2, src_ptr2[0], Border);
+        memset(dest_ptr1, src_ptr1[0], Border);
+        memset(dest_ptr2, src_ptr2[0], Border);
         src_ptr1  += plane_stride;
         src_ptr2  += plane_stride;
         dest_ptr1 += plane_stride;
         dest_ptr2 += plane_stride;
     }
 
     /***********/
     /* V Plane */
@@ -485,18 +485,18 @@ static void yv12_extend_frame_left_right
     /* copy the left and right most columns out */
     src_ptr1 = v_src;
     src_ptr2 = src_ptr1 + plane_width - 1;
     dest_ptr1 = src_ptr1 - Border;
     dest_ptr2 = src_ptr2 + 1;
 
     for (i = 0; i < plane_height; i++)
     {
-        vpx_memset(dest_ptr1, src_ptr1[0], Border);
-        vpx_memset(dest_ptr2, src_ptr2[0], Border);
+        memset(dest_ptr1, src_ptr1[0], Border);
+        memset(dest_ptr2, src_ptr2[0], Border);
         src_ptr1  += plane_stride;
         src_ptr2  += plane_stride;
         dest_ptr1 += plane_stride;
         dest_ptr2 += plane_stride;
     }
 }
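Throughout the decoder changes in this patch, libvpx's vpx_memset/vpx_memcpy wrappers give way to the libc calls. Assuming the wrappers were the usual thin passthroughs (their historical definitions in vpx_mem), the substitution is behavior-neutral; a sketch of what is being dropped:

    #include <stddef.h>
    #include <string.h>

    /* presumed historical wrapper; the actual definition lived in vpx_mem */
    void *vpx_memset(void *dest, int val, size_t length)
    {
        return memset(dest, val, length);
    }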
 
 static void decode_mb_rows(VP8D_COMP *pbi)
@@ -563,17 +563,17 @@ static void decode_mb_rows(VP8D_COMP *pb
                 ibc = 0;
         }
 
         recon_yoffset = mb_row * recon_y_stride * 16;
         recon_uvoffset = mb_row * recon_uv_stride * 8;
 
         /* reset contexts */
         xd->above_context = pc->above_context;
-        vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
+        memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
 
         xd->left_available = 0;
 
         xd->mb_to_top_edge = -((mb_row * 16) << 3);
         xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
 
         xd->recon_above[0] = dst_buffer[0] + recon_yoffset;
         xd->recon_above[1] = dst_buffer[1] + recon_uvoffset;
@@ -913,29 +913,29 @@ static void setup_token_decoder(VP8D_COM
 static void init_frame(VP8D_COMP *pbi)
 {
     VP8_COMMON *const pc = & pbi->common;
     MACROBLOCKD *const xd  = & pbi->mb;
 
     if (pc->frame_type == KEY_FRAME)
     {
         /* Various keyframe initializations */
-        vpx_memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
+        memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
 
         vp8_init_mbmode_probs(pc);
 
         vp8_default_coef_probs(pc);
 
         /* reset the segment feature data to 0 with delta coding (Default state). */
-        vpx_memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
+        memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
         xd->mb_segement_abs_delta = SEGMENT_DELTADATA;
 
         /* reset the mode ref deltasa for loop filter */
-        vpx_memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
-        vpx_memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
+        memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
+        memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
 
         /* All buffers are implicitly updated on key frames. */
         pc->refresh_golden_frame = 1;
         pc->refresh_alt_ref_frame = 1;
         pc->copy_buffer_to_gf = 0;
         pc->copy_buffer_to_arf = 0;
 
         /* Note that Golden and Altref modes cannot be used on a key frame so
@@ -1064,22 +1064,21 @@ int vp8_decode_frame(VP8D_COMP *pbi)
             if (!pbi->ec_active || data + 6 < data_end)
             {
                 pc->Width = (clear[3] | (clear[4] << 8)) & 0x3fff;
                 pc->horiz_scale = clear[4] >> 6;
                 pc->Height = (clear[5] | (clear[6] << 8)) & 0x3fff;
                 pc->vert_scale = clear[6] >> 6;
             }
             data += 7;
-            clear += 7;
         }
         else
         {
-          vpx_memcpy(&xd->pre, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
-          vpx_memcpy(&xd->dst, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
+          memcpy(&xd->pre, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
+          memcpy(&xd->dst, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG));
         }
     }
     if ((!pbi->decoded_key_frame && pc->frame_type != KEY_FRAME))
     {
         return -1;
     }
 
     init_frame(pbi);
@@ -1101,17 +1100,17 @@ int vp8_decode_frame(VP8D_COMP *pbi)
         /* Signal whether or not the segmentation map is being explicitly updated this frame. */
         xd->update_mb_segmentation_map = (unsigned char)vp8_read_bit(bc);
         xd->update_mb_segmentation_data = (unsigned char)vp8_read_bit(bc);
 
         if (xd->update_mb_segmentation_data)
         {
             xd->mb_segement_abs_delta = (unsigned char)vp8_read_bit(bc);
 
-            vpx_memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
+            memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data));
 
             /* For each segmentation feature (Quant and loop filter level) */
             for (i = 0; i < MB_LVL_MAX; i++)
             {
                 for (j = 0; j < MAX_MB_SEGMENTS; j++)
                 {
                     /* Frame level data */
                     if (vp8_read_bit(bc))
@@ -1125,17 +1124,17 @@ int vp8_decode_frame(VP8D_COMP *pbi)
                         xd->segment_feature_data[i][j] = 0;
                 }
             }
         }
 
         if (xd->update_mb_segmentation_map)
         {
             /* Which macro block level features are enabled */
-            vpx_memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
+            memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs));
 
             /* Read the probs used to decode the segment id for each macro block. */
             for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
             {
                 /* If not explicitly set value is defaulted to 255 by memset above */
                 if (vp8_read_bit(bc))
                     xd->mb_segment_tree_probs[i] = (vp8_prob)vp8_read_literal(bc, 8);
             }
@@ -1274,17 +1273,17 @@ int vp8_decode_frame(VP8D_COMP *pbi)
     /* Assume we shouldn't refresh the probabilities if the bit is
      * missing */
     xd->corrupted |= vp8dx_bool_error(bc);
     if (pbi->ec_active && xd->corrupted)
         pc->refresh_entropy_probs = 0;
 #endif
     if (pc->refresh_entropy_probs == 0)
     {
-        vpx_memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc));
+        memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc));
     }
 
     pc->refresh_last_frame = pc->frame_type == KEY_FRAME  ||  vp8_read_bit(bc);
 
 #if CONFIG_ERROR_CONCEALMENT
     /* Assume we should refresh the last frame if the bit is missing */
     xd->corrupted |= vp8dx_bool_error(bc);
     if (pbi->ec_active && xd->corrupted)
@@ -1323,31 +1322,31 @@ int vp8_decode_frame(VP8D_COMP *pbi)
                         }
                         if (k > 0 && *p != pc->fc.coef_probs[i][j][k-1][l])
                             pbi->independent_partitions = 0;
 
                     }
     }
 
     /* clear out the coeff buffer */
-    vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
+    memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
 
     vp8_decode_mode_mvs(pbi);
 
 #if CONFIG_ERROR_CONCEALMENT
     if (pbi->ec_active &&
             pbi->mvs_corrupt_from_mb < (unsigned int)pc->mb_cols * pc->mb_rows)
     {
         /* Motion vectors are missing in this frame. We will try to estimate
          * them and then continue decoding the frame as usual */
         vp8_estimate_missing_mvs(pbi);
     }
 #endif
 
-    vpx_memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
+    memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols);
     pbi->frame_corrupt_residual = 0;
 
 #if CONFIG_MULTITHREAD
     if (pbi->b_multithreaded_rd && pc->multi_token_partition != ONE_PARTITION)
     {
         unsigned int thread;
         vp8mt_decode_mb_rows(pbi, xd);
         vp8_yv12_extend_frame_borders(yv12_fb_new);
@@ -1376,17 +1375,17 @@ int vp8_decode_frame(VP8D_COMP *pbi)
             vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME,
                                "A stream must start with a complete key frame");
     }
 
     /* vpx_log("Decoder: Frame Decoded, Size Roughly:%d bytes  \n",bc->pos+pbi->bc2.pos); */
 
     if (pc->refresh_entropy_probs == 0)
     {
-        vpx_memcpy(&pc->fc, &pc->lfc, sizeof(pc->fc));
+        memcpy(&pc->fc, &pc->lfc, sizeof(pc->fc));
         pbi->independent_partitions = prev_independent_partitions;
     }
 
 #ifdef PACKET_TESTING
     {
         FILE *f = fopen("decompressor.VP8", "ab");
         unsigned int size = pbi->bc2.pos + pbi->bc.pos + 8;
         fwrite((void *) &size, 4, 1, f);
--- a/media/libvpx/vp8/decoder/detokenize.c
+++ b/media/libvpx/vp8/decoder/detokenize.c
@@ -15,18 +15,18 @@
 #include "vpx_ports/mem.h"
 #include "detokenize.h"
 
 void vp8_reset_mb_tokens_context(MACROBLOCKD *x)
 {
     ENTROPY_CONTEXT *a_ctx = ((ENTROPY_CONTEXT *)x->above_context);
     ENTROPY_CONTEXT *l_ctx = ((ENTROPY_CONTEXT *)x->left_context);
 
-    vpx_memset(a_ctx, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1);
-    vpx_memset(l_ctx, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1);