Bug 608066 - Update libvpx to v0.9.5. r=chris,khuey a=b-f
author Timothy B. Terriberry <tterribe@vt.edu>
Mon, 08 Nov 2010 09:47:17 +0200
changeset 57090 2ef1a570e14eedbb0b6e9595ace7a78d77ac2298
parent 57089 a247e16e0c7ffe6a0fcc114927b714872a845462
child 57091 af89c96d0939db005d103a6b86cd6efc8590be4d
push id 1
push user root
push date Mon, 20 Oct 2014 17:29:22 +0000
reviewers chris, khuey, b-f
bugs 608066
milestone 2.0b8pre
config/autoconf.mk.in
configure.in
media/libvpx/Makefile.in
media/libvpx/README_MOZILLA
media/libvpx/build/make/ads2gas.pl
media/libvpx/build/make/obj_int_extract.c
media/libvpx/frame_buf_ref.patch
media/libvpx/reduce-warnings-1.patch
media/libvpx/solaris.patch
media/libvpx/subpixel-qword.patch
media/libvpx/update.sh
media/libvpx/vp8/common/alloccommon.c
media/libvpx/vp8/common/alloccommon.h
media/libvpx/vp8/common/arm/arm_systemdependent.c
media/libvpx/vp8/common/arm/armv6/bilinearfilter_v6.asm
media/libvpx/vp8/common/arm/armv6/copymem16x16_v6.asm
media/libvpx/vp8/common/arm/armv6/copymem8x4_v6.asm
media/libvpx/vp8/common/arm/armv6/copymem8x8_v6.asm
media/libvpx/vp8/common/arm/armv6/dc_only_idct_add_v6.asm
media/libvpx/vp8/common/arm/armv6/filter_v6.asm
media/libvpx/vp8/common/arm/armv6/idct_v6.asm
media/libvpx/vp8/common/arm/armv6/iwalsh_v6.asm
media/libvpx/vp8/common/arm/armv6/loopfilter_v6.asm
media/libvpx/vp8/common/arm/armv6/recon_v6.asm
media/libvpx/vp8/common/arm/armv6/simpleloopfilter_v6.asm
media/libvpx/vp8/common/arm/armv6/sixtappredict8x4_v6.asm
media/libvpx/vp8/common/arm/bilinearfilter_arm.c
media/libvpx/vp8/common/arm/filter_arm.c
media/libvpx/vp8/common/arm/idct_arm.h
media/libvpx/vp8/common/arm/loopfilter_arm.c
media/libvpx/vp8/common/arm/loopfilter_arm.h
media/libvpx/vp8/common/arm/neon/bilinearpredict16x16_neon.asm
media/libvpx/vp8/common/arm/neon/bilinearpredict4x4_neon.asm
media/libvpx/vp8/common/arm/neon/bilinearpredict8x4_neon.asm
media/libvpx/vp8/common/arm/neon/bilinearpredict8x8_neon.asm
media/libvpx/vp8/common/arm/neon/buildintrapredictorsmby_neon.asm
media/libvpx/vp8/common/arm/neon/copymem16x16_neon.asm
media/libvpx/vp8/common/arm/neon/copymem8x4_neon.asm
media/libvpx/vp8/common/arm/neon/copymem8x8_neon.asm
media/libvpx/vp8/common/arm/neon/dc_only_idct_add_neon.asm
media/libvpx/vp8/common/arm/neon/iwalsh_neon.asm
media/libvpx/vp8/common/arm/neon/loopfilter_neon.asm
media/libvpx/vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.asm
media/libvpx/vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.asm
media/libvpx/vp8/common/arm/neon/mbloopfilter_neon.asm
media/libvpx/vp8/common/arm/neon/recon16x16mb_neon.asm
media/libvpx/vp8/common/arm/neon/recon2b_neon.asm
media/libvpx/vp8/common/arm/neon/recon4b_neon.asm
media/libvpx/vp8/common/arm/neon/recon_neon.c
media/libvpx/vp8/common/arm/neon/reconb_neon.asm
media/libvpx/vp8/common/arm/neon/save_neon_reg.asm
media/libvpx/vp8/common/arm/neon/shortidct4x4llm_1_neon.asm
media/libvpx/vp8/common/arm/neon/shortidct4x4llm_neon.asm
media/libvpx/vp8/common/arm/neon/sixtappredict16x16_neon.asm
media/libvpx/vp8/common/arm/neon/sixtappredict4x4_neon.asm
media/libvpx/vp8/common/arm/neon/sixtappredict8x4_neon.asm
media/libvpx/vp8/common/arm/neon/sixtappredict8x8_neon.asm
media/libvpx/vp8/common/arm/recon_arm.h
media/libvpx/vp8/common/arm/reconintra_arm.c
media/libvpx/vp8/common/arm/subpixel_arm.h
media/libvpx/vp8/common/arm/vpx_asm_offsets.c
media/libvpx/vp8/common/blockd.c
media/libvpx/vp8/common/blockd.h
media/libvpx/vp8/common/coefupdateprobs.h
media/libvpx/vp8/common/common.h
media/libvpx/vp8/common/common_types.h
media/libvpx/vp8/common/debugmodes.c
media/libvpx/vp8/common/defaultcoefcounts.h
media/libvpx/vp8/common/entropy.c
media/libvpx/vp8/common/entropy.h
media/libvpx/vp8/common/entropymode.c
media/libvpx/vp8/common/entropymode.h
media/libvpx/vp8/common/entropymv.c
media/libvpx/vp8/common/entropymv.h
media/libvpx/vp8/common/extend.c
media/libvpx/vp8/common/extend.h
media/libvpx/vp8/common/filter_c.c
media/libvpx/vp8/common/findnearmv.c
media/libvpx/vp8/common/findnearmv.h
media/libvpx/vp8/common/g_common.h
media/libvpx/vp8/common/generic/systemdependent.c
media/libvpx/vp8/common/header.h
media/libvpx/vp8/common/idct.h
media/libvpx/vp8/common/idctllm.c
media/libvpx/vp8/common/invtrans.c
media/libvpx/vp8/common/invtrans.h
media/libvpx/vp8/common/loopfilter.c
media/libvpx/vp8/common/loopfilter.h
media/libvpx/vp8/common/loopfilter_filters.c
media/libvpx/vp8/common/mbpitch.c
media/libvpx/vp8/common/modecont.c
media/libvpx/vp8/common/modecont.h
media/libvpx/vp8/common/modecontext.c
media/libvpx/vp8/common/mv.h
media/libvpx/vp8/common/onyx.h
media/libvpx/vp8/common/onyxc_int.h
media/libvpx/vp8/common/onyxd.h
media/libvpx/vp8/common/postproc.c
media/libvpx/vp8/common/postproc.h
media/libvpx/vp8/common/ppflags.h
media/libvpx/vp8/common/pragmas.h
media/libvpx/vp8/common/predictdc.c
media/libvpx/vp8/common/predictdc.h
media/libvpx/vp8/common/preproc.h
media/libvpx/vp8/common/quant_common.c
media/libvpx/vp8/common/quant_common.h
media/libvpx/vp8/common/recon.c
media/libvpx/vp8/common/recon.h
media/libvpx/vp8/common/reconinter.c
media/libvpx/vp8/common/reconinter.h
media/libvpx/vp8/common/reconintra.c
media/libvpx/vp8/common/reconintra.h
media/libvpx/vp8/common/reconintra4x4.c
media/libvpx/vp8/common/reconintra4x4.h
media/libvpx/vp8/common/segmentation_common.h
media/libvpx/vp8/common/setupintrarecon.c
media/libvpx/vp8/common/setupintrarecon.h
media/libvpx/vp8/common/subpixel.h
media/libvpx/vp8/common/swapyv12buffer.c
media/libvpx/vp8/common/swapyv12buffer.h
media/libvpx/vp8/common/systemdependent.h
media/libvpx/vp8/common/textblit.c
media/libvpx/vp8/common/threading.h
media/libvpx/vp8/common/treecoder.c
media/libvpx/vp8/common/treecoder.h
media/libvpx/vp8/common/type_aliases.h
media/libvpx/vp8/common/vpxerrors.h
media/libvpx/vp8/common/x86/idct_x86.h
media/libvpx/vp8/common/x86/idctllm_mmx.asm
media/libvpx/vp8/common/x86/idctllm_sse2.asm
media/libvpx/vp8/common/x86/iwalsh_mmx.asm
media/libvpx/vp8/common/x86/iwalsh_sse2.asm
media/libvpx/vp8/common/x86/loopfilter_mmx.asm
media/libvpx/vp8/common/x86/loopfilter_sse2.asm
media/libvpx/vp8/common/x86/loopfilter_x86.c
media/libvpx/vp8/common/x86/loopfilter_x86.h
media/libvpx/vp8/common/x86/postproc_mmx.asm
media/libvpx/vp8/common/x86/postproc_sse2.asm
media/libvpx/vp8/common/x86/postproc_x86.h
media/libvpx/vp8/common/x86/recon_mmx.asm
media/libvpx/vp8/common/x86/recon_sse2.asm
media/libvpx/vp8/common/x86/recon_x86.h
media/libvpx/vp8/common/x86/subpixel_mmx.asm
media/libvpx/vp8/common/x86/subpixel_sse2.asm
media/libvpx/vp8/common/x86/subpixel_ssse3.asm
media/libvpx/vp8/common/x86/subpixel_x86.h
media/libvpx/vp8/common/x86/vp8_asm_stubs.c
media/libvpx/vp8/common/x86/x86_systemdependent.c
media/libvpx/vp8/decoder/arm/arm_dsystemdependent.c
media/libvpx/vp8/decoder/arm/armv6/dequant_dc_idct_v6.asm
media/libvpx/vp8/decoder/arm/armv6/dequant_idct_v6.asm
media/libvpx/vp8/decoder/arm/armv6/dequantize_v6.asm
media/libvpx/vp8/decoder/arm/armv6/idct_blk_v6.c
media/libvpx/vp8/decoder/arm/dboolhuff_arm.h
media/libvpx/vp8/decoder/arm/dequantize_arm.c
media/libvpx/vp8/decoder/arm/dequantize_arm.h
media/libvpx/vp8/decoder/arm/detokenize.asm
media/libvpx/vp8/decoder/arm/detokenize_arm.h
media/libvpx/vp8/decoder/arm/neon/dequant_idct_neon.asm
media/libvpx/vp8/decoder/arm/neon/dequantizeb_neon.asm
media/libvpx/vp8/decoder/arm/neon/idct_blk_neon.c
media/libvpx/vp8/decoder/arm/neon/idct_dequant_0_2x_neon.asm
media/libvpx/vp8/decoder/arm/neon/idct_dequant_dc_0_2x_neon.asm
media/libvpx/vp8/decoder/arm/neon/idct_dequant_dc_full_2x_neon.asm
media/libvpx/vp8/decoder/arm/neon/idct_dequant_full_2x_neon.asm
media/libvpx/vp8/decoder/dboolhuff.c
media/libvpx/vp8/decoder/dboolhuff.h
media/libvpx/vp8/decoder/decodemv.c
media/libvpx/vp8/decoder/decodemv.h
media/libvpx/vp8/decoder/decoderthreading.h
media/libvpx/vp8/decoder/decodframe.c
media/libvpx/vp8/decoder/demode.c
media/libvpx/vp8/decoder/demode.h
media/libvpx/vp8/decoder/dequantize.c
media/libvpx/vp8/decoder/dequantize.h
media/libvpx/vp8/decoder/detokenize.c
media/libvpx/vp8/decoder/detokenize.h
media/libvpx/vp8/decoder/generic/dsystemdependent.c
media/libvpx/vp8/decoder/idct_blk.c
media/libvpx/vp8/decoder/onyxd_if.c
media/libvpx/vp8/decoder/onyxd_int.h
media/libvpx/vp8/decoder/reconintra_mt.c
media/libvpx/vp8/decoder/reconintra_mt.h
media/libvpx/vp8/decoder/threading.c
media/libvpx/vp8/decoder/treereader.h
media/libvpx/vp8/decoder/x86/dequantize_mmx.asm
media/libvpx/vp8/decoder/x86/dequantize_x86.h
media/libvpx/vp8/decoder/x86/idct_blk_mmx.c
media/libvpx/vp8/decoder/x86/idct_blk_sse2.c
media/libvpx/vp8/decoder/x86/x86_dsystemdependent.c
media/libvpx/vp8/vp8_dx_iface.c
media/libvpx/vpx/internal/vpx_codec_internal.h
media/libvpx/vpx/src/vpx_codec.c
media/libvpx/vpx/src/vpx_decoder.c
media/libvpx/vpx/src/vpx_decoder_compat.c
media/libvpx/vpx/src/vpx_encoder.c
media/libvpx/vpx/src/vpx_image.c
media/libvpx/vpx/vp8.h
media/libvpx/vpx/vp8cx.h
media/libvpx/vpx/vp8dx.h
media/libvpx/vpx/vp8e.h
media/libvpx/vpx/vpx_codec.h
media/libvpx/vpx/vpx_codec_impl_bottom.h
media/libvpx/vpx/vpx_codec_impl_top.h
media/libvpx/vpx/vpx_decoder.h
media/libvpx/vpx/vpx_decoder_compat.h
media/libvpx/vpx/vpx_encoder.h
media/libvpx/vpx/vpx_image.h
media/libvpx/vpx/vpx_integer.h
media/libvpx/vpx_config.h
media/libvpx/vpx_config_arm-linux-gcc.c
media/libvpx/vpx_config_arm-linux-gcc.h
media/libvpx/vpx_config_c.c
media/libvpx/vpx_config_generic-gnu.c
media/libvpx/vpx_config_generic-gnu.h
media/libvpx/vpx_config_x86-darwin9-gcc.h
media/libvpx/vpx_config_x86-linux-gcc.h
media/libvpx/vpx_config_x86-win32-vs8.h
media/libvpx/vpx_config_x86_64-darwin9-gcc.h
media/libvpx/vpx_config_x86_64-linux-gcc.h
media/libvpx/vpx_mem/include/vpx_mem_intrnl.h
media/libvpx/vpx_mem/vpx_mem.c
media/libvpx/vpx_mem/vpx_mem.h
media/libvpx/vpx_ports/arm.h
media/libvpx/vpx_ports/arm_cpudetect.c
media/libvpx/vpx_ports/config.h
media/libvpx/vpx_ports/emms.asm
media/libvpx/vpx_ports/mem.h
media/libvpx/vpx_ports/vpx_timer.h
media/libvpx/vpx_ports/x86.h
media/libvpx/vpx_ports/x86_abi_support.asm
media/libvpx/vpx_scale/generic/gen_scalers.c
media/libvpx/vpx_scale/generic/scalesystemdependant.c
media/libvpx/vpx_scale/generic/vpxscale.c
media/libvpx/vpx_scale/generic/yv12config.c
media/libvpx/vpx_scale/generic/yv12extend.c
media/libvpx/vpx_scale/scale_mode.h
media/libvpx/vpx_scale/vpxscale.h
media/libvpx/vpx_scale/yv12config.h
media/libvpx/vpx_scale/yv12extend.h
--- a/config/autoconf.mk.in
+++ b/config/autoconf.mk.in
@@ -160,17 +160,21 @@ MOZ_RAW = @MOZ_RAW@
 MOZ_SYDNEYAUDIO = @MOZ_SYDNEYAUDIO@
 MOZ_WAVE = @MOZ_WAVE@
 MOZ_MEDIA = @MOZ_MEDIA@
 MOZ_VORBIS = @MOZ_VORBIS@
 MOZ_TREMOR = @MOZ_TREMOR@
 MOZ_WEBM = @MOZ_WEBM@
 VPX_AS = @VPX_AS@
 VPX_ASFLAGS = @VPX_ASFLAGS@
+VPX_DASH_C_FLAG = @VPX_DASH_C_FLAG@
+VPX_AS_CONVERSION = @VPX_AS_CONVERSION@
+VPX_ASM_SUFFIX = @VPX_ASM_SUFFIX@
 VPX_X86_ASM = @VPX_X86_ASM@
+VPX_ARM_ASM = @VPX_ARM_ASM@
 NS_PRINTING = @NS_PRINTING@
 MOZ_CRASHREPORTER = @MOZ_CRASHREPORTER@
 MOZ_HELP_VIEWER = @MOZ_HELP_VIEWER@
 MOC= @MOC@
 MOZ_NSS_PATCH = @MOZ_NSS_PATCH@
 MOZ_WEBGL = @MOZ_WEBGL@
 
 MOZ_JAVAXPCOM = @MOZ_JAVAXPCOM@
--- a/configure.in
+++ b/configure.in
@@ -4953,17 +4953,21 @@ MOZ_RAW=
 MOZ_SYDNEYAUDIO=
 MOZ_VORBIS=
 MOZ_TREMOR=
 MOZ_WAVE=1
 MOZ_MEDIA=
 MOZ_WEBM=1
 VPX_AS=
 VPX_ASFLAGS=
+VPX_AS_DASH_C_FLAG=
+VPX_AS_CONVERSION=
+VPX_ASM_SUFFIX=
 VPX_X86_ASM=
+VPX_ARM_ASM=
 MOZ_PANGO=1
 MOZ_PERMISSIONS=1
 MOZ_PLACES=1
 MOZ_PLAINTEXT_EDITOR_ONLY=
 MOZ_PLUGINS=1
 MOZ_PREF_EXTENSIONS=1
 MOZ_PROFILELOCKING=1
 MOZ_PSM=1
@@ -6040,18 +6044,20 @@ if test -n "$MOZ_WEBM" -a -z "$MOZ_NATIV
     ;;
     *)
         MOZ_VORBIS=1
     ;;
     esac
 
 
     dnl Detect if we can use an assembler to compile optimized assembly for libvpx.
-    dnl We currently require yasm on all platforms and require yasm 1.1.0 on Win32.
+    dnl We currently require yasm on all x86 platforms and require yasm 1.1.0 on Win32.
+    dnl We currently require gcc on all arm platforms.
     VPX_AS=$YASM
+    VPX_ASM_SUFFIX=asm
 
     dnl See if we have assembly on this platform.  
     case "$OS_ARCH:$CPU_ARCH" in
     Linux:x86)
       VPX_ASFLAGS="-f elf32 -rnasm -pnasm"
       VPX_X86_ASM=1
     ;;
     Linux:x86_64)
@@ -6088,24 +6094,37 @@ if test -n "$MOZ_WEBM" -a -z "$MOZ_NATIV
         elif test -n "$COMPILE_ENVIRONMENT" -a "$_YASM_MAJOR_VERSION" -lt "1" -o \( "$_YASM_MAJOR_VERSION" -eq "1" -a "$_YASM_MINOR_VERSION" -lt "1" \) ; then
           AC_MSG_ERROR([yasm 1.1 or greater is required to build libvpx on Win32, but you appear to have version $_YASM_MAJOR_VERSION.$_YASM_MINOR_VERSION.  Upgrade to the newest version (included in MozillaBuild 1.5.1 and newer) or configure with --disable-webm (which disables the WebM video format). See https://developer.mozilla.org/en/YASM for more details.])
         else
           VPX_ASFLAGS="-f win32 -rnasm -pnasm -DPIC"
           VPX_X86_ASM=1
         fi
       fi
     ;;
+    *:arm*)
+      if test -n "$GNU_AS" ; then
+        VPX_AS=$AS
+        dnl These flags are a lie; they're just used to enable the requisite
+        dnl opcodes; actual arch detection is done at runtime.
+        VPX_ASFLAGS="-march=armv7-a -mfpu=neon"
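+        dnl (Runtime CPU detection lives in vpx_ports/arm_cpudetect.c, which
+        dnl media/libvpx/Makefile.in adds to CSRCS.)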
+        VPX_DASH_C_FLAG="-c"
+        VPX_AS_CONVERSION="$PERL ${srcdir}/media/libvpx/build/make/ads2gas.pl"
+        VPX_ASM_SUFFIX="$ASM_SUFFIX"
+        VPX_ARM_ASM=1
+      fi
     esac
 
     if test -n "$COMPILE_ENVIRONMENT" -a -n "$VPX_X86_ASM" -a -z "$VPX_AS"; then
       AC_MSG_ERROR([yasm is a required build tool for this architecture when webm is enabled. You may either install yasm or --disable-webm (which disables the WebM video format). See https://developer.mozilla.org/en/YASM for more details.])
     fi
 
     if test -n "$VPX_X86_ASM"; then
       AC_DEFINE(VPX_X86_ASM)
+    elif test -n "$VPX_ARM_ASM"; then
+      AC_DEFINE(VPX_ARM_ASM)
     else
       AC_MSG_WARN([No assembler or assembly support for libvpx. Using unoptimized C routines.])
     fi
 fi
 
 dnl ========================================================
 dnl = Disable Wave decoder support
 dnl ========================================================
@@ -9077,17 +9096,21 @@ AC_SUBST(MOZ_SYDNEYAUDIO)
 AC_SUBST(MOZ_WAVE)
 AC_SUBST(MOZ_VORBIS)
 AC_SUBST(MOZ_TREMOR)
 AC_SUBST(MOZ_WEBM)
 AC_SUBST(MOZ_OGG)
 AC_SUBST(MOZ_ALSA_LIBS)
 AC_SUBST(VPX_AS)
 AC_SUBST(VPX_ASFLAGS)
+AC_SUBST(VPX_DASH_C_FLAG)
+AC_SUBST(VPX_AS_CONVERSION)
+AC_SUBST(VPX_ASM_SUFFIX)
 AC_SUBST(VPX_X86_ASM)
+AC_SUBST(VPX_ARM_ASM)
 
 if test "$USING_HCC"; then
    CC='${topsrcdir}/build/hcc'
    CC="$CC '$_OLDCC'"
    CXX='${topsrcdir}/build/hcpp'
    CXX="$CXX '$_OLDCXX'"
    AC_SUBST(CC)
    AC_SUBST(CXX)
--- a/media/libvpx/Makefile.in
+++ b/media/libvpx/Makefile.in
@@ -48,46 +48,58 @@ MODULE = vpx
 LIBRARY_NAME = vpx
 FORCE_STATIC_LIB= 1
 
 LOCAL_INCLUDES += \
   -I. \
   -I$(topsrcdir)/media/libvpx \
   -I$(topsrcdir)/media/libvpx/vp8/ \
   -I$(topsrcdir)/media/libvpx/vp8/common/ \
+  -I$(topsrcdir)/media/libvpx/vp8/common/arm \
   -I$(topsrcdir)/media/libvpx/vp8/common/x86 \
   -I$(topsrcdir)/media/libvpx/vp8/decoder \
+  -I$(topsrcdir)/media/libvpx/vp8/decoder/arm \
   -I$(topsrcdir)/media/libvpx/vp8/decoder/x86 \
   -I$(topsrcdir)/media/libvpx/vpx_codec \
   -I$(topsrcdir)/media/libvpx/vpx_mem/ \
   -I$(topsrcdir)/media/libvpx/vpx_mem/include \
   -I$(topsrcdir)/media/libvpx/vpx_ports/ \
   -I$(topsrcdir)/media/libvpx/vpx_scale/ \
   $(NULL)
 
 VPATH += \
+  $(srcdir)/build/make \
   $(srcdir)/vpx \
   $(srcdir)/vpx/src \
   $(srcdir)/vpx_mem \
   $(srcdir)/vpx_mem/include \
   $(srcdir)/vpx_ports \
   $(srcdir)/vpx_scale \
+  $(srcdir)/vpx_scale/arm \
   $(srcdir)/vpx_scale/generic \
   $(srcdir)/vp8 \
   $(srcdir)/vp8/common \
+  $(srcdir)/vp8/common/arm \
+  $(srcdir)/vp8/common/arm/armv6 \
+  $(srcdir)/vp8/common/arm/neon \
   $(srcdir)/vp8/common/generic \
   $(srcdir)/vp8/common/x86 \
   $(srcdir)/vp8/decoder \
+  $(srcdir)/vp8/decoder/arm \
+  $(srcdir)/vp8/decoder/arm/armv6 \
+  $(srcdir)/vp8/decoder/arm/neon \
   $(srcdir)/vp8/decoder/generic \
   $(srcdir)/vp8/decoder/x86 \
   $(NULL)
 
-ASM_SUFFIX=asm
+# Set up the libvpx assembler config.
 AS=$(VPX_AS)
-ASFLAGS=$(VPX_ASFLAGS) -I$(topsrcdir)/media/libvpx/ -I$(topsrcdir)/media/libvpx/vpx_ports/
+ASFLAGS=$(VPX_ASFLAGS) -I. -I$(topsrcdir)/media/libvpx/ -I$(topsrcdir)/media/libvpx/vpx_ports/
+AS_DASH_C_FLAG=$(VPX_DASH_C_FLAG)
+ASM_SUFFIX=$(VPX_ASM_SUFFIX)
 
 EXPORTS_NAMESPACES = vpx
 
 EXPORTS_vpx = \
   vp8.h \
   vp8cx.h \
   vp8dx.h \
   vp8e.h \
@@ -99,16 +111,17 @@ EXPORTS_vpx = \
   vpx_encoder.h\
   vpx_image.h \
   vpx_mem_intrnl.h \
   vpx_mem.h \
   config.h \
   mem.h \
   vpx_integer.h \
   vpx_timer.h \
+  arm.h \
   x86.h \
   scale_mode.h \
   vpxscale.h \
   yv12config.h \
   yv12extend.h \
   $(NULL)
 
 CSRCS += \
@@ -140,19 +153,19 @@ CSRCS += \
   reconintra4x4.c \
   setupintrarecon.c \
   swapyv12buffer.c \
   textblit.c \
   treecoder.c \
   dboolhuff.c \
   decodemv.c \
   decodframe.c \
-  demode.c \
   dequantize.c \
   detokenize.c \
+  reconintra_mt.c \
   idct_blk.c \
   onyxd_if.c \
   threading.c \
   vp8_dx_iface.c \
   vpx_codec.c \
   vpx_decoder.c \
   vpx_decoder_compat.c \
   vpx_encoder.c \
@@ -163,16 +176,17 @@ CSRCS += \
   scalesystemdependant.c \
   yv12config.c \
   yv12extend.c \
   $(NULL)
 
 ifdef VPX_X86_ASM
 # Building on an x86 platform with a supported assembler, include
 # the optimized assembly in the build.
+
 CSRCS += \
   idct_blk_mmx.c \
   idct_blk_sse2.c \
   loopfilter_x86.c \
   vp8_asm_stubs.c \
   x86_systemdependent.c \
   x86_dsystemdependent.c \
   $(NULL)
@@ -191,17 +205,126 @@ ASFILES += \
   subpixel_mmx.asm \
   subpixel_sse2.asm \
   subpixel_ssse3.asm \
   dequantize_mmx.asm \
   emms.asm \
   $(NULL)
 
 endif
- 
+
+ifdef VPX_ARM_ASM
+# Building on an ARM platform with a supported assembler, include
+# the optimized assembly in the build.
+
+# The Android NDK doesn't pre-define anything to indicate the OS it's on, so
+# define it ourselves.
+ifeq ($(OS_TARGET),Android)
+DEFINES += -D__linux__
+endif
+
+CSRCS += \
+  arm_cpudetect.c \
+  arm_systemdependent.c \
+  bilinearfilter_arm.c \
+  filter_arm.c \
+  loopfilter_arm.c \
+  reconintra_arm.c \
+  arm_dsystemdependent.c \
+  dequantize_arm.c \
+  idct_blk_v6.c \
+  idct_blk_neon.c \
+  recon_neon.c \
+  $(NULL)
+
+VPX_ASFILES = \
+  detokenize.asm \
+  bilinearfilter_v6.asm \
+  copymem8x4_v6.asm \
+  copymem8x8_v6.asm \
+  copymem16x16_v6.asm \
+  dc_only_idct_add_v6.asm \
+  iwalsh_v6.asm \
+  filter_v6.asm \
+  idct_v6.asm \
+  loopfilter_v6.asm \
+  recon_v6.asm \
+  simpleloopfilter_v6.asm \
+  sixtappredict8x4_v6.asm \
+  bilinearpredict4x4_neon.asm \
+  bilinearpredict8x4_neon.asm \
+  bilinearpredict8x8_neon.asm \
+  bilinearpredict16x16_neon.asm \
+  copymem8x4_neon.asm \
+  copymem8x8_neon.asm \
+  copymem16x16_neon.asm \
+  dc_only_idct_add_neon.asm \
+  iwalsh_neon.asm \
+  loopfilter_neon.asm \
+  loopfiltersimplehorizontaledge_neon.asm \
+  loopfiltersimpleverticaledge_neon.asm \
+  mbloopfilter_neon.asm \
+  recon2b_neon.asm \
+  recon4b_neon.asm \
+  reconb_neon.asm \
+  shortidct4x4llm_1_neon.asm \
+  shortidct4x4llm_neon.asm \
+  sixtappredict4x4_neon.asm \
+  sixtappredict8x4_neon.asm \
+  sixtappredict8x8_neon.asm \
+  sixtappredict16x16_neon.asm \
+  recon16x16mb_neon.asm \
+  buildintrapredictorsmby_neon.asm \
+  save_neon_reg.asm \
+  dequant_dc_idct_v6.asm \
+  dequant_idct_v6.asm \
+  dequantize_v6.asm \
+  idct_dequant_dc_full_2x_neon.asm \
+  idct_dequant_dc_0_2x_neon.asm \
+  dequant_idct_neon.asm \
+  idct_dequant_full_2x_neon.asm \
+  idct_dequant_0_2x_neon.asm \
+  dequantizeb_neon.asm \
+  $(NULL)
+
+# The ARM asm needs to extract the offsets of various C struct members.
+# We need a program that runs on the host to pull them out of a .o file.
+HOST_CSRCS = obj_int_extract.c
+HOST_PROGRAM = host_obj_int_extract$(HOST_BIN_SUFFIX)
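+# (In rvds mode that program emits the offsets as armasm-style
+# "NAME EQU value" lines; see build/make/obj_int_extract.c below.)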
+
+ifdef VPX_AS_CONVERSION
+# The ARM asm is written in ARM RVCT syntax, but we actually build it with
+# gas using GNU syntax. Add some rules to perform the conversion.
+VPX_CONVERTED_ASFILES = $(addsuffix .$(ASM_SUFFIX), $(VPX_ASFILES))
+
+ASFILES += $(VPX_CONVERTED_ASFILES)
+GARBAGE += $(VPX_CONVERTED_ASFILES)
+
+%.asm.$(ASM_SUFFIX): %.asm
+	$(VPX_AS_CONVERSION) < $< > $@
+
+vpx_asm_offsets.asm: vpx_asm_offsets.$(OBJ_SUFFIX) $(HOST_PROGRAM)
+	./$(HOST_PROGRAM) rvds $< | $(VPX_AS_CONVERSION) > $@
+
+detokenize.asm.$(OBJ_SUFFIX): vpx_asm_offsets.asm
+
+else
+ASFILES += $(VPX_ASFILES)
+
+vpx_asm_offsets.asm: vpx_asm_offsets.$(OBJ_SUFFIX) $(HOST_PROGRAM)
+	./$(HOST_PROGRAM) rvds $< > $@
+
+detokenize.$(OBJ_SUFFIX): vpx_asm_offsets.asm
+
+endif
+
+GARBAGE += vpx_asm_offsets.$(OBJ_SUFFIX) vpx_asm_offsets.asm
+
+endif
+
 include $(topsrcdir)/config/rules.mk
 
 # Workaround a bug of Sun Studio (CR 6963410)
 ifdef SOLARIS_SUNPRO_CC
 ifeq (86,$(findstring 86,$(OS_TEST)))
 filter_c.o: filter_c.c Makefile.in
 	$(REPORT_BUILD)
 	@$(MAKE_DEPS_AUTO_CC)
--- a/media/libvpx/README_MOZILLA
+++ b/media/libvpx/README_MOZILLA
@@ -1,2 +1,2 @@
-Using libvpx pulled from git://review.webmproject.org/libvpx.git
-Commit ID: 0dd78af3e9b089eacc9af280adfb5549fc7ecdcd
+Using the v0.9.5 release pulled from
+http://webm.googlecode.com/files/libvpx-v0.9.5.zip
new file mode 100755
--- /dev/null
+++ b/media/libvpx/build/make/ads2gas.pl
@@ -0,0 +1,150 @@
+#!/usr/bin/perl
+##
+##  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+##
+##  Use of this source code is governed by a BSD-style license
+##  that can be found in the LICENSE file in the root of the source
+##  tree. An additional intellectual property rights grant can be found
+##  in the file PATENTS.  All contributing project authors may
+##  be found in the AUTHORS file in the root of the source tree.
+##
+
+
+# ads2gas.pl
+# Author: Eric Fung (efung (at) acm.org)
+#
+# Convert ARM Developer Suite 1.0.1 syntax assembly source to GNU as format
+#
+# Usage: cat inputfile | perl ads2gas.pl > outputfile
+#
+print "@ This file was created from a .asm file\n";
+print "@  using the ads2gas.pl script.\n";
+print "\t.equ DO1STROUNDING, 0\n";
+
+while (<STDIN>)
+{
+    # Comment character
+    s/;/@/g;
+
+    # Hexadecimal constants prefaced by 0x
+    s/#&/#0x/g;
+
+    # Convert :OR: to |
+    s/:OR:/ | /g;
+
+    # Convert :AND: to &
+    s/:AND:/ & /g;
+
+    # Convert :NOT: to ~
+    s/:NOT:/ ~ /g;
+
+    # Convert :SHL: to <<
+    s/:SHL:/ << /g;
+
+    # Convert :SHR: to >>
+    s/:SHR:/ >> /g;
+
+    # Convert ELSE to .else
+    s/ELSE/.else/g;
+
+    # Convert ENDIF to .endif
+    s/ENDIF/.endif/g;
+
+    # Convert ELSEIF to .elseif
+    s/ELSEIF/.elseif/g;
+
+    # Convert LTORG to .ltorg
+    s/LTORG/.ltorg/g;
+
+    # Convert IF :DEF: to .if
+    # gcc doesn't have the ability to do a conditional
+    # on a variable defined by IF :DEF: in
+    # armasm, so convert it to a normal .if and then
+    # make sure to define a value elsewhere
+    if (s/\bIF :DEF:\b/.if /g)
+    {
+        s/=/==/g;
+    }
+
+    # Convert IF to .if
+    if (s/\bIF\b/.if/g)
+    {
+        s/=+/==/g;
+    }
+
+    # Convert INCLUDE to .include "file"
+    s/INCLUDE(\s*)(.*)$/.include $1\"$2\"/;
+
+    # Code directive (ARM vs Thumb)
+    s/CODE([0-9][0-9])/.code $1/;
+
+    # No AREA required
+    s/^\s*AREA.*$/.text/;
+
+    # DCD to .word
+    # This one is for incoming symbols
+    s/DCD\s+\|(\w*)\|/.long $1/;
+
+    # DCW to .short
+    s/DCW\s+\|(\w*)\|/.short $1/;
+    s/DCW(.*)/.short $1/;
+
+    # Constants defined in scope
+    s/DCD(.*)/.long $1/;
+    s/DCB(.*)/.byte $1/;
+
+    # RN to .req
+    if (s/RN\s+([Rr]\d+|lr)/.req $1/)
+    {
+        print;
+        next;
+    }
+
+    # Make function visible to linker, and make additional symbol with
+    # prepended underscore
+    s/EXPORT\s+\|([\$\w]*)\|/.global $1 \n\t.type $1, function/;
+    s/IMPORT\s+\|([\$\w]*)\|/.global $1/;
+
+    # No vertical bars required; make additional symbol with prepended
+    # underscore
+    s/^\|(\$?\w+)\|/_$1\n\t$1:/g;
+
+    # Labels need trailing colon
+#   s/^(\w+)/$1:/ if !/EQU/;
+    # put the colon at the end of the line in the macro
+    s/^([a-zA-Z_0-9\$]+)/$1:/ if !/EQU/;
+
+    # Strip ALIGN
+    s/\sALIGN/@ ALIGN/g;
+
+    # Strip ARM
+    s/\sARM/@ ARM/g;
+
+    # Strip REQUIRE8
+    #s/\sREQUIRE8/@ REQUIRE8/g;
+    s/\sREQUIRE8/@ /g;      # EQU causes problems
+
+    # Strip PRESERVE8
+    s/\sPRESERVE8/@ PRESERVE8/g;
+
+    # Strip PROC and ENDPROC
+    s/\sPROC/@/g;
+    s/\sENDP/@/g;
+
+    # EQU directive
+    s/(.*)EQU(.*)/.equ $1, $2/;
+
+    # Begin macro definition
+    if (/MACRO/) {
+        $_ = <STDIN>;
+        s/^/.macro/;
+        s/\$//g;                # remove formal param reference
+        s/;/@/g;                # change comment characters
+    }
+
+    # For macros, use \ to reference formal params
+    s/\$/\\/g;
+    s/MEND/.endm/;              # End macro definition
+    next if /^\s*END\s*$/;      # No need to tell gas where to stop assembling
+    print;
+}
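+
+# Representative mappings, for reference (the symbol name is hypothetical):
+#   ; comment                     ->  @ comment
+#   #&1F                          ->  #0x1F
+#   AREA |.text|, CODE, READONLY  ->  .text
+#   EXPORT |vp8_foo|              ->  .global vp8_foo / .type vp8_foo, function
+#   x EQU 4                       ->  .equ x, 4
+#   MEND                          ->  .endm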
new file mode 100644
--- /dev/null
+++ b/media/libvpx/build/make/obj_int_extract.c
@@ -0,0 +1,756 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "vpx_config.h"
+
+#if defined(_MSC_VER)
+#include <io.h>
+#include <share.h>
+#include "vpx/vpx_integer.h"
+#else
+#include <stdint.h>
+#include <unistd.h>
+#endif
+
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdarg.h>
+
+typedef enum
+{
+    OUTPUT_FMT_PLAIN,
+    OUTPUT_FMT_RVDS,
+    OUTPUT_FMT_GAS,
+} output_fmt_t;
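+
+/* This host tool parses a compiled object file (Mach-O, ELF32, or COFF,
+ * depending on how it is built) and prints the values of data symbols as
+ * assembler constants -- "NAME EQU value" in rvds mode -- so that C struct
+ * member offsets can be shared with the ARM assembly. */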
+
+int log_msg(const char *fmt, ...)
+{
+    int res;
+    va_list ap;
+    va_start(ap, fmt);
+    res = vfprintf(stderr, fmt, ap);
+    va_end(ap);
+    return res;
+}
+
+#if defined(__GNUC__) && __GNUC__
+
+#if defined(__MACH__)
+
+#include <mach-o/loader.h>
+#include <mach-o/nlist.h>
+
+int parse_macho(uint8_t *base_buf, size_t sz)
+{
+    int i, j;
+    struct mach_header header;
+    uint8_t *buf = base_buf;
+    int base_data_section = 0;
+
+    memcpy(&header, buf, sizeof(struct mach_header));
+    buf += sizeof(struct mach_header);
+
+    if (header.magic != MH_MAGIC)
+    {
+        log_msg("Bad magic number for object file. 0x%x expected, 0x%x found.\n",
+                header.magic, MH_MAGIC);
+        goto bail;
+    }
+
+    if (header.cputype != CPU_TYPE_ARM)
+    {
+        log_msg("Bad cputype for object file. Currently only tested for CPU_TYPE_ARM.\n");
+        goto bail;
+    }
+
+    if (header.filetype != MH_OBJECT)
+    {
+        log_msg("Bad filetype for object file. Currently only tested for MH_OBJECT.\n");
+        goto bail;
+    }
+
+    for (i = 0; i < header.ncmds; i++)
+    {
+        struct load_command lc;
+        struct symtab_command sc;
+        struct segment_command seg_c;
+
+        memcpy(&lc, buf, sizeof(struct load_command));
+
+        if (lc.cmd == LC_SEGMENT)
+        {
+            uint8_t *seg_buf = buf;
+            struct section s;
+
+            memcpy(&seg_c, buf, sizeof(struct segment_command));
+
+            seg_buf += sizeof(struct segment_command);
+
+            for (j = 0; j < seg_c.nsects; j++)
+            {
+                memcpy(&s, seg_buf + (j * sizeof(struct section)), sizeof(struct section));
+
+                // Record this section's data offset; the symbol values are
+                // read from the file data at this offset further below.
+                base_data_section = s.offset;
+            }
+        }
+        else if (lc.cmd == LC_SYMTAB)
+        {
+            uint8_t *sym_buf = base_buf;
+            uint8_t *str_buf = base_buf;
+
+            if (base_data_section != 0)
+            {
+                memcpy(&sc, buf, sizeof(struct symtab_command));
+
+                if (sc.cmdsize != sizeof(struct symtab_command))
+                    log_msg("Can't find symbol table!\n");
+
+                sym_buf += sc.symoff;
+                str_buf += sc.stroff;
+
+                for (j = 0; j < sc.nsyms; j++)
+                {
+                    struct nlist nl;
+                    int val;
+
+                    memcpy(&nl, sym_buf + (j * sizeof(struct nlist)), sizeof(struct nlist));
+
+                    val = *((int *)(base_buf + base_data_section + nl.n_value));
+
+                    // The location of the string is calculated each time from
+                    // the start of the string buffer.  On darwin the symbols
+                    // are prefixed with "_", which other platforms do not
+                    // use, so it needs to be stripped.  That is the reason
+                    // for the +1.
+                    printf("%-40s EQU %5d\n", str_buf + nl.n_un.n_strx + 1, val);
+                }
+            }
+        }
+
+        buf += lc.cmdsize;
+    }
+
+    return 0;
+bail:
+    return 1;
+
+}
+
+int main(int argc, char **argv)
+{
+    int fd;
+    char *f;
+    struct stat stat_buf;
+    uint8_t *file_buf;
+    int res;
+
+    if (argc < 2 || argc > 3)
+    {
+        fprintf(stderr, "Usage: %s [output format] <obj file>\n\n", argv[0]);
+        fprintf(stderr, "  <obj file>\tMachO format object file to parse\n");
+        fprintf(stderr, "Output Formats:\n");
+        fprintf(stderr, "  gas  - compatible with GNU assembler\n");
+        fprintf(stderr, "  rvds - compatible with armasm\n");
+        goto bail;
+    }
+
+    f = argv[2];
+
+    if (!((!strcmp(argv[1], "rvds")) || (!strcmp(argv[1], "gas"))))
+        f = argv[1];
+
+    fd = open(f, O_RDONLY);
+
+    if (fd < 0)
+    {
+        perror("Unable to open file");
+        goto bail;
+    }
+
+    if (fstat(fd, &stat_buf))
+    {
+        perror("stat");
+        goto bail;
+    }
+
+    file_buf = malloc(stat_buf.st_size);
+
+    if (!file_buf)
+    {
+        perror("malloc");
+        goto bail;
+    }
+
+    if (read(fd, file_buf, stat_buf.st_size) != stat_buf.st_size)
+    {
+        perror("read");
+        goto bail;
+    }
+
+    if (close(fd))
+    {
+        perror("close");
+        goto bail;
+    }
+
+    res = parse_macho(file_buf, stat_buf.st_size);
+    free(file_buf);
+
+    if (!res)
+        return EXIT_SUCCESS;
+
+bail:
+    return EXIT_FAILURE;
+}
+
+#else
+#include "elf.h"
+
+#define COPY_STRUCT(dst, buf, ofst, sz) do {\
+        if(ofst + sizeof((*(dst))) > sz) goto bail;\
+        memcpy(dst, buf+ofst, sizeof((*(dst))));\
+    } while(0)
+
+#define ENDIAN_ASSIGN(val, memb) do {\
+        if(!elf->le_data) {log_msg("Big Endian data not supported yet!\n");goto bail;}\
+        (val) = (memb);\
+    } while(0)
+
+#define ENDIAN_ASSIGN_IN_PLACE(memb) do {\
+        ENDIAN_ASSIGN(memb, memb);\
+    } while(0)
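+
+/* COPY_STRUCT bounds-checks the read before memcpy'ing a structure out of
+ * the file buffer; the ENDIAN_ASSIGN* macros bail out on big-endian input,
+ * which is not supported yet. */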
+
+typedef struct
+{
+    uint8_t     *buf; /* Buffer containing ELF data */
+    size_t       sz;  /* Buffer size */
+    int          le_data;   /* Data is little-endian */
+    Elf32_Ehdr   hdr;
+} elf_obj_t;
+
+int parse_elf32_header(elf_obj_t *elf)
+{
+    int res;
+    /* Verify ELF32 header */
+    COPY_STRUCT(&elf->hdr, elf->buf, 0, elf->sz);
+    res = elf->hdr.e_ident[EI_MAG0] == ELFMAG0;
+    res &= elf->hdr.e_ident[EI_MAG1] == ELFMAG1;
+    res &= elf->hdr.e_ident[EI_MAG2] == ELFMAG2;
+    res &= elf->hdr.e_ident[EI_MAG3] == ELFMAG3;
+    res &= elf->hdr.e_ident[EI_CLASS] == ELFCLASS32;
+    res &= elf->hdr.e_ident[EI_DATA] == ELFDATA2LSB
+           || elf->hdr.e_ident[EI_DATA] == ELFDATA2MSB;
+
+    if (!res) goto bail;
+
+    elf->le_data = elf->hdr.e_ident[EI_DATA] == ELFDATA2LSB;
+
+    ENDIAN_ASSIGN_IN_PLACE(elf->hdr.e_type);
+    ENDIAN_ASSIGN_IN_PLACE(elf->hdr.e_machine);
+    ENDIAN_ASSIGN_IN_PLACE(elf->hdr.e_version);
+    ENDIAN_ASSIGN_IN_PLACE(elf->hdr.e_entry);
+    ENDIAN_ASSIGN_IN_PLACE(elf->hdr.e_phoff);
+    ENDIAN_ASSIGN_IN_PLACE(elf->hdr.e_shoff);
+    ENDIAN_ASSIGN_IN_PLACE(elf->hdr.e_flags);
+    ENDIAN_ASSIGN_IN_PLACE(elf->hdr.e_ehsize);
+    ENDIAN_ASSIGN_IN_PLACE(elf->hdr.e_phentsize);
+    ENDIAN_ASSIGN_IN_PLACE(elf->hdr.e_phnum);
+    ENDIAN_ASSIGN_IN_PLACE(elf->hdr.e_shentsize);
+    ENDIAN_ASSIGN_IN_PLACE(elf->hdr.e_shnum);
+    ENDIAN_ASSIGN_IN_PLACE(elf->hdr.e_shstrndx);
+    return 0;
+bail:
+    return 1;
+}
+
+int parse_elf32_section(elf_obj_t *elf, int idx, Elf32_Shdr *hdr)
+{
+    if (idx >= elf->hdr.e_shnum)
+        goto bail;
+
+    COPY_STRUCT(hdr, elf->buf, elf->hdr.e_shoff + idx * elf->hdr.e_shentsize,
+                elf->sz);
+    ENDIAN_ASSIGN_IN_PLACE(hdr->sh_name);
+    ENDIAN_ASSIGN_IN_PLACE(hdr->sh_type);
+    ENDIAN_ASSIGN_IN_PLACE(hdr->sh_flags);
+    ENDIAN_ASSIGN_IN_PLACE(hdr->sh_addr);
+    ENDIAN_ASSIGN_IN_PLACE(hdr->sh_offset);
+    ENDIAN_ASSIGN_IN_PLACE(hdr->sh_size);
+    ENDIAN_ASSIGN_IN_PLACE(hdr->sh_link);
+    ENDIAN_ASSIGN_IN_PLACE(hdr->sh_info);
+    ENDIAN_ASSIGN_IN_PLACE(hdr->sh_addralign);
+    ENDIAN_ASSIGN_IN_PLACE(hdr->sh_entsize);
+    return 0;
+bail:
+    return 1;
+}
+
+char *parse_elf32_string_table(elf_obj_t *elf, int s_idx, int idx)
+{
+    Elf32_Shdr shdr;
+
+    if (parse_elf32_section(elf, s_idx, &shdr))
+    {
+        log_msg("Failed to parse ELF string table: section %d, index %d\n",
+                s_idx, idx);
+        return "";
+    }
+
+    return (char *)(elf->buf + shdr.sh_offset + idx);
+}
+
+int parse_elf32_symbol(elf_obj_t *elf, unsigned int ofst, Elf32_Sym *sym)
+{
+    COPY_STRUCT(sym, elf->buf, ofst, elf->sz);
+    ENDIAN_ASSIGN_IN_PLACE(sym->st_name);
+    ENDIAN_ASSIGN_IN_PLACE(sym->st_value);
+    ENDIAN_ASSIGN_IN_PLACE(sym->st_size);
+    ENDIAN_ASSIGN_IN_PLACE(sym->st_info);
+    ENDIAN_ASSIGN_IN_PLACE(sym->st_other);
+    ENDIAN_ASSIGN_IN_PLACE(sym->st_shndx);
+    return 0;
+bail:
+    return 1;
+}
+
+int parse_elf32(uint8_t *buf, size_t sz, output_fmt_t mode)
+{
+    elf_obj_t  elf;
+    Elf32_Shdr shdr;
+    unsigned int ofst;
+    int         i;
+    Elf32_Off strtab_off;   /* save String Table offset for later use */
+
+    memset(&elf, 0, sizeof(elf));
+    elf.buf = buf;
+    elf.sz = sz;
+
+    /* Parse Header */
+    if (parse_elf32_header(&elf))
+    {
+        log_msg("Parse error: File does not appear to be valid ELF32\n");
+        return 1;
+    }
+
+    for (i = 0; i < elf.hdr.e_shnum; i++)
+    {
+        parse_elf32_section(&elf, i, &shdr);
+
+        if (shdr.sh_type == SHT_STRTAB)
+        {
+            char strtsb_name[128];
+
+            strcpy(strtsb_name, (char *)(elf.buf + shdr.sh_offset + shdr.sh_name));
+
+            if (!(strcmp(strtsb_name, ".shstrtab")))
+            {
+                log_msg("found section: %s\n", strtsb_name);
+                strtab_off = shdr.sh_offset;
+                break;
+            }
+        }
+    }
+
+    /* Parse all Symbol Tables */
+    for (i = 0; i < elf.hdr.e_shnum; i++)
+    {
+
+        parse_elf32_section(&elf, i, &shdr);
+
+        if (shdr.sh_type == SHT_SYMTAB)
+        {
+            for (ofst = shdr.sh_offset;
+                 ofst < shdr.sh_offset + shdr.sh_size;
+                 ofst += shdr.sh_entsize)
+            {
+                Elf32_Sym sym;
+
+                parse_elf32_symbol(&elf, ofst, &sym);
+
+                /* For all OBJECTS (data objects), extract the value from the
+                 * proper data segment.
+                 */
+                if (ELF32_ST_TYPE(sym.st_info) == STT_OBJECT && sym.st_name)
+                    log_msg("found data object %s\n",
+                            parse_elf32_string_table(&elf,
+                                                     shdr.sh_link,
+                                                     sym.st_name));
+
+                if (ELF32_ST_TYPE(sym.st_info) == STT_OBJECT
+                    && sym.st_size == 4)
+                {
+                    Elf32_Shdr dhdr;
+                    int32_t      val;
+                    char section_name[128];
+
+                    parse_elf32_section(&elf, sym.st_shndx, &dhdr);
+
+                    /* For explanation, refer to the _MSC_VER version of the code */
+                    strcpy(section_name, (char *)(elf.buf + strtab_off + dhdr.sh_name));
+                    log_msg("Section_name: %s, Section_type: %d\n", section_name, dhdr.sh_type);
+
+                    if (!(strcmp(section_name, ".bss")))
+                    {
+                        val = 0;
+                    }
+                    else
+                    {
+                        memcpy(&val,
+                               elf.buf + dhdr.sh_offset + sym.st_value,
+                               sizeof(val));
+                    }
+
+                    if (!elf.le_data)
+                    {
+                        log_msg("Big Endian data not supported yet!\n");
+                        goto bail;
+                    }
+
+                    switch (mode)
+                    {
+                    case OUTPUT_FMT_RVDS:
+                        printf("%-40s EQU %5d\n",
+                               parse_elf32_string_table(&elf,
+                                                        shdr.sh_link,
+                                                        sym.st_name),
+                               val);
+                        break;
+                    case OUTPUT_FMT_GAS:
+                        printf(".equ %-40s, %5d\n",
+                               parse_elf32_string_table(&elf,
+                                                        shdr.sh_link,
+                                                        sym.st_name),
+                               val);
+                        break;
+                    default:
+                        printf("%s = %d\n",
+                               parse_elf32_string_table(&elf,
+                                                        shdr.sh_link,
+                                                        sym.st_name),
+                               val);
+                    }
+                }
+            }
+        }
+    }
+
+    if (mode == OUTPUT_FMT_RVDS)
+        printf("    END\n");
+
+    return 0;
+bail:
+    log_msg("Parse error: File does not appear to be valid ELF32\n");
+    return 1;
+}
+
+int main(int argc, char **argv)
+{
+    int fd;
+    output_fmt_t mode;
+    char *f;
+    struct stat stat_buf;
+    uint8_t *file_buf;
+    int res;
+
+    if (argc < 2 || argc > 3)
+    {
+        fprintf(stderr, "Usage: %s [output format] <obj file>\n\n", argv[0]);
+        fprintf(stderr, "  <obj file>\tELF format object file to parse\n");
+        fprintf(stderr, "Output Formats:\n");
+        fprintf(stderr, "  gas  - compatible with GNU assembler\n");
+        fprintf(stderr, "  rvds - compatible with armasm\n");
+        goto bail;
+    }
+
+    f = argv[2];
+
+    if (!strcmp(argv[1], "rvds"))
+        mode = OUTPUT_FMT_RVDS;
+    else if (!strcmp(argv[1], "gas"))
+        mode = OUTPUT_FMT_GAS;
+    else
+        f = argv[1];
+
+
+    fd = open(f, O_RDONLY);
+
+    if (fd < 0)
+    {
+        perror("Unable to open file");
+        goto bail;
+    }
+
+    if (fstat(fd, &stat_buf))
+    {
+        perror("stat");
+        goto bail;
+    }
+
+    file_buf = malloc(stat_buf.st_size);
+
+    if (!file_buf)
+    {
+        perror("malloc");
+        goto bail;
+    }
+
+    if (read(fd, file_buf, stat_buf.st_size) != stat_buf.st_size)
+    {
+        perror("read");
+        goto bail;
+    }
+
+    if (close(fd))
+    {
+        perror("close");
+        goto bail;
+    }
+
+    res = parse_elf32(file_buf, stat_buf.st_size, mode);
+    //res = parse_coff(file_buf, stat_buf.st_size);
+    free(file_buf);
+
+    if (!res)
+        return EXIT_SUCCESS;
+
+bail:
+    return EXIT_FAILURE;
+}
+#endif
+#endif
+
+
+#if defined(_MSC_VER)
+/*  See "Microsoft Portable Executable and Common Object File Format Specification"
+    for reference.
+*/
+#define get_le32(x) ((*(x)) | (*(x+1)) << 8 |(*(x+2)) << 16 | (*(x+3)) << 24 )
+#define get_le16(x) ((*(x)) | (*(x+1)) << 8)
+
+int parse_coff(unsigned __int8 *buf, size_t sz)
+{
+    unsigned int nsections, symtab_ptr, symtab_sz, strtab_ptr;
+    unsigned int sectionrawdata_ptr;
+    unsigned int i;
+    unsigned __int8 *ptr;
+    unsigned __int32 symoffset;
+    FILE *fp;
+
+    char **sectionlist;  //this array holds all section names in their correct order.
+    //it is used to check if the symbol is in .bss or .data section.
+
+    nsections = get_le16(buf + 2);
+    symtab_ptr = get_le32(buf + 8);
+    symtab_sz = get_le32(buf + 12);
+    strtab_ptr = symtab_ptr + symtab_sz * 18;
+
+    if (nsections > 96)
+        goto bail;
+
+    sectionlist = malloc(nsections * sizeof * sectionlist);
+
+    //log_msg("COFF: Found %u symbols in %u sections.\n", symtab_sz, nsections);
+
+    /*
+    The size of optional header is always zero for an obj file. So, the section header
+    follows the file header immediately.
+    */
+
+    ptr = buf + 20;     //section header
+
+    for (i = 0; i < nsections; i++)
+    {
+        char sectionname[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+        strncpy(sectionname, ptr, 8);
+        //log_msg("COFF: Parsing section %s\n",sectionname);
+
+        sectionlist[i] = malloc(strlen(sectionname) + 1);
+        strcpy(sectionlist[i], sectionname);
+
+        if (!strcmp(sectionname, ".data")) sectionrawdata_ptr = get_le32(ptr + 20);
+
+        ptr += 40;
+    }
+
+    //log_msg("COFF: Symbol table at offset %u\n", symtab_ptr);
+    //log_msg("COFF: raw data pointer ofset for section .data is %u\n", sectionrawdata_ptr);
+
+    fp = fopen("vpx_asm_offsets.asm", "w");
+
+    if (fp == NULL)
+    {
+        perror("open file");
+        goto bail;
+    }
+
+    /*  The compiler puts the data with non-zero offset in .data section, but puts the data with
+        zero offset in .bss section. So, if the data is in the .bss section, set offset=0.
+        Note from Wiki: In an object module compiled from C, the bss section contains
+        the local variables (but not functions) that were declared with the static keyword,
+        except for those with non-zero initial values. (In C, static variables are initialized
+        to zero by default.) It also contains the non-local (both extern and static) variables
+        that are also initialized to zero (either explicitly or by default).
+        */
+    //move to symbol table
+    /* COFF symbol table:
+        offset      field
+        0           Name(*)
+        8           Value
+        12          SectionNumber
+        14          Type
+        16          StorageClass
+        17          NumberOfAuxSymbols
+        */
+    ptr = buf + symtab_ptr;
+
+    for (i = 0; i < symtab_sz; i++)
+    {
+        __int16 section = get_le16(ptr + 12); //section number
+
+        if (section > 0 && ptr[16] == 2)
+        {
+            //if(section > 0 && ptr[16] == 3 && get_le32(ptr+8)) {
+
+            if (get_le32(ptr))
+            {
+                char name[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+                strncpy(name, ptr, 8);
+                //log_msg("COFF: Parsing symbol %s\n",name);
+                fprintf(fp, "%-40s EQU ", name);
+            }
+            else
+            {
+                //log_msg("COFF: Parsing symbol %s\n",
+                //        buf + strtab_ptr + get_le32(ptr+4));
+                fprintf(fp, "%-40s EQU ", buf + strtab_ptr + get_le32(ptr + 4));
+            }
+
+            if (!(strcmp(sectionlist[section-1], ".bss")))
+            {
+                symoffset = 0;
+            }
+            else
+            {
+                symoffset = get_le32(buf + sectionrawdata_ptr + get_le32(ptr + 8));
+            }
+
+            //log_msg("      Section: %d\n",section);
+            //log_msg("      Class:   %d\n",ptr[16]);
+            //log_msg("      Address: %u\n",get_le32(ptr+8));
+            //log_msg("      Offset: %u\n", symoffset);
+
+            fprintf(fp, "%5d\n", symoffset);
+        }
+
+        ptr += 18;
+    }
+
+    fprintf(fp, "    END\n");
+    fclose(fp);
+
+    for (i = 0; i < nsections; i++)
+    {
+        free(sectionlist[i]);
+    }
+
+    free(sectionlist);
+
+    return 0;
+bail:
+
+    for (i = 0; i < nsections; i++)
+    {
+        free(sectionlist[i]);
+    }
+
+    free(sectionlist);
+
+    return 1;
+}
+
+int main(int argc, char **argv)
+{
+    int fd;
+    output_fmt_t mode;
+    const char *f;
+    struct _stat stat_buf;
+    unsigned __int8 *file_buf;
+    int res;
+
+    if (argc < 2 || argc > 3)
+    {
+        fprintf(stderr, "Usage: %s [output format] <obj file>\n\n", argv[0]);
+        fprintf(stderr, "  <obj file>\tELF format object file to parse\n");
+        fprintf(stderr, "Output Formats:\n");
+        fprintf(stderr, "  gas  - compatible with GNU assembler\n");
+        fprintf(stderr, "  rvds - compatible with armasm\n");
+        goto bail;
+    }
+
+    f = argv[2];
+
+    if (!strcmp(argv[1], "rvds"))
+        mode = OUTPUT_FMT_RVDS;
+    else if (!strcmp(argv[1], "gas"))
+        mode = OUTPUT_FMT_GAS;
+    else
+        f = argv[1];
+
+    if (_sopen_s(&fd, f, _O_BINARY, _SH_DENYNO, _S_IREAD | _S_IWRITE))
+    {
+        perror("Unable to open file");
+        goto bail;
+    }
+
+    if (_fstat(fd, &stat_buf))
+    {
+        perror("stat");
+        goto bail;
+    }
+
+    file_buf = malloc(stat_buf.st_size);
+
+    if (!file_buf)
+    {
+        perror("malloc");
+        goto bail;
+    }
+
+    if (_read(fd, file_buf, stat_buf.st_size) != stat_buf.st_size)
+    {
+        perror("read");
+        goto bail;
+    }
+
+    if (_close(fd))
+    {
+        perror("close");
+        goto bail;
+    }
+
+    res = parse_coff(file_buf, stat_buf.st_size);
+
+    free(file_buf);
+
+    if (!res)
+        return EXIT_SUCCESS;
+
+bail:
+    return EXIT_FAILURE;
+}
+#endif
deleted file mode 100644
--- a/media/libvpx/frame_buf_ref.patch
+++ /dev/null
@@ -1,113 +0,0 @@
-diff --git a/media/libvpx/vp8/decoder/decodframe.c b/media/libvpx/vp8/decoder/decodframe.c
---- a/media/libvpx/vp8/decoder/decodframe.c
-+++ b/media/libvpx/vp8/decoder/decodframe.c
-@@ -462,17 +462,17 @@ static void setup_token_decoder(VP8D_COM
-         {
-             partition_size = read_partition_size(partition_size_ptr);
-         }
-         else
-         {
-             partition_size = user_data_end - partition;
-         }
- 
--        if (partition + partition_size > user_data_end)
-+        if (user_data_end - partition < partition_size)
-             vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
-                                "Truncated packet or corrupt partition "
-                                "%d length", i + 1);
- 
-         if (vp8dx_start_decode(bool_decoder, IF_RTCD(&pbi->dboolhuff),
-                                partition, partition_size))
-             vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
-                                "Failed to allocate bool decoder %d", i + 1);
-@@ -564,30 +564,33 @@ static void init_frame(VP8D_COMP *pbi)
- 
- int vp8_decode_frame(VP8D_COMP *pbi)
- {
-     vp8_reader *const bc = & pbi->bc;
-     VP8_COMMON *const pc = & pbi->common;
-     MACROBLOCKD *const xd  = & pbi->mb;
-     const unsigned char *data = (const unsigned char *)pbi->Source;
-     const unsigned char *const data_end = data + pbi->source_sz;
--    int first_partition_length_in_bytes;
-+    unsigned int first_partition_length_in_bytes;
- 
-     int mb_row;
-     int i, j, k, l;
-     const int *const mb_feature_data_bits = vp8_mb_feature_data_bits;
- 
-+    if (data_end - data < 3)
-+        vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
-+                           "Truncated packet");
-     pc->frame_type = (FRAME_TYPE)(data[0] & 1);
-     pc->version = (data[0] >> 1) & 7;
-     pc->show_frame = (data[0] >> 4) & 1;
-     first_partition_length_in_bytes =
-         (data[0] | (data[1] << 8) | (data[2] << 16)) >> 5;
-     data += 3;
- 
--    if (data + first_partition_length_in_bytes > data_end)
-+    if (data_end - data < first_partition_length_in_bytes)
-         vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
-                            "Truncated packet or corrupt partition 0 length");
-     vp8_setup_version(pc);
- 
-     if (pc->frame_type == KEY_FRAME)
-     {
-         const int Width = pc->Width;
-         const int Height = pc->Height;
-diff --git a/media/libvpx/vp8/decoder/onyxd_if.c b/media/libvpx/vp8/decoder/onyxd_if.c
---- a/media/libvpx/vp8/decoder/onyxd_if.c
-+++ b/media/libvpx/vp8/decoder/onyxd_if.c
-@@ -318,45 +318,49 @@ int vp8dx_receive_compressed_data(VP8D_P
- 
-     if (ptr == 0)
-     {
-         return -1;
-     }
- 
-     pbi->common.error.error_code = VPX_CODEC_OK;
- 
-+    cm->new_fb_idx = get_free_fb (cm);
-+
-     if (setjmp(pbi->common.error.jmp))
-     {
-         pbi->common.error.setjmp = 0;
-+        if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0)
-+          cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
-         return -1;
-     }
- 
-     pbi->common.error.setjmp = 1;
- 
- #if HAVE_ARMV7
-     vp8_push_neon(dx_store_reg);
- #endif
- 
-     vpx_usec_timer_start(&timer);
- 
-     //cm->current_video_frame++;
-     pbi->Source = source;
-     pbi->source_sz = size;
- 
--    cm->new_fb_idx = get_free_fb (cm);
--
-     retcode = vp8_decode_frame(pbi);
- 
-     if (retcode < 0)
-     {
- #if HAVE_ARMV7
-         vp8_pop_neon(dx_store_reg);
- #endif
-         pbi->common.error.error_code = VPX_CODEC_ERROR;
-         pbi->common.error.setjmp = 0;
-+        if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0)
-+          cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
-         return retcode;
-     }
- 
-     if (pbi->b_multithreaded_lf && pbi->common.filter_level != 0)
-         vp8_stop_lfthread(pbi);
- 
-     if (swap_frame_buffers (cm))
-     {
deleted file mode 100644
--- a/media/libvpx/reduce-warnings-1.patch
+++ /dev/null
@@ -1,168 +0,0 @@
-diff --git a/media/libvpx/vp8/common/blockd.h b/media/libvpx/vp8/common/blockd.h
---- a/media/libvpx/vp8/common/blockd.h
-+++ b/media/libvpx/vp8/common/blockd.h
-@@ -90,17 +90,17 @@ typedef enum
-     MB_MODE_COUNT
- } MB_PREDICTION_MODE;
- 
- // Macroblock level features
- typedef enum
- {
-     MB_LVL_ALT_Q = 0,               // Use alternate Quantizer ....
-     MB_LVL_ALT_LF = 1,              // Use alternate loop filter value...
--    MB_LVL_MAX = 2,                 // Number of MB level features supported
-+    MB_LVL_MAX = 2                 // Number of MB level features supported
- 
- } MB_LVL_FEATURES;
- 
- // Segment Feature Masks
- #define SEGMENT_ALTQ    0x01
- #define SEGMENT_ALT_LF  0x02
- 
- #define VP8_YMODES  (B_PRED + 1)
-diff --git a/media/libvpx/vp8/common/ppflags.h b/media/libvpx/vp8/common/ppflags.h
---- a/media/libvpx/vp8/common/ppflags.h
-+++ b/media/libvpx/vp8/common/ppflags.h
-@@ -15,12 +15,12 @@ enum
- {
-     VP8D_NOFILTERING    = 0,
-     VP8D_DEBLOCK        = 1,
-     VP8D_DEMACROBLOCK   = 2,
-     VP8D_ADDNOISE       = 4,
-     VP8D_DEBUG_LEVEL1   = 8,
-     VP8D_DEBUG_LEVEL2   = 16,
-     VP8D_DEBUG_LEVEL3   = 32,
--    VP8D_DEBUG_LEVEL4   = 64,
-+    VP8D_DEBUG_LEVEL4   = 64
- };
- 
- #endif
-diff --git a/media/libvpx/vpx/vp8.h b/media/libvpx/vpx/vp8.h
---- a/media/libvpx/vpx/vp8.h
-+++ b/media/libvpx/vpx/vp8.h
-@@ -48,17 +48,17 @@ enum vp8_dec_control_id
-  *
-  * The set of macros define VP8 decoder post processing flags
-  */
- enum vp8_postproc_level
- {
-     VP8_NOFILTERING    = 0,
-     VP8_DEBLOCK        = 1,
-     VP8_DEMACROBLOCK   = 2,
--    VP8_ADDNOISE       = 4,
-+    VP8_ADDNOISE       = 4
- };
- 
- /*!\brief post process flags
-  *
-  * This define a structure that describe the post processing settings. For
-  * the best objective measure (using thet PSNR metric) set post_proc_flag
-  * to VP8_DEBLOCK and deblocking_level to 1.
-  */
-diff --git a/media/libvpx/vpx/vpx_codec.h b/media/libvpx/vpx/vpx_codec.h
---- a/media/libvpx/vpx/vpx_codec.h
-+++ b/media/libvpx/vpx/vpx_codec.h
-@@ -57,17 +57,17 @@ extern "C" {
- #define DEPRECATED
- #define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */
- #endif
- #endif
- 
-     /*!\brief Decorator indicating a function is potentially unused */
- #ifdef UNUSED
- #elif __GNUC__
--#define UNUSED __attribute__ ((unused));
-+#define UNUSED __attribute__ ((unused))
- #else
- #define UNUSED
- #endif
- 
-     /*!\brief Current ABI version number
-      *
-      * \internal
-      * If this file is altered in any way that changes the ABI, this value
-@@ -123,17 +123,17 @@ extern "C" {
-         /*!\brief An application-supplied parameter is not valid.
-          *
-          */
-         VPX_CODEC_INVALID_PARAM,
- 
-         /*!\brief An iterator reached the end of list.
-          *
-          */
--        VPX_CODEC_LIST_END,
-+        VPX_CODEC_LIST_END
- 
-     }
-     vpx_codec_err_t;
- 
- 
-     /*! \brief Codec capabilities bitfield
-      *
-      *  Each codec advertises the capabilities it supports as part of its
-diff --git a/media/libvpx/vpx/vpx_decoder_compat.h b/media/libvpx/vpx/vpx_decoder_compat.h
---- a/media/libvpx/vpx/vpx_decoder_compat.h
-+++ b/media/libvpx/vpx/vpx_decoder_compat.h
-@@ -73,17 +73,17 @@ extern "C" {
-         /*!\brief An application-supplied parameter is not valid.
-          *
-          */
-         VPX_DEC_INVALID_PARAM = VPX_CODEC_INVALID_PARAM,
- 
-         /*!\brief An iterator reached the end of list.
-          *
-          */
--        VPX_DEC_LIST_END = VPX_CODEC_LIST_END,
-+        VPX_DEC_LIST_END = VPX_CODEC_LIST_END
- 
-     }
-     vpx_dec_err_t;
- 
-     /*! \brief Decoder capabilities bitfield
-      *
-      *  Each decoder advertises the capabilities it supports as part of its
-      *  ::vpx_dec_iface_t interface structure. Capabilities are extra interfaces
-diff --git a/media/libvpx/vpx/vpx_encoder.h b/media/libvpx/vpx/vpx_encoder.h
---- a/media/libvpx/vpx/vpx_encoder.h
-+++ b/media/libvpx/vpx/vpx_encoder.h
-@@ -166,17 +166,17 @@ extern "C" {
-     } vpx_rational_t; /**< alias for struct vpx_rational */
- 
- 
-     /*!\brief Multi-pass Encoding Pass */
-     enum vpx_enc_pass
-     {
-         VPX_RC_ONE_PASS,   /**< Single pass mode */
-         VPX_RC_FIRST_PASS, /**< First pass of multi-pass mode */
--        VPX_RC_LAST_PASS,  /**< Final pass of multi-pass mode */
-+        VPX_RC_LAST_PASS  /**< Final pass of multi-pass mode */
-     };
- 
- 
-     /*!\brief Rate control mode */
-     enum vpx_rc_mode
-     {
-         VPX_VBR, /**< Variable Bit Rate (VBR) mode */
-         VPX_CBR  /**< Constant Bit Rate (CBR) mode */
-diff --git a/media/libvpx/vpx/vpx_image.h b/media/libvpx/vpx/vpx_image.h
---- a/media/libvpx/vpx/vpx_image.h
-+++ b/media/libvpx/vpx/vpx_image.h
-@@ -50,17 +50,17 @@ extern "C" {
-         VPX_IMG_FMT_RGB32_LE, /**< 32 bit packed BGR0 */
-         VPX_IMG_FMT_ARGB,     /**< 32 bit packed ARGB, alpha=255 */
-         VPX_IMG_FMT_ARGB_LE,  /**< 32 bit packed BGRA, alpha=255 */
-         VPX_IMG_FMT_RGB565_LE,  /**< 16 bit per pixel, gggbbbbb rrrrrggg */
-         VPX_IMG_FMT_RGB555_LE,  /**< 16 bit per pixel, gggbbbbb 0rrrrrgg */
-         VPX_IMG_FMT_YV12    = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 1, /**< planar YVU */
-         VPX_IMG_FMT_I420    = VPX_IMG_FMT_PLANAR | 2,
-         VPX_IMG_FMT_VPXYV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 3, /** < planar 4:2:0 format with vpx color space */
--        VPX_IMG_FMT_VPXI420 = VPX_IMG_FMT_PLANAR | 4,  /** < planar 4:2:0 format with vpx color space */
-+        VPX_IMG_FMT_VPXI420 = VPX_IMG_FMT_PLANAR | 4  /** < planar 4:2:0 format with vpx color space */
-     }
-     vpx_img_fmt_t; /**< alias for enum vpx_img_fmt */
- 
- #if !defined(VPX_CODEC_DISABLE_COMPAT) || !VPX_CODEC_DISABLE_COMPAT
- #define IMG_FMT_PLANAR         VPX_IMG_FMT_PLANAR     /**< \deprecated Use #VPX_IMG_FMT_PLANAR */
- #define IMG_FMT_UV_FLIP        VPX_IMG_FMT_UV_FLIP    /**< \deprecated Use #VPX_IMG_FMT_UV_FLIP */
- #define IMG_FMT_HAS_ALPHA      VPX_IMG_FMT_HAS_ALPHA  /**< \deprecated Use #VPX_IMG_FMT_HAS_ALPHA */
- 
--- a/media/libvpx/solaris.patch
+++ b/media/libvpx/solaris.patch
@@ -1,26 +1,21 @@
 diff --git a/media/libvpx/vp8/common/loopfilter_filters.c b/media/libvpx/vp8/common/loopfilter_filters.c
 --- a/media/libvpx/vp8/common/loopfilter_filters.c
 +++ b/media/libvpx/vp8/common/loopfilter_filters.c
-@@ -8,16 +8,19 @@
-  *  be found in the AUTHORS file in the root of the source tree.
-  */
- 
+@@ -11,10 +11,14 @@
  
  #include <stdlib.h>
  #include "loopfilter.h"
  #include "onyxc_int.h"
  
 +#ifdef __SUNPRO_C
 +#define __inline inline
 +#endif
- 
- #define NEW_LOOPFILTER_MASK
- 
++
  typedef unsigned char uc;
  
  static __inline signed char vp8_signed_char_clamp(int t)
  {
      t = (t < -128 ? -128 : t);
 diff --git a/media/libvpx/vpx/internal/vpx_codec_internal.h b/media/libvpx/vpx/internal/vpx_codec_internal.h
 --- a/media/libvpx/vpx/internal/vpx_codec_internal.h
 +++ b/media/libvpx/vpx/internal/vpx_codec_internal.h
@@ -104,24 +99,24 @@ diff --git a/media/libvpx/vpx_ports/mem.
  #else
  #warning No alignment directives known for this compiler.
  #define DECLARE_ALIGNED(n,typ,val)  typ val
  #endif
  #endif
 diff --git a/media/libvpx/vpx_ports/x86.h b/media/libvpx/vpx_ports/x86.h
 --- a/media/libvpx/vpx_ports/x86.h
 +++ b/media/libvpx/vpx_ports/x86.h
-@@ -26,16 +26,36 @@
+@@ -45,16 +45,36 @@
+ #define cpuid(func,ax,bx,cx,dx)\
      __asm__ __volatile__ (\
-                           "pushl %%ebx     \n\t" \
-                           "cpuid           \n\t" \
-                           "movl  %%ebx, %1 \n\t" \
-                           "popl  %%ebx     \n\t" \
-                           : "=a" (ax), "=r" (bx), "=c" (cx), "=d" (dx) \
-                           : "a"  (func));
+                           "mov %%ebx, %%edi   \n\t" \
+                           "cpuid              \n\t" \
+                           "xchg %%edi, %%ebx  \n\t" \
+                           : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
+                           : "a" (func));
  #endif
 +#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
 +#if ARCH_X86_64
 +#define cpuid(func,ax,bx,cx,dx)\
 +    asm volatile (\
 +                  "xchg %rsi, %rbx \n\t" \
 +                  "cpuid           \n\t" \
 +                  "movl %ebx, %edi \n\t" \
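The x86.h hunk above replaces the push/pop of %ebx with a mov/xchg through %edi so cpuid can run where %ebx is reserved (e.g. position-independent 32-bit code), and adds a Sun Studio variant. A hedged C sketch of the GCC-style form, with a hypothetical wrapper name:

    /* Sketch only, 32-bit x86: %ebx must be preserved around cpuid
     * because PIC code reserves it for the GOT pointer. */
    static void cpuid_wrapper(int func, int *ax, int *bx, int *cx, int *dx)
    {
        __asm__ __volatile__ (
            "mov %%ebx, %%edi   \n\t"  /* stash %ebx */
            "cpuid              \n\t"
            "xchg %%edi, %%ebx  \n\t"  /* restore %ebx; result left in %edi */
            : "=a" (*ax), "=D" (*bx), "=c" (*cx), "=d" (*dx)
            : "a" (func));
    }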
deleted file mode 100644
--- a/media/libvpx/subpixel-qword.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-diff --git a/media/libvpx/vp8/common/x86/subpixel_sse2.asm b/media/libvpx/vp8/common/x86/subpixel_sse2.asm
---- a/media/libvpx/vp8/common/x86/subpixel_sse2.asm
-+++ b/media/libvpx/vp8/common/x86/subpixel_sse2.asm
-@@ -1003,17 +1003,17 @@ next_row8x8:
-         paddw       xmm3,       xmm7
- 
-         movdqa      xmm7,       xmm4
- 
-         paddw       xmm3,       [rd GLOBAL]         ; xmm3 += round value
-         psraw       xmm3,       VP8_FILTER_SHIFT        ; xmm3 /= 128
- 
-         packuswb    xmm3,       xmm0
--        movq        [rdi],      xmm3                 ; store the results in the destination
-+        movq        QWORD PTR [rdi], xmm3           ; store the results in the destination
- 
-         add         rsp,        16                 ; next line
-         add         rdi,        rdx
- 
-         cmp         rdi,        rcx
-         jne         next_row8x8
- 
-     ;add rsp, 144
--- a/media/libvpx/update.sh
+++ b/media/libvpx/update.sh
@@ -74,29 +74,40 @@ commonFiles=(
   vp8/common/recon.c
   vp8/common/reconinter.c
   vp8/common/reconintra4x4.c
   vp8/common/reconintra.c
   vp8/common/setupintrarecon.c
   vp8/common/swapyv12buffer.c
   vp8/common/textblit.c
   vp8/common/treecoder.c
+  vp8/common/arm/arm_systemdependent.c
+  vp8/common/arm/bilinearfilter_arm.c
+  vp8/common/arm/filter_arm.c
+  vp8/common/arm/loopfilter_arm.c
+  vp8/common/arm/reconintra_arm.c
+  vp8/common/arm/vpx_asm_offsets.c
+  vp8/common/arm/neon/recon_neon.c
   vp8/common/x86/loopfilter_x86.c
   vp8/common/x86/vp8_asm_stubs.c
   vp8/common/x86/x86_systemdependent.c
   vp8/decoder/dboolhuff.c
   vp8/decoder/decodemv.c
   vp8/decoder/decodframe.c
-  vp8/decoder/demode.c
   vp8/decoder/dequantize.c
   vp8/decoder/detokenize.c
+  vp8/decoder/reconintra_mt.c
   vp8/decoder/generic/dsystemdependent.c
   vp8/decoder/idct_blk.c
   vp8/decoder/onyxd_if.c
   vp8/decoder/threading.c
+  vp8/decoder/arm/arm_dsystemdependent.c
+  vp8/decoder/arm/dequantize_arm.c
+  vp8/decoder/arm/armv6/idct_blk_v6.c
+  vp8/decoder/arm/neon/idct_blk_neon.c
   vp8/decoder/x86/idct_blk_mmx.c
   vp8/decoder/x86/idct_blk_sse2.c
   vp8/decoder/x86/x86_dsystemdependent.c
   vp8/vp8_dx_iface.c
   vpx/src/vpx_codec.c
   vpx/src/vpx_decoder.c
   vpx/src/vpx_decoder_compat.c
   vpx/src/vpx_encoder.c
@@ -133,78 +144,135 @@ commonFiles=(
   vp8/common/pragmas.h
   vp8/common/predictdc.h
   vp8/common/preproc.h
   vp8/common/quant_common.h
   vp8/common/recon.h
   vp8/common/reconinter.h
   vp8/common/reconintra4x4.h
   vp8/common/reconintra.h
-  vp8/common/segmentation_common.h
   vp8/common/setupintrarecon.h
   vp8/common/subpixel.h
   vp8/common/swapyv12buffer.h
   vp8/common/systemdependent.h
   vp8/common/threading.h
   vp8/common/treecoder.h
   vp8/common/type_aliases.h
   vp8/common/vpxerrors.h
+  vp8/common/arm/idct_arm.h
+  vp8/common/arm/loopfilter_arm.h
+  vp8/common/arm/recon_arm.h
+  vp8/common/arm/subpixel_arm.h
   vp8/common/x86/idct_x86.h
   vp8/common/x86/loopfilter_x86.h
   vp8/common/x86/postproc_x86.h
   vp8/common/x86/recon_x86.h
   vp8/common/x86/subpixel_x86.h
   vp8/decoder/dboolhuff.h
   vp8/decoder/decodemv.h
   vp8/decoder/decoderthreading.h
-  vp8/decoder/demode.h
   vp8/decoder/dequantize.h
   vp8/decoder/detokenize.h
   vp8/decoder/onyxd_int.h
+  vp8/decoder/reconintra_mt.h
   vp8/decoder/treereader.h
+  vp8/decoder/arm/dboolhuff_arm.h
+  vp8/decoder/arm/dequantize_arm.h
+  vp8/decoder/arm/detokenize_arm.h
   vp8/decoder/x86/dequantize_x86.h
   vpx/internal/vpx_codec_internal.h
   vpx/vp8cx.h
   vpx/vp8dx.h
   vpx/vp8e.h
   vpx/vp8.h
   vpx/vpx_codec.h
   vpx/vpx_codec_impl_bottom.h
   vpx/vpx_codec_impl_top.h
   vpx/vpx_decoder_compat.h
   vpx/vpx_decoder.h
   vpx/vpx_encoder.h
   vpx/vpx_image.h
   vpx/vpx_integer.h
   vpx_mem/include/vpx_mem_intrnl.h
   vpx_mem/vpx_mem.h
+  vpx_ports/arm_cpudetect.c
   vpx_ports/config.h
   vpx_ports/mem.h
   vpx_ports/vpx_timer.h
+  vpx_ports/arm.h
   vpx_ports/x86.h
   vpx_scale/scale_mode.h
   vpx_scale/vpxscale.h
   vpx_scale/yv12config.h
   vpx_scale/yv12extend.h
+  vp8/common/arm/armv6/bilinearfilter_v6.asm
+  vp8/common/arm/armv6/copymem8x4_v6.asm
+  vp8/common/arm/armv6/copymem8x8_v6.asm
+  vp8/common/arm/armv6/copymem16x16_v6.asm
+  vp8/common/arm/armv6/dc_only_idct_add_v6.asm
+  vp8/common/arm/armv6/iwalsh_v6.asm
+  vp8/common/arm/armv6/filter_v6.asm
+  vp8/common/arm/armv6/idct_v6.asm
+  vp8/common/arm/armv6/loopfilter_v6.asm
+  vp8/common/arm/armv6/recon_v6.asm
+  vp8/common/arm/armv6/simpleloopfilter_v6.asm
+  vp8/common/arm/armv6/sixtappredict8x4_v6.asm
+  vp8/common/arm/neon/bilinearpredict4x4_neon.asm
+  vp8/common/arm/neon/bilinearpredict8x4_neon.asm
+  vp8/common/arm/neon/bilinearpredict8x8_neon.asm
+  vp8/common/arm/neon/bilinearpredict16x16_neon.asm
+  vp8/common/arm/neon/copymem8x4_neon.asm
+  vp8/common/arm/neon/copymem8x8_neon.asm
+  vp8/common/arm/neon/copymem16x16_neon.asm
+  vp8/common/arm/neon/dc_only_idct_add_neon.asm
+  vp8/common/arm/neon/iwalsh_neon.asm
+  vp8/common/arm/neon/loopfilter_neon.asm
+  vp8/common/arm/neon/loopfiltersimplehorizontaledge_neon.asm
+  vp8/common/arm/neon/loopfiltersimpleverticaledge_neon.asm
+  vp8/common/arm/neon/mbloopfilter_neon.asm
+  vp8/common/arm/neon/recon2b_neon.asm
+  vp8/common/arm/neon/recon4b_neon.asm
+  vp8/common/arm/neon/reconb_neon.asm
+  vp8/common/arm/neon/shortidct4x4llm_1_neon.asm
+  vp8/common/arm/neon/shortidct4x4llm_neon.asm
+  vp8/common/arm/neon/sixtappredict4x4_neon.asm
+  vp8/common/arm/neon/sixtappredict8x4_neon.asm
+  vp8/common/arm/neon/sixtappredict8x8_neon.asm
+  vp8/common/arm/neon/sixtappredict16x16_neon.asm
+  vp8/common/arm/neon/recon16x16mb_neon.asm
+  vp8/common/arm/neon/buildintrapredictorsmby_neon.asm
+  vp8/common/arm/neon/save_neon_reg.asm
+  vp8/decoder/arm/detokenize.asm
+  vp8/decoder/arm/armv6/dequant_dc_idct_v6.asm
+  vp8/decoder/arm/armv6/dequant_idct_v6.asm
+  vp8/decoder/arm/armv6/dequantize_v6.asm
+  vp8/decoder/arm/neon/idct_dequant_dc_full_2x_neon.asm
+  vp8/decoder/arm/neon/idct_dequant_dc_0_2x_neon.asm
+  vp8/decoder/arm/neon/dequant_idct_neon.asm
+  vp8/decoder/arm/neon/idct_dequant_full_2x_neon.asm
+  vp8/decoder/arm/neon/idct_dequant_0_2x_neon.asm
+  vp8/decoder/arm/neon/dequantizeb_neon.asm
   vp8/common/x86/idctllm_mmx.asm
   vp8/common/x86/idctllm_sse2.asm
   vp8/common/x86/iwalsh_mmx.asm
   vp8/common/x86/iwalsh_sse2.asm
   vp8/common/x86/loopfilter_mmx.asm
   vp8/common/x86/loopfilter_sse2.asm
   vp8/common/x86/postproc_mmx.asm
   vp8/common/x86/postproc_sse2.asm
   vp8/common/x86/recon_mmx.asm
   vp8/common/x86/recon_sse2.asm
   vp8/common/x86/subpixel_mmx.asm
   vp8/common/x86/subpixel_sse2.asm
   vp8/common/x86/subpixel_ssse3.asm
   vp8/decoder/x86/dequantize_mmx.asm
   vpx_ports/emms.asm
   vpx_ports/x86_abi_support.asm
+  build/make/ads2gas.pl
+  build/make/obj_int_extract.c
   LICENSE
   PATENTS
 )
 
 # configure files specific to x86-win32-vs8
 cp $1/objdir/x86-win32-vs8/vpx_config.c vpx_config_x86-win32-vs8.c
 cp $1/objdir/x86-win32-vs8/vpx_config.asm vpx_config_x86-win32-vs8.asm
 cp $1/objdir/x86-win32-vs8/vpx_config.h vpx_config_x86-win32-vs8.h
@@ -213,41 +281,39 @@ cp $1/objdir/x86-win32-vs8/vpx_config.h 
 cp $1/objdir/x86-win32-vs8/vpx_version.h vpx_version.h
 
 # Config files for x86-linux-gcc and Solaris x86
 cp $1/objdir/x86-linux-gcc/vpx_config.c vpx_config_x86-linux-gcc.c
 cp $1/objdir/x86-linux-gcc/vpx_config.asm vpx_config_x86-linux-gcc.asm
 cp $1/objdir/x86-linux-gcc/vpx_config.h vpx_config_x86-linux-gcc.h
 
 # Config files for x86_64-linux-gcc and Solaris x86_64
-cp $1/objdir/x86_64-linux-gcc/vpx_config.c vpx_config_x86-linux-gcc.c
-cp $1/objdir/x86_64-linux-gcc/vpx_config.asm vpx_config_x86-linux-gcc.asm
-cp $1/objdir/x86_64-linux-gcc/vpx_config.h vpx_config_x86-linux-gcc.h
+cp $1/objdir/x86_64-linux-gcc/vpx_config.c vpx_config_x86_64-linux-gcc.c
+cp $1/objdir/x86_64-linux-gcc/vpx_config.asm vpx_config_x86_64-linux-gcc.asm
+cp $1/objdir/x86_64-linux-gcc/vpx_config.h vpx_config_x86_64-linux-gcc.h
 
 # Copy config files for mac...
 cp $1/objdir/x86-darwin9-gcc/vpx_config.c vpx_config_x86-darwin9-gcc.c
 cp $1/objdir/x86-darwin9-gcc/vpx_config.asm vpx_config_x86-darwin9-gcc.asm
 cp $1/objdir/x86-darwin9-gcc/vpx_config.h vpx_config_x86-darwin9-gcc.h
 
 # Copy config files for Mac64
 cp $1/objdir/x86_64-darwin9-gcc/vpx_config.c vpx_config_x86_64-darwin9-gcc.c
 cp $1/objdir/x86_64-darwin9-gcc/vpx_config.asm vpx_config_x86_64-darwin9-gcc.asm
 cp $1/objdir/x86_64-darwin9-gcc/vpx_config.h vpx_config_x86_64-darwin9-gcc.h
 
+# Config files for arm-linux-gcc
+cp $1/objdir/armv7-linux-gcc/vpx_config.c vpx_config_arm-linux-gcc.c
+cp $1/objdir/armv7-linux-gcc/vpx_config.h vpx_config_arm-linux-gcc.h
+
 # Config files for generic-gnu
 cp $1/objdir/generic-gnu/vpx_config.c vpx_config_generic-gnu.c
 cp $1/objdir/generic-gnu/vpx_config.h vpx_config_generic-gnu.h
 
 # Copy common source files into mozilla tree.
 for f in ${commonFiles[@]}
 do
   mkdir -p -v `dirname $f`
   cp -v $1/$f $f
 done
 
-# Patch to reduce compiler warnings, so we can compile with -Werror in mozilla.
-patch -p3 < reduce-warnings-1.patch
-patch -p3 < subpixel-qword.patch
 # Patch to compile with Sun Studio on Solaris
 patch -p3 < solaris.patch
-# Patch to fix frame buffer reference counting and parition length overflow
-#  checks.
-patch -p3 < frame_buf_ref.patch
--- a/media/libvpx/vp8/common/alloccommon.c
+++ b/media/libvpx/vp8/common/alloccommon.c
@@ -1,10 +1,10 @@
 /*
- *  Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
@@ -51,17 +51,17 @@ void vp8_de_alloc_frame_buffers(VP8_COMM
 }
 
 int vp8_alloc_frame_buffers(VP8_COMMON *oci, int width, int height)
 {
     int i;
 
     vp8_de_alloc_frame_buffers(oci);
 
-    // our internal buffers are always multiples of 16
+    /* our internal buffers are always multiples of 16 */
     if ((width & 0xf) != 0)
         width += 16 - (width & 0xf);
 
     if ((height & 0xf) != 0)
         height += 16 - (height & 0xf);
 
 
     for (i = 0; i < NUM_YV12_BUFFERS; i++)
@@ -148,17 +148,17 @@ void vp8_setup_version(VP8_COMMON *cm)
         break;
     case 3:
         cm->no_lpf = 1;
         cm->simpler_lpf = 1;
         cm->use_bilinear_mc_filter = 1;
         cm->full_pixel = 1;
         break;
     default:
-        //4,5,6,7 are reserved for future use
+        /*4,5,6,7 are reserved for future use*/
         cm->no_lpf = 0;
         cm->simpler_lpf = 0;
         cm->use_bilinear_mc_filter = 0;
         cm->full_pixel = 0;
         break;
     }
 }
 void vp8_create_common(VP8_COMMON *oci)
@@ -172,20 +172,20 @@ void vp8_create_common(VP8_COMMON *oci)
     oci->no_lpf = 0;
     oci->simpler_lpf = 0;
     oci->use_bilinear_mc_filter = 0;
     oci->full_pixel = 0;
     oci->multi_token_partition = ONE_PARTITION;
     oci->clr_type = REG_YUV;
     oci->clamp_type = RECON_CLAMP_REQUIRED;
 
-    // Initialise reference frame sign bias structure to defaults
+    /* Initialise reference frame sign bias structure to defaults */
     vpx_memset(oci->ref_frame_sign_bias, 0, sizeof(oci->ref_frame_sign_bias));
 
-    // Default disable buffer to buffer copying
+    /* Default disable buffer to buffer copying */
     oci->copy_buffer_to_gf = 0;
     oci->copy_buffer_to_arf = 0;
 }
 
 void vp8_remove_common(VP8_COMMON *oci)
 {
     vp8_de_alloc_frame_buffers(oci);
 }
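The first alloccommon.c hunk rounds frame dimensions up to a multiple of 16 (one macroblock) before allocating buffers. A standalone sketch of the same idiom, with a hypothetical helper name:

    #include <assert.h>

    /* Round a dimension up to the next multiple of 16, mirroring the
     * (x & 0xf) test in vp8_alloc_frame_buffers above. */
    static int round_up_to_16(int x)
    {
        if ((x & 0xf) != 0)
            x += 16 - (x & 0xf);
        return x;
    }

    int main(void)
    {
        assert(round_up_to_16(176) == 176); /* already a multiple of 16 */
        assert(round_up_to_16(180) == 192); /* padded up to the next one */
        return 0;
    }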
--- a/media/libvpx/vp8/common/alloccommon.h
+++ b/media/libvpx/vp8/common/alloccommon.h
@@ -1,10 +1,10 @@
 /*
- *  Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/arm_systemdependent.c
@@ -0,0 +1,136 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_ports/config.h"
+#include "vpx_ports/arm.h"
+#include "g_common.h"
+#include "pragmas.h"
+#include "subpixel.h"
+#include "loopfilter.h"
+#include "recon.h"
+#include "idct.h"
+#include "onyxc_int.h"
+
+extern void (*vp8_build_intra_predictors_mby_ptr)(MACROBLOCKD *x);
+extern void vp8_build_intra_predictors_mby(MACROBLOCKD *x);
+extern void vp8_build_intra_predictors_mby_neon(MACROBLOCKD *x);
+
+extern void (*vp8_build_intra_predictors_mby_s_ptr)(MACROBLOCKD *x);
+extern void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x);
+extern void vp8_build_intra_predictors_mby_s_neon(MACROBLOCKD *x);
+
+void vp8_arch_arm_common_init(VP8_COMMON *ctx)
+{
+#if CONFIG_RUNTIME_CPU_DETECT
+    VP8_COMMON_RTCD *rtcd = &ctx->rtcd;
+    int flags = arm_cpu_caps();
+    int has_edsp = flags & HAS_EDSP;
+    int has_media = flags & HAS_MEDIA;
+    int has_neon = flags & HAS_NEON;
+    rtcd->flags = flags;
+
+    /* Override default functions with fastest ones for this CPU. */
+#if HAVE_ARMV6
+    if (has_media)
+    {
+        rtcd->subpix.sixtap16x16   = vp8_sixtap_predict16x16_armv6;
+        rtcd->subpix.sixtap8x8     = vp8_sixtap_predict8x8_armv6;
+        rtcd->subpix.sixtap8x4     = vp8_sixtap_predict8x4_armv6;
+        rtcd->subpix.sixtap4x4     = vp8_sixtap_predict_armv6;
+        rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_armv6;
+        rtcd->subpix.bilinear8x8   = vp8_bilinear_predict8x8_armv6;
+        rtcd->subpix.bilinear8x4   = vp8_bilinear_predict8x4_armv6;
+        rtcd->subpix.bilinear4x4   = vp8_bilinear_predict4x4_armv6;
+
+        rtcd->idct.idct1        = vp8_short_idct4x4llm_1_v6;
+        rtcd->idct.idct16       = vp8_short_idct4x4llm_v6_dual;
+        rtcd->idct.iwalsh1      = vp8_short_inv_walsh4x4_1_v6;
+        rtcd->idct.iwalsh16     = vp8_short_inv_walsh4x4_v6;
+
+        rtcd->loopfilter.normal_mb_v = vp8_loop_filter_mbv_armv6;
+        rtcd->loopfilter.normal_b_v  = vp8_loop_filter_bv_armv6;
+        rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_armv6;
+        rtcd->loopfilter.normal_b_h  = vp8_loop_filter_bh_armv6;
+        rtcd->loopfilter.simple_mb_v = vp8_loop_filter_mbvs_armv6;
+        rtcd->loopfilter.simple_b_v  = vp8_loop_filter_bvs_armv6;
+        rtcd->loopfilter.simple_mb_h = vp8_loop_filter_mbhs_armv6;
+        rtcd->loopfilter.simple_b_h  = vp8_loop_filter_bhs_armv6;
+
+        rtcd->recon.copy16x16   = vp8_copy_mem16x16_v6;
+        rtcd->recon.copy8x8     = vp8_copy_mem8x8_v6;
+        rtcd->recon.copy8x4     = vp8_copy_mem8x4_v6;
+        rtcd->recon.recon       = vp8_recon_b_armv6;
+        rtcd->recon.recon2      = vp8_recon2b_armv6;
+        rtcd->recon.recon4      = vp8_recon4b_armv6;
+    }
+#endif
+
+#if HAVE_ARMV7
+    if (has_neon)
+    {
+        rtcd->subpix.sixtap16x16   = vp8_sixtap_predict16x16_neon;
+        rtcd->subpix.sixtap8x8     = vp8_sixtap_predict8x8_neon;
+        rtcd->subpix.sixtap8x4     = vp8_sixtap_predict8x4_neon;
+        rtcd->subpix.sixtap4x4     = vp8_sixtap_predict_neon;
+        rtcd->subpix.bilinear16x16 = vp8_bilinear_predict16x16_neon;
+        rtcd->subpix.bilinear8x8   = vp8_bilinear_predict8x8_neon;
+        rtcd->subpix.bilinear8x4   = vp8_bilinear_predict8x4_neon;
+        rtcd->subpix.bilinear4x4   = vp8_bilinear_predict4x4_neon;
+
+        rtcd->idct.idct1        = vp8_short_idct4x4llm_1_neon;
+        rtcd->idct.idct16       = vp8_short_idct4x4llm_neon;
+        rtcd->idct.iwalsh1      = vp8_short_inv_walsh4x4_1_neon;
+        rtcd->idct.iwalsh16     = vp8_short_inv_walsh4x4_neon;
+
+        rtcd->loopfilter.normal_mb_v = vp8_loop_filter_mbv_neon;
+        rtcd->loopfilter.normal_b_v  = vp8_loop_filter_bv_neon;
+        rtcd->loopfilter.normal_mb_h = vp8_loop_filter_mbh_neon;
+        rtcd->loopfilter.normal_b_h  = vp8_loop_filter_bh_neon;
+        rtcd->loopfilter.simple_mb_v = vp8_loop_filter_mbvs_neon;
+        rtcd->loopfilter.simple_b_v  = vp8_loop_filter_bvs_neon;
+        rtcd->loopfilter.simple_mb_h = vp8_loop_filter_mbhs_neon;
+        rtcd->loopfilter.simple_b_h  = vp8_loop_filter_bhs_neon;
+
+        rtcd->recon.copy16x16   = vp8_copy_mem16x16_neon;
+        rtcd->recon.copy8x8     = vp8_copy_mem8x8_neon;
+        rtcd->recon.copy8x4     = vp8_copy_mem8x4_neon;
+        rtcd->recon.recon       = vp8_recon_b_neon;
+        rtcd->recon.recon2      = vp8_recon2b_neon;
+        rtcd->recon.recon4      = vp8_recon4b_neon;
+        rtcd->recon.recon_mb    = vp8_recon_mb_neon;
+
+    }
+#endif
+
+#endif
+
+#if HAVE_ARMV6
+#if CONFIG_RUNTIME_CPU_DETECT
+    if (has_media)
+#endif
+    {
+        vp8_build_intra_predictors_mby_ptr = vp8_build_intra_predictors_mby;
+        vp8_build_intra_predictors_mby_s_ptr = vp8_build_intra_predictors_mby_s;
+    }
+#endif
+
+#if HAVE_ARMV7
+#if CONFIG_RUNTIME_CPU_DETECT
+    if (has_neon)
+#endif
+    {
+        vp8_build_intra_predictors_mby_ptr =
+         vp8_build_intra_predictors_mby_neon;
+        vp8_build_intra_predictors_mby_s_ptr =
+         vp8_build_intra_predictors_mby_s_neon;
+    }
+#endif
+}
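arm_systemdependent.c follows libvpx's runtime-CPU-detect (RTCD) pattern: probe the CPU once, then overwrite entries in a function-pointer table with the fastest implementation available. A reduced C sketch of that pattern; every name and the flag value are hypothetical:

    typedef void (*copy_fn)(const unsigned char *src, int src_stride,
                            unsigned char *dst, int dst_stride);

    struct rtcd_table {
        copy_fn copy16x16;
    };

    /* Plain C fallback. */
    static void copy16x16_c(const unsigned char *src, int src_stride,
                            unsigned char *dst, int dst_stride)
    {
        int r, c;
        for (r = 0; r < 16; r++, src += src_stride, dst += dst_stride)
            for (c = 0; c < 16; c++)
                dst[c] = src[c];
    }

    /* Stand-in for an asm routine such as vp8_copy_mem16x16_neon. */
    static void copy16x16_fast(const unsigned char *src, int src_stride,
                               unsigned char *dst, int dst_stride)
    {
        copy16x16_c(src, src_stride, dst, dst_stride);
    }

    #define HAS_FAST_COPY 0x04  /* hypothetical capability bit */

    static void init_rtcd(struct rtcd_table *rtcd, int cpu_flags)
    {
        rtcd->copy16x16 = copy16x16_c;         /* safe portable default */
        if (cpu_flags & HAS_FAST_COPY)
            rtcd->copy16x16 = copy16x16_fast;  /* override when supported */
    }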
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/armv6/bilinearfilter_v6.asm
@@ -0,0 +1,238 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_filter_block2d_bil_first_pass_armv6|
+    EXPORT  |vp8_filter_block2d_bil_second_pass_armv6|
+
+    AREA    |.text|, CODE, READONLY  ; name this block of code
+
+;-------------------------------------
+; r0    unsigned char *src_ptr,
+; r1    unsigned short *output_ptr,
+; r2    unsigned int src_pixels_per_line,
+; r3    unsigned int output_height,
+; stack    unsigned int output_width,
+; stack    const short *vp8_filter
+;-------------------------------------
+; The output is stored transposed in the output array to make second-pass filtering easy.
+|vp8_filter_block2d_bil_first_pass_armv6| PROC
+    stmdb   sp!, {r4 - r11, lr}
+
+    ldr     r11, [sp, #40]                  ; vp8_filter address
+    ldr     r4, [sp, #36]                   ; output width
+
+    mov     r12, r3                         ; outer-loop counter
+    sub     r2, r2, r4                      ; src increment for height loop
+
+    ;;IF ARCHITECTURE=6
+    pld     [r0]
+    ;;ENDIF
+
+    ldr     r5, [r11]                       ; load up filter coefficients
+
+    mov     r3, r3, lsl #1                  ; output_height*2
+    add     r3, r3, #2                      ; plus 2 to make the output buffer 4-byte aligned, since height is actually (height+1)
+
+    mov     r11, r1                         ; save output_ptr for each row
+
+    cmp     r5, #128                        ; if filter coef = 128, then skip the filter
+    beq     bil_null_1st_filter
+
+|bil_height_loop_1st_v6|
+    ldrb    r6, [r0]                        ; load source data
+    ldrb    r7, [r0, #1]
+    ldrb    r8, [r0, #2]
+    mov     lr, r4, lsr #2                  ; 4-in-parallel loop counter
+
+|bil_width_loop_1st_v6|
+    ldrb    r9, [r0, #3]
+    ldrb    r10, [r0, #4]
+
+    pkhbt   r6, r6, r7, lsl #16             ; src[1] | src[0]
+    pkhbt   r7, r7, r8, lsl #16             ; src[2] | src[1]
+
+    smuad   r6, r6, r5                      ; apply the filter
+    pkhbt   r8, r8, r9, lsl #16             ; src[3] | src[2]
+    smuad   r7, r7, r5
+    pkhbt   r9, r9, r10, lsl #16            ; src[4] | src[3]
+
+    smuad   r8, r8, r5
+    smuad   r9, r9, r5
+
+    add     r0, r0, #4
+    subs    lr, lr, #1
+
+    add     r6, r6, #0x40                   ; round_shift_and_clamp
+    add     r7, r7, #0x40
+    usat    r6, #16, r6, asr #7
+    usat    r7, #16, r7, asr #7
+
+    strh    r6, [r1], r3                    ; result is transposed and stored
+
+    add     r8, r8, #0x40                   ; round_shift_and_clamp
+    strh    r7, [r1], r3
+    add     r9, r9, #0x40
+    usat    r8, #16, r8, asr #7
+    usat    r9, #16, r9, asr #7
+
+    strh    r8, [r1], r3                    ; result is transposed and stored
+
+    ldrneb  r6, [r0]                        ; load source data
+    strh    r9, [r1], r3
+
+    ldrneb  r7, [r0, #1]
+    ldrneb  r8, [r0, #2]
+
+    bne     bil_width_loop_1st_v6
+
+    add     r0, r0, r2                      ; move to next input row
+    subs    r12, r12, #1
+
+    ;;IF ARCHITECTURE=6
+    pld     [r0]
+    ;;ENDIF
+
+    add     r11, r11, #2                    ; move over to next column
+    mov     r1, r11
+
+    bne     bil_height_loop_1st_v6
+
+    ldmia   sp!, {r4 - r11, pc}
+
+|bil_null_1st_filter|
+|bil_height_loop_null_1st|
+    mov     lr, r4, lsr #2                  ; loop counter
+
+|bil_width_loop_null_1st|
+    ldrb    r6, [r0]                        ; load data
+    ldrb    r7, [r0, #1]
+    ldrb    r8, [r0, #2]
+    ldrb    r9, [r0, #3]
+
+    strh    r6, [r1], r3                    ; store it to the intermediate buffer
+    add     r0, r0, #4
+    strh    r7, [r1], r3
+    subs    lr, lr, #1
+    strh    r8, [r1], r3
+    strh    r9, [r1], r3
+
+    bne     bil_width_loop_null_1st
+
+    subs    r12, r12, #1
+    add     r0, r0, r2                      ; move to next input line
+    add     r11, r11, #2                    ; move over to next column
+    mov     r1, r11
+
+    bne     bil_height_loop_null_1st
+
+    ldmia   sp!, {r4 - r11, pc}
+
+    ENDP  ; |vp8_filter_block2d_bil_first_pass_armv6|
+
+
+;---------------------------------
+; r0    unsigned short *src_ptr,
+; r1    unsigned char *output_ptr,
+; r2    int output_pitch,
+; r3    unsigned int  output_height,
+; stack unsigned int  output_width,
+; stack const short *vp8_filter
+;---------------------------------
+|vp8_filter_block2d_bil_second_pass_armv6| PROC
+    stmdb   sp!, {r4 - r11, lr}
+
+    ldr     r11, [sp, #40]                  ; vp8_filter address
+    ldr     r4, [sp, #36]                   ; output width
+
+    ldr     r5, [r11]                       ; load up filter coefficients
+    mov     r12, r4                         ; outer-loop counter = width, since we work on a transposed data matrix
+    mov     r11, r1
+
+    cmp     r5, #128                        ; if filter coef = 128, then skip the filter
+    beq     bil_null_2nd_filter
+
+|bil_height_loop_2nd|
+    ldr     r6, [r0]                        ; load the data
+    ldr     r8, [r0, #4]
+    ldrh    r10, [r0, #8]
+    mov     lr, r3, lsr #2                  ; loop counter
+
+|bil_width_loop_2nd|
+    pkhtb   r7, r6, r8                      ; src[1] | src[2]
+    pkhtb   r9, r8, r10                     ; src[3] | src[4]
+
+    smuad   r6, r6, r5                      ; apply filter
+    smuad   r8, r8, r5                      ; apply filter
+
+    subs    lr, lr, #1
+
+    smuadx  r7, r7, r5                      ; apply filter
+    smuadx  r9, r9, r5                      ; apply filter
+
+    add     r0, r0, #8
+
+    add     r6, r6, #0x40                   ; round_shift_and_clamp
+    add     r7, r7, #0x40
+    usat    r6, #8, r6, asr #7
+    usat    r7, #8, r7, asr #7
+    strb    r6, [r1], r2                    ; the result is transposed back and stored
+
+    add     r8, r8, #0x40                   ; round_shift_and_clamp
+    strb    r7, [r1], r2
+    add     r9, r9, #0x40
+    usat    r8, #8, r8, asr #7
+    usat    r9, #8, r9, asr #7
+    strb    r8, [r1], r2                    ; the result is transposed back and stored
+
+    ldrne   r6, [r0]                        ; load data
+    strb    r9, [r1], r2
+    ldrne   r8, [r0, #4]
+    ldrneh  r10, [r0, #8]
+
+    bne     bil_width_loop_2nd
+
+    subs    r12, r12, #1
+    add     r0, r0, #4                      ; update src for next row
+    add     r11, r11, #1
+    mov     r1, r11
+
+    bne     bil_height_loop_2nd
+    ldmia   sp!, {r4 - r11, pc}
+
+|bil_null_2nd_filter|
+|bil_height_loop_null_2nd|
+    mov     lr, r3, lsr #2
+
+|bil_width_loop_null_2nd|
+    ldr     r6, [r0], #4                    ; load data
+    subs    lr, lr, #1
+    ldr     r8, [r0], #4
+
+    strb    r6, [r1], r2                    ; store data
+    mov     r7, r6, lsr #16
+    strb    r7, [r1], r2
+    mov     r9, r8, lsr #16
+    strb    r8, [r1], r2
+    strb    r9, [r1], r2
+
+    bne     bil_width_loop_null_2nd
+
+    subs    r12, r12, #1
+    add     r0, r0, #4
+    add     r11, r11, #1
+    mov     r1, r11
+
+    bne     bil_height_loop_null_2nd
+
+    ldmia   sp!, {r4 - r11, pc}
+    ENDP  ; |vp8_filter_block2d_bil_second_pass_armv6|
+
+    END
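The two-pass bilinear filter above stores its first-pass output transposed so the second pass can walk it the same way, and skips filtering entirely when the leading coefficient is 128 (the identity filter). A scalar C sketch of a single tap under those conventions (hypothetical name; the real first pass keeps 16-bit intermediates):

    /* One bilinear tap: f[0] + f[1] == 128, so the result is rounded
     * by 64 and shifted right by 7, then clamped. */
    static unsigned char bilinear_tap(const unsigned char *src, const short *f)
    {
        int sum;

        if (f[0] == 128)        /* null filter: plain copy, as in the asm */
            return src[0];

        sum = src[0] * f[0] + src[1] * f[1];
        sum = (sum + 64) >> 7;  /* round_shift_and_clamp */
        if (sum < 0)   sum = 0;
        if (sum > 255) sum = 255;
        return (unsigned char)sum;
    }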
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/armv6/copymem16x16_v6.asm
@@ -0,0 +1,182 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_copy_mem16x16_v6|
+    ; ARM
+    ; REQUIRE8
+    ; PRESERVE8
+
+    AREA    Block, CODE, READONLY ; name this block of code
+;void vp8_copy_mem16x16_v6( unsigned char *src, int src_stride, unsigned char *dst, int dst_stride)
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+|vp8_copy_mem16x16_v6| PROC
+    stmdb       sp!, {r4 - r7}
+    ;push   {r4-r7}
+
+    ;preload
+    pld     [r0]
+    pld     [r0, r1]
+    pld     [r0, r1, lsl #1]
+
+    ands    r4, r0, #15
+    beq     copy_mem16x16_fast
+
+    ands    r4, r0, #7
+    beq     copy_mem16x16_8
+
+    ands    r4, r0, #3
+    beq     copy_mem16x16_4
+
+    ;copy one byte each time
+    ldrb    r4, [r0]
+    ldrb    r5, [r0, #1]
+    ldrb    r6, [r0, #2]
+    ldrb    r7, [r0, #3]
+
+    mov     r12, #16
+
+copy_mem16x16_1_loop
+    strb    r4, [r2]
+    strb    r5, [r2, #1]
+    strb    r6, [r2, #2]
+    strb    r7, [r2, #3]
+
+    ldrb    r4, [r0, #4]
+    ldrb    r5, [r0, #5]
+    ldrb    r6, [r0, #6]
+    ldrb    r7, [r0, #7]
+
+    subs    r12, r12, #1
+
+    strb    r4, [r2, #4]
+    strb    r5, [r2, #5]
+    strb    r6, [r2, #6]
+    strb    r7, [r2, #7]
+
+    ldrb    r4, [r0, #8]
+    ldrb    r5, [r0, #9]
+    ldrb    r6, [r0, #10]
+    ldrb    r7, [r0, #11]
+
+    strb    r4, [r2, #8]
+    strb    r5, [r2, #9]
+    strb    r6, [r2, #10]
+    strb    r7, [r2, #11]
+
+    ldrb    r4, [r0, #12]
+    ldrb    r5, [r0, #13]
+    ldrb    r6, [r0, #14]
+    ldrb    r7, [r0, #15]
+
+    add     r0, r0, r1
+
+    strb    r4, [r2, #12]
+    strb    r5, [r2, #13]
+    strb    r6, [r2, #14]
+    strb    r7, [r2, #15]
+
+    add     r2, r2, r3
+
+    ldrneb  r4, [r0]
+    ldrneb  r5, [r0, #1]
+    ldrneb  r6, [r0, #2]
+    ldrneb  r7, [r0, #3]
+
+    bne     copy_mem16x16_1_loop
+
+    ldmia       sp!, {r4 - r7}
+    ;pop        {r4-r7}
+    mov     pc, lr
+
+;copy 4 bytes each time
+copy_mem16x16_4
+    ldr     r4, [r0]
+    ldr     r5, [r0, #4]
+    ldr     r6, [r0, #8]
+    ldr     r7, [r0, #12]
+
+    mov     r12, #16
+
+copy_mem16x16_4_loop
+    subs    r12, r12, #1
+    add     r0, r0, r1
+
+    str     r4, [r2]
+    str     r5, [r2, #4]
+    str     r6, [r2, #8]
+    str     r7, [r2, #12]
+
+    add     r2, r2, r3
+
+    ldrne   r4, [r0]
+    ldrne   r5, [r0, #4]
+    ldrne   r6, [r0, #8]
+    ldrne   r7, [r0, #12]
+
+    bne     copy_mem16x16_4_loop
+
+    ldmia       sp!, {r4 - r7}
+    ;pop        {r4-r7}
+    mov     pc, lr
+
+;copy 8 bytes each time
+copy_mem16x16_8
+    sub     r1, r1, #16
+    sub     r3, r3, #16
+
+    mov     r12, #16
+
+copy_mem16x16_8_loop
+    ldmia   r0!, {r4-r5}
+    ;ldm        r0, {r4-r5}
+    ldmia   r0!, {r6-r7}
+
+    add     r0, r0, r1
+
+    stmia   r2!, {r4-r5}
+    subs    r12, r12, #1
+    ;stm        r2, {r4-r5}
+    stmia   r2!, {r6-r7}
+
+    add     r2, r2, r3
+
+    bne     copy_mem16x16_8_loop
+
+    ldmia       sp!, {r4 - r7}
+    ;pop        {r4-r7}
+    mov     pc, lr
+
+;copy 16 bytes each time
+copy_mem16x16_fast
+    ;sub        r1, r1, #16
+    ;sub        r3, r3, #16
+
+    mov     r12, #16
+
+copy_mem16x16_fast_loop
+    ldmia   r0, {r4-r7}
+    ;ldm        r0, {r4-r7}
+    add     r0, r0, r1
+
+    subs    r12, r12, #1
+    stmia   r2, {r4-r7}
+    ;stm        r2, {r4-r7}
+    add     r2, r2, r3
+
+    bne     copy_mem16x16_fast_loop
+
+    ldmia       sp!, {r4 - r7}
+    ;pop        {r4-r7}
+    mov     pc, lr
+
+    ENDP  ; |vp8_copy_mem16x16_v6|
+
+    END
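copymem16x16_v6.asm dispatches on source alignment, falling from 16-byte ldm/stm copies down to 8-, 4- and 1-byte accesses. A scalar C sketch of that dispatch (hypothetical name; memcpy stands in for the width-specific loads and stores):

    #include <stdint.h>
    #include <string.h>

    static void copy_mem16x16(const unsigned char *src, int src_stride,
                              unsigned char *dst, int dst_stride)
    {
        int chunk, r, c;

        /* The asm tests src & 15, src & 7 and src & 3 in turn. */
        if (((uintptr_t)src & 15) == 0)      chunk = 16;
        else if (((uintptr_t)src & 7) == 0)  chunk = 8;
        else if (((uintptr_t)src & 3) == 0)  chunk = 4;
        else                                 chunk = 1;

        for (r = 0; r < 16; r++) {
            for (c = 0; c < 16; c += chunk)
                memcpy(dst + c, src + c, (size_t)chunk);
            src += src_stride;
            dst += dst_stride;
        }
    }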
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/armv6/copymem8x4_v6.asm
@@ -0,0 +1,128 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_copy_mem8x4_v6|
+    ; ARM
+    ; REQUIRE8
+    ; PRESERVE8
+
+    AREA    Block, CODE, READONLY ; name this block of code
+;void vp8_copy_mem8x4_v6( unsigned char *src, int src_stride, unsigned char *dst, int dst_stride)
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+|vp8_copy_mem8x4_v6| PROC
+    ;push   {r4-r5}
+    stmdb  sp!, {r4-r5}
+
+    ;preload
+    pld     [r0]
+    pld     [r0, r1]
+    pld     [r0, r1, lsl #1]
+
+    ands    r4, r0, #7
+    beq     copy_mem8x4_fast
+
+    ands    r4, r0, #3
+    beq     copy_mem8x4_4
+
+    ;copy 1 byte each time
+    ldrb    r4, [r0]
+    ldrb    r5, [r0, #1]
+
+    mov     r12, #4
+
+copy_mem8x4_1_loop
+    strb    r4, [r2]
+    strb    r5, [r2, #1]
+
+    ldrb    r4, [r0, #2]
+    ldrb    r5, [r0, #3]
+
+    subs    r12, r12, #1
+
+    strb    r4, [r2, #2]
+    strb    r5, [r2, #3]
+
+    ldrb    r4, [r0, #4]
+    ldrb    r5, [r0, #5]
+
+    strb    r4, [r2, #4]
+    strb    r5, [r2, #5]
+
+    ldrb    r4, [r0, #6]
+    ldrb    r5, [r0, #7]
+
+    add     r0, r0, r1
+
+    strb    r4, [r2, #6]
+    strb    r5, [r2, #7]
+
+    add     r2, r2, r3
+
+    ldrneb  r4, [r0]
+    ldrneb  r5, [r0, #1]
+
+    bne     copy_mem8x4_1_loop
+
+    ldmia       sp!, {r4 - r5}
+    ;pop        {r4-r5}
+    mov     pc, lr
+
+;copy 4 bytes each time
+copy_mem8x4_4
+    ldr     r4, [r0]
+    ldr     r5, [r0, #4]
+
+    mov     r12, #4
+
+copy_mem8x4_4_loop
+    subs    r12, r12, #1
+    add     r0, r0, r1
+
+    str     r4, [r2]
+    str     r5, [r2, #4]
+
+    add     r2, r2, r3
+
+    ldrne   r4, [r0]
+    ldrne   r5, [r0, #4]
+
+    bne     copy_mem8x4_4_loop
+
+    ldmia  sp!, {r4-r5}
+    ;pop        {r4-r5}
+    mov     pc, lr
+
+;copy 8 bytes each time
+copy_mem8x4_fast
+    ;sub        r1, r1, #8
+    ;sub        r3, r3, #8
+
+    mov     r12, #4
+
+copy_mem8x4_fast_loop
+    ldmia   r0, {r4-r5}
+    ;ldm        r0, {r4-r5}
+    add     r0, r0, r1
+
+    subs    r12, r12, #1
+    stmia   r2, {r4-r5}
+    ;stm        r2, {r4-r5}
+    add     r2, r2, r3
+
+    bne     copy_mem8x4_fast_loop
+
+    ldmia  sp!, {r4-r5}
+    ;pop        {r4-r5}
+    mov     pc, lr
+
+    ENDP  ; |vp8_copy_mem8x4_v6|
+
+    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/armv6/copymem8x8_v6.asm
@@ -0,0 +1,128 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_copy_mem8x8_v6|
+    ; ARM
+    ; REQUIRE8
+    ; PRESERVE8
+
+    AREA    Block, CODE, READONLY ; name this block of code
+;void vp8_copy_mem8x8_v6( unsigned char *src, int src_stride, unsigned char *dst, int dst_stride)
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+|vp8_copy_mem8x8_v6| PROC
+    ;push   {r4-r5}
+    stmdb  sp!, {r4-r5}
+
+    ;preload
+    pld     [r0]
+    pld     [r0, r1]
+    pld     [r0, r1, lsl #1]
+
+    ands    r4, r0, #7
+    beq     copy_mem8x8_fast
+
+    ands    r4, r0, #3
+    beq     copy_mem8x8_4
+
+    ;copy 1 byte each time
+    ldrb    r4, [r0]
+    ldrb    r5, [r0, #1]
+
+    mov     r12, #8
+
+copy_mem8x8_1_loop
+    strb    r4, [r2]
+    strb    r5, [r2, #1]
+
+    ldrb    r4, [r0, #2]
+    ldrb    r5, [r0, #3]
+
+    subs    r12, r12, #1
+
+    strb    r4, [r2, #2]
+    strb    r5, [r2, #3]
+
+    ldrb    r4, [r0, #4]
+    ldrb    r5, [r0, #5]
+
+    strb    r4, [r2, #4]
+    strb    r5, [r2, #5]
+
+    ldrb    r4, [r0, #6]
+    ldrb    r5, [r0, #7]
+
+    add     r0, r0, r1
+
+    strb    r4, [r2, #6]
+    strb    r5, [r2, #7]
+
+    add     r2, r2, r3
+
+    ldrneb  r4, [r0]
+    ldrneb  r5, [r0, #1]
+
+    bne     copy_mem8x8_1_loop
+
+    ldmia       sp!, {r4 - r5}
+    ;pop        {r4-r5}
+    mov     pc, lr
+
+;copy 4 bytes each time
+copy_mem8x8_4
+    ldr     r4, [r0]
+    ldr     r5, [r0, #4]
+
+    mov     r12, #8
+
+copy_mem8x8_4_loop
+    subs    r12, r12, #1
+    add     r0, r0, r1
+
+    str     r4, [r2]
+    str     r5, [r2, #4]
+
+    add     r2, r2, r3
+
+    ldrne   r4, [r0]
+    ldrne   r5, [r0, #4]
+
+    bne     copy_mem8x8_4_loop
+
+    ldmia       sp!, {r4 - r5}
+    ;pop        {r4-r5}
+    mov     pc, lr
+
+;copy 8 bytes each time
+copy_mem8x8_fast
+    ;sub        r1, r1, #8
+    ;sub        r3, r3, #8
+
+    mov     r12, #8
+
+copy_mem8x8_fast_loop
+    ldmia   r0, {r4-r5}
+    ;ldm        r0, {r4-r5}
+    add     r0, r0, r1
+
+    subs    r12, r12, #1
+    stmia   r2, {r4-r5}
+    ;stm        r2, {r4-r5}
+    add     r2, r2, r3
+
+    bne     copy_mem8x8_fast_loop
+
+    ldmia  sp!, {r4-r5}
+    ;pop        {r4-r5}
+    mov     pc, lr
+
+    ENDP  ; |vp8_copy_mem8x8_v6|
+
+    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/armv6/dc_only_idct_add_v6.asm
@@ -0,0 +1,67 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license and patent
+;  grant that can be found in the LICENSE file in the root of the source
+;  tree. All contributing project authors may be found in the AUTHORS
+;  file in the root of the source tree.
+;
+
+    EXPORT  |vp8_dc_only_idct_add_v6|
+
+    AREA    |.text|, CODE, READONLY
+
+;void vp8_dc_only_idct_add_v6(short input_dc, unsigned char *pred_ptr,
+;                             unsigned char *dst_ptr, int pitch, int stride)
+; r0  input_dc
+; r1  pred_ptr
+; r2  dest_ptr
+; r3  pitch
+; sp  stride
+
+|vp8_dc_only_idct_add_v6| PROC
+    stmdb       sp!, {r4 - r7, lr}
+
+    add         r0, r0, #4                ; input_dc += 4
+    ldr         r12, c0x0000FFFF
+    ldr         r4, [r1], r3
+    ldr         r6, [r1], r3
+    and         r0, r12, r0, asr #3       ; a1 = ((input_dc + 4) >> 3) & 0xFFFF
+    ldr         lr, [sp, #20]
+    orr         r0, r0, r0, lsl #16       ; a1 | a1
+
+    uxtab16     r5, r0, r4                ; a1+2 | a1+0
+    uxtab16     r4, r0, r4, ror #8        ; a1+3 | a1+1
+    uxtab16     r7, r0, r6
+    uxtab16     r6, r0, r6, ror #8
+    usat16      r5, #8, r5
+    usat16      r4, #8, r4
+    usat16      r7, #8, r7
+    usat16      r6, #8, r6
+    orr         r5, r5, r4, lsl #8
+    orr         r7, r7, r6, lsl #8
+    ldr         r4, [r1], r3
+    ldr         r6, [r1]
+    str         r5, [r2], lr
+    str         r7, [r2], lr
+
+    uxtab16     r5, r0, r4
+    uxtab16     r4, r0, r4, ror #8
+    uxtab16     r7, r0, r6
+    uxtab16     r6, r0, r6, ror #8
+    usat16      r5, #8, r5
+    usat16      r4, #8, r4
+    usat16      r7, #8, r7
+    usat16      r6, #8, r6
+    orr         r5, r5, r4, lsl #8
+    orr         r7, r7, r6, lsl #8
+    str         r5, [r2], lr
+    str         r7, [r2]
+
+    ldmia       sp!, {r4 - r7, pc}
+
+    ENDP  ; |vp8_dc_only_idct_add_v6|
+
+; Constant Pool
+c0x0000FFFF DCD 0x0000FFFF
+    END
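The DC-only IDCT-add computes a single term a1 = (input_dc + 4) >> 3 and adds it, saturated to 8 bits, to every predicted pixel of the 4x4 block; the uxtab16/usat16 pairs above do four pixels at a time. A scalar C sketch (hypothetical name):

    static void dc_only_idct_add(short input_dc, const unsigned char *pred,
                                 unsigned char *dst, int pitch, int stride)
    {
        int a1 = (input_dc + 4) >> 3;
        int r, c;

        for (r = 0; r < 4; r++) {
            for (c = 0; c < 4; c++) {
                int v = pred[c] + a1;  /* add DC term, then saturate */
                if (v < 0)   v = 0;
                if (v > 255) v = 255;
                dst[c] = (unsigned char)v;
            }
            pred += pitch;
            dst  += stride;
        }
    }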
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/armv6/filter_v6.asm
@@ -0,0 +1,443 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_filter_block2d_first_pass_armv6|
+    EXPORT  |vp8_filter_block2d_second_pass_armv6|
+    EXPORT  |vp8_filter4_block2d_second_pass_armv6|
+    EXPORT  |vp8_filter_block2d_first_pass_only_armv6|
+    EXPORT  |vp8_filter_block2d_second_pass_only_armv6|
+
+    AREA    |.text|, CODE, READONLY  ; name this block of code
+;-------------------------------------
+; r0    unsigned char *src_ptr
+; r1    short         *output_ptr
+; r2    unsigned int src_pixels_per_line
+; r3    unsigned int output_width
+; stack unsigned int output_height
+; stack const short *vp8_filter
+;-------------------------------------
+; Apply the 6-tap FIR filter (vp8_filter) to the input and put the result in the
+; output array. The output is a 2-byte value and the input is a 1-byte value.
+|vp8_filter_block2d_first_pass_armv6| PROC
+    stmdb   sp!, {r4 - r11, lr}
+
+    ldr     r11, [sp, #40]                  ; vp8_filter address
+    ldr     r7, [sp, #36]                   ; output height
+
+    sub     r2, r2, r3                      ; inside loop increments input array,
+                                            ; so the height loop only needs to add
+                                            ; r2 - width to the input pointer
+
+    mov     r3, r3, lsl #1                  ; multiply width by 2 because using shorts
+    add     r12, r3, #16                    ; square off the output
+    sub     sp, sp, #4
+
+    ;;IF ARCHITECTURE=6
+    ;pld        [r0, #-2]
+    ;;pld       [r0, #30]
+    ;;ENDIF
+
+    ldr     r4, [r11]                       ; load up packed filter coefficients
+    ldr     r5, [r11, #4]
+    ldr     r6, [r11, #8]
+
+    str     r1, [sp]                        ; push destination to stack
+    mov     r7, r7, lsl #16                 ; height is top part of counter
+
+; six tap filter
+|height_loop_1st_6|
+    ldrb    r8, [r0, #-2]                   ; load source data
+    ldrb    r9, [r0, #-1]
+    ldrb    r10, [r0], #2
+    orr     r7, r7, r3, lsr #2              ; construct loop counter
+
+|width_loop_1st_6|
+    ldrb    r11, [r0, #-1]
+
+    pkhbt   lr, r8, r9, lsl #16             ; r9 | r8
+    pkhbt   r8, r9, r10, lsl #16            ; r10 | r9
+
+    ldrb    r9, [r0]
+
+    smuad   lr, lr, r4                      ; apply the filter
+    pkhbt   r10, r10, r11, lsl #16          ; r11 | r10
+    smuad   r8, r8, r4
+    pkhbt   r11, r11, r9, lsl #16           ; r9 | r11
+
+    smlad   lr, r10, r5, lr
+    ldrb    r10, [r0, #1]
+    smlad   r8, r11, r5, r8
+    ldrb    r11, [r0, #2]
+
+    sub     r7, r7, #1
+
+    pkhbt   r9, r9, r10, lsl #16            ; r10 | r9
+    pkhbt   r10, r10, r11, lsl #16          ; r11 | r10
+
+    smlad   lr, r9, r6, lr
+    smlad   r11, r10, r6, r8
+
+    ands    r10, r7, #0xff                  ; test loop counter
+
+    add     lr, lr, #0x40                   ; round_shift_and_clamp
+    ldrneb  r8, [r0, #-2]                   ; load data for next loop
+    usat    lr, #8, lr, asr #7
+    add     r11, r11, #0x40
+    ldrneb  r9, [r0, #-1]
+    usat    r11, #8, r11, asr #7
+
+    strh    lr, [r1], r12                   ; result is transposed and stored, which
+                                            ; will make second pass filtering easier.
+    ldrneb  r10, [r0], #2
+    strh    r11, [r1], r12
+
+    bne     width_loop_1st_6
+
+    ;;add       r9, r2, #30                 ; attempt to load 2 adjacent cache lines
+    ;;IF ARCHITECTURE=6
+    ;pld        [r0, r2]
+    ;;pld       [r0, r9]
+    ;;ENDIF
+
+    ldr     r1, [sp]                        ; load and update dst address
+    subs    r7, r7, #0x10000
+    add     r0, r0, r2                      ; move to next input line
+    add     r1, r1, #2                      ; move over to next column
+    str     r1, [sp]
+
+    bne     height_loop_1st_6
+
+    add     sp, sp, #4
+    ldmia   sp!, {r4 - r11, pc}
+
+    ENDP
+
+;---------------------------------
+; r0    short         *src_ptr,
+; r1    unsigned char *output_ptr,
+; r2    unsigned int output_pitch,
+; r3    unsigned int cnt,
+; stack const short *vp8_filter
+;---------------------------------
+|vp8_filter_block2d_second_pass_armv6| PROC
+    stmdb   sp!, {r4 - r11, lr}
+
+    ldr     r11, [sp, #36]                  ; vp8_filter address
+    sub     sp, sp, #4
+    mov     r7, r3, lsl #16                 ; height is top part of counter
+    str     r1, [sp]                        ; push destination to stack
+
+    ldr     r4, [r11]                       ; load up packed filter coefficients
+    ldr     r5, [r11, #4]
+    ldr     r6, [r11, #8]
+
+    pkhbt   r12, r5, r4                     ; pack the filter differently
+    pkhbt   r11, r6, r5
+
+    sub     r0, r0, #4                      ; offset input buffer
+
+|height_loop_2nd|
+    ldr     r8, [r0]                        ; load the data
+    ldr     r9, [r0, #4]
+    orr     r7, r7, r3, lsr #1              ; loop counter
+
+|width_loop_2nd|
+    smuad   lr, r4, r8                      ; apply filter
+    sub     r7, r7, #1
+    smulbt  r8, r4, r8
+
+    ldr     r10, [r0, #8]
+
+    smlad   lr, r5, r9, lr
+    smladx  r8, r12, r9, r8
+
+    ldrh    r9, [r0, #12]
+
+    smlad   lr, r6, r10, lr
+    smladx  r8, r11, r10, r8
+
+    add     r0, r0, #4
+    smlatb  r10, r6, r9, r8
+
+    add     lr, lr, #0x40                   ; round_shift_and_clamp
+    ands    r8, r7, #0xff
+    usat    lr, #8, lr, asr #7
+    add     r10, r10, #0x40
+    strb    lr, [r1], r2                    ; the result is transposed back and stored
+    usat    r10, #8, r10, asr #7
+
+    ldrne   r8, [r0]                        ; load data for next loop
+    ldrne   r9, [r0, #4]
+    strb    r10, [r1], r2
+
+    bne     width_loop_2nd
+
+    ldr     r1, [sp]                        ; update dst for next loop
+    subs    r7, r7, #0x10000
+    add     r0, r0, #16                     ; update src for next loop
+    add     r1, r1, #1
+    str     r1, [sp]
+
+    bne     height_loop_2nd
+
+    add     sp, sp, #4
+    ldmia   sp!, {r4 - r11, pc}
+
+    ENDP
+
+;---------------------------------
+; r0    short         *src_ptr,
+; r1    unsigned char *output_ptr,
+; r2    unsigned int output_pitch,
+; r3    unsigned int cnt,
+; stack const short *vp8_filter
+;---------------------------------
+|vp8_filter4_block2d_second_pass_armv6| PROC
+    stmdb   sp!, {r4 - r11, lr}
+
+    ldr     r11, [sp, #36]                  ; vp8_filter address
+    mov     r7, r3, lsl #16                 ; height is top part of counter
+
+    ldr     r4, [r11]                       ; load up packed filter coefficients
+    add     lr, r1, r3                      ; save final destination pointer
+    ldr     r5, [r11, #4]
+    ldr     r6, [r11, #8]
+
+    pkhbt   r12, r5, r4                     ; pack the filter differently
+    pkhbt   r11, r6, r5
+    mov     r4, #0x40                       ; rounding factor (for smlad{x})
+
+|height_loop_2nd_4|
+    ldrd    r8, [r0, #-4]                   ; load the data
+    orr     r7, r7, r3, lsr #1              ; loop counter
+
+|width_loop_2nd_4|
+    ldr     r10, [r0, #4]!
+    smladx  r6, r9, r12, r4                 ; apply filter
+    pkhbt   r8, r9, r8
+    smlad   r5, r8, r12, r4
+    pkhbt   r8, r10, r9
+    smladx  r6, r10, r11, r6
+    sub     r7, r7, #1
+    smlad   r5, r8, r11, r5
+
+    mov     r8, r9                          ; shift the data for the next loop
+    mov     r9, r10
+
+    usat    r6, #8, r6, asr #7              ; shift and clamp
+    usat    r5, #8, r5, asr #7
+
+    strb    r5, [r1], r2                    ; the result is transposed back and stored
+    tst     r7, #0xff
+    strb    r6, [r1], r2
+
+    bne     width_loop_2nd_4
+
+    subs    r7, r7, #0x10000
+    add     r0, r0, #16                     ; update src for next loop
+    sub     r1, lr, r7, lsr #16             ; update dst for next loop
+
+    bne     height_loop_2nd_4
+
+    ldmia   sp!, {r4 - r11, pc}
+
+    ENDP
+
+;------------------------------------
+; r0    unsigned char *src_ptr
+; r1    unsigned char *output_ptr,
+; r2    unsigned int src_pixels_per_line
+; r3    unsigned int cnt,
+; stack unsigned int output_pitch,
+; stack const short *vp8_filter
+;------------------------------------
+|vp8_filter_block2d_first_pass_only_armv6| PROC
+    stmdb   sp!, {r4 - r11, lr}
+
+    ldr     r4, [sp, #36]                   ; output pitch
+    ldr     r11, [sp, #40]                  ; HFilter address
+    sub     sp, sp, #8
+
+    mov     r7, r3
+    sub     r2, r2, r3                      ; inside loop increments input array,
+                                            ; so the height loop only needs to add
+                                            ; r2 - width to the input pointer
+
+    sub     r4, r4, r3
+    str     r4, [sp]                        ; save modified output pitch
+    str     r2, [sp, #4]
+
+    mov     r2, #0x40
+
+    ldr     r4, [r11]                       ; load up packed filter coefficients
+    ldr     r5, [r11, #4]
+    ldr     r6, [r11, #8]
+
+; six tap filter
+|height_loop_1st_only_6|
+    ldrb    r8, [r0, #-2]                   ; load data
+    ldrb    r9, [r0, #-1]
+    ldrb    r10, [r0], #2
+
+    mov     r12, r3, lsr #1                 ; loop counter
+
+|width_loop_1st_only_6|
+    ldrb    r11, [r0, #-1]
+
+    pkhbt   lr, r8, r9, lsl #16             ; r9 | r8
+    pkhbt   r8, r9, r10, lsl #16            ; r10 | r9
+
+    ldrb    r9, [r0]
+
+;;  smuad   lr, lr, r4
+    smlad   lr, lr, r4, r2
+    pkhbt   r10, r10, r11, lsl #16          ; r11 | r10
+;;  smuad   r8, r8, r4
+    smlad   r8, r8, r4, r2
+    pkhbt   r11, r11, r9, lsl #16           ; r9 | r11
+
+    smlad   lr, r10, r5, lr
+    ldrb    r10, [r0, #1]
+    smlad   r8, r11, r5, r8
+    ldrb    r11, [r0, #2]
+
+    subs    r12, r12, #1
+
+    pkhbt   r9, r9, r10, lsl #16            ; r10 | r9
+    pkhbt   r10, r10, r11, lsl #16          ; r11 | r10
+
+    smlad   lr, r9, r6, lr
+    smlad   r10, r10, r6, r8
+
+;;  add     lr, lr, #0x40                   ; round_shift_and_clamp
+    ldrneb  r8, [r0, #-2]                   ; load data for next loop
+    usat    lr, #8, lr, asr #7
+;;  add     r10, r10, #0x40
+    strb    lr, [r1], #1                    ; store the result
+    usat    r10, #8, r10, asr #7
+
+    ldrneb  r9, [r0, #-1]
+    strb    r10, [r1], #1
+    ldrneb  r10, [r0], #2
+
+    bne     width_loop_1st_only_6
+
+    ;;add       r9, r2, #30                 ; attempt to load 2 adjacent cache lines
+    ;;IF ARCHITECTURE=6
+    ;pld        [r0, r2]
+    ;;pld       [r0, r9]
+    ;;ENDIF
+
+    ldr     lr, [sp]                        ; load back output pitch
+    ldr     r12, [sp, #4]                   ; load back adjusted src stride
+    subs    r7, r7, #1
+    add     r0, r0, r12                     ; update src for next loop
+    add     r1, r1, lr                      ; update dst for next loop
+
+    bne     height_loop_1st_only_6
+
+    add     sp, sp, #8
+    ldmia   sp!, {r4 - r11, pc}
+    ENDP  ; |vp8_filter_block2d_first_pass_only_armv6|
+
+
+;------------------------------------
+; r0    unsigned char *src_ptr,
+; r1    unsigned char *output_ptr,
+; r2    unsigned int src_pixels_per_line
+; r3    unsigned int cnt,
+; stack unsigned int output_pitch,
+; stack const short *vp8_filter
+;------------------------------------
+|vp8_filter_block2d_second_pass_only_armv6| PROC
+    stmdb   sp!, {r4 - r11, lr}
+
+    ldr     r11, [sp, #40]                  ; VFilter address
+    ldr     r12, [sp, #36]                  ; output pitch
+
+    mov     r7, r3, lsl #16                 ; height is top part of counter
+    sub     r0, r0, r2, lsl #1              ; need 6 elements for filtering, 2 before, 3 after
+
+    sub     sp, sp, #8
+
+    ldr     r4, [r11]                       ; load up packed filter coefficients
+    ldr     r5, [r11, #4]
+    ldr     r6, [r11, #8]
+
+    str     r0, [sp]                        ; save r0 to stack
+    str     r1, [sp, #4]                    ; save dst to stack
+
+; six tap filter
+|width_loop_2nd_only_6|
+    ldrb    r8, [r0], r2                    ; load data
+    orr     r7, r7, r3                      ; loop counter
+    ldrb    r9, [r0], r2
+    ldrb    r10, [r0], r2
+
+|height_loop_2nd_only_6|
+    ; filter the first column in this inner loop, then move to the next column.
+    ldrb    r11, [r0], r2
+
+    pkhbt   lr, r8, r9, lsl #16             ; r9 | r8
+    pkhbt   r8, r9, r10, lsl #16            ; r10 | r9
+
+    ldrb    r9, [r0], r2
+
+    smuad   lr, lr, r4
+    pkhbt   r10, r10, r11, lsl #16          ; r11 | r10
+    smuad   r8, r8, r4
+    pkhbt   r11, r11, r9, lsl #16           ; r9 | r11
+
+    smlad   lr, r10, r5, lr
+    ldrb    r10, [r0], r2
+    smlad   r8, r11, r5, r8
+    ldrb    r11, [r0]
+
+    sub     r7, r7, #2
+    sub     r0, r0, r2, lsl #2
+
+    pkhbt   r9, r9, r10, lsl #16            ; r10 | r9
+    pkhbt   r10, r10, r11, lsl #16          ; r11 | r10
+
+    smlad   lr, r9, r6, lr
+    smlad   r10, r10, r6, r8
+
+    ands    r9, r7, #0xff
+
+    add     lr, lr, #0x40                   ; round_shift_and_clamp
+    ldrneb  r8, [r0], r2                    ; load data for next loop
+    usat    lr, #8, lr, asr #7
+    add     r10, r10, #0x40
+    strb    lr, [r1], r12                   ; store the result for the column
+    usat    r10, #8, r10, asr #7
+
+    ldrneb  r9, [r0], r2
+    strb    r10, [r1], r12
+    ldrneb  r10, [r0], r2
+
+    bne     height_loop_2nd_only_6
+
+    ldr     r0, [sp]
+    ldr     r1, [sp, #4]
+    subs    r7, r7, #0x10000
+    add     r0, r0, #1                      ; move to filter next column
+    str     r0, [sp]
+    add     r1, r1, #1
+    str     r1, [sp, #4]
+
+    bne     width_loop_2nd_only_6
+
+    add     sp, sp, #8
+
+    ldmia   sp!, {r4 - r11, pc}
+    ENDP  ; |vp8_filter_block2d_second_pass_only_armv6|
+
+    END
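All of the six-tap routines above compute the same per-sample dot product: six taps over src[-2..3] with coefficients that sum to 128, rounded by 64, shifted right by 7 and clamped to 8 bits (the smlad/usat pairs). A scalar C sketch (hypothetical name):

    static unsigned char sixtap_sample(const unsigned char *src,
                                       const short *vp8_filter)
    {
        int sum = 0, i;

        for (i = 0; i < 6; i++)            /* taps at src[-2] .. src[3] */
            sum += src[i - 2] * vp8_filter[i];

        sum = (sum + 64) >> 7;             /* round_shift_and_clamp */
        if (sum < 0)   sum = 0;
        if (sum > 255) sum = 255;
        return (unsigned char)sum;
    }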
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/armv6/idct_v6.asm
@@ -0,0 +1,345 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+;                   r0  r1  r2  r3  r4  r5  r6  r7  r8  r9  r10 r11 r12     r14
+    EXPORT  |vp8_short_idct4x4llm_1_v6|
+    EXPORT  |vp8_short_idct4x4llm_v6|
+    EXPORT  |vp8_short_idct4x4llm_v6_scott|
+    EXPORT  |vp8_short_idct4x4llm_v6_dual|
+
+    AREA    |.text|, CODE, READONLY
+
+;********************************************************************************
+;*  void short_idct4x4llm_1_v6(INT16 * input, INT16 * output, INT32 pitch)
+;*      r0  INT16 * input
+;*      r1  INT16 * output
+;*      r2  INT32 pitch
+;*  bench:  3/5
+;********************************************************************************
+
+|vp8_short_idct4x4llm_1_v6| PROC         ;   cycles  in  out pit
+            ;
+    ldrsh   r0, [r0]    ; load input[0] 1, r0 un 2
+    add r0, r0, #4  ;   1   +4
+    stmdb   sp!, {r4, r5, lr}   ; make room for wide writes 1                   backup
+    mov r0, r0, asr #3  ; (input[0] + 4) >> 3   1, r0 req`d ^1  >> 3
+    pkhbt   r4, r0, r0, lsl #16 ; pack r0 into r4   1, r0 req`d ^1                  pack
+    mov r5, r4  ; expand                        expand
+
+    strd    r4, [r1], r2    ; *output = r0, post inc    1
+    strd    r4, [r1], r2    ;   1
+    strd    r4, [r1], r2    ;   1
+    strd    r4, [r1]    ;   1
+            ;
+    ldmia   sp!, {r4, r5, pc}   ; replace vars, return                      restore
+    ENDP        ; |vp8_short_idct4x4llm_1_v6|
+;********************************************************************************
+;********************************************************************************
+;********************************************************************************
+
+;********************************************************************************
+;*  void short_idct4x4llm_v6(INT16 * input, INT16 * output, INT32 pitch)
+;*      r0  INT16 * input
+;*      r1  INT16 * output
+;*      r2  INT32 pitch
+;*  bench:
+;********************************************************************************
+
+|vp8_short_idct4x4llm_v6| PROC           ;   cycles  in  out pit
+            ;
+    stmdb   sp!, {r4-r11, lr}   ; backup registers  1                   backup
+            ;
+    mov r4, #0x00004E00 ;   1                   cst
+    orr r4, r4, #0x0000007B ; cospi8sqrt2minus1
+    mov r5, #0x00008A00 ;   1                       cst
+    orr r5, r5, #0x0000008C ; sinpi8sqrt2
+            ;
+    mov r6, #4  ; i=4   1                           i
+loop1           ;
+    ldrsh   r12, [r0, #8]   ; input[4]  1, r12 unavail 2                                                    [4]
+    ldrsh   r3, [r0, #24]   ; input[12] 1, r3 unavail 2             [12]
+    ldrsh   r8, [r0, #16]   ; input[8]  1, r8 unavail 2                                 [8]
+    ldrsh   r7, [r0], #0x2  ; input[0]  1, r7 unavail 2 ++                          [0]
+    smulwb  r10, r5, r12    ; ([4] * sinpi8sqrt2) >> 16 1, r10 un 2, r12/r5 ^1                                          t1
+    smulwb  r11, r4, r3 ; ([12] * cospi8sqrt2minus1) >> 16  1, r11 un 2, r3/r4 ^1                                               t2
+    add r9, r7, r8  ; a1 = [0] + [8]    1                                       a1
+    sub r7, r7, r8  ; b1 = [0] - [8]    1                               b1
+    add r11, r3, r11    ; temp2 1
+    rsb r11, r11, r10   ; c1 = temp1 - temp2    1                                               c1
+    smulwb  r3, r5, r3  ; ([12] * sinpi8sqrt2) >> 16    1, r3 un 2, r3/r5 ^ 1               t2
+    smulwb  r10, r4, r12    ; ([4] * cospi8sqrt2minus1) >> 16   1, r10 un 2, r12/r4 ^1                                          t1
+    add r8, r7, r11 ; b1 + c1   1                                   b+c
+    strh    r8, [r1, r2]    ; out[pitch] = b1+c1    1
+    sub r7, r7, r11 ; b1 - c1   1                               b-c
+    add r10, r12, r10   ; temp1 1
+    add r3, r10, r3 ; d1 = temp1 + temp2    1               d1
+    add r10, r9, r3 ; a1 + d1   1                                           a+d
+    sub r3, r9, r3  ; a1 - d1   1               a-d
+    add r8, r2, r2  ; pitch * 2 1                                   p*2
+    strh    r7, [r1, r8]    ; out[pitch*2] = b1-c1  1
+    add r7, r2, r2, lsl #1  ; pitch * 3 1                               p*3
+    strh    r3, [r1, r7]    ; out[pitch*3] = a1-d1  1
+    subs    r6, r6, #1  ; i--   1                           --
+    strh    r10, [r1], #0x2 ; out[0] = a1+d1    1       ++
+    bne loop1   ; if i>0, continue
+            ;
+    sub r1, r1, #8  ; set up out for next loop  1       -4
+            ; for this iteration, input=prev output
+    mov r6, #4  ; i=4   1                           i
+;   b   returnfull
+loop2           ;
+    ldrsh   r11, [r1, #2]   ; input[1]  1, r11 un 2                                             [1]
+    ldrsh   r8, [r1, #6]    ; input[3]  1, r8 un 2                                  [3]
+    ldrsh   r3, [r1, #4]    ; input[2]  1, r3 un 2              [2]
+    ldrsh   r0, [r1]    ; input[0]  1, r0 un 2  [0]
+    smulwb  r9, r5, r11 ; ([1] * sinpi8sqrt2) >> 16 1, r9 un 2, r5/r11 ^1                                       t1
+    smulwb  r10, r4, r8 ; ([3] * cospi8sqrt2minus1) >> 16   1, r10 un 2, r4/r8 ^1                                           t2
+    add r7, r0, r3  ; a1 = [0] + [2]    1                               a1
+    sub r0, r0, r3  ; b1 = [0] - [2]    1   b1
+    add r10, r8, r10    ; temp2 1
+    rsb r9, r10, r9 ; c1 = temp1 - temp2    1                                       c1
+    smulwb  r8, r5, r8  ; ([3] * sinpi8sqrt2) >> 16 1, r8 un 2, r5/r8 ^1                                    t2
+    smulwb  r10, r4, r11    ; ([1] * cospi8sqrt2minus1) >> 16   1, r10 un 2, r4/r11 ^1                                          t1
+    add r3, r0, r9  ; b1+c1 1               b+c
+    add r3, r3, #4  ; b1+c1+4   1               +4
+    add r10, r11, r10   ; temp1 1
+    mov r3, r3, asr #3  ; b1+c1+4 >> 3  1, r3 ^1                >>3
+    strh    r3, [r1, #2]    ; out[1] = b1+c1    1
+    add r10, r10, r8    ; d1 = temp1 + temp2    1                                           d1
+    add r3, r7, r10 ; a1+d1 1               a+d
+    add r3, r3, #4  ; a1+d1+4   1               +4
+    sub r7, r7, r10 ; a1-d1 1                               a-d
+    add r7, r7, #4  ; a1-d1+4   1                               +4
+    mov r3, r3, asr #3  ; a1+d1+4 >> 3  1, r3 ^1                >>3
+    mov r7, r7, asr #3  ; a1-d1+4 >> 3  1, r7 ^1                                >>3
+    strh    r7, [r1, #6]    ; out[3] = a1-d1    1
+    sub r0, r0, r9  ; b1-c1 1   b-c
+    add r0, r0, #4  ; b1-c1+4   1   +4
+    subs    r6, r6, #1  ; i--   1                           --
+    mov r0, r0, asr #3  ; b1-c1+4 >> 3  1, r0 ^1    >>3
+    strh    r0, [r1, #4]    ; out[2] = b1-c1    1
+    strh    r3, [r1], r2    ; out[0] = a1+d1    1
+;   add r1, r1, r2  ; out += pitch  1       ++
+    bne loop2   ; if i>0, continue
+returnfull          ;
+    ldmia   sp!, {r4 - r11, pc} ; replace vars, return                      restore
+    ENDP
+
+;********************************************************************************
+;********************************************************************************
+;********************************************************************************
+
+;********************************************************************************
+;*  void vp8_short_idct4x4llm_v6_scott(INT16 * input, INT16 * output, INT32 pitch)
+;*      r0  INT16 * input
+;*      r1  INT16 * output
+;*      r2  INT32 pitch
+;*  bench:
+;********************************************************************************
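+; This routine appears to be an experimental rescheduling of the IDCT that
+; uses the packed 16-bit ops (smulwb/smulwt, uadd16/usub16) to process two
+; columns per iteration. Note that its second (row) pass loop is an empty
+; stub -- only the counter decrement runs -- so it is not functional; the
+; _dual routine below is the complete packed implementation.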
+
+|vp8_short_idct4x4llm_v6_scott| PROC         ;   cycles  in  out pit
+;   mov r0, #0  ;
+;   ldr r0, [r0]    ;
+    stmdb   sp!, {r4 - r11, lr} ; backup registers  1                   backup
+            ;
+    mov r3, #0x00004E00 ;                   cos
+    orr r3, r3, #0x0000007B ; cospi8sqrt2minus1
+    mov r4, #0x00008A00 ;                       sin
+    orr r4, r4, #0x0000008C ; sinpi8sqrt2
+            ;
+    mov r5, #0x2    ; i                         i
+            ;
+short_idct4x4llm_v6_scott_loop1          ;
+    ldr r10, [r0, #(4*2)]   ; i5 | i4                                               5,4
+    ldr r11, [r0, #(12*2)]  ; i13 | i12                                                 13,12
+            ;
+    smulwb  r6, r4, r10 ; ((ip[4] * sinpi8sqrt2) >> 16)                             lt1
+    smulwb  r7, r3, r11 ; ((ip[12] * cospi8sqrt2minus1) >> 16)                                  lt2
+            ;
+    smulwb  r12, r3, r10    ; ((ip[4] * cospi8sqrt2minus1) >> 16)                                                        l2t2
+    smulwb  r14, r4, r11    ; ((ip[12] * sinpi8sqrt2) >> 16)                                                                l2t1
+            ;
+    add r6, r6, r7  ; partial c1                                lt1-lt2
+    add r12, r12, r14   ; partial d1                                                        l2t2+l2t1
+            ;
+    smulwt  r14, r4, r10    ; ((ip[5] * sinpi8sqrt2) >> 16)                                                             ht1
+    smulwt  r7, r3, r11 ; ((ip[13] * cospi8sqrt2minus1) >> 16)                                  ht2
+            ;
+    smulwt  r8, r3, r10 ; ((ip[5] * cospi8sqrt2minus1) >> 16)                                       h2t1
+    smulwt  r9, r4, r11 ; ((ip[13] * sinpi8sqrt2) >> 16)                                            h2t2
+            ;
+    add r7, r14, r7 ; partial c1_2                                  ht1+ht2
+    sub r8, r8, r9  ; partial d1_2                                      h2t1-h2t2
+            ;
+    pkhbt   r6, r6, r7, lsl #16 ; partial c1_2 | partial c1_1                               pack
+    pkhbt   r12, r12, r8, lsl #16   ; partial d1_2 | partial d1_1                                                       pack
+            ;
+    usub16  r6, r6, r10 ; c1_2 | c1_1                               c
+    uadd16  r12, r12, r11   ; d1_2 | d1_1                                                       d
+            ;
+    ldr r10, [r0, #0]   ; i1 | i0                                               1,0
+    ldr r11, [r0, #(8*2)]   ; i9 | i10                                                  9,10
+            ;
+;;;;;;  add r0, r0, #0x4    ;       +4
+;;;;;;  add r1, r1, #0x4    ;           +4
+            ;
+    uadd16  r8, r10, r11    ; i1 + i9 | i0 + i8 aka a1                                      a
+    usub16  r9, r10, r11    ; i1 - i9 | i0 - i8 aka b1                                          b
+            ;
+    uadd16  r7, r8, r12 ; a1 + d1 pair                                  a+d
+    usub16  r14, r8, r12    ; a1 - d1 pair                                                              a-d
+            ;
+    str r7, [r1]    ; op[0] = a1 + d1
+    str r14, [r1, r2]   ; op[pitch] = a1 - d1
+            ;
+    add r0, r0, #0x4    ; advance input pointer        ++
+    add r1, r1, #0x4    ; advance output pointer           ++
+            ;
+    subs    r5, r5, #0x1    ;                           --
+    bne short_idct4x4llm_v6_scott_loop1  ;
+            ;
+    sub r1, r1, #16 ; reset output ptr
+    mov r5, #0x4    ;
+    mov r0, r1  ; input = output
+            ;
+short_idct4x4llm_v6_scott_loop2          ;
+            ;
+    subs    r5, r5, #0x1    ;
+    bne short_idct4x4llm_v6_scott_loop2  ;
+            ;
+    ldmia   sp!, {r4 - r11, pc} ;
+    ENDP        ;
+            ;
+;********************************************************************************
+;********************************************************************************
+;********************************************************************************
+
+;********************************************************************************
+;*  void vp8_short_idct4x4llm_v6_dual(INT16 * input, INT16 * output, INT32 pitch)
+;*      r0  INT16 * input
+;*      r1  INT16 * output
+;*      r2  INT32 pitch
+;*  bench:
+;********************************************************************************
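+; The dual variant keeps two 16-bit coefficients packed per register and
+; runs the butterfly on two columns (then two rows) at once. A sketch of
+; the per-lane trick (x is one 16-bit lane, c one of the Q16 constants):
+;
+;   (x * c) >> 16            /* smulwb (bottom lane) / smulwt (top lane) */
+;   x + ((x * c) >> 16)      /* pkhbt to repack, then uadd16             */
+;
+; In the row pass, "asr #19" extracts a result's top lane and applies the
+; final >> 3 in one shift; sxth followed by "asr #3" does the bottom lane.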
+
+|vp8_short_idct4x4llm_v6_dual| PROC          ;   cycles  in  out pit
+            ;
+    stmdb   sp!, {r4-r11, lr}   ; backup registers  1                   backup
+    mov r3, #0x00004E00 ;                   cos
+    orr r3, r3, #0x0000007B ; cospi8sqrt2minus1
+    mov r4, #0x00008A00 ;                       sin
+    orr r4, r4, #0x0000008C ; sinpi8sqrt2
+    mov r5, #0x2    ; i=2                           i
+loop1_dual
+    ldr r6, [r0, #(4*2)]    ; i5 | i4                               5|4
+    ldr r12, [r0, #(12*2)]  ; i13 | i12                                                     13|12
+    ldr r14, [r0, #(8*2)]   ; i9 | i8                                                               9|8
+
+    smulwt  r9, r3, r6  ; (ip[5] * cospi8sqrt2minus1) >> 16                                         5c
+    smulwb  r7, r3, r6  ; (ip[4] * cospi8sqrt2minus1) >> 16                                 4c
+    smulwt  r10, r4, r6 ; (ip[5] * sinpi8sqrt2) >> 16                                               5s
+    smulwb  r8, r4, r6  ; (ip[4] * sinpi8sqrt2) >> 16                                       4s
+    pkhbt   r7, r7, r9, lsl #16 ; 5c | 4c
+    smulwt  r11, r3, r12    ; (ip[13] * cospi8sqrt2minus1) >> 16                                                    13c
+    pkhbt   r8, r8, r10, lsl #16    ; 5s | 4s
+    uadd16  r6, r6, r7  ; 5c+5 | 4c+4
+    smulwt  r7, r4, r12 ; (ip[13] * sinpi8sqrt2) >> 16                                  13s
+    smulwb  r9, r3, r12 ; (ip[12] * cospi8sqrt2minus1) >> 16                                            12c
+    smulwb  r10, r4, r12    ; (ip[12] * sinpi8sqrt2) >> 16                                              12s
+    subs    r5, r5, #0x1    ; i--                           --
+    pkhbt   r9, r9, r11, lsl #16    ; 13c | 12c
+    ldr r11, [r0], #0x4 ; i1 | i0       ++                                          1|0
+    pkhbt   r10, r10, r7, lsl #16   ; 13s | 12s
+    uadd16  r7, r12, r9 ; 13c+13 | 12c+12
+    usub16  r7, r8, r7  ; c                                 c
+    uadd16  r6, r6, r10 ; d                             d
+    uadd16  r10, r11, r14   ; a                                             a
+    usub16  r8, r11, r14    ; b                                     b
+    uadd16  r9, r10, r6 ; a+d                                           a+d
+    usub16  r10, r10, r6    ; a-d                                               a-d
+    uadd16  r6, r8, r7  ; b+c                               b+c
+    usub16  r7, r8, r7  ; b-c                                   b-c
+    str r6, [r1, r2]    ; o5 | o4
+    add r6, r2, r2  ; pitch * 2                             p2
+    str r7, [r1, r6]    ; o9 | o8
+    add r6,  r6, r2 ; pitch * 3                             p3
+    str r10, [r1, r6]   ; o13 | o12
+    str r9, [r1], #0x4  ; o1 | o0           ++
+    bne loop1_dual  ;
+    mov r5, #0x2    ; i=2                           i
+    sub r0, r1, #8  ; reset input/output        i/o
+loop2_dual
+    ldr r6, [r0, r2]    ; i5 | i4                               5|4
+    ldr r1, [r0]    ; i1 | i0           1|0
+    ldr r12, [r0, #0x4] ; i3 | i2                                                       3|2
+    add r14, r2, #0x4   ; pitch + 4 bytes, i.e. 2 shorts                                                             p+2
+    ldr r14, [r0, r14]  ; i7 | i6                                                               7|6
+    smulwt  r9, r3, r6  ; (ip[5] * cospi8sqrt2minus1) >> 16                                         5c
+    smulwt  r7, r3, r1  ; (ip[1] * cospi8sqrt2minus1) >> 16                                 1c
+    smulwt  r10, r4, r6 ; (ip[5] * sinpi8sqrt2) >> 16                                               5s
+    smulwt  r8, r4, r1  ; (ip[1] * sinpi8sqrt2) >> 16                                       1s
+    pkhbt   r11, r6, r1, lsl #16    ; i0 | i4                                                   0|4
+    pkhbt   r7, r9, r7, lsl #16 ; 1c | 5c
+    pkhbt   r8, r10, r8, lsl #16    ; 1s | 5s = temp1                                      tc1
+    pkhtb   r1, r1, r6, asr #16 ; i1 | i5           1|5
+    uadd16  r1, r7, r1  ; 1c+1 | 5c+5 = temp2 (d)           td2
+    pkhbt   r9, r14, r12, lsl #16   ; i2 | i6                                           2|6
+    uadd16  r10, r11, r9    ; a                                             a
+    usub16  r9, r11, r9 ; b                                         b
+    pkhtb   r6, r12, r14, asr #16   ; i3 | i7                               3|7
+    subs    r5, r5, #0x1    ; i--                           --
+    smulwt  r7, r3, r6  ; (ip[3] * cospi8sqrt2minus1) >> 16                                 3c
+    smulwt  r11, r4, r6 ; (ip[3] * sinpi8sqrt2) >> 16                                                   3s
+    smulwb  r12, r3, r6 ; (ip[7] * cospi8sqrt2minus1) >> 16                                                     7c
+    smulwb  r14, r4, r6 ; (ip[7] * sinpi8sqrt2) >> 16                                                               7s
+
+    pkhbt   r7, r12, r7, lsl #16    ; 3c | 7c
+    pkhbt   r11, r14, r11, lsl #16  ; 3s | 7s = temp1 (d)                                                   td1
+    uadd16  r6, r7, r6  ; 3c+3 | 7c+7 = temp2  (c)                              tc2
+    usub16  r12, r8, r6 ; c (o1 | o5)                                                       c
+    uadd16  r6, r11, r1 ; d (o3 | o7)                               d
+    uadd16  r7, r10, r6 ; a+d                                   a+d
+    mov r8, #0x4    ; set up 4's                                        4
+    orr r8, r8, #0x40000    ;                                       4|4
+    usub16  r6, r10, r6 ; a-d                               a-d
+    uadd16  r6, r6, r8  ; a-d+4                             3|7
+    uadd16  r7, r7, r8  ; a+d+4                                 0|4
+    uadd16  r10, r9, r12    ; b+c                                               b+c
+    usub16  r1, r9, r12 ; b-c           b-c
+    uadd16  r10, r10, r8    ; b+c+4                                             1|5
+    uadd16  r1, r1, r8  ; b-c+4         2|6
+    mov r8, r10, asr #19    ; o1 >> 3
+    strh    r8, [r0, #2]    ; o1
+    mov r8, r1, asr #19 ; o2 >> 3
+    strh    r8, [r0, #4]    ; o2
+    mov r8, r6, asr #19 ; o3 >> 3
+    strh    r8, [r0, #6]    ; o3
+    mov r8, r7, asr #19 ; o0 >> 3
+    strh    r8, [r0], r2    ; o0        +p
+    sxth    r10, r10    ;
+    mov r8, r10, asr #3 ; o5 >> 3
+    strh    r8, [r0, #2]    ; o5
+    sxth    r1, r1  ;
+    mov r8, r1, asr #3  ; o6 >> 3
+    strh    r8, [r0, #4]    ; o6
+    sxth    r6, r6  ;
+    mov r8, r6, asr #3  ; o7 >> 3
+    strh    r8, [r0, #6]    ; o7
+    sxth    r7, r7  ;
+    mov r8, r7, asr #3  ; o4 >> 3
+    strh    r8, [r0], r2    ; o4        +p
+;;;;;   subs    r5, r5, #0x1    ; i--                           --
+    bne loop2_dual  ;
+            ;
+    ldmia   sp!, {r4 - r11, pc} ; replace vars, return                      restore
+    ENDP
+
+    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/armv6/iwalsh_v6.asm
@@ -0,0 +1,152 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+    EXPORT |vp8_short_inv_walsh4x4_v6|
+    EXPORT |vp8_short_inv_walsh4x4_1_v6|
+
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA    |.text|, CODE, READONLY  ; name this block of code
+
+;void vp8_short_inv_walsh4x4_v6(short *input, short *output)
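+; Roughly the C this mirrors (a sketch; ip/op are illustrative names). The
+; inverse 4x4 Walsh-Hadamard transform is two butterfly passes, with
+; (x + 3) >> 3 rounding applied in the second:
+;
+;   for (i = 0; i < 4; i++) {            /* columns */
+;       a1 = ip[0] + ip[12];  b1 = ip[4] + ip[8];
+;       c1 = ip[4] - ip[8];   d1 = ip[0] - ip[12];
+;       op[0] = a1 + b1;  op[4]  = c1 + d1;
+;       op[8] = a1 - b1;  op[12] = d1 - c1;
+;       ip++; op++;
+;   }
+;   for (i = 0; i < 4; i++) {            /* rows, over the pass-1 output */
+;       a1 = ip[0] + ip[3];  b1 = ip[1] + ip[2];
+;       c1 = ip[1] - ip[2];  d1 = ip[0] - ip[3];
+;       op[0] = (a1 + b1 + 3) >> 3;  op[1] = (c1 + d1 + 3) >> 3;
+;       op[2] = (a1 - b1 + 3) >> 3;  op[3] = (d1 - c1 + 3) >> 3;
+;       ip += 4; op += 4;
+;   }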
+|vp8_short_inv_walsh4x4_v6| PROC
+
+    stmdb       sp!, {r4 - r11, lr}
+
+    ldr         r2, [r0], #4         ; [1  |  0]
+    ldr         r3, [r0], #4         ; [3  |  2]
+    ldr         r4, [r0], #4         ; [5  |  4]
+    ldr         r5, [r0], #4         ; [7  |  6]
+    ldr         r6, [r0], #4         ; [9  |  8]
+    ldr         r7, [r0], #4         ; [11 | 10]
+    ldr         r8, [r0], #4         ; [13 | 12]
+    ldr         r9, [r0]             ; [15 | 14]
+
+    qadd16      r10, r2, r8          ; a1 [1+13  |  0+12]
+    qadd16      r11, r4, r6          ; b1 [5+9   |  4+8]
+    qsub16      r12, r4, r6          ; c1 [5-9   |  4-8]
+    qsub16      lr, r2, r8           ; d1 [1-13  |  0-12]
+
+    qadd16      r2, r10, r11         ; a1 + b1 [1  |  0]
+    qadd16      r4, r12, lr          ; c1 + d1 [5  |  4]
+    qsub16      r6, r10, r11         ; a1 - b1 [9  |  8]
+    qsub16      r8, lr, r12          ; d1 - c1 [13 | 12]
+
+    qadd16      r10, r3, r9          ; a1 [3+15  |  2+14]
+    qadd16      r11, r5, r7          ; b1 [7+11  |  6+10]
+    qsub16      r12, r5, r7          ; c1 [7-11  |  6-10]
+    qsub16      lr, r3, r9           ; d1 [3-15  |  2-14]
+
+    qadd16      r3, r10, r11         ; a1 + b1 [3  |  2]
+    qadd16      r5, r12, lr          ; c1 + d1 [7  |  6]
+    qsub16      r7, r10, r11         ; a1 - b1 [11 | 10]
+    qsub16      r9, lr, r12          ; d1 - c1 [15 | 14]
+
+    ; first transform complete
+
+    qsubaddx    r10, r2, r3          ; [c1|a1] [1-2   |   0+3]
+    qaddsubx    r11, r2, r3          ; [b1|d1] [1+2   |   0-3]
+    qsubaddx    r12, r4, r5          ; [c1|a1] [5-6   |   4+7]
+    qaddsubx    lr, r4, r5           ; [b1|d1] [5+6   |   4-7]
+
+    qaddsubx    r2, r10, r11         ; [b2|c2] [c1+d1 | a1-b1]
+    qaddsubx    r3, r11, r10         ; [a2|d2] [b1+a1 | d1-c1]
+    ldr         r10, c0x00030003
+    qaddsubx    r4, r12, lr          ; [b2|c2] [c1+d1 | a1-b1]
+    qaddsubx    r5, lr, r12          ; [a2|d2] [b1+a1 | d1-c1]
+
+    qadd16      r2, r2, r10          ; [b2+3|c2+3]
+    qadd16      r3, r3, r10          ; [a2+3|d2+3]
+    qadd16      r4, r4, r10          ; [b2+3|c2+3]
+    qadd16      r5, r5, r10          ; [a2+3|d2+3]
+
+    asr         r12, r2, #3          ; [1  |  x]
+    pkhtb       r12, r12, r3, asr #19; [1  |  0]
+    lsl         lr, r3, #16          ; [~3 |  x]
+    lsl         r2, r2, #16          ; [~2 |  x]
+    asr         lr, lr, #3           ; [3  |  x]
+    pkhtb       lr, lr, r2, asr #19  ; [3  |  2]
+
+    asr         r2, r4, #3           ; [5  |  x]
+    pkhtb       r2, r2, r5, asr #19  ; [5  |  4]
+    lsl         r3, r5, #16          ; [~7 |  x]
+    lsl         r4, r4, #16          ; [~6 |  x]
+    asr         r3, r3, #3           ; [7  |  x]
+    pkhtb       r3, r3, r4, asr #19  ; [7  |  6]
+
+    str         r12, [r1], #4
+    str         lr, [r1], #4
+    str         r2, [r1], #4
+    str         r3, [r1], #4
+
+    qsubaddx    r2, r6, r7           ; [c1|a1] [9-10  |  8+11]
+    qaddsubx    r3, r6, r7           ; [b1|d1] [9+10  |  8-11]
+    qsubaddx    r4, r8, r9           ; [c1|a1] [13-14 | 12+15]
+    qaddsubx    r5, r8, r9           ; [b1|d1] [13+14 | 12-15]
+
+    qaddsubx    r6, r2, r3           ; [b2|c2] [c1+d1 | a1-b1]
+    qaddsubx    r7, r3, r2           ; [a2|d2] [b1+a1 | d1-c1]
+    qaddsubx    r8, r4, r5           ; [b2|c2] [c1+d1 | a1-b1]
+    qaddsubx    r9, r5, r4           ; [a2|d2] [b1+a1 | d1-c1]
+
+    qadd16      r6, r6, r10          ; [b2+3|c2+3]
+    qadd16      r7, r7, r10          ; [a2+3|d2+3]
+    qadd16      r8, r8, r10          ; [b2+3|c2+3]
+    qadd16      r9, r9, r10          ; [a2+3|d2+3]
+
+    asr         r2, r6, #3           ; [9  |  x]
+    pkhtb       r2, r2, r7, asr #19  ; [9  |  8]
+    lsl         r3, r7, #16          ; [~11|  x]
+    lsl         r4, r6, #16          ; [~10|  x]
+    asr         r3, r3, #3           ; [11 |  x]
+    pkhtb       r3, r3, r4, asr #19  ; [11 | 10]
+
+    asr         r4, r8, #3           ; [13 |  x]
+    pkhtb       r4, r4, r9, asr #19  ; [13 | 12]
+    lsl         r5, r9, #16          ; [~15|  x]
+    lsl         r6, r8, #16          ; [~14|  x]
+    asr         r5, r5, #3           ; [15 |  x]
+    pkhtb       r5, r5, r6, asr #19  ; [15 | 14]
+
+    str         r2, [r1], #4
+    str         r3, [r1], #4
+    str         r4, [r1], #4
+    str         r5, [r1]
+
+    ldmia       sp!, {r4 - r11, pc}
+    ENDP        ; |vp8_short_inv_walsh4x4_v6|
+
+
+;void vp8_short_inv_walsh4x4_1_v6(short *input, short *output)
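+; Roughly the C equivalent (a sketch): broadcast the rounded DC term to all
+; sixteen outputs:
+;
+;   a1 = (input[0] + 3) >> 3;
+;   for (i = 0; i < 16; i++)
+;       output[i] = a1;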
+|vp8_short_inv_walsh4x4_1_v6| PROC
+
+    ldrsh       r2, [r0]             ; [0]
+    add         r2, r2, #3           ; [0] + 3
+    asr         r2, r2, #3           ; a1 ([0]+3) >> 3
+    lsl         r2, r2, #16          ; [a1 |  x]
+    orr         r2, r2, r2, lsr #16  ; [a1 | a1]
+
+    str         r2, [r1], #4
+    str         r2, [r1], #4
+    str         r2, [r1], #4
+    str         r2, [r1], #4
+    str         r2, [r1], #4
+    str         r2, [r1], #4
+    str         r2, [r1], #4
+    str         r2, [r1]
+
+    bx          lr
+    ENDP        ; |vp8_short_inv_walsh4x4_1_v6|
+
+; Constant Pool
+c0x00030003 DCD 0x00030003
+    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/armv6/loopfilter_v6.asm
@@ -0,0 +1,1264 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT |vp8_loop_filter_horizontal_edge_armv6|
+    EXPORT |vp8_mbloop_filter_horizontal_edge_armv6|
+    EXPORT |vp8_loop_filter_vertical_edge_armv6|
+    EXPORT |vp8_mbloop_filter_vertical_edge_armv6|
+
+    AREA    |.text|, CODE, READONLY  ; name this block of code
+
+    MACRO
+    TRANSPOSE_MATRIX $a0, $a1, $a2, $a3, $b0, $b1, $b2, $b3
+    ; input: $a0, $a1, $a2, $a3; output: $b0, $b1, $b2, $b3
+    ; a0: 03 02 01 00
+    ; a1: 13 12 11 10
+    ; a2: 23 22 21 20
+    ; a3: 33 32 31 30
+    ;     b3 b2 b1 b0
+
+    uxtb16      $b1, $a1                    ; xx 12 xx 10
+    uxtb16      $b0, $a0                    ; xx 02 xx 00
+    uxtb16      $b3, $a3                    ; xx 32 xx 30
+    uxtb16      $b2, $a2                    ; xx 22 xx 20
+    orr         $b1, $b0, $b1, lsl #8       ; 12 02 10 00
+    orr         $b3, $b2, $b3, lsl #8       ; 32 22 30 20
+
+    uxtb16      $a1, $a1, ror #8            ; xx 13 xx 11
+    uxtb16      $a3, $a3, ror #8            ; xx 33 xx 31
+    uxtb16      $a0, $a0, ror #8            ; xx 03 xx 01
+    uxtb16      $a2, $a2, ror #8            ; xx 23 xx 21
+    orr         $a0, $a0, $a1, lsl #8       ; 13 03 11 01
+    orr         $a2, $a2, $a3, lsl #8       ; 33 23 31 21
+
+    pkhtb       $b2, $b3, $b1, asr #16      ; 32 22 12 02   -- p1
+    pkhbt       $b0, $b1, $b3, lsl #16      ; 30 20 10 00   -- p3
+
+    pkhtb       $b3, $a2, $a0, asr #16      ; 33 23 13 03   -- p0
+    pkhbt       $b1, $a0, $a2, lsl #16      ; 31 21 11 01   -- p2
+    MEND
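+
+; In C terms the macro is a plain 4x4 byte transpose (a sketch, with each
+; input register viewed as one row of unsigned char a[4][4]):
+;
+;   for (i = 0; i < 4; i++)
+;       for (j = 0; j < 4; j++)
+;           b[i][j] = a[j][i];
+;
+; so four pixels from the same image column end up packed in one register.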
+
+
+src         RN  r0
+pstep       RN  r1
+count       RN  r5
+
+;r0     unsigned char *src_ptr,
+;r1     int src_pixel_step,
+;r2     const char *flimit,
+;r3     const char *limit,
+;stack  const char *thresh,
+;stack  int  count
+
+;Note: all 16 elements in flimit are equal, so only one load is needed
+;for flimit. The same applies to limit and thresh.
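+
+;In C terms the breakout mask computed below is roughly (a sketch; all
+;operations are per pixel and saturating):
+;
+;   mask = (abs(p3-p2) <= limit) && (abs(p2-p1) <= limit)
+;       && (abs(p1-p0) <= limit) && (abs(q1-q0) <= limit)
+;       && (abs(q2-q1) <= limit) && (abs(q3-q2) <= limit)
+;       && (abs(p0-q0)*2 + abs(p1-q1)/2 <= flimit*2 + limit);
+;
+;filtering is skipped for a whole 4-pixel group when every lane's mask is
+;zero (the "cmp lr, #0 / beq" fast path).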
+
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+|vp8_loop_filter_horizontal_edge_armv6| PROC
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+    stmdb       sp!, {r4 - r11, lr}
+
+    sub         src, src, pstep, lsl #2     ; move src pointer down by 4 lines
+    ldr         count, [sp, #40]            ; count for 8-in-parallel
+    ldr         r6, [sp, #36]               ; load thresh address
+    sub         sp, sp, #16                 ; create temp buffer
+
+    ldr         r9, [src], pstep            ; p3
+    ldr         r4, [r2], #4                ; flimit
+    ldr         r10, [src], pstep           ; p2
+    ldr         r2, [r3], #4                ; limit
+    ldr         r11, [src], pstep           ; p1
+    uadd8       r4, r4, r4                  ; flimit * 2
+    ldr         r3, [r6], #4                ; thresh
+    mov         count, count, lsl #1        ; 4-in-parallel
+    uadd8       r4, r4, r2                  ; flimit * 2 + limit
+
+|Hnext8|
+    ; vp8_filter_mask() function
+    ; calculate breakout conditions
+    ldr         r12, [src], pstep           ; p0
+
+    uqsub8      r6, r9, r10                 ; p3 - p2
+    uqsub8      r7, r10, r9                 ; p2 - p3
+    uqsub8      r8, r10, r11                ; p2 - p1
+    uqsub8      r10, r11, r10               ; p1 - p2
+
+    orr         r6, r6, r7                  ; abs (p3-p2)
+    orr         r8, r8, r10                 ; abs (p2-p1)
+    uqsub8      lr, r6, r2                  ; compare to limit. lr: vp8_filter_mask
+    uqsub8      r8, r8, r2                  ; compare to limit
+    uqsub8      r6, r11, r12                ; p1 - p0
+    orr         lr, lr, r8
+    uqsub8      r7, r12, r11                ; p0 - p1
+    ldr         r9, [src], pstep            ; q0
+    ldr         r10, [src], pstep           ; q1
+    orr         r6, r6, r7                  ; abs (p1-p0)
+    uqsub8      r7, r6, r2                  ; compare to limit
+    uqsub8      r8, r6, r3                  ; compare to thresh  -- save r8 for later
+    orr         lr, lr, r7
+
+    uqsub8      r6, r11, r10                ; p1 - q1
+    uqsub8      r7, r10, r11                ; q1 - p1
+    uqsub8      r11, r12, r9                ; p0 - q0
+    uqsub8      r12, r9, r12                ; q0 - p0
+    orr         r6, r6, r7                  ; abs (p1-q1)
+    ldr         r7, c0x7F7F7F7F
+    orr         r12, r11, r12               ; abs (p0-q0)
+    ldr         r11, [src], pstep           ; q2
+    uqadd8      r12, r12, r12               ; abs (p0-q0) * 2
+    and         r6, r7, r6, lsr #1          ; abs (p1-q1) / 2
+    uqsub8      r7, r9, r10                 ; q0 - q1
+    uqadd8      r12, r12, r6                ; abs (p0-q0)*2 + abs (p1-q1)/2
+    uqsub8      r6, r10, r9                 ; q1 - q0
+    uqsub8      r12, r12, r4                ; compare to flimit
+    uqsub8      r9, r11, r10                ; q2 - q1
+
+    orr         lr, lr, r12
+
+    ldr         r12, [src], pstep           ; q3
+    uqsub8      r10, r10, r11               ; q1 - q2
+    orr         r6, r7, r6                  ; abs (q1-q0)
+    orr         r10, r9, r10                ; abs (q2-q1)
+    uqsub8      r7, r6, r2                  ; compare to limit
+    uqsub8      r10, r10, r2                ; compare to limit
+    uqsub8      r6, r6, r3                  ; compare to thresh -- save r6 for later
+    orr         lr, lr, r7
+    orr         lr, lr, r10
+
+    uqsub8      r10, r12, r11               ; q3 - q2
+    uqsub8      r9, r11, r12                ; q2 - q3
+
+    mvn         r11, #0                     ; r11 == -1
+
+    orr         r10, r10, r9                ; abs (q3-q2)
+    uqsub8      r10, r10, r2                ; compare to limit
+
+    mov         r12, #0
+    orr         lr, lr, r10
+    sub         src, src, pstep, lsl #2
+
+    usub8       lr, r12, lr                 ; use usub8 instead of ssub8
+    sel         lr, r11, r12                ; filter mask: lr
+
+    cmp         lr, #0
+    beq         hskip_filter                 ; skip filtering
+
+    sub         src, src, pstep, lsl #1     ; move src pointer down by 6 lines
+
+    ;vp8_hevmask() function
+    ;calculate high edge variance
+    orr         r10, r6, r8                 ; calculate vp8_hevmask
+
+    ldr         r7, [src], pstep            ; p1
+
+    usub8       r10, r12, r10               ; use usub8 instead of ssub8
+    sel         r6, r12, r11                ; obtain vp8_hevmask: r6
+
+    ;vp8_filter() function
+    ldr         r8, [src], pstep            ; p0
+    ldr         r12, c0x80808080
+    ldr         r9, [src], pstep            ; q0
+    ldr         r10, [src], pstep           ; q1
+
+    eor         r7, r7, r12                 ; p1 offset to convert to a signed value
+    eor         r8, r8, r12                 ; p0 offset to convert to a signed value
+    eor         r9, r9, r12                 ; q0 offset to convert to a signed value
+    eor         r10, r10, r12               ; q1 offset to convert to a signed value
+
+    str         r9, [sp]                    ; store qs0 temporarily
+    str         r8, [sp, #4]                ; store ps0 temporarily
+    str         r10, [sp, #8]               ; store qs1 temporarily
+    str         r7, [sp, #12]               ; store ps1 temporarily
+
+    qsub8       r7, r7, r10                 ; vp8_signed_char_clamp(ps1-qs1)
+    qsub8       r8, r9, r8                  ; vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
+
+    and         r7, r7, r6                  ; vp8_filter (r7) &= hev
+
+    qadd8       r7, r7, r8
+    ldr         r9, c0x03030303             ; r9 = 3 --modified for vp8
+
+    qadd8       r7, r7, r8
+    ldr         r10, c0x04040404
+
+    qadd8       r7, r7, r8
+    and         r7, r7, lr                  ; vp8_filter &= mask;
+
+    ;modify code for vp8 -- Filter1 = vp8_filter (r7)
+    qadd8       r8 , r7 , r9                ; Filter2 (r8) = vp8_signed_char_clamp(vp8_filter+3)
+    qadd8       r7 , r7 , r10               ; vp8_filter = vp8_signed_char_clamp(vp8_filter+4)
+
+    mov         r9, #0
+    shadd8      r8 , r8 , r9                ; Filter2 >>= 3
+    shadd8      r7 , r7 , r9                ; vp8_filter >>= 3
+    shadd8      r8 , r8 , r9
+    shadd8      r7 , r7 , r9
+    shadd8      lr , r8 , r9                ; lr: Filter2
+    shadd8      r7 , r7 , r9                ; r7: filter
+
+    ;usub8      lr, r8, r10                 ; s = (s==4)*-1
+    ;sel        lr, r11, r9
+    ;usub8      r8, r10, r8
+    ;sel        r8, r11, r9
+    ;and        r8, r8, lr                  ; -1 for each element that equals 4
+
+    ;calculate output
+    ;qadd8      lr, r8, r7                  ; u = vp8_signed_char_clamp(s + vp8_filter)
+
+    ldr         r8, [sp]                    ; load qs0
+    ldr         r9, [sp, #4]                ; load ps0
+
+    ldr         r10, c0x01010101
+
+    qsub8       r8 ,r8, r7                  ; u = vp8_signed_char_clamp(qs0 - vp8_filter)
+    qadd8       r9, r9, lr                  ; u = vp8_signed_char_clamp(ps0 + Filter2)
+
+    ;end of modification for vp8
+
+    mov         lr, #0
+    sadd8       r7, r7 , r10                ; vp8_filter += 1
+    shadd8      r7, r7, lr                  ; vp8_filter >>= 1
+
+    ldr         r11, [sp, #12]              ; load ps1
+    ldr         r10, [sp, #8]               ; load qs1
+
+    bic         r7, r7, r6                  ; vp8_filter &= ~hev
+    sub         src, src, pstep, lsl #2
+
+    qadd8       r11, r11, r7                ; u = vp8_signed_char_clamp(ps1 + vp8_filter)
+    qsub8       r10, r10,r7                 ; u = vp8_signed_char_clamp(qs1 - vp8_filter)
+
+    eor         r11, r11, r12               ; *op1 = u^0x80
+    str         r11, [src], pstep           ; store op1
+    eor         r9, r9, r12                 ; *op0 = u^0x80
+    str         r9, [src], pstep            ; store op0 result
+    eor         r8, r8, r12                 ; *oq0 = u^0x80
+    str         r8, [src], pstep            ; store oq0 result
+    eor         r10, r10, r12               ; *oq1 = u^0x80
+    str         r10, [src], pstep           ; store oq1
+
+    sub         src, src, pstep, lsl #1
+
+|hskip_filter|
+    add         src, src, #4
+    sub         src, src, pstep, lsl #2
+
+    subs        count, count, #1
+
+    ;pld            [src]
+    ;pld            [src, pstep]
+    ;pld            [src, pstep, lsl #1]
+    ;pld            [src, pstep, lsl #2]
+    ;pld            [src, pstep, lsl #3]
+
+    ldrne       r9, [src], pstep            ; p3
+    ldrne       r10, [src], pstep           ; p2
+    ldrne       r11, [src], pstep           ; p1
+
+    bne         Hnext8
+
+    add         sp, sp, #16
+    ldmia       sp!, {r4 - r11, pc}
+    ENDP        ; |vp8_loop_filter_horizontal_edge_armv6|
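+; In C terms the inner filter above is roughly (a sketch; clamp() stands
+; for signed-char saturation, ps*/qs* are the pixels with 0x80 xor'ed off,
+; and the >> 3 / >> 1 steps use the shadd8-by-zero trick):
+;
+;   f = clamp(ps1 - qs1) & hev;
+;   f = clamp(f + 3 * (qs0 - ps0)) & mask;
+;   Filter1 = clamp(f + 4) >> 3;
+;   Filter2 = clamp(f + 3) >> 3;
+;   qs0 = clamp(qs0 - Filter1);  ps0 = clamp(ps0 + Filter2);
+;   f = (Filter1 + 1) >> 1;  f &= ~hev;
+;   qs1 = clamp(qs1 - f);  ps1 = clamp(ps1 + f);
+;
+; with each result xor'ed with 0x80 again before the store.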
+
+
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+|vp8_mbloop_filter_horizontal_edge_armv6| PROC
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+    stmdb       sp!, {r4 - r11, lr}
+
+    sub         src, src, pstep, lsl #2     ; move src pointer down by 4 lines
+    ldr         count, [sp, #40]            ; count for 8-in-parallel
+    ldr         r6, [sp, #36]               ; load thresh address
+    sub         sp, sp, #16                 ; create temp buffer
+
+    ldr         r9, [src], pstep            ; p3
+    ldr         r4, [r2], #4                ; flimit
+    ldr         r10, [src], pstep           ; p2
+    ldr         r2, [r3], #4                ; limit
+    ldr         r11, [src], pstep           ; p1
+    uadd8       r4, r4, r4                  ; flimit * 2
+    ldr         r3, [r6], #4                ; thresh
+    mov         count, count, lsl #1        ; 4-in-parallel
+    uadd8       r4, r4, r2                  ; flimit * 2 + limit
+
+|MBHnext8|
+
+    ; vp8_filter_mask() function
+    ; calculate breakout conditions
+    ldr         r12, [src], pstep           ; p0
+
+    uqsub8      r6, r9, r10                 ; p3 - p2
+    uqsub8      r7, r10, r9                 ; p2 - p3
+    uqsub8      r8, r10, r11                ; p2 - p1
+    uqsub8      r10, r11, r10               ; p1 - p2
+
+    orr         r6, r6, r7                  ; abs (p3-p2)
+    orr         r8, r8, r10                 ; abs (p2-p1)
+    uqsub8      lr, r6, r2                  ; compare to limit. lr: vp8_filter_mask
+    uqsub8      r8, r8, r2                  ; compare to limit
+
+    uqsub8      r6, r11, r12                ; p1 - p0
+    orr         lr, lr, r8
+    uqsub8      r7, r12, r11                ; p0 - p1
+    ldr         r9, [src], pstep            ; q0
+    ldr         r10, [src], pstep           ; q1
+    orr         r6, r6, r7                  ; abs (p1-p0)
+    uqsub8      r7, r6, r2                  ; compare to limit
+    uqsub8      r8, r6, r3                  ; compare to thresh  -- save r8 for later
+    orr         lr, lr, r7
+
+    uqsub8      r6, r11, r10                ; p1 - q1
+    uqsub8      r7, r10, r11                ; q1 - p1
+    uqsub8      r11, r12, r9                ; p0 - q0
+    uqsub8      r12, r9, r12                ; q0 - p0
+    orr         r6, r6, r7                  ; abs (p1-q1)
+    ldr         r7, c0x7F7F7F7F
+    orr         r12, r11, r12               ; abs (p0-q0)
+    ldr         r11, [src], pstep           ; q2
+    uqadd8      r12, r12, r12               ; abs (p0-q0) * 2
+    and         r6, r7, r6, lsr #1          ; abs (p1-q1) / 2
+    uqsub8      r7, r9, r10                 ; q0 - q1
+    uqadd8      r12, r12, r6                ; abs (p0-q0)*2 + abs (p1-q1)/2
+    uqsub8      r6, r10, r9                 ; q1 - q0
+    uqsub8      r12, r12, r4                ; compare to flimit
+    uqsub8      r9, r11, r10                ; q2 - q1
+
+    orr         lr, lr, r12
+
+    ldr         r12, [src], pstep           ; q3
+
+    uqsub8      r10, r10, r11               ; q1 - q2
+    orr         r6, r7, r6                  ; abs (q1-q0)
+    orr         r10, r9, r10                ; abs (q2-q1)
+    uqsub8      r7, r6, r2                  ; compare to limit
+    uqsub8      r10, r10, r2                ; compare to limit
+    uqsub8      r6, r6, r3                  ; compare to thresh -- save r6 for later
+    orr         lr, lr, r7
+    orr         lr, lr, r10
+
+    uqsub8      r10, r12, r11               ; q3 - q2
+    uqsub8      r9, r11, r12                ; q2 - q3
+
+    mvn         r11, #0                     ; r11 == -1
+
+    orr         r10, r10, r9                ; abs (q3-q2)
+    uqsub8      r10, r10, r2                ; compare to limit
+
+    mov         r12, #0
+
+    orr         lr, lr, r10
+
+    usub8       lr, r12, lr                 ; use usub8 instead of ssub8
+    sel         lr, r11, r12                ; filter mask: lr
+
+    cmp         lr, #0
+    beq         mbhskip_filter               ; skip filtering
+
+    ;vp8_hevmask() function
+    ;calculate high edge variance
+    sub         src, src, pstep, lsl #2     ; move src pointer down by 6 lines
+    sub         src, src, pstep, lsl #1
+
+    orr         r10, r6, r8
+    ldr         r7, [src], pstep            ; p1
+
+    usub8       r10, r12, r10
+    sel         r6, r12, r11                ; hev mask: r6
+
+    ;vp8_mbfilter() function
+    ;p2 and q2 are only needed at the end, so they are not loaded yet.
+    ldr         r8, [src], pstep            ; p0
+    ldr         r12, c0x80808080
+    ldr         r9, [src], pstep            ; q0
+    ldr         r10, [src]                  ; q1
+
+    eor         r7, r7, r12                 ; ps1
+    eor         r8, r8, r12                 ; ps0
+    eor         r9, r9, r12                 ; qs0
+    eor         r10, r10, r12               ; qs1
+
+    qsub8       r12, r9, r8                 ; vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
+    str         r7, [sp, #12]               ; store ps1 temporarily
+    qsub8       r7, r7, r10                 ; vp8_signed_char_clamp(ps1-qs1)
+    str         r10, [sp, #8]               ; store qs1 temporarily
+    qadd8       r7, r7, r12
+    str         r9, [sp]                    ; store qs0 temporarily
+    qadd8       r7, r7, r12
+    str         r8, [sp, #4]                ; store ps0 temporarily
+    qadd8       r7, r7, r12                 ; vp8_filter: r7
+
+    ldr         r10, c0x03030303            ; r10 = 3 --modified for vp8
+    ldr         r9, c0x04040404
+
+    and         r7, r7, lr                  ; vp8_filter &= mask (lr is free)
+
+    mov         r12, r7                     ; Filter2: r12
+    and         r12, r12, r6                ; Filter2 &= hev
+
+    ;modify code for vp8
+    ;save bottom 3 bits so that we round one side +4 and the other +3
+    qadd8       r8 , r12 , r9               ; Filter1 (r8) = vp8_signed_char_clamp(Filter2+4)
+    qadd8       r12 , r12 , r10             ; Filter2 (r12) = vp8_signed_char_clamp(Filter2+3)
+
+    mov         r10, #0
+    shadd8      r8 , r8 , r10               ; Filter1 >>= 3
+    shadd8      r12 , r12 , r10             ; Filter2 >>= 3
+    shadd8      r8 , r8 , r10
+    shadd8      r12 , r12 , r10
+    shadd8      r8 , r8 , r10               ; r8: Filter1
+    shadd8      r12 , r12 , r10             ; r12: Filter2
+
+    ldr         r9, [sp]                    ; load qs0
+    ldr         r11, [sp, #4]               ; load ps0
+
+    qsub8       r9 , r9, r8                 ; qs0 = vp8_signed_char_clamp(qs0 - Filter1)
+    qadd8       r11, r11, r12               ; ps0 = vp8_signed_char_clamp(ps0 + Filter2)
+
+    ;save bottom 3 bits so that we round one side +4 and the other +3
+    ;and            r8, r12, r10                ; s = Filter2 & 7 (s: r8)
+    ;qadd8      r12 , r12 , r9              ; Filter2 = vp8_signed_char_clamp(Filter2+4)
+    ;mov            r10, #0
+    ;shadd8     r12 , r12 , r10             ; Filter2 >>= 3
+    ;usub8      lr, r8, r9                  ; s = (s==4)*-1
+    ;sel            lr, r11, r10
+    ;shadd8     r12 , r12 , r10
+    ;usub8      r8, r9, r8
+    ;sel            r8, r11, r10
+    ;ldr            r9, [sp]                    ; load qs0
+    ;ldr            r11, [sp, #4]               ; load ps0
+    ;shadd8     r12 , r12 , r10
+    ;and            r8, r8, lr                  ; -1 for each element that equals 4
+    ;qadd8      r10, r8, r12                ; u = vp8_signed_char_clamp(s + Filter2)
+    ;qsub8      r9 , r9, r12                ; qs0 = vp8_signed_char_clamp(qs0 - Filter2)
+    ;qadd8      r11, r11, r10               ; ps0 = vp8_signed_char_clamp(ps0 + u)
+
+    ;end of modification for vp8
+
+    bic         r12, r7, r6                 ; vp8_filter &= ~hev    ( r6 is free)
+    ;mov        r12, r7
+
+    ;roughly 3/7th difference across boundary
+    mov         lr, #0x1b                   ; 27
+    mov         r7, #0x3f                   ; 63
+
+    sxtb16      r6, r12
+    sxtb16      r10, r12, ror #8
+    smlabb      r8, r6, lr, r7
+    smlatb      r6, r6, lr, r7
+    smlabb      r7, r10, lr, r7
+    smultb      r10, r10, lr
+    ssat        r8, #8, r8, asr #7
+    ssat        r6, #8, r6, asr #7
+    add         r10, r10, #63
+    ssat        r7, #8, r7, asr #7
+    ssat        r10, #8, r10, asr #7
+
+    ldr         lr, c0x80808080
+
+    pkhbt       r6, r8, r6, lsl #16
+    pkhbt       r10, r7, r10, lsl #16
+    uxtb16      r6, r6
+    uxtb16      r10, r10
+
+    sub         src, src, pstep
+
+    orr         r10, r6, r10, lsl #8        ; u = vp8_signed_char_clamp((63 + Filter2 * 27)>>7)
+
+    qsub8       r8, r9, r10                 ; s = vp8_signed_char_clamp(qs0 - u)
+    qadd8       r10, r11, r10               ; s = vp8_signed_char_clamp(ps0 + u)
+    eor         r8, r8, lr                  ; *oq0 = s^0x80
+    str         r8, [src]                   ; store *oq0
+    sub         src, src, pstep
+    eor         r10, r10, lr                ; *op0 = s^0x80
+    str         r10, [src]                  ; store *op0
+
+    ;roughly 2/7th difference across boundary
+    mov         lr, #0x12                   ; 18
+    mov         r7, #0x3f                   ; 63
+
+    sxtb16      r6, r12
+    sxtb16      r10, r12, ror #8
+    smlabb      r8, r6, lr, r7
+    smlatb      r6, r6, lr, r7
+    smlabb      r9, r10, lr, r7
+    smlatb      r10, r10, lr, r7
+    ssat        r8, #8, r8, asr #7
+    ssat        r6, #8, r6, asr #7
+    ssat        r9, #8, r9, asr #7
+    ssat        r10, #8, r10, asr #7
+
+    ldr         lr, c0x80808080
+
+    pkhbt       r6, r8, r6, lsl #16
+    pkhbt       r10, r9, r10, lsl #16
+
+    ldr         r9, [sp, #8]                ; load qs1
+    ldr         r11, [sp, #12]              ; load ps1
+
+    uxtb16      r6, r6
+    uxtb16      r10, r10
+
+    sub         src, src, pstep
+
+    orr         r10, r6, r10, lsl #8        ; u = vp8_signed_char_clamp((63 + Filter2 * 18)>>7)
+
+    qadd8       r11, r11, r10               ; s = vp8_signed_char_clamp(ps1 + u)
+    qsub8       r8, r9, r10                 ; s = vp8_signed_char_clamp(qs1 - u)
+    eor         r11, r11, lr                ; *op1 = s^0x80
+    str         r11, [src], pstep           ; store *op1
+    eor         r8, r8, lr                  ; *oq1 = s^0x80
+    add         src, src, pstep, lsl #1
+
+    mov         r7, #0x3f                   ; 63
+
+    str         r8, [src], pstep            ; store *oq1
+
+    ;roughly 1/7th difference across boundary
+    mov         lr, #0x9                    ; 9
+    ldr         r9, [src]                   ; load q2
+
+    sxtb16      r6, r12
+    sxtb16      r10, r12, ror #8
+    smlabb      r8, r6, lr, r7
+    smlatb      r6, r6, lr, r7
+    smlabb      r12, r10, lr, r7
+    smlatb      r10, r10, lr, r7
+    ssat        r8, #8, r8, asr #7
+    ssat        r6, #8, r6, asr #7
+    ssat        r12, #8, r12, asr #7
+    ssat        r10, #8, r10, asr #7
+
+    sub         src, src, pstep, lsl #2
+
+    pkhbt       r6, r8, r6, lsl #16
+    pkhbt       r10, r12, r10, lsl #16
+
+    sub         src, src, pstep
+    ldr         lr, c0x80808080
+
+    ldr         r11, [src]                  ; load p2
+
+    uxtb16      r6, r6
+    uxtb16      r10, r10
+
+    eor         r9, r9, lr
+    eor         r11, r11, lr
+
+    orr         r10, r6, r10, lsl #8        ; u = vp8_signed_char_clamp((63 + Filter2 * 9)>>7)
+
+    qadd8       r8, r11, r10                ; s = vp8_signed_char_clamp(ps2 + u)
+    qsub8       r10, r9, r10                ; s = vp8_signed_char_clamp(qs2 - u)
+    eor         r8, r8, lr                  ; *op2 = s^0x80
+    str         r8, [src], pstep, lsl #2    ; store *op2
+    add         src, src, pstep
+    eor         r10, r10, lr                ; *oq2 = s^0x80
+    str         r10, [src], pstep, lsl #1   ; store *oq2
+
+|mbhskip_filter|
+    add         src, src, #4
+    sub         src, src, pstep, lsl #3
+    subs        count, count, #1
+
+    ldrne       r9, [src], pstep            ; p3
+    ldrne       r10, [src], pstep           ; p2
+    ldrne       r11, [src], pstep           ; p1
+
+    bne         MBHnext8
+
+    add         sp, sp, #16
+    ldmia       sp!, {r4 - r11, pc}
+    ENDP        ; |vp8_mbloop_filter_horizontal_edge_armv6|
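+; In C terms the wider macroblock-edge filter above is roughly (a sketch;
+; clamp() stands for signed-char saturation):
+;
+;   f  = clamp(clamp(ps1 - qs1) + 3 * (qs0 - ps0)) & mask;
+;   f2 = f & hev;
+;   Filter1 = clamp(f2 + 4) >> 3;  Filter2 = clamp(f2 + 3) >> 3;
+;   qs0 = clamp(qs0 - Filter1);  ps0 = clamp(ps0 + Filter2);
+;   f &= ~hev;                /* wide taps only where edge variance is low */
+;   u = clamp((63 + f * 27) >> 7);  qs0 = clamp(qs0 - u);  ps0 = clamp(ps0 + u);
+;   u = clamp((63 + f * 18) >> 7);  qs1 = clamp(qs1 - u);  ps1 = clamp(ps1 + u);
+;   u = clamp((63 + f *  9) >> 7);  qs2 = clamp(qs2 - u);  ps2 = clamp(ps2 + u);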
+
+
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+|vp8_loop_filter_vertical_edge_armv6| PROC
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+    stmdb       sp!, {r4 - r11, lr}
+
+    sub         src, src, #4                ; move src pointer down by 4
+    ldr         count, [sp, #40]            ; count for 8-in-parallel
+    ldr         r12, [sp, #36]              ; load thresh address
+    sub         sp, sp, #16                 ; create temp buffer
+
+    ldr         r6, [src], pstep            ; load source data
+    ldr         r4, [r2], #4                ; flimit
+    ldr         r7, [src], pstep
+    ldr         r2, [r3], #4                ; limit
+    ldr         r8, [src], pstep
+    uadd8       r4, r4, r4                  ; flimit * 2
+    ldr         r3, [r12], #4               ; thresh
+    ldr         lr, [src], pstep
+    mov         count, count, lsl #1        ; 4-in-parallel
+    uadd8       r4, r4, r2                  ; flimit * 2 + limit
+
+|Vnext8|
+
+    ; vp8_filter_mask() function
+    ; calculate breakout conditions
+    ; transpose the source data for 4-in-parallel operation
+    TRANSPOSE_MATRIX r6, r7, r8, lr, r9, r10, r11, r12
+
+    uqsub8      r7, r9, r10                 ; p3 - p2
+    uqsub8      r8, r10, r9                 ; p2 - p3
+    uqsub8      r9, r10, r11                ; p2 - p1
+    uqsub8      r10, r11, r10               ; p1 - p2
+    orr         r7, r7, r8                  ; abs (p3-p2)
+    orr         r10, r9, r10                ; abs (p2-p1)
+    uqsub8      lr, r7, r2                  ; compare to limit. lr: vp8_filter_mask
+    uqsub8      r10, r10, r2                ; compare to limit
+
+    sub         src, src, pstep, lsl #2     ; move src pointer down by 4 lines
+
+    orr         lr, lr, r10
+
+    uqsub8      r6, r11, r12                ; p1 - p0
+    uqsub8      r7, r12, r11                ; p0 - p1
+    add         src, src, #4                ; move src pointer up by 4
+    orr         r6, r6, r7                  ; abs (p1-p0)
+    str         r11, [sp, #12]              ; save p1
+    uqsub8      r10, r6, r2                 ; compare to limit
+    uqsub8      r11, r6, r3                 ; compare to thresh
+    orr         lr, lr, r10
+
+    ; the transpose uses 8 regs (r6 - r12 and lr), so save the live values now
+    ; transpose the source data for 4-in-parallel operation
+    ldr         r6, [src], pstep            ; load source data
+    str         r11, [sp]                   ; push r11 to stack
+    ldr         r7, [src], pstep
+    str         r12, [sp, #4]               ; save current reg before loading q0 - q3 data
+    ldr         r8, [src], pstep
+    str         lr, [sp, #8]
+    ldr         lr, [src], pstep
+
+    TRANSPOSE_MATRIX r6, r7, r8, lr, r9, r10, r11, r12
+
+    ldr         lr, [sp, #8]                ; load back (f)limit accumulator
+
+    uqsub8      r6, r12, r11                ; q3 - q2
+    uqsub8      r7, r11, r12                ; q2 - q3
+    uqsub8      r12, r11, r10               ; q2 - q1
+    uqsub8      r11, r10, r11               ; q1 - q2
+    orr         r6, r6, r7                  ; abs (q3-q2)
+    orr         r7, r12, r11                ; abs (q2-q1)
+    uqsub8      r6, r6, r2                  ; compare to limit
+    uqsub8      r7, r7, r2                  ; compare to limit
+    ldr         r11, [sp, #4]               ; load back p0
+    ldr         r12, [sp, #12]              ; load back p1
+    orr         lr, lr, r6
+    orr         lr, lr, r7
+
+    uqsub8      r6, r11, r9                 ; p0 - q0
+    uqsub8      r7, r9, r11                 ; q0 - p0
+    uqsub8      r8, r12, r10                ; p1 - q1
+    uqsub8      r11, r10, r12               ; q1 - p1
+    orr         r6, r6, r7                  ; abs (p0-q0)
+    ldr         r7, c0x7F7F7F7F
+    orr         r8, r8, r11                 ; abs (p1-q1)
+    uqadd8      r6, r6, r6                  ; abs (p0-q0) * 2
+    and         r8, r7, r8, lsr #1          ; abs (p1-q1) / 2
+    uqsub8      r11, r10, r9                ; q1 - q0
+    uqadd8      r6, r8, r6                  ; abs (p0-q0)*2 + abs (p1-q1)/2
+    uqsub8      r12, r9, r10                ; q0 - q1
+    uqsub8      r6, r6, r4                  ; compare to flimit
+
+    orr         r9, r11, r12                ; abs (q1-q0)
+    uqsub8      r8, r9, r2                  ; compare to limit
+    uqsub8      r10, r9, r3                 ; compare to thresh
+    orr         lr, lr, r6
+    orr         lr, lr, r8
+
+    mvn         r11, #0                     ; r11 == -1
+    mov         r12, #0
+
+    usub8       lr, r12, lr
+    ldr         r9, [sp]                    ; load the compared result
+    sel         lr, r11, r12                ; filter mask: lr
+
+    cmp         lr, #0
+    beq         vskip_filter                 ; skip filtering
+
+    ;vp8_hevmask() function
+    ;calculate high edge variance
+
+    sub         src, src, pstep, lsl #2     ; move src pointer down by 4 lines
+
+    orr         r9, r9, r10
+
+    ldrh        r7, [src, #-2]
+    ldrh        r8, [src], pstep
+
+    usub8       r9, r12, r9
+    sel         r6, r12, r11                ; hev mask: r6
+
+    ;vp8_filter() function
+    ; load source data into r6, r11, r12, lr
+    ldrh        r9, [src, #-2]
+    ldrh        r10, [src], pstep
+
+    pkhbt       r12, r7, r8, lsl #16
+
+    ldrh        r7, [src, #-2]
+    ldrh        r8, [src], pstep
+
+    pkhbt       r11, r9, r10, lsl #16
+
+    ldrh        r9, [src, #-2]
+    ldrh        r10, [src], pstep
+
+    ; the transpose needs 8 regs (r6 - r12 and lr), so save r6 and lr first
+    str         r6, [sp]
+    str         lr, [sp, #4]
+
+    pkhbt       r6, r7, r8, lsl #16
+    pkhbt       lr, r9, r10, lsl #16
+
+    ;transpose r12, r11, r6, lr to r7, r8, r9, r10
+    TRANSPOSE_MATRIX r12, r11, r6, lr, r7, r8, r9, r10
+
+    ;load back hev_mask r6 and filter_mask lr
+    ldr         r12, c0x80808080
+    ldr         r6, [sp]
+    ldr         lr, [sp, #4]
+
+    eor         r7, r7, r12                 ; p1 offset to convert to a signed value
+    eor         r8, r8, r12                 ; p0 offset to convert to a signed value
+    eor         r9, r9, r12                 ; q0 offset to convert to a signed value
+    eor         r10, r10, r12               ; q1 offset to convert to a signed value
+
+    str         r9, [sp]                    ; store qs0 temporarily
+    str         r8, [sp, #4]                ; store ps0 temporarily
+    str         r10, [sp, #8]               ; store qs1 temporarily
+    str         r7, [sp, #12]               ; store ps1 temporarily
+
+    qsub8       r7, r7, r10                 ; vp8_signed_char_clamp(ps1-qs1)
+    qsub8       r8, r9, r8                  ; vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
+
+    and         r7, r7, r6                  ;  vp8_filter (r7) &= hev (r7 : filter)
+
+    qadd8       r7, r7, r8
+    ldr         r9, c0x03030303             ; r9 = 3 --modified for vp8
+
+    qadd8       r7, r7, r8
+    ldr         r10, c0x04040404
+
+    qadd8       r7, r7, r8
+    ;mvn         r11, #0                     ; r11 == -1
+
+    and         r7, r7, lr                  ; vp8_filter &= mask
+
+    ;modify code for vp8 -- Filter1 = vp8_filter (r7)
+    qadd8       r8 , r7 , r9                ; Filter2 (r8) = vp8_signed_char_clamp(vp8_filter+3)
+    qadd8       r7 , r7 , r10               ; vp8_filter = vp8_signed_char_clamp(vp8_filter+4)
+
+    mov         r9, #0
+    shadd8      r8 , r8 , r9                ; Filter2 >>= 3
+    shadd8      r7 , r7 , r9                ; vp8_filter >>= 3
+    shadd8      r8 , r8 , r9
+    shadd8      r7 , r7 , r9
+    shadd8      lr , r8 , r9                ; lr: Filter2
+    shadd8      r7 , r7 , r9                ; r7: filter
+
+    ;usub8      lr, r8, r10                 ; s = (s==4)*-1
+    ;sel            lr, r11, r9
+    ;usub8      r8, r10, r8
+    ;sel            r8, r11, r9
+    ;and            r8, r8, lr                  ; -1 for each element that equals 4 -- r8: s
+
+    ;calculate output
+    ;qadd8      lr, r8, r7                  ; u = vp8_signed_char_clamp(s + vp8_filter)
+
+    ldr         r8, [sp]                    ; load qs0
+    ldr         r9, [sp, #4]                ; load ps0
+
+    ldr         r10, c0x01010101
+
+    qsub8       r8, r8, r7                  ; u = vp8_signed_char_clamp(qs0 - vp8_filter)
+    qadd8       r9, r9, lr                  ; u = vp8_signed_char_clamp(ps0 + Filter2)
+    ;end of modification for vp8
+
+    eor         r8, r8, r12
+    eor         r9, r9, r12
+
+    mov         lr, #0
+
+    sadd8       r7, r7, r10
+    shadd8      r7, r7, lr
+
+    ldr         r10, [sp, #8]               ; load qs1
+    ldr         r11, [sp, #12]              ; load ps1
+
+    bic         r7, r7, r6                  ; r7: vp8_filter
+
+    qsub8       r10 , r10, r7               ; u = vp8_signed_char_clamp(qs1 - vp8_filter)
+    qadd8       r11, r11, r7                ; u = vp8_signed_char_clamp(ps1 + vp8_filter)
+    eor         r10, r10, r12
+    eor         r11, r11, r12
+
+    sub         src, src, pstep, lsl #2
+
+    ;use the TRANSPOSE_MATRIX macro to transpose the output - input: q1, q0, p0, p1
+    ;output is b0, b1, b2, b3
+    ;b0: 03 02 01 00
+    ;b1: 13 12 11 10
+    ;b2: 23 22 21 20
+    ;b3: 33 32 31 30
+    ;    p1 p0 q0 q1
+    ;   (a3 a2 a1 a0)
+    TRANSPOSE_MATRIX r11, r9, r8, r10, r6, r7, r12, lr
+
+    strh        r6, [src, #-2]              ; store the result
+    mov         r6, r6, lsr #16
+    strh        r6, [src], pstep
+
+    strh        r7, [src, #-2]
+    mov         r7, r7, lsr #16
+    strh        r7, [src], pstep
+
+    strh        r12, [src, #-2]
+    mov         r12, r12, lsr #16
+    strh        r12, [src], pstep
+
+    strh        lr, [src, #-2]
+    mov         lr, lr, lsr #16
+    strh        lr, [src], pstep
+
+|vskip_filter|
+    sub         src, src, #4
+    subs        count, count, #1
+
+    ldrne       r6, [src], pstep            ; load source data
+    ldrne       r7, [src], pstep
+    ldrne       r8, [src], pstep
+    ldrne       lr, [src], pstep
+
+    bne         Vnext8
+
+    add         sp, sp, #16
+
+    ldmia       sp!, {r4 - r11, pc}
+    ENDP        ; |vp8_loop_filter_vertical_edge_armv6|
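+; The vertical-edge filter loads 4x4 tiles straddling the edge, transposes
+; them with TRANSPOSE_MATRIX so that columns become registers, applies
+; exactly the same arithmetic as the horizontal case above, then transposes
+; the four filtered rows back and stores them as halfword pairs.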
+
+
+
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+|vp8_mbloop_filter_vertical_edge_armv6| PROC
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+    stmdb       sp!, {r4 - r11, lr}
+
+    sub         src, src, #4                ; move src pointer down by 4
+    ldr         count, [sp, #40]            ; count for 8-in-parallel
+    ldr         r12, [sp, #36]              ; load thresh address
+    sub         sp, sp, #16                 ; create temp buffer
+
+    ldr         r6, [src], pstep            ; load source data
+    ldr         r4, [r2], #4                ; flimit
+    ldr         r7, [src], pstep
+    ldr         r2, [r3], #4                ; limit
+    ldr         r8, [src], pstep
+    uadd8       r4, r4, r4                  ; flimit * 2
+    ldr         r3, [r12], #4               ; thresh
+    ldr         lr, [src], pstep
+    mov         count, count, lsl #1        ; 4-in-parallel
+    uadd8       r4, r4, r2                  ; flimit * 2 + limit
+
+|MBVnext8|
+    ; vp8_filter_mask() function
+    ; calculate breakout conditions
+    ; transpose the source data for 4-in-parallel operation
+    TRANSPOSE_MATRIX r6, r7, r8, lr, r9, r10, r11, r12
+
+    uqsub8      r7, r9, r10                 ; p3 - p2
+    uqsub8      r8, r10, r9                 ; p2 - p3
+    uqsub8      r9, r10, r11                ; p2 - p1
+    uqsub8      r10, r11, r10               ; p1 - p2
+    orr         r7, r7, r8                  ; abs (p3-p2)
+    orr         r10, r9, r10                ; abs (p2-p1)
+    uqsub8      lr, r7, r2                  ; compare to limit. lr: vp8_filter_mask
+    uqsub8      r10, r10, r2                ; compare to limit
+
+    sub         src, src, pstep, lsl #2     ; move src pointer down by 4 lines
+
+    orr         lr, lr, r10
+
+    uqsub8      r6, r11, r12                ; p1 - p0
+    uqsub8      r7, r12, r11                ; p0 - p1
+    add         src, src, #4                ; move src pointer up by 4
+    orr         r6, r6, r7                  ; abs (p1-p0)
+    str         r11, [sp, #12]              ; save p1
+    uqsub8      r10, r6, r2                 ; compare to limit
+    uqsub8      r11, r6, r3                 ; compare to thresh
+    orr         lr, lr, r10
+
+    ; transpose uses 8 regs (r6 - r12 and lr). Need to save reg values now
+    ; transpose the source data for 4-in-parallel operation
+    ldr         r6, [src], pstep            ; load source data
+    str         r11, [sp]                   ; save the thresh comparison result
+    ldr         r7, [src], pstep
+    str         r12, [sp, #4]               ; save current reg before load q0 - q3 data
+    ldr         r8, [src], pstep
+    str         lr, [sp, #8]
+    ldr         lr, [src], pstep
+
+    TRANSPOSE_MATRIX r6, r7, r8, lr, r9, r10, r11, r12
+
+    ldr         lr, [sp, #8]                ; load back (f)limit accumulator
+
+    uqsub8      r6, r12, r11                ; q3 - q2
+    uqsub8      r7, r11, r12                ; q2 - q3
+    uqsub8      r12, r11, r10               ; q2 - q1
+    uqsub8      r11, r10, r11               ; q1 - q2
+    orr         r6, r6, r7                  ; abs (q3-q2)
+    orr         r7, r12, r11                ; abs (q2-q1)
+    uqsub8      r6, r6, r2                  ; compare to limit
+    uqsub8      r7, r7, r2                  ; compare to limit
+    ldr         r11, [sp, #4]               ; load back p0
+    ldr         r12, [sp, #12]              ; load back p1
+    orr         lr, lr, r6
+    orr         lr, lr, r7
+
+    uqsub8      r6, r11, r9                 ; p0 - q0
+    uqsub8      r7, r9, r11                 ; q0 - p0
+    uqsub8      r8, r12, r10                ; p1 - q1
+    uqsub8      r11, r10, r12               ; q1 - p1
+    orr         r6, r6, r7                  ; abs (p0-q0)
+    ldr         r7, c0x7F7F7F7F
+    orr         r8, r8, r11                 ; abs (p1-q1)
+    uqadd8      r6, r6, r6                  ; abs (p0-q0) * 2
+    and         r8, r7, r8, lsr #1          ; abs (p1-q1) / 2
+    uqsub8      r11, r10, r9                ; q1 - q0
+    uqadd8      r6, r8, r6                  ; abs (p0-q0)*2 + abs (p1-q1)/2
+    uqsub8      r12, r9, r10                ; q0 - q1
+    uqsub8      r6, r6, r4                  ; compare to flimit
+
+    orr         r9, r11, r12                ; abs (q1-q0)
+    uqsub8      r8, r9, r2                  ; compare to limit
+    uqsub8      r10, r9, r3                 ; compare to thresh
+    orr         lr, lr, r6
+    orr         lr, lr, r8
+
+    mvn         r11, #0                     ; r11 == -1
+    mov         r12, #0
+
+    usub8       lr, r12, lr
+    ldr         r9, [sp]                    ; load the compared result
+    sel         lr, r11, r12                ; filter mask: lr
+
+    cmp         lr, #0
+    beq         mbvskip_filter               ; skip filtering
+
+
+    ;vp8_hevmask() function
+    ;calculate high edge variance
+
+    sub         src, src, pstep, lsl #2     ; move src pointer down by 4 lines
+
+    orr         r9, r9, r10
+
+    ldrh        r7, [src, #-2]
+    ldrh        r8, [src], pstep
+
+    usub8       r9, r12, r9
+    sel         r6, r12, r11                ; hev mask: r6
+
+
+    ; vp8_mbfilter() function
+    ; p2, q2 are only needed at the end. Don't need to load them in now.
+    ; Transpose needs 8 regs (r6 - r12, and lr). Save r6 and lr first
+    ; load source data to r6, r11, r12, lr
+    ldrh        r9, [src, #-2]
+    ldrh        r10, [src], pstep
+
+    pkhbt       r12, r7, r8, lsl #16
+
+    ldrh        r7, [src, #-2]
+    ldrh        r8, [src], pstep
+
+    pkhbt       r11, r9, r10, lsl #16
+
+    ldrh        r9, [src, #-2]
+    ldrh        r10, [src], pstep
+
+    str         r6, [sp]                    ; save r6
+    str         lr, [sp, #4]                ; save lr
+
+    pkhbt       r6, r7, r8, lsl #16
+    pkhbt       lr, r9, r10, lsl #16
+
+    ;transpose r12, r11, r6, lr to p1, p0, q0, q1
+    TRANSPOSE_MATRIX r12, r11, r6, lr, r7, r8, r9, r10
+
+    ;load back hev_mask r6 and filter_mask lr
+    ldr         r12, c0x80808080
+    ldr         r6, [sp]
+    ldr         lr, [sp, #4]
+
+    eor         r7, r7, r12                 ; ps1
+    eor         r8, r8, r12                 ; ps0
+    eor         r9, r9, r12                 ; qs0
+    eor         r10, r10, r12               ; qs1
+
+    qsub8       r12, r9, r8                 ; vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
+    str         r7, [sp, #12]               ; store ps1 temporarily
+    qsub8       r7, r7, r10                 ; vp8_signed_char_clamp(ps1-qs1)
+    str         r10, [sp, #8]               ; store qs1 temporarily
+    qadd8       r7, r7, r12
+    str         r9, [sp]                    ; store qs0 temporarily
+    qadd8       r7, r7, r12
+    str         r8, [sp, #4]                ; store ps0 temporarily
+    qadd8       r7, r7, r12                 ; vp8_filter: r7
+
+    ldr         r10, c0x03030303            ; r10 = 3 --modified for vp8
+    ldr         r9, c0x04040404
+    ;mvn         r11, #0                     ; r11 == -1
+
+    and         r7, r7, lr                  ; vp8_filter &= mask (lr is free)
+
+    mov         r12, r7                     ; Filter2: r12
+    and         r12, r12, r6                ; Filter2 &= hev
+
+    ;modify code for vp8
+    ;save bottom 3 bits so that we round one side +4 and the other +3
+    qadd8       r8 , r12 , r9               ; Filter1 (r8) = vp8_signed_char_clamp(Filter2+4)
+    qadd8       r12 , r12 , r10             ; Filter2 (r12) = vp8_signed_char_clamp(Filter2+3)
+
+    mov         r10, #0
+    shadd8      r8 , r8 , r10               ; Filter1 >>= 3
+    shadd8      r12 , r12 , r10             ; Filter2 >>= 3
+    shadd8      r8 , r8 , r10
+    shadd8      r12 , r12 , r10
+    shadd8      r8 , r8 , r10               ; r8: Filter1
+    shadd8      r12 , r12 , r10             ; r12: Filter2
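+
+    ; scalar sketch of the sequence above (illustrative, not upstream code):
+    ;   Filter1 = vp8_signed_char_clamp(Filter2 + 4) >> 3;
+    ;   Filter2 = vp8_signed_char_clamp(Filter2 + 3) >> 3;
+    ; each shadd8 with zero is a signed halving add, so three in a row give
+    ; the arithmetic >> 3 across all four byte lanes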
+
+    ldr         r9, [sp]                    ; load qs0
+    ldr         r11, [sp, #4]               ; load ps0
+
+    qsub8       r9 , r9, r8                 ; qs0 = vp8_signed_char_clamp(qs0 - Filter1)
+    qadd8       r11, r11, r12               ; ps0 = vp8_signed_char_clamp(ps0 + Filter2)
+
+    ;save bottom 3 bits so that we round one side +4 and the other +3
+    ;and            r8, r12, r10                ; s = Filter2 & 7 (s: r8)
+    ;qadd8      r12 , r12 , r9              ; Filter2 = vp8_signed_char_clamp(Filter2+4)
+    ;mov            r10, #0
+    ;shadd8     r12 , r12 , r10             ; Filter2 >>= 3
+    ;usub8      lr, r8, r9                  ; s = (s==4)*-1
+    ;sel            lr, r11, r10
+    ;shadd8     r12 , r12 , r10
+    ;usub8      r8, r9, r8
+    ;sel            r8, r11, r10
+    ;ldr            r9, [sp]                    ; load qs0
+    ;ldr            r11, [sp, #4]               ; load ps0
+    ;shadd8     r12 , r12 , r10
+    ;and            r8, r8, lr                  ; -1 for each element that equals 4
+    ;qadd8      r10, r8, r12                ; u = vp8_signed_char_clamp(s + Filter2)
+    ;qsub8      r9 , r9, r12                ; qs0 = vp8_signed_char_clamp(qs0 - Filter2)
+    ;qadd8      r11, r11, r10               ; ps0 = vp8_signed_char_clamp(ps0 + u)
+
+    ;end of modification for vp8
+
+    bic         r12, r7, r6                 ;vp8_filter &= ~hev    ( r6 is free)
+    ;mov            r12, r7
+
+    ;roughly 3/7th difference across boundary
+    mov         lr, #0x1b                   ; 27
+    mov         r7, #0x3f                   ; 63
+
+    sxtb16      r6, r12
+    sxtb16      r10, r12, ror #8
+    smlabb      r8, r6, lr, r7
+    smlatb      r6, r6, lr, r7
+    smlabb      r7, r10, lr, r7
+    smultb      r10, r10, lr
+    ssat        r8, #8, r8, asr #7
+    ssat        r6, #8, r6, asr #7
+    add         r10, r10, #63
+    ssat        r7, #8, r7, asr #7
+    ssat        r10, #8, r10, asr #7
+
+    ldr         lr, c0x80808080
+
+    pkhbt       r6, r8, r6, lsl #16
+    pkhbt       r10, r7, r10, lsl #16
+    uxtb16      r6, r6
+    uxtb16      r10, r10
+
+    sub         src, src, pstep, lsl #2     ; move src pointer down by 4 lines
+
+    orr         r10, r6, r10, lsl #8        ; u = vp8_signed_char_clamp((63 + Filter2 * 27)>>7)
+
+    qsub8       r8, r9, r10                 ; s = vp8_signed_char_clamp(qs0 - u)
+    qadd8       r10, r11, r10               ; s = vp8_signed_char_clamp(ps0 + u)
+    eor         r8, r8, lr                  ; *oq0 = s^0x80
+    eor         r10, r10, lr                ; *op0 = s^0x80
+
+    strb        r10, [src, #-1]             ; store op0 result
+    strb        r8, [src], pstep            ; store oq0 result
+    mov         r10, r10, lsr #8
+    mov         r8, r8, lsr #8
+    strb        r10, [src, #-1]
+    strb        r8, [src], pstep
+    mov         r10, r10, lsr #8
+    mov         r8, r8, lsr #8
+    strb        r10, [src, #-1]
+    strb        r8, [src], pstep
+    mov         r10, r10, lsr #8
+    mov         r8, r8, lsr #8
+    strb        r10, [src, #-1]
+    strb        r8, [src], pstep
+
+    ;roughly 2/7th difference across boundary
+    mov         lr, #0x12                   ; 18
+    mov         r7, #0x3f                   ; 63
+
+    sxtb16      r6, r12
+    sxtb16      r10, r12, ror #8
+    smlabb      r8, r6, lr, r7
+    smlatb      r6, r6, lr, r7
+    smlabb      r9, r10, lr, r7
+    smlatb      r10, r10, lr, r7
+    ssat        r8, #8, r8, asr #7
+    ssat        r6, #8, r6, asr #7
+    ssat        r9, #8, r9, asr #7
+    ssat        r10, #8, r10, asr #7
+
+    sub         src, src, pstep, lsl #2     ; move src pointer down by 4 lines
+
+    pkhbt       r6, r8, r6, lsl #16
+    pkhbt       r10, r9, r10, lsl #16
+
+    ldr         r9, [sp, #8]                ; load qs1
+    ldr         r11, [sp, #12]              ; load ps1
+    ldr         lr, c0x80808080
+
+    uxtb16      r6, r6
+    uxtb16      r10, r10
+
+    add         src, src, #2
+
+    orr         r10, r6, r10, lsl #8        ; u = vp8_signed_char_clamp((63 + Filter2 * 18)>>7)
+
+    qsub8       r8, r9, r10                 ; s = vp8_signed_char_clamp(qs1 - u)
+    qadd8       r10, r11, r10               ; s = vp8_signed_char_clamp(ps1 + u)
+    eor         r8, r8, lr                  ; *oq1 = s^0x80
+    eor         r10, r10, lr                ; *op1 = s^0x80
+
+    ldrb        r11, [src, #-5]             ; load p2 for 1/7th difference across boundary
+    strb        r10, [src, #-4]             ; store op1
+    strb        r8, [src, #-1]              ; store oq1
+    ldrb        r9, [src], pstep            ; load q2 for 1/7th difference across boundary
+
+    mov         r10, r10, lsr #8
+    mov         r8, r8, lsr #8
+
+    ldrb        r6, [src, #-5]
+    strb        r10, [src, #-4]
+    strb        r8, [src, #-1]
+    ldrb        r7, [src], pstep
+
+    mov         r10, r10, lsr #8
+    mov         r8, r8, lsr #8
+    orr         r11, r11, r6, lsl #8
+    orr         r9, r9, r7, lsl #8
+
+    ldrb        r6, [src, #-5]
+    strb        r10, [src, #-4]
+    strb        r8, [src, #-1]
+    ldrb        r7, [src], pstep
+
+    mov         r10, r10, lsr #8
+    mov         r8, r8, lsr #8
+    orr         r11, r11, r6, lsl #16
+    orr         r9, r9, r7, lsl #16
+
+    ldrb        r6, [src, #-5]
+    strb        r10, [src, #-4]
+    strb        r8, [src, #-1]
+    ldrb        r7, [src], pstep
+    orr         r11, r11, r6, lsl #24
+    orr         r9, r9, r7, lsl #24
+
+    ;roughly 1/7th difference across boundary
+    eor         r9, r9, lr
+    eor         r11, r11, lr
+
+    mov         lr, #0x9                    ; 9
+    mov         r7, #0x3f                   ; 63
+
+    sxtb16      r6, r12
+    sxtb16      r10, r12, ror #8
+    smlabb      r8, r6, lr, r7
+    smlatb      r6, r6, lr, r7
+    smlabb      r12, r10, lr, r7
+    smlatb      r10, r10, lr, r7
+    ssat        r8, #8, r8, asr #7
+    ssat        r6, #8, r6, asr #7
+    ssat        r12, #8, r12, asr #7
+    ssat        r10, #8, r10, asr #7
+
+    sub         src, src, pstep, lsl #2
+
+    pkhbt       r6, r8, r6, lsl #16
+    pkhbt       r10, r12, r10, lsl #16
+
+    uxtb16      r6, r6
+    uxtb16      r10, r10
+
+    ldr         lr, c0x80808080
+
+    orr         r10, r6, r10, lsl #8        ; u = vp8_signed_char_clamp((63 + Filter2 * 9)>>7)
+
+    qadd8       r8, r11, r10                ; s = vp8_signed_char_clamp(ps2 + u)
+    qsub8       r10, r9, r10                ; s = vp8_signed_char_clamp(qs2 - u)
+    eor         r8, r8, lr                  ; *op2 = s^0x80
+    eor         r10, r10, lr                ; *oq2 = s^0x80
+
+    strb        r8, [src, #-5]              ; store *op2
+    strb        r10, [src], pstep           ; store *oq2
+    mov         r8, r8, lsr #8
+    mov         r10, r10, lsr #8
+    strb        r8, [src, #-5]
+    strb        r10, [src], pstep
+    mov         r8, r8, lsr #8
+    mov         r10, r10, lsr #8
+    strb        r8, [src, #-5]
+    strb        r10, [src], pstep
+    mov         r8, r8, lsr #8
+    mov         r10, r10, lsr #8
+    strb        r8, [src, #-5]
+    strb        r10, [src], pstep
+
+    ;adjust src pointer for next loop
+    sub         src, src, #2
+
+|mbvskip_filter|
+    sub         src, src, #4
+    subs        count, count, #1
+
+    ldrne       r6, [src], pstep            ; load source data
+    ldrne       r7, [src], pstep
+    ldrne       r8, [src], pstep
+    ldrne       lr, [src], pstep
+
+    bne         MBVnext8
+
+    add         sp, sp, #16
+
+    ldmia       sp!, {r4 - r11, pc}
+    ENDP        ; |vp8_mbloop_filter_vertical_edge_armv6|
+
+; Constant Pool
+c0x80808080 DCD     0x80808080
+c0x03030303 DCD     0x03030303
+c0x04040404 DCD     0x04040404
+c0x01010101 DCD     0x01010101
+c0x7F7F7F7F DCD     0x7F7F7F7F
+
+    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/armv6/recon_v6.asm
@@ -0,0 +1,281 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_recon_b_armv6|
+    EXPORT  |vp8_recon2b_armv6|
+    EXPORT  |vp8_recon4b_armv6|
+
+    AREA    |.text|, CODE, READONLY  ; name this block of code
+prd     RN  r0
+dif     RN  r1
+dst     RN  r2
+stride      RN  r3
+
+;void recon_b(unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int stride)
+; R0 char* pred_ptr
+; R1 short * dif_ptr
+; R2 char * dst_ptr
+; R3 int stride
+
+; Description:
+; Loop through the block adding the Pred and Diff together.  Clamp and then
+; store back into the Dst.
+
+; Restrictions :
+; all buffers are expected to be 4 byte aligned coming in and
+; going out.
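+;
+; In C terms, each output row produced below is roughly (an illustrative
+; sketch, not the upstream reference code):
+;   for (i = 0; i < 4; i++)
+;       dst_ptr[i] = clamp_u8(pred_ptr[i] + diff_ptr[i]);
+; where clamp_u8() is shorthand for saturation to [0, 255], which the
+; usat16 instructions perform.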
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+;
+;
+;
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+|vp8_recon_b_armv6| PROC
+    stmdb   sp!, {r4 - r9, lr}
+
+    ;0, 1, 2, 3
+    ldr     r4, [prd], #16          ; 3 | 2 | 1 | 0
+    ldr     r6, [dif, #0]           ;     1 |     0
+    ldr     r7, [dif, #4]           ;     3 |     2
+
+    pkhbt   r8, r6, r7, lsl #16     ;     2 |     0
+    pkhtb   r9, r7, r6, asr #16     ;     3 |     1
+
+    uxtab16 r8, r8, r4              ;     2 |     0  +  3 | 2 | 2 | 0
+    uxtab16 r9, r9, r4, ror #8      ;     3 |     1  +  0 | 3 | 2 | 1
+
+    usat16  r8, #8, r8
+    usat16  r9, #8, r9
+    add     dif, dif, #32
+    orr     r8, r8, r9, lsl #8
+
+    str     r8, [dst], stride
+
+    ;0, 1, 2, 3
+    ldr     r4, [prd], #16          ; 3 | 2 | 1 | 0
+;;  ldr     r6, [dif, #8]           ;     1 |     0
+;;  ldr     r7, [dif, #12]          ;     3 |     2
+    ldr     r6, [dif, #0]           ;     1 |     0
+    ldr     r7, [dif, #4]           ;     3 |     2
+
+    pkhbt   r8, r6, r7, lsl #16     ;     2 |     0
+    pkhtb   r9, r7, r6, asr #16     ;     3 |     1
+
+    uxtab16 r8, r8, r4              ;     2 |     0  +  3 | 2 | 2 | 0
+    uxtab16 r9, r9, r4, ror #8      ;     3 |     1  +  0 | 3 | 2 | 1
+
+    usat16  r8, #8, r8
+    usat16  r9, #8, r9
+    add     dif, dif, #32
+    orr     r8, r8, r9, lsl #8
+
+    str     r8, [dst], stride
+
+    ;0, 1, 2, 3
+    ldr     r4, [prd], #16          ; 3 | 2 | 1 | 0
+;;  ldr     r6, [dif, #16]          ;     1 |     0
+;;  ldr     r7, [dif, #20]          ;     3 |     2
+    ldr     r6, [dif, #0]           ;     1 |     0
+    ldr     r7, [dif, #4]           ;     3 |     2
+
+    pkhbt   r8, r6, r7, lsl #16     ;     2 |     0
+    pkhtb   r9, r7, r6, asr #16     ;     3 |     1
+
+    uxtab16 r8, r8, r4              ;     2 |     0  +  3 | 2 | 2 | 0
+    uxtab16 r9, r9, r4, ror #8      ;     3 |     1  +  0 | 3 | 2 | 1
+
+    usat16  r8, #8, r8
+    usat16  r9, #8, r9
+    add     dif, dif, #32
+    orr     r8, r8, r9, lsl #8
+
+    str     r8, [dst], stride
+
+    ;0, 1, 2, 3
+    ldr     r4, [prd], #16          ; 3 | 2 | 1 | 0
+;;  ldr     r6, [dif, #24]          ;     1 |     0
+;;  ldr     r7, [dif, #28]          ;     3 |     2
+    ldr     r6, [dif, #0]           ;     1 |     0
+    ldr     r7, [dif, #4]           ;     3 |     2
+
+    pkhbt   r8, r6, r7, lsl #16     ;     2 |     0
+    pkhtb   r9, r7, r6, asr #16     ;     3 |     1
+
+    uxtab16 r8, r8, r4              ;     2 |     0  +  3 | 2 | 2 | 0
+    uxtab16 r9, r9, r4, ror #8      ;     3 |     1  +  0 | 3 | 2 | 1
+
+    usat16  r8, #8, r8
+    usat16  r9, #8, r9
+    orr     r8, r8, r9, lsl #8
+
+    str     r8, [dst], stride
+
+    ldmia   sp!, {r4 - r9, pc}
+
+    ENDP    ; |recon_b|
+
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+;
+;
+;
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+; R0 char  *pred_ptr
+; R1 short *dif_ptr
+; R2 char  *dst_ptr
+; R3 int stride
+|vp8_recon4b_armv6| PROC
+    stmdb   sp!, {r4 - r9, lr}
+
+    mov     lr, #4
+
+recon4b_loop
+    ;0, 1, 2, 3
+    ldr     r4, [prd], #4           ; 3 | 2 | 1 | 0
+    ldr     r6, [dif, #0]           ;     1 |     0
+    ldr     r7, [dif, #4]           ;     3 |     2
+
+    pkhbt   r8, r6, r7, lsl #16     ;     2 |     0
+    pkhtb   r9, r7, r6, asr #16     ;     3 |     1
+
+    uxtab16 r8, r8, r4              ;     2 |     0  +  3 | 2 | 2 | 0
+    uxtab16 r9, r9, r4, ror #8      ;     3 |     1  +  0 | 3 | 2 | 1
+
+    usat16  r8, #8, r8
+    usat16  r9, #8, r9
+    orr     r8, r8, r9, lsl #8
+
+    str     r8, [dst]
+
+    ;4, 5, 6, 7
+    ldr     r4, [prd], #4
+;;  ldr     r6, [dif, #32]
+;;  ldr     r7, [dif, #36]
+    ldr     r6, [dif, #8]
+    ldr     r7, [dif, #12]
+
+    pkhbt   r8, r6, r7, lsl #16
+    pkhtb   r9, r7, r6, asr #16
+
+    uxtab16 r8, r8, r4
+    uxtab16 r9, r9, r4, ror #8
+    usat16  r8, #8, r8
+    usat16  r9, #8, r9
+    orr     r8, r8, r9, lsl #8
+
+    str     r8, [dst, #4]
+
+    ;8, 9, 10, 11
+    ldr     r4, [prd], #4
+;;  ldr     r6, [dif, #64]
+;;  ldr     r7, [dif, #68]
+    ldr     r6, [dif, #16]
+    ldr     r7, [dif, #20]
+
+    pkhbt   r8, r6, r7, lsl #16
+    pkhtb   r9, r7, r6, asr #16
+
+    uxtab16 r8, r8, r4
+    uxtab16 r9, r9, r4, ror #8
+    usat16  r8, #8, r8
+    usat16  r9, #8, r9
+    orr     r8, r8, r9, lsl #8
+
+    str     r8, [dst, #8]
+
+    ;12, 13, 14, 15
+    ldr     r4, [prd], #4
+;;  ldr     r6, [dif, #96]
+;;  ldr     r7, [dif, #100]
+    ldr     r6, [dif, #24]
+    ldr     r7, [dif, #28]
+
+    pkhbt   r8, r6, r7, lsl #16
+    pkhtb   r9, r7, r6, asr #16
+
+    uxtab16 r8, r8, r4
+    uxtab16 r9, r9, r4, ror #8
+    usat16  r8, #8, r8
+    usat16  r9, #8, r9
+    orr     r8, r8, r9, lsl #8
+
+    str     r8, [dst, #12]
+
+    add     dst, dst, stride
+;;  add     dif, dif, #8
+    add     dif, dif, #32
+
+    subs    lr, lr, #1
+    bne     recon4b_loop
+
+    ldmia   sp!, {r4 - r9, pc}
+
+    ENDP    ; |Recon4B|
+
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+;
+;
+;
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+; R0 char  *pred_ptr
+; R1 short *dif_ptr
+; R2 char  *dst_ptr
+; R3 int stride
+|vp8_recon2b_armv6| PROC
+    stmdb   sp!, {r4 - r9, lr}
+
+    mov     lr, #4
+
+recon2b_loop
+    ;0, 1, 2, 3
+    ldr     r4, [prd], #4
+    ldr     r6, [dif, #0]
+    ldr     r7, [dif, #4]
+
+    pkhbt   r8, r6, r7, lsl #16
+    pkhtb   r9, r7, r6, asr #16
+
+    uxtab16 r8, r8, r4
+    uxtab16 r9, r9, r4, ror #8
+    usat16  r8, #8, r8
+    usat16  r9, #8, r9
+    orr     r8, r8, r9, lsl #8
+
+    str     r8, [dst]
+
+    ;4, 5, 6, 7
+    ldr     r4, [prd], #4
+;;  ldr     r6, [dif, #32]
+;;  ldr     r7, [dif, #36]
+    ldr     r6, [dif, #8]
+    ldr     r7, [dif, #12]
+
+    pkhbt   r8, r6, r7, lsl #16
+    pkhtb   r9, r7, r6, asr #16
+
+    uxtab16 r8, r8, r4
+    uxtab16 r9, r9, r4, ror #8
+    usat16  r8, #8, r8
+    usat16  r9, #8, r9
+    orr     r8, r8, r9, lsl #8
+
+    str     r8, [dst, #4]
+
+    add     dst, dst, stride
+;;  add     dif, dif, #8
+    add     dif, dif, #16
+
+    subs    lr, lr, #1
+    bne     recon2b_loop
+
+    ldmia   sp!, {r4 - r9, pc}
+
+    ENDP    ; |Recon2B|
+
+    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/armv6/simpleloopfilter_v6.asm
@@ -0,0 +1,287 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT |vp8_loop_filter_simple_horizontal_edge_armv6|
+    EXPORT |vp8_loop_filter_simple_vertical_edge_armv6|
+
+    AREA    |.text|, CODE, READONLY  ; name this block of code
+
+    MACRO
+    TRANSPOSE_MATRIX $a0, $a1, $a2, $a3, $b0, $b1, $b2, $b3
+    ; input: $a0, $a1, $a2, $a3; output: $b0, $b1, $b2, $b3
+    ; a0: 03 02 01 00
+    ; a1: 13 12 11 10
+    ; a2: 23 22 21 20
+    ; a3: 33 32 31 30
+    ;     b3 b2 b1 b0
+
+    uxtb16      $b1, $a1                    ; xx 12 xx 10
+    uxtb16      $b0, $a0                    ; xx 02 xx 00
+    uxtb16      $b3, $a3                    ; xx 32 xx 30
+    uxtb16      $b2, $a2                    ; xx 22 xx 20
+    orr         $b1, $b0, $b1, lsl #8       ; 12 02 10 00
+    orr         $b3, $b2, $b3, lsl #8       ; 32 22 30 20
+
+    uxtb16      $a1, $a1, ror #8            ; xx 13 xx 11
+    uxtb16      $a3, $a3, ror #8            ; xx 33 xx 31
+    uxtb16      $a0, $a0, ror #8            ; xx 03 xx 01
+    uxtb16      $a2, $a2, ror #8            ; xx 23 xx 21
+    orr         $a0, $a0, $a1, lsl #8       ; 13 03 11 01
+    orr         $a2, $a2, $a3, lsl #8       ; 33 23 31 21
+
+    pkhtb       $b2, $b3, $b1, asr #16      ; 32 22 12 02   -- p1
+    pkhbt       $b0, $b1, $b3, lsl #16      ; 30 20 10 00   -- p3
+
+    pkhtb       $b3, $a2, $a0, asr #16      ; 33 23 13 03   -- p0
+    pkhbt       $b1, $a0, $a2, lsl #16      ; 31 21 11 01   -- p2
+    MEND
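+
+    ; In C terms the macro performs a 4x4 byte transpose (an illustrative
+    ; sketch using the a[row]/b[row] layout named in the comments above):
+    ;   for (r = 0; r < 4; r++)
+    ;       for (c = 0; c < 4; c++)
+    ;           b[c][r] = a[r][c];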
+
+
+src         RN  r0
+pstep       RN  r1
+
+;r0     unsigned char *src_ptr,
+;r1     int src_pixel_step,
+;r2     const char *flimit,
+;r3     const char *limit,
+;stack  const char *thresh,
+;stack  int  count
+
+; All 16 elements in flimit are equal. So, in the code, only one load is needed
+; for flimit. The same applies to limit. thresh is not used in the simple loopfilter
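+;
+; In scalar form, the breakout test computed below is approximately, per byte
+; lane (an illustrative restatement of the SIMD sequence, not upstream text):
+;   filter_mask = (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= flimit * 2 + limit)
+;                 ? 0xff : 0x00;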
+
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+|vp8_loop_filter_simple_horizontal_edge_armv6| PROC
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+    stmdb       sp!, {r4 - r11, lr}
+
+    ldr         r12, [r3]                   ; limit
+    ldr         r3, [src, -pstep, lsl #1]   ; p1
+    ldr         r4, [src, -pstep]           ; p0
+    ldr         r5, [src]                   ; q0
+    ldr         r6, [src, pstep]            ; q1
+    ldr         r7, [r2]                    ; flimit
+    ldr         r2, c0x80808080
+    ldr         r9, [sp, #40]               ; count for 8-in-parallel
+    uadd8       r7, r7, r7                  ; flimit * 2
+    mov         r9, r9, lsl #1              ; double the count. we're doing 4 at a time
+    uadd8       r12, r7, r12                ; flimit * 2 + limit
+    mov         lr, #0                      ; need 0 in a couple places
+
+|simple_hnext8|
+    ; vp8_simple_filter_mask()
+
+    uqsub8      r7, r3, r6                  ; p1 - q1
+    uqsub8      r8, r6, r3                  ; q1 - p1
+    uqsub8      r10, r4, r5                 ; p0 - q0
+    uqsub8      r11, r5, r4                 ; q0 - p0
+    orr         r8, r8, r7                  ; abs(p1 - q1)
+    orr         r10, r10, r11               ; abs(p0 - q0)
+    uqadd8      r10, r10, r10               ; abs(p0 - q0) * 2
+    uhadd8      r8, r8, lr                  ; abs(p1 - q1) >> 1
+    uqadd8      r10, r10, r8                ; abs(p0 - q0)*2 + abs(p1 - q1)/2
+    mvn         r8, #0
+    usub8       r10, r12, r10               ; compare to flimit. usub8 sets GE flags
+    sel         r10, r8, lr                 ; filter mask: F or 0
+    cmp         r10, #0
+    beq         simple_hskip_filter         ; skip filtering if all masks are 0x00
+
+    ;vp8_simple_filter()
+
+    eor         r3, r3, r2                  ; p1 offset to convert to a signed value
+    eor         r6, r6, r2                  ; q1 offset to convert to a signed value
+    eor         r4, r4, r2                  ; p0 offset to convert to a signed value
+    eor         r5, r5, r2                  ; q0 offset to convert to a signed value
+
+    qsub8       r3, r3, r6                  ; vp8_filter = p1 - q1
+    qsub8       r6, r5, r4                  ; q0 - p0
+    qadd8       r3, r3, r6                  ; += q0 - p0
+    ldr         r7, c0x04040404
+    qadd8       r3, r3, r6                  ; += q0 - p0
+    ldr         r8, c0x03030303
+    qadd8       r3, r3, r6                  ; vp8_filter = p1-q1 + 3*(q0-p0))
+    ;STALL
+    and         r3, r3, r10                 ; vp8_filter &= mask
+
+    qadd8       r7 , r3 , r7                ; Filter1 = vp8_filter + 4
+    qadd8       r8 , r3 , r8                ; Filter2 = vp8_filter + 3
+
+    shadd8      r7 , r7 , lr
+    shadd8      r8 , r8 , lr
+    shadd8      r7 , r7 , lr
+    shadd8      r8 , r8 , lr
+    shadd8      r7 , r7 , lr                ; Filter1 >>= 3
+    shadd8      r8 , r8 , lr                ; Filter2 >>= 3
+
+    qsub8       r5 ,r5, r7                  ; u = q0 - Filter1
+    qadd8       r4, r4, r8                  ; u = p0 + Filter2
+    eor         r5, r5, r2                  ; *oq0 = u^0x80
+    str         r5, [src]                   ; store oq0 result
+    eor         r4, r4, r2                  ; *op0 = u^0x80
+    str         r4, [src, -pstep]           ; store op0 result
+
+|simple_hskip_filter|
+    subs        r9, r9, #1
+    addne       src, src, #4                ; next row
+
+    ldrne       r3, [src, -pstep, lsl #1]   ; p1
+    ldrne       r4, [src, -pstep]           ; p0
+    ldrne       r5, [src]                   ; q0
+    ldrne       r6, [src, pstep]            ; q1
+
+    bne         simple_hnext8
+
+    ldmia       sp!, {r4 - r11, pc}
+    ENDP        ; |vp8_loop_filter_simple_horizontal_edge_armv6|
+
+
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+|vp8_loop_filter_simple_vertical_edge_armv6| PROC
+;-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+    stmdb       sp!, {r4 - r11, lr}
+
+    ldr         r12, [r2]                   ; r12: flimit
+    ldr         r2, c0x80808080
+    ldr         r7, [r3]                    ; limit
+
+    ; load source data to r7, r8, r9, r10
+    ldrh        r3, [src, #-2]
+    ldrh        r4, [src], pstep
+    uadd8       r12, r12, r12               ; flimit * 2
+
+    ldrh        r5, [src, #-2]
+    ldrh        r6, [src], pstep
+    uadd8       r12, r12, r7                ; flimit * 2 + limit
+
+    pkhbt       r7, r3, r4, lsl #16
+
+    ldrh        r3, [src, #-2]
+    ldrh        r4, [src], pstep
+    ldr         r11, [sp, #40]              ; count (r11) for 8-in-parallel
+
+    pkhbt       r8, r5, r6, lsl #16
+
+    ldrh        r5, [src, #-2]
+    ldrh        r6, [src], pstep
+    mov         r11, r11, lsl #1            ; 4-in-parallel
+
+|simple_vnext8|
+    ; vp8_simple_filter_mask() function
+    pkhbt       r9, r3, r4, lsl #16
+    pkhbt       r10, r5, r6, lsl #16
+
+    ;transpose r7, r8, r9, r10 to r3, r4, r5, r6
+    TRANSPOSE_MATRIX r7, r8, r9, r10, r3, r4, r5, r6
+
+    uqsub8      r7, r3, r6                  ; p1 - q1
+    uqsub8      r8, r6, r3                  ; q1 - p1
+    uqsub8      r9, r4, r5                  ; p0 - q0
+    uqsub8      r10, r5, r4                 ; q0 - p0
+    orr         r7, r7, r8                  ; abs(p1 - q1)
+    orr         r9, r9, r10                 ; abs(p0 - q0)
+    mov         r8, #0
+    uqadd8      r9, r9, r9                  ; abs(p0 - q0) * 2
+    uhadd8      r7, r7, r8                  ; abs(p1 - q1) / 2
+    uqadd8      r7, r7, r9                  ; abs(p0 - q0)*2 + abs(p1 - q1)/2
+    mvn         r10, #0                     ; r10 == -1
+
+    usub8       r7, r12, r7                 ; compare to flimit
+    sel         lr, r10, r8                 ; filter mask
+
+    cmp         lr, #0
+    beq         simple_vskip_filter         ; skip filtering
+
+    ;vp8_simple_filter() function
+    eor         r3, r3, r2                  ; p1 offset to convert to a signed value
+    eor         r6, r6, r2                  ; q1 offset to convert to a signed value
+    eor         r4, r4, r2                  ; p0 offset to convert to a signed value
+    eor         r5, r5, r2                  ; q0 offset to convert to a signed value
+
+    qsub8       r3, r3, r6                  ; vp8_filter = p1 - q1
+    qsub8       r6, r5, r4                  ; q0 - p0
+
+    qadd8       r3, r3, r6                  ; vp8_filter += q0 - p0
+    ldr         r9, c0x03030303             ; r9 = 3
+
+    qadd8       r3, r3, r6                  ; vp8_filter += q0 - p0
+    ldr         r7, c0x04040404
+
+    qadd8       r3, r3, r6                  ; vp8_filter = p1-q1 + 3*(q0-p0))
+    ;STALL
+    and         r3, r3, lr                  ; vp8_filter &= mask
+
+    qadd8       r9 , r3 , r9                ; Filter2 = vp8_filter + 3
+    qadd8       r3 , r3 , r7                ; Filter1 = vp8_filter + 4
+
+    shadd8      r9 , r9 , r8
+    shadd8      r3 , r3 , r8
+    shadd8      r9 , r9 , r8
+    shadd8      r3 , r3 , r8
+    shadd8      r9 , r9 , r8                ; Filter2 >>= 3
+    shadd8      r3 , r3 , r8                ; Filter1 >>= 3
+
+    ;calculate output
+    sub         src, src, pstep, lsl #2
+
+    qadd8       r4, r4, r9                  ; u = p0 + Filter2
+    qsub8       r5, r5, r3                  ; u = q0 - Filter1
+    eor         r4, r4, r2                  ; *op0 = u^0x80
+    eor         r5, r5, r2                  ; *oq0 = u^0x80
+
+    strb        r4, [src, #-1]              ; store the result
+    mov         r4, r4, lsr #8
+    strb        r5, [src], pstep
+    mov         r5, r5, lsr #8
+
+    strb        r4, [src, #-1]
+    mov         r4, r4, lsr #8
+    strb        r5, [src], pstep
+    mov         r5, r5, lsr #8
+
+    strb        r4, [src, #-1]
+    mov         r4, r4, lsr #8
+    strb        r5, [src], pstep
+    mov         r5, r5, lsr #8
+
+    strb        r4, [src, #-1]
+    strb        r5, [src], pstep
+
+|simple_vskip_filter|
+    subs        r11, r11, #1
+
+    ; load source data to r7, r8, r9, r10
+    ldrneh      r3, [src, #-2]
+    ldrneh      r4, [src], pstep
+
+    ldrneh      r5, [src, #-2]
+    ldrneh      r6, [src], pstep
+
+    pkhbt       r7, r3, r4, lsl #16
+
+    ldrneh      r3, [src, #-2]
+    ldrneh      r4, [src], pstep
+
+    pkhbt       r8, r5, r6, lsl #16
+
+    ldrneh      r5, [src, #-2]
+    ldrneh      r6, [src], pstep
+
+    bne         simple_vnext8
+
+    ldmia       sp!, {r4 - r11, pc}
+    ENDP        ; |vp8_loop_filter_simple_vertical_edge_armv6|
+
+; Constant Pool
+c0x80808080 DCD     0x80808080
+c0x03030303 DCD     0x03030303
+c0x04040404 DCD     0x04040404
+
+    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/armv6/sixtappredict8x4_v6.asm
@@ -0,0 +1,271 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_sixtap_predict8x4_armv6|
+
+    AREA    |.text|, CODE, READONLY  ; name this block of code
+;-------------------------------------
+; r0    unsigned char *src_ptr,
+; r1    int  src_pixels_per_line,
+; r2    int  xoffset,
+; r3    int  yoffset,
+; stack unsigned char *dst_ptr,
+; stack int  dst_pitch
+;-------------------------------------
+;note: The first pass stores its result transposed (8 lines x 9 columns) on the
+;stack, using 184 bytes of temporary storage. Each line is 20 bytes wide: 9
+;shorts of data plus 2 bytes of padding to keep lines 4-byte aligned. The second
+;pass loads the data back from the stack and again stores its result transposed.
+|vp8_sixtap_predict8x4_armv6| PROC
+    stmdb       sp!, {r4 - r11, lr}
+    str         r3, [sp, #-184]!            ;reserve space on stack for temporary storage, store yoffset
+
+    cmp         r2, #0                      ;skip first_pass filter if xoffset=0
+    add         lr, sp, #4                  ;point to temporary buffer
+    beq         skip_firstpass_filter
+
+;first-pass filter
+    ldr         r12, _filter8_coeff_
+    sub         r0, r0, r1, lsl #1
+
+    add         r2, r12, r2, lsl #4         ;calculate filter location
+    add         r0, r0, #3                  ;adjust src only for loading convenience
+
+    ldr         r3, [r2]                    ; load up packed filter coefficients
+    ldr         r4, [r2, #4]
+    ldr         r5, [r2, #8]
+
+    mov         r2, #0x90000                ; height=9 is top part of counter
+
+    sub         r1, r1, #8
+
+|first_pass_hloop_v6|
+    ldrb        r6, [r0, #-5]               ; load source data
+    ldrb        r7, [r0, #-4]
+    ldrb        r8, [r0, #-3]
+    ldrb        r9, [r0, #-2]
+    ldrb        r10, [r0, #-1]
+
+    orr         r2, r2, #0x4                ; construct loop counter. width=8=4x2
+
+    pkhbt       r6, r6, r7, lsl #16         ; r7 | r6
+    pkhbt       r7, r7, r8, lsl #16         ; r8 | r7
+
+    pkhbt       r8, r8, r9, lsl #16         ; r9 | r8
+    pkhbt       r9, r9, r10, lsl #16        ; r10 | r9
+
+|first_pass_wloop_v6|
+    smuad       r11, r6, r3                 ; vp8_filter[0], vp8_filter[1]
+    smuad       r12, r7, r3
+
+    ldrb        r6, [r0], #1
+
+    smlad       r11, r8, r4, r11            ; vp8_filter[2], vp8_filter[3]
+    ldrb        r7, [r0], #1
+    smlad       r12, r9, r4, r12
+
+    pkhbt       r10, r10, r6, lsl #16       ; r10 | r9
+    pkhbt       r6, r6, r7, lsl #16         ; r11 | r10
+    smlad       r11, r10, r5, r11           ; vp8_filter[4], vp8_filter[5]
+    smlad       r12, r6, r5, r12
+
+    sub         r2, r2, #1
+
+    add         r11, r11, #0x40             ; round_shift_and_clamp
+    tst         r2, #0xff                   ; test loop counter
+    usat        r11, #8, r11, asr #7
+    add         r12, r12, #0x40
+    strh        r11, [lr], #20              ; result is transposed and stored
+    usat        r12, #8, r12, asr #7
+
+    strh        r12, [lr], #20
+
+    movne       r11, r6
+    movne       r12, r7
+
+    movne       r6, r8
+    movne       r7, r9
+    movne       r8, r10
+    movne       r9, r11
+    movne       r10, r12
+
+    bne         first_pass_wloop_v6
+
+    ;;add       r9, ppl, #30                ; attempt to load 2 adjacent cache lines
+    ;;IF ARCHITECTURE=6
+    ;pld        [src, ppl]
+    ;;pld       [src, r9]
+    ;;ENDIF
+
+    subs        r2, r2, #0x10000
+
+    sub         lr, lr, #158
+
+    add         r0, r0, r1                  ; move to next input line
+
+    bne         first_pass_hloop_v6
+
+;second pass filter
+secondpass_filter
+    ldr         r3, [sp], #4                ; load back yoffset
+    ldr         r0, [sp, #216]              ; load dst address from stack 180+36
+    ldr         r1, [sp, #220]              ; load dst stride from stack 180+40
+
+    cmp         r3, #0
+    beq         skip_secondpass_filter
+
+    ldr         r12, _filter8_coeff_
+    add         lr, r12, r3, lsl #4         ;calculate filter location
+
+    mov         r2, #0x00080000
+
+    ldr         r3, [lr]                    ; load up packed filter coefficients
+    ldr         r4, [lr, #4]
+    ldr         r5, [lr, #8]
+
+    pkhbt       r12, r4, r3                 ; pack the filter differently
+    pkhbt       r11, r5, r4
+
+second_pass_hloop_v6
+    ldr         r6, [sp]                    ; load the data
+    ldr         r7, [sp, #4]
+
+    orr         r2, r2, #2                  ; loop counter
+
+second_pass_wloop_v6
+    smuad       lr, r3, r6                  ; apply filter
+    smulbt      r10, r3, r6
+
+    ldr         r8, [sp, #8]
+
+    smlad       lr, r4, r7, lr
+    smladx      r10, r12, r7, r10
+
+    ldrh        r9, [sp, #12]
+
+    smlad       lr, r5, r8, lr
+    smladx      r10, r11, r8, r10
+
+    add         sp, sp, #4
+    smlatb      r10, r5, r9, r10
+
+    sub         r2, r2, #1
+
+    add         lr, lr, #0x40               ; round_shift_and_clamp
+    tst         r2, #0xff
+    usat        lr, #8, lr, asr #7
+    add         r10, r10, #0x40
+    strb        lr, [r0], r1                ; the result is transposed back and stored
+    usat        r10, #8, r10, asr #7
+
+    strb        r10, [r0],r1
+
+    movne       r6, r7
+    movne       r7, r8
+
+    bne         second_pass_wloop_v6
+
+    subs        r2, r2, #0x10000
+    add         sp, sp, #12                 ; update src pointer for next loop (20-8)
+    sub         r0, r0, r1, lsl #2
+    add         r0, r0, #1
+
+    bne         second_pass_hloop_v6
+
+    add         sp, sp, #20
+    ldmia       sp!, {r4 - r11, pc}
+
+;--------------------
+skip_firstpass_filter
+    sub         r0, r0, r1, lsl #1
+    sub         r1, r1, #8
+    mov         r2, #9
+
+skip_firstpass_hloop
+    ldrb        r4, [r0], #1                ; load data
+    subs        r2, r2, #1
+    ldrb        r5, [r0], #1
+    strh        r4, [lr], #20               ; store it to immediate buffer
+    ldrb        r6, [r0], #1                ; load data
+    strh        r5, [lr], #20
+    ldrb        r7, [r0], #1
+    strh        r6, [lr], #20
+    ldrb        r8, [r0], #1
+    strh        r7, [lr], #20
+    ldrb        r9, [r0], #1
+    strh        r8, [lr], #20
+    ldrb        r10, [r0], #1
+    strh        r9, [lr], #20
+    ldrb        r11, [r0], #1
+    strh        r10, [lr], #20
+    add         r0, r0, r1                  ; move to next input line
+    strh        r11, [lr], #20
+
+    sub         lr, lr, #158                ; move over to next column
+    bne         skip_firstpass_hloop
+
+    b           secondpass_filter
+
+;--------------------
+skip_secondpass_filter
+    mov         r2, #8
+    add         sp, sp, #4                  ;start from src[0] instead of src[-2]
+
+skip_secondpass_hloop
+    ldr         r6, [sp], #4
+    subs        r2, r2, #1
+    ldr         r8, [sp], #4
+
+    mov         r7, r6, lsr #16             ; unpack
+    strb        r6, [r0], r1
+    mov         r9, r8, lsr #16
+    strb        r7, [r0], r1
+    add         sp, sp, #12                 ; 20-8
+    strb        r8, [r0], r1
+    strb        r9, [r0], r1
+
+    sub         r0, r0, r1, lsl #2
+    add         r0, r0, #1
+
+    bne         skip_secondpass_hloop
+
+    add         sp, sp, #16                 ; 180 - (160 + 4)
+
+    ldmia       sp!, {r4 - r11, pc}
+
+    ENDP
+
+;-----------------
+    AREA    subpelfilters8_dat, DATA, READWRITE         ;read/write by default
+;A data section named subpelfilters8_dat is specified. DCD reserves one word of
+;memory for each of the 48 data entries. The label filter8_coeff can be used to
+;access the data at addresses filter8_coeff, filter8_coeff+4, filter8_coeff+8 ...
+_filter8_coeff_
+    DCD     filter8_coeff
+filter8_coeff
+    DCD     0x00000000,     0x00000080,     0x00000000,     0x00000000
+    DCD     0xfffa0000,     0x000c007b,     0x0000ffff,     0x00000000
+    DCD     0xfff50002,     0x0024006c,     0x0001fff8,     0x00000000
+    DCD     0xfff70000,     0x0032005d,     0x0000fffa,     0x00000000
+    DCD     0xfff00003,     0x004d004d,     0x0003fff0,     0x00000000
+    DCD     0xfffa0000,     0x005d0032,     0x0000fff7,     0x00000000
+    DCD     0xfff80001,     0x006c0024,     0x0002fff5,     0x00000000
+    DCD     0xffff0000,     0x007b000c,     0x0000fffa,     0x00000000
+
+    ;DCD        0,  0,  128,    0,   0,  0
+    ;DCD        0, -6,  123,   12,  -1,  0
+    ;DCD        2, -11, 108,   36,  -8,  1
+    ;DCD        0, -9,   93,   50,  -6,  0
+    ;DCD        3, -16,  77,   77, -16,  3
+    ;DCD        0, -6,   50,   93,  -9,  0
+    ;DCD        1, -8,   36,  108, -11,  2
+    ;DCD        0, -1,   12,  123,  -6,  0
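+
+    ;Each 16-byte row of filter8_coeff packs one row of the table above as
+    ;halfword pairs (tap1:tap0, tap3:tap2, tap5:tap4) plus one word of padding,
+    ;which is why the filter location is computed as base + (offset << 4).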
+
+    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/bilinearfilter_arm.c
@@ -0,0 +1,212 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include <math.h>
+#include "subpixel.h"
+
+#define BLOCK_HEIGHT_WIDTH 4
+#define VP8_FILTER_WEIGHT 128
+#define VP8_FILTER_SHIFT  7
+
+static const short bilinear_filters[8][2] =
+{
+    { 128,   0 },
+    { 112,  16 },
+    {  96,  32 },
+    {  80,  48 },
+    {  64,  64 },
+    {  48,  80 },
+    {  32,  96 },
+    {  16, 112 }
+};
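+
+/* Each row corresponds to a 1/8-pel offset, and the two taps in every row
+ * sum to VP8_FILTER_WEIGHT (128); the >> VP8_FILTER_SHIFT in the filter
+ * passes renormalizes the result. Offset 4, for example, is a half-pel
+ * shift that averages the two neighboring pixels with weights {64, 64}. */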
+
+
+extern void vp8_filter_block2d_bil_first_pass_armv6
+(
+    unsigned char *src_ptr,
+    unsigned short *output_ptr,
+    unsigned int src_pixels_per_line,
+    unsigned int output_height,
+    unsigned int output_width,
+    const short *vp8_filter
+);
+
+extern void vp8_filter_block2d_bil_second_pass_armv6
+(
+    unsigned short *src_ptr,
+    unsigned char  *output_ptr,
+    int output_pitch,
+    unsigned int  output_height,
+    unsigned int  output_width,
+    const short *vp8_filter
+);
+
+#if 0
+void vp8_filter_block2d_bil_first_pass_6
+(
+    unsigned char *src_ptr,
+    unsigned short *output_ptr,
+    unsigned int src_pixels_per_line,
+    unsigned int output_height,
+    unsigned int output_width,
+    const short *vp8_filter
+)
+{
+    unsigned int i, j;
+
+    for ( i=0; i<output_height; i++ )
+    {
+        for ( j=0; j<output_width; j++ )
+        {
+            /* Apply bilinear filter */
+            output_ptr[j] = ( ( (int)src_ptr[0]          * vp8_filter[0]) +
+                               ((int)src_ptr[1] * vp8_filter[1]) +
+                                (VP8_FILTER_WEIGHT/2) ) >> VP8_FILTER_SHIFT;
+            src_ptr++;
+        }
+
+        /* Next row... */
+        src_ptr    += src_pixels_per_line - output_width;
+        output_ptr += output_width;
+    }
+}
+
+void vp8_filter_block2d_bil_second_pass_6
+(
+    unsigned short *src_ptr,
+    unsigned char  *output_ptr,
+    int output_pitch,
+    unsigned int  output_height,
+    unsigned int  output_width,
+    const short *vp8_filter
+)
+{
+    unsigned int  i,j;
+    int  Temp;
+
+    for ( i=0; i<output_height; i++ )
+    {
+        for ( j=0; j<output_width; j++ )
+        {
+            /* Apply filter */
+            Temp =  ((int)src_ptr[0]         * vp8_filter[0]) +
+                    ((int)src_ptr[output_width] * vp8_filter[1]) +
+                    (VP8_FILTER_WEIGHT/2);
+            output_ptr[j] = (unsigned int)(Temp >> VP8_FILTER_SHIFT);
+            src_ptr++;
+        }
+
+        /* Next row... */
+        /*src_ptr    += src_pixels_per_line - output_width;*/
+        output_ptr += output_pitch;
+    }
+}
+#endif
+
+void vp8_filter_block2d_bil_armv6
+(
+    unsigned char *src_ptr,
+    unsigned char *output_ptr,
+    unsigned int   src_pixels_per_line,
+    unsigned int   dst_pitch,
+    const short      *HFilter,
+    const short      *VFilter,
+    int            Width,
+    int            Height
+)
+{
+
+    unsigned short FData[36*16]; /* Temp data buffer used in filtering */
+
+    /* First filter 1-D horizontally... */
+    /* pixel_step = 1; */
+    vp8_filter_block2d_bil_first_pass_armv6(src_ptr, FData, src_pixels_per_line, Height + 1, Width, HFilter);
+
+    /* then 1-D vertically... */
+    vp8_filter_block2d_bil_second_pass_armv6(FData, output_ptr, dst_pitch, Height, Width, VFilter);
+}
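+
+#if 0
+/* Minimal usage sketch (illustrative only, mirroring what
+ * vp8_bilinear_predict4x4_armv6() below does for xoffset == yoffset == 4):
+ * predict a 4x4 block shifted by a half pel in both directions. The first
+ * pass produces Height + 1 rows because the vertical pass consumes two
+ * source rows per output row. */
+static void example_halfpel_predict4x4(unsigned char *src, int src_stride,
+                                       unsigned char *dst, int dst_stride)
+{
+    vp8_filter_block2d_bil_armv6(src, dst, src_stride, dst_stride,
+                                 bilinear_filters[4], bilinear_filters[4],
+                                 4, 4);
+}
+#endif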
+
+
+void vp8_bilinear_predict4x4_armv6
+(
+    unsigned char  *src_ptr,
+    int   src_pixels_per_line,
+    int  xoffset,
+    int  yoffset,
+    unsigned char *dst_ptr,
+    int dst_pitch
+)
+{
+    const short  *HFilter;
+    const short  *VFilter;
+
+    HFilter = bilinear_filters[xoffset];
+    VFilter = bilinear_filters[yoffset];
+
+    vp8_filter_block2d_bil_armv6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 4, 4);
+}
+
+void vp8_bilinear_predict8x8_armv6
+(
+    unsigned char  *src_ptr,
+    int  src_pixels_per_line,
+    int  xoffset,
+    int  yoffset,
+    unsigned char *dst_ptr,
+    int  dst_pitch
+)
+{
+    const short  *HFilter;
+    const short  *VFilter;
+
+    HFilter = bilinear_filters[xoffset];
+    VFilter = bilinear_filters[yoffset];
+
+    vp8_filter_block2d_bil_armv6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 8);
+}
+
+void vp8_bilinear_predict8x4_armv6
+(
+    unsigned char  *src_ptr,
+    int  src_pixels_per_line,
+    int  xoffset,
+    int  yoffset,
+    unsigned char *dst_ptr,
+    int  dst_pitch
+)
+{
+    const short  *HFilter;
+    const short  *VFilter;
+
+    HFilter = bilinear_filters[xoffset];
+    VFilter = bilinear_filters[yoffset];
+
+    vp8_filter_block2d_bil_armv6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 8, 4);
+}
+
+void vp8_bilinear_predict16x16_armv6
+(
+    unsigned char  *src_ptr,
+    int  src_pixels_per_line,
+    int  xoffset,
+    int  yoffset,
+    unsigned char *dst_ptr,
+    int  dst_pitch
+)
+{
+    const short  *HFilter;
+    const short  *VFilter;
+
+    HFilter = bilinear_filters[xoffset];
+    VFilter = bilinear_filters[yoffset];
+
+    vp8_filter_block2d_bil_armv6(src_ptr, dst_ptr, src_pixels_per_line, dst_pitch, HFilter, VFilter, 16, 16);
+}
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/filter_arm.c
@@ -0,0 +1,256 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_ports/config.h"
+#include <math.h>
+#include "subpixel.h"
+#include "vpx_ports/mem.h"
+
+#define BLOCK_HEIGHT_WIDTH 4
+#define VP8_FILTER_WEIGHT 128
+#define VP8_FILTER_SHIFT  7
+
+DECLARE_ALIGNED(16, static const short, sub_pel_filters[8][6]) =
+{
+    { 0,  0,  128,    0,   0,  0 },         /* note that 1/8 pel positions are just as per alpha -0.5 bicubic */
+    { 0, -6,  123,   12,  -1,  0 },
+    { 2, -11, 108,   36,  -8,  1 },         /* New 1/4 pel 6 tap filter */
+    { 0, -9,   93,   50,  -6,  0 },
+    { 3, -16,  77,   77, -16,  3 },         /* New 1/2 pel 6 tap filter */
+    { 0, -6,   50,   93,  -9,  0 },
+    { 1, -8,   36,  108, -11,  2 },         /* New 1/4 pel 6 tap filter */
+    { 0, -1,   12,  123,  -6,  0 },
+};
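+
+/* Every row above sums to VP8_FILTER_WEIGHT (128), so the >> VP8_FILTER_SHIFT
+ * in the filter kernels renormalizes the output. The odd rows (1, 3, 5, 7)
+ * have zero outer taps, which is what the yoffset & 0x1 branches below exploit
+ * to run the vertical pass as a 4-tap filter. */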
+
+
+extern void vp8_filter_block2d_first_pass_armv6
+(
+    unsigned char *src_ptr,
+    short         *output_ptr,
+    unsigned int src_pixels_per_line,
+    unsigned int output_width,
+    unsigned int output_height,
+    const short *vp8_filter
+);
+
+extern void vp8_filter_block2d_second_pass_armv6
+(
+    short         *src_ptr,
+    unsigned char *output_ptr,
+    unsigned int output_pitch,
+    unsigned int cnt,
+    const short *vp8_filter
+);
+
+extern void vp8_filter4_block2d_second_pass_armv6
+(
+    short         *src_ptr,
+    unsigned char *output_ptr,
+    unsigned int output_pitch,
+    unsigned int cnt,
+    const short *vp8_filter
+);
+
+extern void vp8_filter_block2d_first_pass_only_armv6
+(
+    unsigned char *src_ptr,
+    unsigned char *output_ptr,
+    unsigned int src_pixels_per_line,
+    unsigned int cnt,
+    unsigned int output_pitch,
+    const short *vp8_filter
+);
+
+
+extern void vp8_filter_block2d_second_pass_only_armv6
+(
+    unsigned char *src_ptr,
+    unsigned char *output_ptr,
+    unsigned int src_pixels_per_line,
+    unsigned int cnt,
+    unsigned int output_pitch,
+    const short *vp8_filter
+);
+
+#if HAVE_ARMV6
+void vp8_sixtap_predict_armv6
+(
+    unsigned char  *src_ptr,
+    int  src_pixels_per_line,
+    int  xoffset,
+    int  yoffset,
+    unsigned char *dst_ptr,
+    int  dst_pitch
+)
+{
+    const short  *HFilter;
+    const short  *VFilter;
+    DECLARE_ALIGNED_ARRAY(4, short, FData, 12*4); /* Temp data buffer used in filtering */
+
+
+    HFilter = sub_pel_filters[xoffset];   /* 6 tap */
+    VFilter = sub_pel_filters[yoffset];       /* 6 tap */
+
+    /* Vfilter is null. First pass only */
+    if (xoffset && !yoffset)
+    {
+        /*vp8_filter_block2d_first_pass_armv6 ( src_ptr, FData+2, src_pixels_per_line, 4, 4, HFilter );
+        vp8_filter_block2d_second_pass_armv6 ( FData+2, dst_ptr, dst_pitch, 4, VFilter );*/
+
+        vp8_filter_block2d_first_pass_only_armv6(src_ptr, dst_ptr, src_pixels_per_line, 4, dst_pitch, HFilter);
+    }
+    /* Hfilter is null. Second pass only */
+    else if (!xoffset && yoffset)
+    {
+        vp8_filter_block2d_second_pass_only_armv6(src_ptr, dst_ptr, src_pixels_per_line, 4, dst_pitch, VFilter);
+    }
+    else
+    {
+        /* Vfilter is a 4 tap filter */
+        if (yoffset & 0x1)
+        {
+            vp8_filter_block2d_first_pass_armv6(src_ptr - src_pixels_per_line, FData + 1, src_pixels_per_line, 4, 7, HFilter);
+            vp8_filter4_block2d_second_pass_armv6(FData + 2, dst_ptr, dst_pitch, 4, VFilter);
+        }
+        /* Vfilter is 6 tap filter */
+        else
+        {
+            vp8_filter_block2d_first_pass_armv6(src_ptr - (2 * src_pixels_per_line), FData, src_pixels_per_line, 4, 9, HFilter);
+            vp8_filter_block2d_second_pass_armv6(FData + 2, dst_ptr, dst_pitch, 4, VFilter);
+        }
+    }
+}
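+
+/* Note on the row counts above: the 4-tap vertical branch needs only 7 input
+ * rows for a 4-row output (FData + 1 skips the unused outer tap), while the
+ * full 6-tap branch reads 9 rows starting two lines above the block. The 8x8
+ * and 16x16 paths below follow the same pattern with 11/13 and 19/21 rows. */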
+
+#if 0
+void vp8_sixtap_predict8x4_armv6
+(
+    unsigned char  *src_ptr,
+    int  src_pixels_per_line,
+    int  xoffset,
+    int  yoffset,
+    unsigned char *dst_ptr,
+    int  dst_pitch
+)
+{
+    const short  *HFilter;
+    const short  *VFilter;
+    DECLARE_ALIGNED_ARRAY(4, short, FData, 16*8); /* Temp data buffer used in filtering */
+
+    HFilter = sub_pel_filters[xoffset];   /* 6 tap */
+    VFilter = sub_pel_filters[yoffset];       /* 6 tap */
+
+
+    /*if (xoffset && !yoffset)
+    {
+        vp8_filter_block2d_first_pass_only_armv6 (  src_ptr, dst_ptr, src_pixels_per_line, 8, dst_pitch, HFilter );
+    }*/
+    /* Hfilter is null. Second pass only */
+    /*else if (!xoffset && yoffset)
+    {
+        vp8_filter_block2d_second_pass_only_armv6 ( src_ptr, dst_ptr, src_pixels_per_line, 8, dst_pitch, VFilter );
+    }
+    else
+    {
+        if (yoffset & 0x1)
+            vp8_filter_block2d_first_pass_armv6 ( src_ptr-src_pixels_per_line, FData+1, src_pixels_per_line, 8, 7, HFilter );
+        else*/
+
+        vp8_filter_block2d_first_pass_armv6 ( src_ptr-(2*src_pixels_per_line), FData, src_pixels_per_line, 8, 9, HFilter );
+
+        vp8_filter_block2d_second_pass_armv6 ( FData+2, dst_ptr, dst_pitch, 4, 8, VFilter );
+    /*}*/
+}
+#endif
+
+void vp8_sixtap_predict8x8_armv6
+(
+    unsigned char  *src_ptr,
+    int  src_pixels_per_line,
+    int  xoffset,
+    int  yoffset,
+    unsigned char *dst_ptr,
+    int  dst_pitch
+)
+{
+    const short  *HFilter;
+    const short  *VFilter;
+    DECLARE_ALIGNED_ARRAY(4, short, FData, 16*8); /* Temp data buffer used in filtering */
+
+    HFilter = sub_pel_filters[xoffset];   /* 6 tap */
+    VFilter = sub_pel_filters[yoffset];       /* 6 tap */
+
+    if (xoffset && !yoffset)
+    {
+        vp8_filter_block2d_first_pass_only_armv6(src_ptr, dst_ptr, src_pixels_per_line, 8, dst_pitch, HFilter);
+    }
+    /* Hfilter is null. Second pass only */
+    else if (!xoffset && yoffset)
+    {
+        vp8_filter_block2d_second_pass_only_armv6(src_ptr, dst_ptr, src_pixels_per_line, 8, dst_pitch, VFilter);
+    }
+    else
+    {
+        if (yoffset & 0x1)
+        {
+            vp8_filter_block2d_first_pass_armv6(src_ptr - src_pixels_per_line, FData + 1, src_pixels_per_line, 8, 11, HFilter);
+            vp8_filter4_block2d_second_pass_armv6(FData + 2, dst_ptr, dst_pitch, 8, VFilter);
+        }
+        else
+        {
+            vp8_filter_block2d_first_pass_armv6(src_ptr - (2 * src_pixels_per_line), FData, src_pixels_per_line, 8, 13, HFilter);
+            vp8_filter_block2d_second_pass_armv6(FData + 2, dst_ptr, dst_pitch, 8, VFilter);
+        }
+    }
+}
+
+
+void vp8_sixtap_predict16x16_armv6
+(
+    unsigned char  *src_ptr,
+    int  src_pixels_per_line,
+    int  xoffset,
+    int  yoffset,
+    unsigned char *dst_ptr,
+    int  dst_pitch
+)
+{
+    const short  *HFilter;
+    const short  *VFilter;
+    DECLARE_ALIGNED_ARRAY(4, short, FData, 24*16);    /* Temp data buffer used in filtering */
+
+    HFilter = sub_pel_filters[xoffset];   /* 6 tap */
+    VFilter = sub_pel_filters[yoffset];       /* 6 tap */
+
+    if (xoffset && !yoffset)
+    {
+        vp8_filter_block2d_first_pass_only_armv6(src_ptr, dst_ptr, src_pixels_per_line, 16, dst_pitch, HFilter);
+    }
+    /* Hfilter is null. Second pass only */
+    else if (!xoffset && yoffset)
+    {
+        vp8_filter_block2d_second_pass_only_armv6(src_ptr, dst_ptr, src_pixels_per_line, 16, dst_pitch, VFilter);
+    }
+    else
+    {
+        if (yoffset & 0x1)
+        {
+            vp8_filter_block2d_first_pass_armv6(src_ptr - src_pixels_per_line, FData + 1, src_pixels_per_line, 16, 19, HFilter);
+            vp8_filter4_block2d_second_pass_armv6(FData + 2, dst_ptr, dst_pitch, 16, VFilter);
+        }
+        else
+        {
+            vp8_filter_block2d_first_pass_armv6(src_ptr - (2 * src_pixels_per_line), FData, src_pixels_per_line, 16, 21, HFilter);
+            vp8_filter_block2d_second_pass_armv6(FData + 2, dst_ptr, dst_pitch, 16, VFilter);
+        }
+    }
+
+}
+#endif
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/idct_arm.h
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef IDCT_ARM_H
+#define IDCT_ARM_H
+
+#if HAVE_ARMV6
+extern prototype_idct(vp8_short_idct4x4llm_1_v6);
+extern prototype_idct(vp8_short_idct4x4llm_v6_dual);
+extern prototype_idct_scalar_add(vp8_dc_only_idct_add_v6);
+extern prototype_second_order(vp8_short_inv_walsh4x4_1_v6);
+extern prototype_second_order(vp8_short_inv_walsh4x4_v6);
+
+#if !CONFIG_RUNTIME_CPU_DETECT
+#undef  vp8_idct_idct1
+#define vp8_idct_idct1 vp8_short_idct4x4llm_1_v6
+
+#undef  vp8_idct_idct16
+#define vp8_idct_idct16 vp8_short_idct4x4llm_v6_dual
+
+#undef  vp8_idct_idct1_scalar_add
+#define vp8_idct_idct1_scalar_add vp8_dc_only_idct_add_v6
+
+#undef  vp8_idct_iwalsh1
+#define vp8_idct_iwalsh1 vp8_short_inv_walsh4x4_1_v6
+
+#undef  vp8_idct_iwalsh16
+#define vp8_idct_iwalsh16 vp8_short_inv_walsh4x4_v6
+#endif
+#endif
+
+#if HAVE_ARMV7
+extern prototype_idct(vp8_short_idct4x4llm_1_neon);
+extern prototype_idct(vp8_short_idct4x4llm_neon);
+extern prototype_idct_scalar_add(vp8_dc_only_idct_add_neon);
+extern prototype_second_order(vp8_short_inv_walsh4x4_1_neon);
+extern prototype_second_order(vp8_short_inv_walsh4x4_neon);
+
+#if !CONFIG_RUNTIME_CPU_DETECT
+#undef  vp8_idct_idct1
+#define vp8_idct_idct1 vp8_short_idct4x4llm_1_neon
+
+#undef  vp8_idct_idct16
+#define vp8_idct_idct16 vp8_short_idct4x4llm_neon
+
+#undef  vp8_idct_idct1_scalar_add
+#define vp8_idct_idct1_scalar_add vp8_dc_only_idct_add_neon
+
+#undef  vp8_idct_iwalsh1
+#define vp8_idct_iwalsh1 vp8_short_inv_walsh4x4_1_neon
+
+#undef  vp8_idct_iwalsh16
+#define vp8_idct_iwalsh16 vp8_short_inv_walsh4x4_neon
+#endif
+#endif
+
+#endif
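(Editor's note: the #undef/#define pairs above are how libvpx binds the generic vp8_idct_* names to a specific implementation at compile time when CONFIG_RUNTIME_CPU_DETECT is disabled. A hedged illustration, assuming the prototype_idct signature void sym(short *input, short *output, int pitch) from idct.h; run_idct16 is a hypothetical caller, not libvpx code:

    /* With runtime detection off and HAVE_ARMV7 set, this compiles to a
       direct call to vp8_short_idct4x4llm_neon -- no function-pointer
       indirection. */
    static void run_idct16(short *input, short *output, int pitch)
    {
        vp8_idct_idct16(input, output, pitch);
    }

With runtime detection enabled, the same generic names are instead resolved through a function table filled in at startup; see arm_systemdependent.c elsewhere in this patch.)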
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/loopfilter_arm.c
@@ -0,0 +1,237 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "vpx_ports/config.h"
+#include <math.h>
+#include "loopfilter.h"
+#include "onyxc_int.h"
+
+extern prototype_loopfilter(vp8_loop_filter_horizontal_edge_armv6);
+extern prototype_loopfilter(vp8_loop_filter_vertical_edge_armv6);
+extern prototype_loopfilter(vp8_mbloop_filter_horizontal_edge_armv6);
+extern prototype_loopfilter(vp8_mbloop_filter_vertical_edge_armv6);
+extern prototype_loopfilter(vp8_loop_filter_simple_horizontal_edge_armv6);
+extern prototype_loopfilter(vp8_loop_filter_simple_vertical_edge_armv6);
+
+extern prototype_loopfilter(vp8_loop_filter_horizontal_edge_y_neon);
+extern prototype_loopfilter(vp8_loop_filter_vertical_edge_y_neon);
+extern prototype_loopfilter(vp8_mbloop_filter_horizontal_edge_y_neon);
+extern prototype_loopfilter(vp8_mbloop_filter_vertical_edge_y_neon);
+extern prototype_loopfilter(vp8_loop_filter_simple_horizontal_edge_neon);
+extern prototype_loopfilter(vp8_loop_filter_simple_vertical_edge_neon);
+
+extern loop_filter_uvfunction vp8_loop_filter_horizontal_edge_uv_neon;
+extern loop_filter_uvfunction vp8_loop_filter_vertical_edge_uv_neon;
+extern loop_filter_uvfunction vp8_mbloop_filter_horizontal_edge_uv_neon;
+extern loop_filter_uvfunction vp8_mbloop_filter_vertical_edge_uv_neon;
+
+
+#if HAVE_ARMV6
+/*ARMV6 loopfilter functions*/
+/* Horizontal MB filtering */
+void vp8_loop_filter_mbh_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                               int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) simpler_lpf;
+    vp8_mbloop_filter_horizontal_edge_armv6(y_ptr, y_stride, lfi->mbflim, lfi->lim, lfi->mbthr, 2);
+
+    if (u_ptr)
+        vp8_mbloop_filter_horizontal_edge_armv6(u_ptr, uv_stride, lfi->uvmbflim, lfi->uvlim, lfi->uvmbthr, 1);
+
+    if (v_ptr)
+        vp8_mbloop_filter_horizontal_edge_armv6(v_ptr, uv_stride, lfi->uvmbflim, lfi->uvlim, lfi->uvmbthr, 1);
+}
+
+void vp8_loop_filter_mbhs_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                                int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) u_ptr;
+    (void) v_ptr;
+    (void) uv_stride;
+    (void) simpler_lpf;
+    vp8_loop_filter_simple_horizontal_edge_armv6(y_ptr, y_stride, lfi->mbflim, lfi->lim, lfi->mbthr, 2);
+}
+
+/* Vertical MB Filtering */
+void vp8_loop_filter_mbv_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                               int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) simpler_lpf;
+    vp8_mbloop_filter_vertical_edge_armv6(y_ptr, y_stride, lfi->mbflim, lfi->lim, lfi->mbthr, 2);
+
+    if (u_ptr)
+        vp8_mbloop_filter_vertical_edge_armv6(u_ptr, uv_stride, lfi->uvmbflim, lfi->uvlim, lfi->uvmbthr, 1);
+
+    if (v_ptr)
+        vp8_mbloop_filter_vertical_edge_armv6(v_ptr, uv_stride, lfi->uvmbflim, lfi->uvlim, lfi->uvmbthr, 1);
+}
+
+void vp8_loop_filter_mbvs_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                                int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) u_ptr;
+    (void) v_ptr;
+    (void) uv_stride;
+    (void) simpler_lpf;
+    vp8_loop_filter_simple_vertical_edge_armv6(y_ptr, y_stride, lfi->mbflim, lfi->lim, lfi->mbthr, 2);
+}
+
+/* Horizontal B Filtering */
+void vp8_loop_filter_bh_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                              int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) simpler_lpf;
+    vp8_loop_filter_horizontal_edge_armv6(y_ptr + 4 * y_stride, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_horizontal_edge_armv6(y_ptr + 8 * y_stride, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_horizontal_edge_armv6(y_ptr + 12 * y_stride, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+
+    if (u_ptr)
+        vp8_loop_filter_horizontal_edge_armv6(u_ptr + 4 * uv_stride, uv_stride, lfi->uvflim, lfi->uvlim, lfi->uvthr, 1);
+
+    if (v_ptr)
+        vp8_loop_filter_horizontal_edge_armv6(v_ptr + 4 * uv_stride, uv_stride, lfi->uvflim, lfi->uvlim, lfi->uvthr, 1);
+}
+
+void vp8_loop_filter_bhs_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                               int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) u_ptr;
+    (void) v_ptr;
+    (void) uv_stride;
+    (void) simpler_lpf;
+    vp8_loop_filter_simple_horizontal_edge_armv6(y_ptr + 4 * y_stride, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_simple_horizontal_edge_armv6(y_ptr + 8 * y_stride, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_simple_horizontal_edge_armv6(y_ptr + 12 * y_stride, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+}
+
+/* Vertical B Filtering */
+void vp8_loop_filter_bv_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                              int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) simpler_lpf;
+    vp8_loop_filter_vertical_edge_armv6(y_ptr + 4, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_vertical_edge_armv6(y_ptr + 8, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_vertical_edge_armv6(y_ptr + 12, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+
+    if (u_ptr)
+        vp8_loop_filter_vertical_edge_armv6(u_ptr + 4, uv_stride, lfi->uvflim, lfi->uvlim, lfi->uvthr, 1);
+
+    if (v_ptr)
+        vp8_loop_filter_vertical_edge_armv6(v_ptr + 4, uv_stride, lfi->uvflim, lfi->uvlim, lfi->uvthr, 1);
+}
+
+void vp8_loop_filter_bvs_armv6(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                               int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) u_ptr;
+    (void) v_ptr;
+    (void) uv_stride;
+    (void) simpler_lpf;
+    vp8_loop_filter_simple_vertical_edge_armv6(y_ptr + 4, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_simple_vertical_edge_armv6(y_ptr + 8, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_simple_vertical_edge_armv6(y_ptr + 12, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+}
+#endif
+
+#if HAVE_ARMV7
+/* NEON loopfilter functions */
+/* Horizontal MB filtering */
+void vp8_loop_filter_mbh_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                              int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) simpler_lpf;
+    vp8_mbloop_filter_horizontal_edge_y_neon(y_ptr, y_stride, lfi->mbflim, lfi->lim, lfi->mbthr, 2);
+
+    if (u_ptr)
+        vp8_mbloop_filter_horizontal_edge_uv_neon(u_ptr, uv_stride, lfi->uvmbflim, lfi->uvlim, lfi->uvmbthr, v_ptr);
+}
+
+void vp8_loop_filter_mbhs_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                               int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) u_ptr;
+    (void) v_ptr;
+    (void) uv_stride;
+    (void) simpler_lpf;
+    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr, y_stride, lfi->mbflim, lfi->lim, lfi->mbthr, 2);
+}
+
+/* Vertical MB Filtering */
+void vp8_loop_filter_mbv_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                              int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) simpler_lpf;
+    vp8_mbloop_filter_vertical_edge_y_neon(y_ptr, y_stride, lfi->mbflim, lfi->lim, lfi->mbthr, 2);
+
+    if (u_ptr)
+        vp8_mbloop_filter_vertical_edge_uv_neon(u_ptr, uv_stride, lfi->uvmbflim, lfi->uvlim, lfi->uvmbthr, v_ptr);
+}
+
+void vp8_loop_filter_mbvs_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                               int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) u_ptr;
+    (void) v_ptr;
+    (void) uv_stride;
+    (void) simpler_lpf;
+    vp8_loop_filter_simple_vertical_edge_neon(y_ptr, y_stride, lfi->mbflim, lfi->lim, lfi->mbthr, 2);
+}
+
+/* Horizontal B Filtering */
+void vp8_loop_filter_bh_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                             int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) simpler_lpf;
+    vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 4 * y_stride, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 8 * y_stride, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_horizontal_edge_y_neon(y_ptr + 12 * y_stride, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+
+    if (u_ptr)
+        vp8_loop_filter_horizontal_edge_uv_neon(u_ptr + 4 * uv_stride, uv_stride, lfi->uvflim, lfi->uvlim, lfi->uvthr, v_ptr + 4 * uv_stride);
+}
+
+void vp8_loop_filter_bhs_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                              int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) u_ptr;
+    (void) v_ptr;
+    (void) uv_stride;
+    (void) simpler_lpf;
+    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr + 4 * y_stride, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr + 8 * y_stride, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_simple_horizontal_edge_neon(y_ptr + 12 * y_stride, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+}
+
+/* Vertical B Filtering */
+void vp8_loop_filter_bv_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                             int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) simpler_lpf;
+    vp8_loop_filter_vertical_edge_y_neon(y_ptr + 4, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_vertical_edge_y_neon(y_ptr + 8, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_vertical_edge_y_neon(y_ptr + 12, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+
+    if (u_ptr)
+        vp8_loop_filter_vertical_edge_uv_neon(u_ptr + 4, uv_stride, lfi->uvflim, lfi->uvlim, lfi->uvthr, v_ptr + 4);
+}
+
+void vp8_loop_filter_bvs_neon(unsigned char *y_ptr, unsigned char *u_ptr, unsigned char *v_ptr,
+                              int y_stride, int uv_stride, loop_filter_info *lfi, int simpler_lpf)
+{
+    (void) u_ptr;
+    (void) v_ptr;
+    (void) uv_stride;
+    (void) simpler_lpf;
+    vp8_loop_filter_simple_vertical_edge_neon(y_ptr + 4, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_simple_vertical_edge_neon(y_ptr + 8, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+    vp8_loop_filter_simple_vertical_edge_neon(y_ptr + 12, y_stride, lfi->flim, lfi->lim, lfi->thr, 2);
+}
+#endif
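(Editor's note on the calling convention above, inferred from the call sites rather than stated in the patch: the trailing integer passed to each edge filter appears to count 8-pixel groups, which is why the 16-pixel-wide Y plane passes 2 and each 8-pixel-wide chroma plane passes 1. It is also why the NEON loop_filter_uvfunction variants can take v_ptr as an extra final argument and filter both chroma planes in one 16-byte-wide pass, where the ARMv6 versions make one call per plane. A scalar sketch of the shape, with hypothetical names:

    typedef void loop_filter_fn(unsigned char *src, int pitch,
                                const signed char *flimit,
                                const signed char *limit,
                                const signed char *thresh,
                                int count);   /* number of 8-pixel groups */

    /* ARMv6 style: one call per chroma plane. */
    static void filter_mb_uv(loop_filter_fn *f,
                             unsigned char *u_ptr, unsigned char *v_ptr,
                             int uv_stride, const signed char *flim,
                             const signed char *lim, const signed char *thr)
    {
        if (u_ptr) f(u_ptr, uv_stride, flim, lim, thr, 1);
        if (v_ptr) f(v_ptr, uv_stride, flim, lim, thr, 1);
    }
)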
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/loopfilter_arm.h
@@ -0,0 +1,89 @@
+/*
+ *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef LOOPFILTER_ARM_H
+#define LOOPFILTER_ARM_H
+
+#if HAVE_ARMV6
+extern prototype_loopfilter_block(vp8_loop_filter_mbv_armv6);
+extern prototype_loopfilter_block(vp8_loop_filter_bv_armv6);
+extern prototype_loopfilter_block(vp8_loop_filter_mbh_armv6);
+extern prototype_loopfilter_block(vp8_loop_filter_bh_armv6);
+extern prototype_loopfilter_block(vp8_loop_filter_mbvs_armv6);
+extern prototype_loopfilter_block(vp8_loop_filter_bvs_armv6);
+extern prototype_loopfilter_block(vp8_loop_filter_mbhs_armv6);
+extern prototype_loopfilter_block(vp8_loop_filter_bhs_armv6);
+
+#if !CONFIG_RUNTIME_CPU_DETECT
+#undef  vp8_lf_normal_mb_v
+#define vp8_lf_normal_mb_v vp8_loop_filter_mbv_armv6
+
+#undef  vp8_lf_normal_b_v
+#define vp8_lf_normal_b_v vp8_loop_filter_bv_armv6
+
+#undef  vp8_lf_normal_mb_h
+#define vp8_lf_normal_mb_h vp8_loop_filter_mbh_armv6
+
+#undef  vp8_lf_normal_b_h
+#define vp8_lf_normal_b_h vp8_loop_filter_bh_armv6
+
+#undef  vp8_lf_simple_mb_v
+#define vp8_lf_simple_mb_v vp8_loop_filter_mbvs_armv6
+
+#undef  vp8_lf_simple_b_v
+#define vp8_lf_simple_b_v vp8_loop_filter_bvs_armv6
+
+#undef  vp8_lf_simple_mb_h
+#define vp8_lf_simple_mb_h vp8_loop_filter_mbhs_armv6
+
+#undef  vp8_lf_simple_b_h
+#define vp8_lf_simple_b_h vp8_loop_filter_bhs_armv6
+#endif
+#endif
+
+#if HAVE_ARMV7
+extern prototype_loopfilter_block(vp8_loop_filter_mbv_neon);
+extern prototype_loopfilter_block(vp8_loop_filter_bv_neon);
+extern prototype_loopfilter_block(vp8_loop_filter_mbh_neon);
+extern prototype_loopfilter_block(vp8_loop_filter_bh_neon);
+extern prototype_loopfilter_block(vp8_loop_filter_mbvs_neon);
+extern prototype_loopfilter_block(vp8_loop_filter_bvs_neon);
+extern prototype_loopfilter_block(vp8_loop_filter_mbhs_neon);
+extern prototype_loopfilter_block(vp8_loop_filter_bhs_neon);
+
+#if !CONFIG_RUNTIME_CPU_DETECT
+#undef  vp8_lf_normal_mb_v
+#define vp8_lf_normal_mb_v vp8_loop_filter_mbv_neon
+
+#undef  vp8_lf_normal_b_v
+#define vp8_lf_normal_b_v vp8_loop_filter_bv_neon
+
+#undef  vp8_lf_normal_mb_h
+#define vp8_lf_normal_mb_h vp8_loop_filter_mbh_neon
+
+#undef  vp8_lf_normal_b_h
+#define vp8_lf_normal_b_h vp8_loop_filter_bh_neon
+
+#undef  vp8_lf_simple_mb_v
+#define vp8_lf_simple_mb_v vp8_loop_filter_mbvs_neon
+
+#undef  vp8_lf_simple_b_v
+#define vp8_lf_simple_b_v vp8_loop_filter_bvs_neon
+
+#undef  vp8_lf_simple_mb_h
+#define vp8_lf_simple_mb_h vp8_loop_filter_mbhs_neon
+
+#undef  vp8_lf_simple_b_h
+#define vp8_lf_simple_b_h vp8_loop_filter_bhs_neon
+#endif
+#endif
+
+#endif
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/bilinearpredict16x16_neon.asm
@@ -0,0 +1,362 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_bilinear_predict16x16_neon|
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+; r0    unsigned char  *src_ptr,
+; r1    int  src_pixels_per_line,
+; r2    int  xoffset,
+; r3    int  yoffset,
+; r4    unsigned char *dst_ptr,
+; stack(r5) int  dst_pitch
+
+|vp8_bilinear_predict16x16_neon| PROC
+    push            {r4-r5, lr}
+
+    ldr             r12, _bifilter16_coeff_
+    ldr             r4, [sp, #12]           ;load parameters from stack
+    ldr             r5, [sp, #16]           ;load parameters from stack
+
+    cmp             r2, #0                  ;skip first_pass filter if xoffset=0
+    beq             secondpass_bfilter16x16_only
+
+    add             r2, r12, r2, lsl #3     ;calculate filter location
+
+    cmp             r3, #0                  ;skip second_pass filter if yoffset=0
+
+    vld1.s32        {d31}, [r2]             ;load first_pass filter
+
+    beq             firstpass_bfilter16x16_only
+
+    sub             sp, sp, #272            ;reserve space on stack for temporary storage
+    vld1.u8         {d2, d3, d4}, [r0], r1      ;load src data
+    mov             lr, sp
+    vld1.u8         {d5, d6, d7}, [r0], r1
+
+    mov             r2, #3                  ;loop counter
+    vld1.u8         {d8, d9, d10}, [r0], r1
+
+    vdup.8          d0, d31[0]              ;first_pass filter (d0 d1)
+    vld1.u8         {d11, d12, d13}, [r0], r1
+
+    vdup.8          d1, d31[4]
+
+;First Pass: output_height lines x output_width columns (17x16)
+filt_blk2d_fp16x16_loop_neon
+    pld             [r0]
+    pld             [r0, r1]
+    pld             [r0, r1, lsl #1]
+
+    vmull.u8        q7, d2, d0              ;(src_ptr[0] * vp8_filter[0])
+    vmull.u8        q8, d3, d0
+    vmull.u8        q9, d5, d0
+    vmull.u8        q10, d6, d0
+    vmull.u8        q11, d8, d0
+    vmull.u8        q12, d9, d0
+    vmull.u8        q13, d11, d0
+    vmull.u8        q14, d12, d0
+
+    vext.8          d2, d2, d3, #1          ;construct src_ptr[1]
+    vext.8          d5, d5, d6, #1
+    vext.8          d8, d8, d9, #1
+    vext.8          d11, d11, d12, #1
+
+    vmlal.u8        q7, d2, d1              ;(src_ptr[1] * vp8_filter[1])
+    vmlal.u8        q9, d5, d1
+    vmlal.u8        q11, d8, d1
+    vmlal.u8        q13, d11, d1
+
+    vext.8          d3, d3, d4, #1
+    vext.8          d6, d6, d7, #1
+    vext.8          d9, d9, d10, #1
+    vext.8          d12, d12, d13, #1
+
+    vmlal.u8        q8, d3, d1              ;(src_ptr[1] * vp8_filter[1])
+    vmlal.u8        q10, d6, d1
+    vmlal.u8        q12, d9, d1
+    vmlal.u8        q14, d12, d1
+
+    subs            r2, r2, #1
+
+    vqrshrn.u16    d14, q7, #7              ;shift/round/saturate to u8
+    vqrshrn.u16    d15, q8, #7
+    vqrshrn.u16    d16, q9, #7
+    vqrshrn.u16    d17, q10, #7
+    vqrshrn.u16    d18, q11, #7
+    vqrshrn.u16    d19, q12, #7
+    vqrshrn.u16    d20, q13, #7
+
+    vld1.u8         {d2, d3, d4}, [r0], r1      ;load src data
+    vqrshrn.u16    d21, q14, #7
+    vld1.u8         {d5, d6, d7}, [r0], r1
+
+    vst1.u8         {d14, d15, d16, d17}, [lr]!     ;store result
+    vld1.u8         {d8, d9, d10}, [r0], r1
+    vst1.u8         {d18, d19, d20, d21}, [lr]!
+    vld1.u8         {d11, d12, d13}, [r0], r1
+
+    bne             filt_blk2d_fp16x16_loop_neon
+
+;First-pass filtering for the remaining 5 lines
+    vld1.u8         {d14, d15, d16}, [r0], r1
+
+    vmull.u8        q9, d2, d0              ;(src_ptr[0] * vp8_filter[0])
+    vmull.u8        q10, d3, d0
+    vmull.u8        q11, d5, d0
+    vmull.u8        q12, d6, d0
+    vmull.u8        q13, d8, d0
+    vmull.u8        q14, d9, d0
+
+    vext.8          d2, d2, d3, #1          ;construct src_ptr[1]
+    vext.8          d5, d5, d6, #1
+    vext.8          d8, d8, d9, #1
+
+    vmlal.u8        q9, d2, d1              ;(src_ptr[1] * vp8_filter[1])
+    vmlal.u8        q11, d5, d1
+    vmlal.u8        q13, d8, d1
+
+    vext.8          d3, d3, d4, #1
+    vext.8          d6, d6, d7, #1
+    vext.8          d9, d9, d10, #1
+
+    vmlal.u8        q10, d3, d1             ;(src_ptr[1] * vp8_filter[1])
+    vmlal.u8        q12, d6, d1
+    vmlal.u8        q14, d9, d1
+
+    vmull.u8        q1, d11, d0
+    vmull.u8        q2, d12, d0
+    vmull.u8        q3, d14, d0
+    vmull.u8        q4, d15, d0
+
+    vext.8          d11, d11, d12, #1       ;construct src_ptr[1]
+    vext.8          d14, d14, d15, #1
+
+    vmlal.u8        q1, d11, d1             ;(src_ptr[1] * vp8_filter[1])
+    vmlal.u8        q3, d14, d1
+
+    vext.8          d12, d12, d13, #1
+    vext.8          d15, d15, d16, #1
+
+    vmlal.u8        q2, d12, d1             ;(src_ptr[1] * vp8_filter[1])
+    vmlal.u8        q4, d15, d1
+
+    vqrshrn.u16    d10, q9, #7              ;shift/round/saturate to u8
+    vqrshrn.u16    d11, q10, #7
+    vqrshrn.u16    d12, q11, #7
+    vqrshrn.u16    d13, q12, #7
+    vqrshrn.u16    d14, q13, #7
+    vqrshrn.u16    d15, q14, #7
+    vqrshrn.u16    d16, q1, #7
+    vqrshrn.u16    d17, q2, #7
+    vqrshrn.u16    d18, q3, #7
+    vqrshrn.u16    d19, q4, #7
+
+    vst1.u8         {d10, d11, d12, d13}, [lr]!         ;store result
+    vst1.u8         {d14, d15, d16, d17}, [lr]!
+    vst1.u8         {d18, d19}, [lr]!
+
+;Second pass: 16x16
+;secondpass_filter
+    add             r3, r12, r3, lsl #3
+    sub             lr, lr, #272
+
+    vld1.u32        {d31}, [r3]             ;load second_pass filter
+
+    vld1.u8         {d22, d23}, [lr]!       ;load src data
+
+    vdup.8          d0, d31[0]              ;second_pass filter parameters (d0 d1)
+    vdup.8          d1, d31[4]
+    mov             r12, #4                 ;loop counter
+
+filt_blk2d_sp16x16_loop_neon
+    vld1.u8         {d24, d25}, [lr]!
+    vmull.u8        q1, d22, d0             ;(src_ptr[0] * vp8_filter[0])
+    vld1.u8         {d26, d27}, [lr]!
+    vmull.u8        q2, d23, d0
+    vld1.u8         {d28, d29}, [lr]!
+    vmull.u8        q3, d24, d0
+    vld1.u8         {d30, d31}, [lr]!
+
+    vmull.u8        q4, d25, d0
+    vmull.u8        q5, d26, d0
+    vmull.u8        q6, d27, d0
+    vmull.u8        q7, d28, d0
+    vmull.u8        q8, d29, d0
+
+    vmlal.u8        q1, d24, d1             ;(src_ptr[pixel_step] * vp8_filter[1])
+    vmlal.u8        q2, d25, d1
+    vmlal.u8        q3, d26, d1
+    vmlal.u8        q4, d27, d1
+    vmlal.u8        q5, d28, d1
+    vmlal.u8        q6, d29, d1
+    vmlal.u8        q7, d30, d1
+    vmlal.u8        q8, d31, d1
+
+    subs            r12, r12, #1
+
+    vqrshrn.u16    d2, q1, #7               ;shift/round/saturate to u8
+    vqrshrn.u16    d3, q2, #7
+    vqrshrn.u16    d4, q3, #7
+    vqrshrn.u16    d5, q4, #7
+    vqrshrn.u16    d6, q5, #7
+    vqrshrn.u16    d7, q6, #7
+    vqrshrn.u16    d8, q7, #7
+    vqrshrn.u16    d9, q8, #7
+
+    vst1.u8         {d2, d3}, [r4], r5      ;store result
+    vst1.u8         {d4, d5}, [r4], r5
+    vst1.u8         {d6, d7}, [r4], r5
+    vmov            q11, q15
+    vst1.u8         {d8, d9}, [r4], r5
+
+    bne             filt_blk2d_sp16x16_loop_neon
+
+    add             sp, sp, #272
+
+    pop             {r4-r5,pc}
+
+;--------------------
+firstpass_bfilter16x16_only
+    mov             r2, #4                      ;loop counter
+    vdup.8          d0, d31[0]                  ;first_pass filter (d0 d1)
+    vdup.8          d1, d31[4]
+
+;First Pass: output_height lines x output_width columns (16x16)
+filt_blk2d_fpo16x16_loop_neon
+    vld1.u8         {d2, d3, d4}, [r0], r1      ;load src data
+    vld1.u8         {d5, d6, d7}, [r0], r1
+    vld1.u8         {d8, d9, d10}, [r0], r1
+    vld1.u8         {d11, d12, d13}, [r0], r1
+
+    pld             [r0]
+    pld             [r0, r1]
+    pld             [r0, r1, lsl #1]
+
+    vmull.u8        q7, d2, d0              ;(src_ptr[0] * vp8_filter[0])
+    vmull.u8        q8, d3, d0
+    vmull.u8        q9, d5, d0
+    vmull.u8        q10, d6, d0
+    vmull.u8        q11, d8, d0
+    vmull.u8        q12, d9, d0
+    vmull.u8        q13, d11, d0
+    vmull.u8        q14, d12, d0
+
+    vext.8          d2, d2, d3, #1          ;construct src_ptr[1]
+    vext.8          d5, d5, d6, #1
+    vext.8          d8, d8, d9, #1
+    vext.8          d11, d11, d12, #1
+
+    vmlal.u8        q7, d2, d1              ;(src_ptr[1] * vp8_filter[1])
+    vmlal.u8        q9, d5, d1
+    vmlal.u8        q11, d8, d1
+    vmlal.u8        q13, d11, d1
+
+    vext.8          d3, d3, d4, #1
+    vext.8          d6, d6, d7, #1
+    vext.8          d9, d9, d10, #1
+    vext.8          d12, d12, d13, #1
+
+    vmlal.u8        q8, d3, d1              ;(src_ptr[1] * vp8_filter[1])
+    vmlal.u8        q10, d6, d1
+    vmlal.u8        q12, d9, d1
+    vmlal.u8        q14, d12, d1
+
+    subs            r2, r2, #1
+
+    vqrshrn.u16    d14, q7, #7              ;shift/round/saturate to u8
+    vqrshrn.u16    d15, q8, #7
+    vqrshrn.u16    d16, q9, #7
+    vqrshrn.u16    d17, q10, #7
+    vqrshrn.u16    d18, q11, #7
+    vqrshrn.u16    d19, q12, #7
+    vqrshrn.u16    d20, q13, #7
+    vst1.u8         {d14, d15}, [r4], r5        ;store result
+    vqrshrn.u16    d21, q14, #7
+
+    vst1.u8         {d16, d17}, [r4], r5
+    vst1.u8         {d18, d19}, [r4], r5
+    vst1.u8         {d20, d21}, [r4], r5
+
+    bne             filt_blk2d_fpo16x16_loop_neon
+    pop             {r4-r5,pc}
+
+;---------------------
+secondpass_bfilter16x16_only
+;Second pass: 16x16
+;secondpass_filter
+    add             r3, r12, r3, lsl #3
+    mov             r12, #4                     ;loop counter
+    vld1.u32        {d31}, [r3]                 ;load second_pass filter
+    vld1.u8         {d22, d23}, [r0], r1        ;load src data
+
+    vdup.8          d0, d31[0]                  ;second_pass filter parameters (d0 d1)
+    vdup.8          d1, d31[4]
+
+filt_blk2d_spo16x16_loop_neon
+    vld1.u8         {d24, d25}, [r0], r1
+    vmull.u8        q1, d22, d0             ;(src_ptr[0] * vp8_filter[0])
+    vld1.u8         {d26, d27}, [r0], r1
+    vmull.u8        q2, d23, d0
+    vld1.u8         {d28, d29}, [r0], r1
+    vmull.u8        q3, d24, d0
+    vld1.u8         {d30, d31}, [r0], r1
+
+    vmull.u8        q4, d25, d0
+    vmull.u8        q5, d26, d0
+    vmull.u8        q6, d27, d0
+    vmull.u8        q7, d28, d0
+    vmull.u8        q8, d29, d0
+
+    vmlal.u8        q1, d24, d1             ;(src_ptr[pixel_step] * vp8_filter[1])
+    vmlal.u8        q2, d25, d1
+    vmlal.u8        q3, d26, d1
+    vmlal.u8        q4, d27, d1
+    vmlal.u8        q5, d28, d1
+    vmlal.u8        q6, d29, d1
+    vmlal.u8        q7, d30, d1
+    vmlal.u8        q8, d31, d1
+
+    vqrshrn.u16    d2, q1, #7               ;shift/round/saturate to u8
+    vqrshrn.u16    d3, q2, #7
+    vqrshrn.u16    d4, q3, #7
+    vqrshrn.u16    d5, q4, #7
+    vqrshrn.u16    d6, q5, #7
+    vqrshrn.u16    d7, q6, #7
+    vqrshrn.u16    d8, q7, #7
+    vqrshrn.u16    d9, q8, #7
+
+    vst1.u8         {d2, d3}, [r4], r5      ;store result
+    subs            r12, r12, #1
+    vst1.u8         {d4, d5}, [r4], r5
+    vmov            q11, q15
+    vst1.u8         {d6, d7}, [r4], r5
+    vst1.u8         {d8, d9}, [r4], r5
+
+    bne             filt_blk2d_spo16x16_loop_neon
+    pop             {r4-r5,pc}
+
+    ENDP
+
+;-----------------
+    AREA    bifilters16_dat, DATA, READWRITE            ;read/write by default
+;Data section holding the bilinear filter coefficients. Each DCD reserves one
+;word of storage. The label bifilter16_coeff can be used to access the data.
+;Data addresses: bifilter16_coeff, bifilter16_coeff+4, bifilter16_coeff+8 ...
+_bifilter16_coeff_
+    DCD     bifilter16_coeff
+bifilter16_coeff
+    DCD     128, 0, 112, 16, 96, 32, 80, 48, 64, 64, 48, 80, 32, 96, 16, 112
+
+    END
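(Editor's note: the coefficient table above is the standard VP8 bilinear set; each pair sums to 128, with pair i equal to (128 - 16*i, 16*i) for the eighth-pel offsets 0..7. The vqrshrn.u16 #7 instructions perform the +64 round and >>7 normalize in one saturating step, and the 272 bytes reserved on the stack hold the 17-row x 16-byte intermediate of the first pass. A hedged scalar reference for one tap, not libvpx code:

    /* Weighted average of two pixels, rounded; c0 + c1 == 128. */
    static unsigned char bilinear_tap(unsigned char a, unsigned char b,
                                      int c0, int c1)   /* e.g. 112, 16 */
    {
        return (unsigned char)((a * c0 + b * c1 + 64) >> 7);
    }
)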
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/bilinearpredict4x4_neon.asm
@@ -0,0 +1,135 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_bilinear_predict4x4_neon|
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+; r0    unsigned char  *src_ptr,
+; r1    int  src_pixels_per_line,
+; r2    int  xoffset,
+; r3    int  yoffset,
+; r4    unsigned char *dst_ptr,
+; stack(lr) int  dst_pitch
+
+|vp8_bilinear_predict4x4_neon| PROC
+    push            {r4, lr}
+
+    ldr             r12, _bifilter4_coeff_
+    ldr             r4, [sp, #8]            ;load parameters from stack
+    ldr             lr, [sp, #12]           ;load parameters from stack
+
+    cmp             r2, #0                  ;skip first_pass filter if xoffset=0
+    beq             skip_firstpass_filter
+
+;First pass: output_height lines x output_width columns (5x4)
+    vld1.u8         {d2}, [r0], r1          ;load src data
+    add             r2, r12, r2, lsl #3     ;calculate HFilter location (2 coeffs x 4 bytes = 8 bytes)
+
+    vld1.u8         {d3}, [r0], r1
+    vld1.u32        {d31}, [r2]             ;first_pass filter
+
+    vld1.u8         {d4}, [r0], r1
+    vdup.8          d0, d31[0]              ;first_pass filter (d0-d1)
+    vld1.u8         {d5}, [r0], r1
+    vdup.8          d1, d31[4]
+    vld1.u8         {d6}, [r0], r1
+
+    vshr.u64        q4, q1, #8              ;construct src_ptr[1]
+    vshr.u64        q5, q2, #8
+    vshr.u64        d12, d6, #8
+
+    vzip.32         d2, d3                  ;put 2-line data in 1 register (src_ptr[0])
+    vzip.32         d4, d5
+    vzip.32         d8, d9                  ;put 2-line data in 1 register (src_ptr[1])
+    vzip.32         d10, d11
+
+    vmull.u8        q7, d2, d0              ;(src_ptr[0] * vp8_filter[0])
+    vmull.u8        q8, d4, d0
+    vmull.u8        q9, d6, d0
+
+    vmlal.u8        q7, d8, d1              ;(src_ptr[1] * vp8_filter[1])
+    vmlal.u8        q8, d10, d1
+    vmlal.u8        q9, d12, d1
+
+    vqrshrn.u16    d28, q7, #7              ;shift/round/saturate to u8
+    vqrshrn.u16    d29, q8, #7
+    vqrshrn.u16    d30, q9, #7
+
+;Second pass: 4x4
+secondpass_filter
+    cmp             r3, #0                  ;skip second_pass filter if yoffset=0
+    beq             skip_secondpass_filter
+
+    add             r3, r12, r3, lsl #3 ;calculate Vfilter location
+    vld1.u32        {d31}, [r3]         ;load second_pass filter
+
+    vdup.8          d0, d31[0]              ;second_pass filter parameters (d0 d1)
+    vdup.8          d1, d31[4]
+
+    vmull.u8        q1, d28, d0
+    vmull.u8        q2, d29, d0
+
+    vext.8          d26, d28, d29, #4       ;construct src_ptr[pixel_step]
+    vext.8          d27, d29, d30, #4
+
+    vmlal.u8        q1, d26, d1
+    vmlal.u8        q2, d27, d1
+
+    add             r0, r4, lr
+    add             r1, r0, lr
+    add             r2, r1, lr
+
+    vqrshrn.u16    d2, q1, #7               ;shift/round/saturate to u8
+    vqrshrn.u16    d3, q2, #7
+
+    vst1.32         {d2[0]}, [r4]           ;store result
+    vst1.32         {d2[1]}, [r0]
+    vst1.32         {d3[0]}, [r1]
+    vst1.32         {d3[1]}, [r2]
+
+    pop             {r4, pc}
+
+;--------------------
+skip_firstpass_filter
+
+    vld1.32         {d28[0]}, [r0], r1      ;load src data
+    vld1.32         {d28[1]}, [r0], r1
+    vld1.32         {d29[0]}, [r0], r1
+    vld1.32         {d29[1]}, [r0], r1
+    vld1.32         {d30[0]}, [r0], r1
+
+    b               secondpass_filter
+
+;---------------------
+skip_secondpass_filter
+    vst1.32         {d28[0]}, [r4], lr      ;store result
+    vst1.32         {d28[1]}, [r4], lr
+    vst1.32         {d29[0]}, [r4], lr
+    vst1.32         {d29[1]}, [r4], lr
+
+    pop             {r4, pc}
+
+    ENDP
+
+;-----------------
+    AREA    bilinearfilters4_dat, DATA, READWRITE           ;read/write by default
+;Data section holding the bilinear filter coefficients. Each DCD reserves one
+;word of storage. The label bifilter4_coeff can be used to access the data.
+;Data addresses: bifilter4_coeff, bifilter4_coeff+4, bifilter4_coeff+8 ...
+_bifilter4_coeff_
+    DCD     bifilter4_coeff
+bifilter4_coeff
+    DCD     128, 0, 112, 16, 96, 32, 80, 48, 64, 64, 48, 80, 32, 96, 16, 112
+
+    END
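(Editor's note: the 4x4 variant packs two 4-pixel rows into each 64-bit D register, so vshr.u64 q4, q1, #8 shifts every packed row right by one byte and yields the src_ptr[1] operands for two rows at once; the vzip.32 instructions then interleave the rows into filtering order, as the inline comments note. The per-pixel arithmetic is the same as in the larger blocks; a fragment with illustrative names (src, tmp, f0, f1 are not libvpx identifiers):

    /* first pass: tmp is 5 rows x 4 columns, f0 + f1 == 128 */
    for (r = 0; r < 5; r++)
        for (c = 0; c < 4; c++)
            tmp[r][c] = (src[r][c] * f0 + src[r][c + 1] * f1 + 64) >> 7;
)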
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/bilinearpredict8x4_neon.asm
@@ -0,0 +1,140 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_bilinear_predict8x4_neon|
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+; r0    unsigned char  *src_ptr,
+; r1    int  src_pixels_per_line,
+; r2    int  xoffset,
+; r3    int  yoffset,
+; r4    unsigned char *dst_ptr,
+; stack(lr) int  dst_pitch
+
+|vp8_bilinear_predict8x4_neon| PROC
+    push            {r4, lr}
+
+    ldr             r12, _bifilter8x4_coeff_
+    ldr             r4, [sp, #8]            ;load parameters from stack
+    ldr             lr, [sp, #12]           ;load parameters from stack
+
+    cmp             r2, #0                  ;skip first_pass filter if xoffset=0
+    beq             skip_firstpass_filter
+
+;First pass: output_height lines x output_width columns (5x8)
+    add             r2, r12, r2, lsl #3     ;calculate filter location
+
+    vld1.u8         {q1}, [r0], r1          ;load src data
+    vld1.u32        {d31}, [r2]             ;load first_pass filter
+    vld1.u8         {q2}, [r0], r1
+    vdup.8          d0, d31[0]              ;first_pass filter (d0 d1)
+    vld1.u8         {q3}, [r0], r1
+    vdup.8          d1, d31[4]
+    vld1.u8         {q4}, [r0], r1
+
+    vmull.u8        q6, d2, d0              ;(src_ptr[0] * vp8_filter[0])
+    vld1.u8         {q5}, [r0], r1
+    vmull.u8        q7, d4, d0
+    vmull.u8        q8, d6, d0
+    vmull.u8        q9, d8, d0
+    vmull.u8        q10, d10, d0
+
+    vext.8          d3, d2, d3, #1          ;construct src_ptr[1]
+    vext.8          d5, d4, d5, #1
+    vext.8          d7, d6, d7, #1
+    vext.8          d9, d8, d9, #1
+    vext.8          d11, d10, d11, #1
+
+    vmlal.u8        q6, d3, d1              ;(src_ptr[1] * vp8_filter[1])
+    vmlal.u8        q7, d5, d1
+    vmlal.u8        q8, d7, d1
+    vmlal.u8        q9, d9, d1
+    vmlal.u8        q10, d11, d1
+
+    vqrshrn.u16    d22, q6, #7              ;shift/round/saturate to u8
+    vqrshrn.u16    d23, q7, #7
+    vqrshrn.u16    d24, q8, #7
+    vqrshrn.u16    d25, q9, #7
+    vqrshrn.u16    d26, q10, #7
+
+;Second pass: 4x8
+secondpass_filter
+    cmp             r3, #0                  ;skip second_pass filter if yoffset=0
+    beq             skip_secondpass_filter
+
+    add             r3, r12, r3, lsl #3
+    add             r0, r4, lr
+
+    vld1.u32        {d31}, [r3]             ;load second_pass filter
+    add             r1, r0, lr
+
+    vdup.8          d0, d31[0]              ;second_pass filter parameters (d0 d1)
+    vdup.8          d1, d31[4]
+
+    vmull.u8        q1, d22, d0             ;(src_ptr[0] * vp8_filter[0])
+    vmull.u8        q2, d23, d0
+    vmull.u8        q3, d24, d0
+    vmull.u8        q4, d25, d0
+
+    vmlal.u8        q1, d23, d1             ;(src_ptr[pixel_step] * vp8_filter[1])
+    vmlal.u8        q2, d24, d1
+    vmlal.u8        q3, d25, d1
+    vmlal.u8        q4, d26, d1
+
+    add             r2, r1, lr
+
+    vqrshrn.u16    d2, q1, #7               ;shift/round/saturate to u8
+    vqrshrn.u16    d3, q2, #7
+    vqrshrn.u16    d4, q3, #7
+    vqrshrn.u16    d5, q4, #7
+
+    vst1.u8         {d2}, [r4]              ;store result
+    vst1.u8         {d3}, [r0]
+    vst1.u8         {d4}, [r1]
+    vst1.u8         {d5}, [r2]
+
+    pop             {r4, pc}
+
+;--------------------
+skip_firstpass_filter
+    vld1.u8         {d22}, [r0], r1         ;load src data
+    vld1.u8         {d23}, [r0], r1
+    vld1.u8         {d24}, [r0], r1
+    vld1.u8         {d25}, [r0], r1
+    vld1.u8         {d26}, [r0], r1
+
+    b               secondpass_filter
+
+;---------------------
+skip_secondpass_filter
+    vst1.u8         {d22}, [r4], lr         ;store result
+    vst1.u8         {d23}, [r4], lr
+    vst1.u8         {d24}, [r4], lr
+    vst1.u8         {d25}, [r4], lr
+
+    pop             {r4, pc}
+
+    ENDP
+
+;-----------------
+    AREA    bifilters8x4_dat, DATA, READWRITE           ;read/write by default
+;Data section holding the bilinear filter coefficients. Each DCD reserves one
+;word of storage. The label bifilter8x4_coeff can be used to access the data.
+;Data addresses: bifilter8x4_coeff, bifilter8x4_coeff+4, bifilter8x4_coeff+8 ...
+_bifilter8x4_coeff_
+    DCD     bifilter8x4_coeff
+bifilter8x4_coeff
+    DCD     128, 0, 112, 16, 96, 32, 80, 48, 64, 64, 48, 80, 32, 96, 16, 112
+
+    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/bilinearpredict8x8_neon.asm
@@ -0,0 +1,188 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_bilinear_predict8x8_neon|
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+; r0    unsigned char  *src_ptr,
+; r1    int  src_pixels_per_line,
+; r2    int  xoffset,
+; r3    int  yoffset,
+; r4    unsigned char *dst_ptr,
+; stack(lr) int  dst_pitch
+
+|vp8_bilinear_predict8x8_neon| PROC
+    push            {r4, lr}
+
+    ldr             r12, _bifilter8_coeff_
+    ldr             r4, [sp, #8]            ;load parameters from stack
+    ldr             lr, [sp, #12]           ;load parameters from stack
+
+    cmp             r2, #0                  ;skip first_pass filter if xoffset=0
+    beq             skip_firstpass_filter
+
+;First pass: output_height lines x output_width columns (9x8)
+    add             r2, r12, r2, lsl #3     ;calculate filter location
+
+    vld1.u8         {q1}, [r0], r1          ;load src data
+    vld1.u32        {d31}, [r2]             ;load first_pass filter
+    vld1.u8         {q2}, [r0], r1
+    vdup.8          d0, d31[0]              ;first_pass filter (d0 d1)
+    vld1.u8         {q3}, [r0], r1
+    vdup.8          d1, d31[4]
+    vld1.u8         {q4}, [r0], r1
+
+    vmull.u8        q6, d2, d0              ;(src_ptr[0] * vp8_filter[0])
+    vmull.u8        q7, d4, d0
+    vmull.u8        q8, d6, d0
+    vmull.u8        q9, d8, d0
+
+    vext.8          d3, d2, d3, #1          ;construct src_ptr[1]
+    vext.8          d5, d4, d5, #1
+    vext.8          d7, d6, d7, #1
+    vext.8          d9, d8, d9, #1
+
+    vmlal.u8        q6, d3, d1              ;(src_ptr[1] * vp8_filter[1])
+    vmlal.u8        q7, d5, d1
+    vmlal.u8        q8, d7, d1
+    vmlal.u8        q9, d9, d1
+
+    vld1.u8         {q1}, [r0], r1          ;load src data
+    vqrshrn.u16    d22, q6, #7              ;shift/round/saturate to u8
+    vld1.u8         {q2}, [r0], r1
+    vqrshrn.u16    d23, q7, #7
+    vld1.u8         {q3}, [r0], r1
+    vqrshrn.u16    d24, q8, #7
+    vld1.u8         {q4}, [r0], r1
+    vqrshrn.u16    d25, q9, #7
+
+    ;first_pass filtering on the remaining 5 lines of data
+    vld1.u8         {q5}, [r0], r1
+
+    vmull.u8        q6, d2, d0              ;(src_ptr[0] * vp8_filter[0])
+    vmull.u8        q7, d4, d0
+    vmull.u8        q8, d6, d0
+    vmull.u8        q9, d8, d0
+    vmull.u8        q10, d10, d0
+
+    vext.8          d3, d2, d3, #1          ;construct src_ptr[1]
+    vext.8          d5, d4, d5, #1
+    vext.8          d7, d6, d7, #1
+    vext.8          d9, d8, d9, #1
+    vext.8          d11, d10, d11, #1
+
+    vmlal.u8        q6, d3, d1              ;(src_ptr[1] * vp8_filter[1])
+    vmlal.u8        q7, d5, d1
+    vmlal.u8        q8, d7, d1
+    vmlal.u8        q9, d9, d1
+    vmlal.u8        q10, d11, d1
+
+    vqrshrn.u16    d26, q6, #7              ;shift/round/saturate to u8
+    vqrshrn.u16    d27, q7, #7
+    vqrshrn.u16    d28, q8, #7
+    vqrshrn.u16    d29, q9, #7
+    vqrshrn.u16    d30, q10, #7
+
+;Second pass: 8x8
+secondpass_filter
+    cmp             r3, #0                  ;skip second_pass filter if yoffset=0
+    beq             skip_secondpass_filter
+
+    add             r3, r12, r3, lsl #3
+    add             r0, r4, lr
+
+    vld1.u32        {d31}, [r3]             ;load second_pass filter
+    add             r1, r0, lr
+
+    vdup.8          d0, d31[0]              ;second_pass filter parameters (d0 d1)
+    vdup.8          d1, d31[4]
+
+    vmull.u8        q1, d22, d0             ;(src_ptr[0] * vp8_filter[0])
+    vmull.u8        q2, d23, d0
+    vmull.u8        q3, d24, d0
+    vmull.u8        q4, d25, d0
+    vmull.u8        q5, d26, d0
+    vmull.u8        q6, d27, d0
+    vmull.u8        q7, d28, d0
+    vmull.u8        q8, d29, d0
+
+    vmlal.u8        q1, d23, d1             ;(src_ptr[pixel_step] * vp8_filter[1])
+    vmlal.u8        q2, d24, d1
+    vmlal.u8        q3, d25, d1
+    vmlal.u8        q4, d26, d1
+    vmlal.u8        q5, d27, d1
+    vmlal.u8        q6, d28, d1
+    vmlal.u8        q7, d29, d1
+    vmlal.u8        q8, d30, d1
+
+    vqrshrn.u16    d2, q1, #7               ;shift/round/saturate to u8
+    vqrshrn.u16    d3, q2, #7
+    vqrshrn.u16    d4, q3, #7
+    vqrshrn.u16    d5, q4, #7
+    vqrshrn.u16    d6, q5, #7
+    vqrshrn.u16    d7, q6, #7
+    vqrshrn.u16    d8, q7, #7
+    vqrshrn.u16    d9, q8, #7
+
+    vst1.u8         {d2}, [r4]              ;store result
+    vst1.u8         {d3}, [r0]
+    vst1.u8         {d4}, [r1], lr
+    vst1.u8         {d5}, [r1], lr
+    vst1.u8         {d6}, [r1], lr
+    vst1.u8         {d7}, [r1], lr
+    vst1.u8         {d8}, [r1], lr
+    vst1.u8         {d9}, [r1], lr
+
+    pop             {r4, pc}
+
+;--------------------
+skip_firstpass_filter
+    vld1.u8         {d22}, [r0], r1         ;load src data
+    vld1.u8         {d23}, [r0], r1
+    vld1.u8         {d24}, [r0], r1
+    vld1.u8         {d25}, [r0], r1
+    vld1.u8         {d26}, [r0], r1
+    vld1.u8         {d27}, [r0], r1
+    vld1.u8         {d28}, [r0], r1
+    vld1.u8         {d29}, [r0], r1
+    vld1.u8         {d30}, [r0], r1
+
+    b               secondpass_filter
+
+;---------------------
+skip_secondpass_filter
+    vst1.u8         {d22}, [r4], lr         ;store result
+    vst1.u8         {d23}, [r4], lr
+    vst1.u8         {d24}, [r4], lr
+    vst1.u8         {d25}, [r4], lr
+    vst1.u8         {d26}, [r4], lr
+    vst1.u8         {d27}, [r4], lr
+    vst1.u8         {d28}, [r4], lr
+    vst1.u8         {d29}, [r4], lr
+
+    pop             {r4, pc}
+
+    ENDP
+
+;-----------------
+    AREA    bifilters8_dat, DATA, READWRITE         ;read/write by default
+;Data section holding the bilinear filter coefficients. Each DCD reserves one
+;word of storage. The label bifilter8_coeff can be used to access the data.
+;Data addresses: bifilter8_coeff, bifilter8_coeff+4, bifilter8_coeff+8 ...
+_bifilter8_coeff_
+    DCD     bifilter8_coeff
+bifilter8_coeff
+    DCD     128, 0, 112, 16, 96, 32, 80, 48, 64, 64, 48, 80, 32, 96, 16, 112
+
+    END
new file mode 100644
--- /dev/null
+++ b/media/libvpx/vp8/common/arm/neon/buildintrapredictorsmby_neon.asm
@@ -0,0 +1,584 @@
+;
+;  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+;
+;  Use of this source code is governed by a BSD-style license
+;  that can be found in the LICENSE file in the root of the source
+;  tree. An additional intellectual property rights grant can be found
+;  in the file PATENTS.  All contributing project authors may
+;  be found in the AUTHORS file in the root of the source tree.
+;
+
+
+    EXPORT  |vp8_build_intra_predictors_mby_neon_func|
+    EXPORT  |vp8_build_intra_predictors_mby_s_neon_func|
+
+    ARM
+    REQUIRE8
+    PRESERVE8
+
+    AREA ||.text||, CODE, READONLY, ALIGN=2
+; r0    unsigned char *y_buffer
+; r1    unsigned char *ypred_ptr
+; r2    int y_stride
+; r3    int mode
+; stack int Up
+; stack int Left
+
+|vp8_build_intra_predictors_mby_neon_func| PROC
+    push            {r4-r8, lr}
+
+    cmp             r3, #0
+    beq             case_dc_pred
+    cmp             r3, #1
+    beq             case_v_pred
+    cmp             r3, #2
+    beq             case_h_pred
+    cmp             r3, #3
+    beq             case_tm_pred
+
+case_dc_pred
+    ldr             r4, [sp, #24]       ; Up
+    ldr             r5, [sp, #28]       ; Left
+
+    ; Default the DC average to 128
+    mov             r12, #128
+    vdup.u8         q0, r12
+
+    ; Zero out running sum
+    mov             r12, #0
+
+    ; r7 = Up + Left; if neither neighbour is available, keep the 128 default
+    adds            r7, r4, r5
+    beq             skip_dc_pred_up_left
+
+    ; Load above row, if it exists
+    cmp             r4, #0
+    beq             skip_dc_pred_up
+
+    sub             r6, r0, r2
+    vld1.8          {q1}, [r6]
+    vpaddl.u8       q2, q1
+    vpaddl.u16      q3, q2
+    vpaddl.u32      q4, q3
+
+    vmov.32         r4, d8[0]
+    vmov.32         r6, d9[0]
+
+    add             r12, r4, r6
+
+    ; Move back to integer registers
+
+skip_dc_pred_up
+
+    cmp             r5, #0
+    beq             skip_dc_pred_left
+
+    sub             r0, r0, #1
+
+    ; Load left row, if it exists
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0]
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+skip_dc_pred_left
+    add             r7, r7, #3          ; Shift
+    sub             r4, r7, #1
+    mov             r5, #1
+    add             r12, r12, r5, lsl r4
+    mov             r5, r12, lsr r7     ; expected_dc
+
+    vdup.u8         q0, r5
+
+skip_dc_pred_up_left
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+
+    pop             {r4-r8,pc}
+case_v_pred
+    ; Copy down above row
+    sub             r6, r0, r2
+    vld1.8          {q0}, [r6]
+
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q0}, [r1]!
+    pop             {r4-r8,pc}
+
+case_h_pred
+    ; Load 4x yleft_col
+    sub             r0, r0, #1
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u8         q0, r3
+    vdup.u8         q1, r4
+    vdup.u8         q2, r5
+    vdup.u8         q3, r6
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q1}, [r1]!
+    vst1.u8         {q2}, [r1]!
+    vst1.u8         {q3}, [r1]!
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u8         q0, r3
+    vdup.u8         q1, r4
+    vdup.u8         q2, r5
+    vdup.u8         q3, r6
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q1}, [r1]!
+    vst1.u8         {q2}, [r1]!
+    vst1.u8         {q3}, [r1]!
+
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u8         q0, r3
+    vdup.u8         q1, r4
+    vdup.u8         q2, r5
+    vdup.u8         q3, r6
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q1}, [r1]!
+    vst1.u8         {q2}, [r1]!
+    vst1.u8         {q3}, [r1]!
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u8         q0, r3
+    vdup.u8         q1, r4
+    vdup.u8         q2, r5
+    vdup.u8         q3, r6
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q1}, [r1]!
+    vst1.u8         {q2}, [r1]!
+    vst1.u8         {q3}, [r1]!
+
+    pop             {r4-r8,pc}
+
+case_tm_pred
+    ; Load yabove_row
+    sub             r3, r0, r2
+    vld1.8          {q8}, [r3]
+
+    ; Load ytop_left
+    sub             r3, r3, #1
+    ldrb            r7, [r3]
+
+    vdup.u16        q7, r7
+
+    ; Compute yabove_row - ytop_left
+    mov             r3, #1
+    vdup.u8         q0, r3
+
+    vmull.u8        q4, d16, d0
+    vmull.u8        q5, d17, d0
+
+    vsub.s16        q4, q4, q7
+    vsub.s16        q5, q5, q7
+
+    ; Load 4x yleft_col
+    sub             r0, r0, #1
+    mov             r12, #4
+
+case_tm_pred_loop
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+    vdup.u16        q0, r3
+    vdup.u16        q1, r4
+    vdup.u16        q2, r5
+    vdup.u16        q3, r6
+
+    vqadd.s16       q8, q0, q4
+    vqadd.s16       q9, q0, q5
+
+    vqadd.s16       q10, q1, q4
+    vqadd.s16       q11, q1, q5
+
+    vqadd.s16       q12, q2, q4
+    vqadd.s16       q13, q2, q5
+
+    vqadd.s16       q14, q3, q4
+    vqadd.s16       q15, q3, q5
+
+    vqshrun.s16     d0, q8, #0
+    vqshrun.s16     d1, q9, #0
+
+    vqshrun.s16     d2, q10, #0
+    vqshrun.s16     d3, q11, #0
+
+    vqshrun.s16     d4, q12, #0
+    vqshrun.s16     d5, q13, #0
+
+    vqshrun.s16     d6, q14, #0
+    vqshrun.s16     d7, q15, #0
+
+    vst1.u8         {q0}, [r1]!
+    vst1.u8         {q1}, [r1]!
+    vst1.u8         {q2}, [r1]!
+    vst1.u8         {q3}, [r1]!
+
+    subs            r12, r12, #1
+    bne             case_tm_pred_loop
+
+    pop             {r4-r8,pc}
+
+    ENDP
+
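+;; Editor's note (not part of the original source): a scalar C model of the
+;; DC case above, assuming the Up/Left stack arguments are 0-or-1 flags:
+;;     if (!up && !left) dc = 128;                    /* default */
+;;     sum   = 16 above pixels (if up) + 16 left pixels (if left);
+;;     shift = 3 + up + left;                         /* log2 of pixel count */
+;;     dc    = (sum + (1 << (shift - 1))) >> shift;   /* rounded average */
+;; The TM case computes clamp8(left + above - top_left) per pixel, with
+;; vqshrun.s16 supplying the final saturate-to-u8.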
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; r0    unsigned char *y_buffer
+; r1    unsigned char *ypred_ptr
+; r2    int y_stride
+; r3    int mode
+; stack int Up
+; stack int Left
+
+|vp8_build_intra_predictors_mby_s_neon_func| PROC
+    push            {r4-r8, lr}
+
+    mov             r1, r0      ;   unsigned char *ypred_ptr = x->dst.y_buffer; //x->Predictor;
+
+    cmp             r3, #0
+    beq             case_dc_pred_s
+    cmp             r3, #1
+    beq             case_v_pred_s
+    cmp             r3, #2
+    beq             case_h_pred_s
+    cmp             r3, #3
+    beq             case_tm_pred_s
+
+case_dc_pred_s
+    ldr             r4, [sp, #24]       ; Up
+    ldr             r5, [sp, #28]       ; Left
+
+    ; Default the DC average to 128
+    mov             r12, #128
+    vdup.u8         q0, r12
+
+    ; Zero out running sum
+    mov             r12, #0
+
+    ; r7 = Up + Left; if neither neighbour is available, keep the 128 default
+    adds            r7, r4, r5
+    beq             skip_dc_pred_up_left_s
+
+    ; Load above row, if it exists
+    cmp             r4, #0
+    beq             skip_dc_pred_up_s
+
+    sub             r6, r0, r2
+    vld1.8          {q1}, [r6]
+    vpaddl.u8       q2, q1
+    vpaddl.u16      q3, q2
+    vpaddl.u32      q4, q3
+
+    vmov.32         r4, d8[0]
+    vmov.32         r6, d9[0]
+
+    add             r12, r4, r6
+
+    ; Move back to integer registers
+
+skip_dc_pred_up_s
+
+    cmp             r5, #0
+    beq             skip_dc_pred_left_s
+
+    sub             r0, r0, #1
+
+    ; Load left row, if it exists
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0], r2
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+    ldrb            r3, [r0], r2
+    ldrb            r4, [r0], r2
+    ldrb            r5, [r0], r2
+    ldrb            r6, [r0]
+
+    add             r12, r12, r3
+    add             r12, r12, r4
+    add             r12, r12, r5
+    add             r12, r12, r6
+
+skip_dc_pred_left_s
+    add             r7, r7, #3          ; Shift
+    sub             r4, r7, #1
+    mov             r5, #1
+    add             r12, r12, r5, lsl r4
+    mov             r5, r12, lsr r7     ; expected_dc
+
+    vdup.u8         q0, r5
+
+skip_dc_pred_up_left_s
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+
+    pop             {r4-r8,pc}
+case_v_pred_s
+    ; Copy down above row
+    sub             r6, r0, r2
+    vld1.8          {q0}, [r6]
+
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8         {q0}, [r1], r2
+    vst1.u8