Merge TM -> JM
author: Brian Hackett <bhackett1024@gmail.com>
Tue, 19 Apr 2011 08:23:41 -0700
changeset 74959 1fe03044bfebcccb5322c8719e9049c74b25dadc
parent 74958 d78eef12a329730d699e6ad76cab7722acc1d3a1 (current diff)
parent 68573 96c70f2aae85d668b303403d3f085ae08195f3b9 (diff)
child 74960 fbcbc74151c16215a3a24d9377684e7b40a7e31a
push id: 2
push user: bsmedberg@mozilla.com
push date: Fri, 19 Aug 2011 14:38:13 +0000
milestone: 6.0a1
Merge TM -> JM
js/landbranch.pl
js/src/Makefile.in
js/src/Makefile.ref
js/src/config.mk
js/src/configure.in
js/src/jsapi.cpp
js/src/jsapi.h
js/src/jsarray.cpp
js/src/jsatom.cpp
js/src/jsclone.cpp
js/src/jscntxt.cpp
js/src/jscntxt.h
js/src/jscntxtinlines.h
js/src/jscompartment.cpp
js/src/jscompartment.h
js/src/jsconfig.mk
js/src/jsdate.cpp
js/src/jsdbgapi.cpp
js/src/jsexn.cpp
js/src/jsfun.cpp
js/src/jsgc.cpp
js/src/jsgc.h
js/src/jsgcinlines.h
js/src/jshashtable.h
js/src/jsinferinlines.h
js/src/jsinterp.cpp
js/src/jsinterp.h
js/src/jsinterpinlines.h
js/src/jsiter.cpp
js/src/jsobj.cpp
js/src/jsobj.h
js/src/jsobjinlines.h
js/src/json.cpp
js/src/jsparse.cpp
js/src/jsparse.h
js/src/jspropertycache.cpp
js/src/jsproxy.cpp
js/src/jsscope.cpp
js/src/jsscopeinlines.h
js/src/jsscript.cpp
js/src/jsscript.h
js/src/jsscriptinlines.h
js/src/jsstr.cpp
js/src/jstracer.cpp
js/src/jstracer.h
js/src/jstypedarray.cpp
js/src/jsvalue.h
js/src/jsweakmap.cpp
js/src/methodjit/BaseAssembler.h
js/src/methodjit/Compiler.cpp
js/src/methodjit/FastArithmetic.cpp
js/src/methodjit/FastOps.cpp
js/src/methodjit/FrameState-inl.h
js/src/methodjit/InvokeHelpers.cpp
js/src/methodjit/MachineRegs.h
js/src/methodjit/MethodJIT.cpp
js/src/methodjit/MethodJIT.h
js/src/methodjit/MonoIC.h
js/src/methodjit/NunboxAssembler.h
js/src/methodjit/PolyIC.cpp
js/src/methodjit/PolyIC.h
js/src/methodjit/Retcon.cpp
js/src/methodjit/Retcon.h
js/src/methodjit/StubCalls.cpp
js/src/methodjit/TypedArrayIC.h
js/src/ref-config/AIX4.1.mk
js/src/ref-config/AIX4.2.mk
js/src/ref-config/AIX4.3.mk
js/src/ref-config/Darwin.mk
js/src/ref-config/Darwin1.3.mk
js/src/ref-config/Darwin1.4.mk
js/src/ref-config/Darwin5.2.mk
js/src/ref-config/Darwin5.3.mk
js/src/ref-config/Darwin64.mk
js/src/ref-config/HP-UXB.10.10.mk
js/src/ref-config/HP-UXB.10.20.mk
js/src/ref-config/HP-UXB.11.00.mk
js/src/ref-config/HP-UXB.11.31.mk
js/src/ref-config/IRIX.mk
js/src/ref-config/IRIX5.3.mk
js/src/ref-config/IRIX6.1.mk
js/src/ref-config/IRIX6.2.mk
js/src/ref-config/IRIX6.3.mk
js/src/ref-config/IRIX6.5.mk
js/src/ref-config/Linux_All.mk
js/src/ref-config/Mac_OS10.0.mk
js/src/ref-config/OSF1V4.0.mk
js/src/ref-config/OSF1V5.0.mk
js/src/ref-config/SunOS4.1.4.mk
js/src/ref-config/SunOS5.10.mk
js/src/ref-config/SunOS5.3.mk
js/src/ref-config/SunOS5.4.mk
js/src/ref-config/SunOS5.5.1.mk
js/src/ref-config/SunOS5.5.mk
js/src/ref-config/SunOS5.6.mk
js/src/ref-config/SunOS5.7.mk
js/src/ref-config/SunOS5.8.mk
js/src/ref-config/SunOS5.9.mk
js/src/ref-config/WINNT4.0.mk
js/src/ref-config/WINNT5.0.mk
js/src/ref-config/WINNT5.1.mk
js/src/ref-config/WINNT5.2.mk
js/src/ref-config/WINNT6.0.mk
js/src/ref-config/dgux.mk
js/src/rules.mk
js/src/shell/js.cpp
js/src/tests/js1_8_5/regress/jstests.list
js/src/tests/manifest.py
js/src/tracejit/Writer.h
js/src/xpconnect/shell/xpcshell.cpp
--- a/ipc/testshell/XPCShellEnvironment.cpp
+++ b/ipc/testshell/XPCShellEnvironment.cpp
@@ -665,17 +665,17 @@ ProcessFile(JSContext *cx,
         startline = lineno;
         do {
             if (!GetLine(bufp, file, startline == lineno ? "js> " : "")) {
                 hitEOF = JS_TRUE;
                 break;
             }
             bufp += strlen(bufp);
             lineno++;
-        } while (!JS_BufferIsCompilableUnit(cx, obj, buffer, strlen(buffer)));
+        } while (!JS_BufferIsCompilableUnit(cx, JS_FALSE, obj, buffer, strlen(buffer)));
 
         /* Clear any pending exception from previous failed compiles.  */
         JS_ClearPendingException(cx);
         scriptObj =
             JS_CompileScriptForPrincipals(cx, obj, env->GetPrincipal(), buffer,
                                           strlen(buffer), "typein", startline);
         if (scriptObj) {
             JSErrorReporter older;
deleted file mode 100755
--- a/js/landbranch.pl
+++ /dev/null
@@ -1,227 +0,0 @@
-#! /usr/local/bin/perl5
-
-use File::Path;
-
-# The development branch is where primary development and checkins
-# are done on a day-to-day basis.
-$development_branch_prefix = "SpiderMonkey140";
-
-# Space-separated list of CVS-controlled directories to tag/merge
-$merge_dirs =
-    "mozilla/js/src " ;
-
-# When line below uncommented, don't recurse into subdirs
-#$recurse_flag = '-l';
-
-#----------------------------------------------------------------------------
-
-# The merge branch is itself a branch off of the development branch
-# at a point where the development branch is thought to be stable.
-# (A branch is used rather than a static tag because, inevitably,
-# the development branch is not quite as stable/buildable as was
-# thought.)  The contents of the merge branch will be copied to
-# the trunk when merging takes place.
-
-
-# The following tags are created automatically by this script:
-#
-# JS_STABLE_DROP
-#
-#     A static tag on the development branch (or a branch off the
-#     development branch) that indicates the code that should be merged
-#     into the trunk.  This is a "variable" tag in the sense that it is
-#     redefined after each merge.
-#
-# JS_LAST_STABLE_DROP
-#
-#     A static tag that is a copy of what the JS_STABLE_DROP tag was in
-#     the previous merge cycle.  This is a "variable" tag that is
-#     redefined after each merge.  Changes in the branch can be merged
-#     to the trunk by using:
-#
-#         cvs up -jJS_LAST_STABLE_DROP -jJS_STABLE_DROP
-#
-# JS_LANDING
-#
-#     A static tag that identifies the code on the trunk after the merge
-#     from the branch to the trunk takes place.  This is a "variable"
-#     tag that is redefined after each merge.  Changes on the trunk
-#     since the last branch landing can be seen by using:
-#
-#         cvs diff -rJS_LANDING -rHEAD
-#
-# JS_LANDING_mmddyyyy
-#
-#     This is a tag on the trunk which may be used for archaeological
-#     purposes.  This tag is made from the JS_LANDING tag.
-
-
-$development_branch = $development_branch_prefix . "_BRANCH";
-$development_base = $development_branch_prefix . "_BASE";
-
-sub help {
-print <<"END";
-$0: A tool for merging stable snapshots of JavaScript from a CVS
-development branch onto the trunk
-
-Landing a snapshot of the development branch consists of
-the following steps:
-
-  1) Tag all/some files on the branch to identify files to be merged.
-  2) Merge files from the branch into the trunk using a temporary
-     working directory.
-  3) Resolve any conflicts that arise as a result of the merge.
-  4) Commit merged changes to the trunk.
-  5) Make changes to resolve (build) difficulties and re-commit.
-     Repeat as necessary.
-  6) Backpropagate changes on the trunk to the development branch.
-  
-This script will assist with steps #2, #4 and #6:
-
-  $0 -merge JS_STABLE_10131998
-  $0 -commit
-  $0 -backpatch
-  
-END
-}
-
-sub log {
-    local($msg) = @_;
-    print LOGFILE $msg if $logfile;
-}
-
-# Similar to die built-in
-sub die {
-    local($msg) = @_;
-    &log($msg);
-    chomp($msg);
-    if ($logfile) {
-	$msg .= "\nSee $logfile for details.";
-    }
-    die "$msg\n";
-}
-
-# Similar to system() built-in
-sub system {
-    local(@args) = @_;
-    local($cmd) = join(" ", @args);
-    &log("Executing: $cmd\n");
-
-    if ($logfile) {
-	$cmd .= " >> $logfile 2>&1";
-	close(LOGFILE);
-    }
-
-    local($result) = 0xffff & system($cmd);
-
-    if ($logfile) {
-	open(LOGFILE, ">>$logfile");
-    }
-
-    return unless ($result);
-    $msg = "Died while executing $cmd";
-
-    if ($result == 0xff00) {
-	&die("$msg\nWhile executExecution failed due to perl error: $!. ");
-    } else {
-	$result >>= 8;
-	&die("$msg\nExecution failed; exit status: $result. ");
-    }
-}
-
-chomp($root_dir = `pwd`);
-
-# Default log file
-$logfile = $root_dir . "/log";
-
-while ($#ARGV >=0) {
-    $_ = shift;
- 
-    if (/-merge/) {
-	$do_tag = 1;
-	$do_checkout = 1;
-	$do_merge = 1;
-	$tag = shift;
-    } elsif (/-commit/ || /-ci/) {
-	$do_commit = 1;
-    } elsif (/-backpatch/) {
-	$do_backpatch = 1;
-    } elsif (/-log/) {
-	$logfile = shift;
-    } elsif (/-tag/) { # Debugging option
-	$do_tag = 1;
-	$tag = shift;
-    } elsif (/-co/) {  # Debugging option
-	$do_checkout = 1;
-    } else {
-	print STDERR "Illegal option: $_\n" unless (/-h/);
-	&help();
-	exit(1);
-    }
-}
-
-die "You must set your CVSROOT environment variable" if !$ENV{"CVSROOT"};
-
-if ($logfile) {
-    open(LOGFILE, ">$logfile") || die "Couldn't open log file \"$logfile\"";
-    print("Logging to file \"$logfile\".\n");
-}
-
-$trunk_dir = $root_dir . "/trunk";
-
-if ($do_tag) {
-    if (!$tag) {
-	&die("Must specify tag on command-line\n");
-    }
-
-    print("Tagging tree with tag JS_STABLE_DROP.\n");
-    &system("cvs rtag $recurse_flag -F -r $tag JS_STABLE_DROP $merge_dirs");
-}
-
-if ($do_checkout) {
-
-    # Delete trunk subdir if it already exists
-    if (-d $trunk_dir) {
-	&log("Deleting directory $trunk_dir\n");
-	rmtree ($trunk_dir, 0, 1);
-    }
-    &log("Creating directory $trunk_dir\n");
-    mkdir($trunk_dir, 0777) || die "Couldn't create directory $trunk_dir";
-
-    # Check out on trunk
-    print("Checking out $merge_dirs.\n");
-    chdir $trunk_dir;
-    &system("cvs co $recurse_flag -A $merge_dirs");
-}
-
-if ($do_merge) {
-    chdir $trunk_dir;
-    print("Merging from JS_STABLE_DROP into trunk\n");
-    &system("cvs up -jJS_LAST_STABLE_DROP -jJS_STABLE_DROP");
-}
-
-if ($do_commit) {
-    &die("No merged tree found.  Wrong directory ?") if (!chdir $trunk_dir);
-
-    ($_,$_,$_,$day,$mon,$year,$_,$_) = localtime(time());
-    if ($year < 30) {
-	$year = "20" . $year;
-    } else {
-	$year = "19" . $year;
-    }
-
-    $mmddyyyy = sprintf("%02d%02d%s", $mon, $day, $year);
-
-    print("Checking in code on trunk");
-    &system("cvs ci -m 'Stable drop of JavaScript interpreter code from " .
-	    "$development_branch'");
-
-    # Tag merged result
-    &system("cvs tag -F JS_LANDING");
-    &system("cvs tag -F JS_LANDING_$mmddyyyy");
-
-    # Move JS_LAST_STABLE_DROP tag forward
-    &system("cvs tag -F -rJS_STABLE_DROP JS_LAST_STABLE_DROP");
-}
-    
-    
--- a/js/src/Makefile.in
+++ b/js/src/Makefile.in
@@ -120,16 +120,17 @@ ifdef JS_SHARED_LIBRARY
 FORCE_SHARED_LIB = 1
 endif
 FORCE_STATIC_LIB = 1
 DIST_INSTALL = 1
 
 VPATH		= $(srcdir)
 
 CPPSRCS		= \
+		jsalloc.cpp \
 		jsanalyze.cpp \
 		jsapi.cpp \
 		jsarena.cpp \
 		jsarray.cpp \
 		jsatom.cpp \
 		jsbool.cpp \
 		jsclone.cpp \
 		jscntxt.cpp \
@@ -168,28 +169,30 @@ CPPSRCS		= \
 		jsreflect.cpp \
 		jsregexp.cpp \
 		jsscan.cpp \
 		jsscope.cpp \
 		jsscript.cpp \
 		jsstr.cpp \
 		jstypedarray.cpp \
 		jsutil.cpp \
+		jsweakmap.cpp \
 		jswrapper.cpp \
 		jsxdrapi.cpp \
 		jsxml.cpp \
 		prmjtime.cpp \
 		sharkctl.cpp \
 		$(NULL)
 
 INSTALLED_HEADERS = \
 		js-config.h \
 		jsautocfg.h \
 		$(CURDIR)/jsautokw.h \
 		js.msg \
+		jsalloc.h \
 		jsanalyze.h \
 		jsapi.h \
 		jsarray.h \
 		jsarena.h \
 		jsatom.h \
 		jsbit.h \
 		jsbool.h \
 		jsclist.h \
@@ -249,16 +252,17 @@ INSTALLED_HEADERS = \
 		jshotloop.h \
 		jstypedarray.h \
 		jstypes.h \
 		jsutil.h \
 		jsvector.h \
 		jstl.h \
 		jshashtable.h \
 		jsversion.h \
+		jsweakmap.h \
 		jswrapper.h \
 		jsxdrapi.h \
 		jsxml.h \
 		jsval.h \
 		jsvalue.h \
 		prmjtime.h \
 		$(NULL)
 
@@ -363,16 +367,19 @@ else
 ifdef SOLARIS_SUNPRO_CXX
 ASFILES +=	TrampolineSUNWX86.s
 endif
 endif
 endif
 ifeq (arm, $(TARGET_CPU))
 #CPPSRCS		+= only_on_arm.cpp
 endif
+ifeq (sparc, $(findstring sparc,$(TARGET_CPU)))
+ASFILES +=	TrampolineSparc.s
+endif
 #
 # END enclude sources for the method JIT
 #############################################
 
 endif
 
 ###############################################
 # BEGIN include sources for V8 dtoa
@@ -388,17 +395,17 @@ CPPSRCS += 	checks.cc \
 		platform.cc \
 		utils.cc \
 		$(NONE)
 
 #
 # END enclude sources for V8 dtoa
 #############################################
 
-ifeq (,$(filter-out powerpc sparc,$(TARGET_CPU)))
+ifeq (,$(filter-out powerpc,$(TARGET_CPU)))
 
 VPATH +=	$(srcdir)/assembler \
 		$(srcdir)/assembler/wtf \
 		$(srcdir)/yarr/pcre \
 		$(NULL)
 
 CPPSRCS += 	pcre_compile.cpp \
                 pcre_exec.cpp \
@@ -610,35 +617,35 @@ check:: check-ooms
 endif
 
 ## Prevent regressing in our deprecation of non-preferred memory management functions.
 # We use all the files in the distribution so that different configurations
 # don't give different results. We skip the contents of objdirs using |find|
 # (it can't be done with %-expansion, because the files we want to skip aren't
 # in the vpath).
 ALL_FILES=$(shell find $(srcdir) \( -name "*.cpp" -o -name "*.h" \) -not -path "*/dist/*")
-check-malloc-function-usage: $(filter-out %jsutil.h %jscntxt.h, $(ALL_FILES))
+check-malloc-function-usage: $(filter-out %jsalloc.h %jscntxt.h %jsutil.h, $(ALL_FILES))
 
 	# js_malloc and friends are only used by other memory managers, and should
 	# never be used elsewhere directly.
 	$(srcdir)/config/check_source_count.py "\bjs_malloc\b" 0 \
 		"in Makefile.in" "cx->malloc_ or rt->malloc_" $^
 	$(srcdir)/config/check_source_count.py "\bjs_calloc\b" 0 \
 		"in Makefile.in" "cx->calloc_ or rt->calloc_" $^
 	$(srcdir)/config/check_source_count.py "\bjs_realloc\b" 0 \
 		"in Makefile.in" "cx->realloc_ or rt->realloc_" $^
 	$(srcdir)/config/check_source_count.py "\bjs_free\b" 0 \
 		"in Makefile.in" "cx->free_" $^
 
 	# We desire these numbers to go down, not up. See "User guide to memory
 	# management within SpiderMonkey" in jsutil.h.
-	$(srcdir)/config/check_source_count.py OffTheBooks:: 54 \
+	$(srcdir)/config/check_source_count.py OffTheBooks:: 53 \
 		"in Makefile.in" "{cx,rt}->{new_,new_array,malloc_,calloc_,realloc_}" $^
 	# This should go to zero, if possible.
-	$(srcdir)/config/check_source_count.py UnwantedForeground:: 35 \
+	$(srcdir)/config/check_source_count.py UnwantedForeground:: 34 \
 		"in Makefile.in" "{cx,rt}->{free_,delete_,array_delete}" $^
 
 ifneq ($(OS_ARCH),WINNT) # FIXME: this should be made work on Windows too.
 check:: check-malloc-function-usage
 endif
 
 JITFLAGS = ,m,j,mj,mjp,am,amj,amjp,amd,n,mn,jn,mjn,mjpn,amn,amjn,amjpn,amdn
 check-jit-test::
deleted file mode 100644
--- a/js/src/Makefile.ref
+++ /dev/null
@@ -1,468 +0,0 @@
-# -*- Mode: makefile -*-
-# vim: ft=make
-#
-# ***** BEGIN LICENSE BLOCK *****
-# Version: MPL 1.1/GPL 2.0/LGPL 2.1
-#
-# The contents of this file are subject to the Mozilla Public License Version
-# 1.1 (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-# http://www.mozilla.org/MPL/
-#
-# Software distributed under the License is distributed on an "AS IS" basis,
-# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
-# for the specific language governing rights and limitations under the
-# License.
-#
-# The Original Code is Mozilla Communicator client code, released
-# March 31, 1998.
-#
-# The Initial Developer of the Original Code is
-# Netscape Communications Corporation.
-# Portions created by the Initial Developer are Copyright (C) 1998
-# the Initial Developer. All Rights Reserved.
-#
-# Contributor(s):
-#   Michael Ang <mang@subcarrier.org>
-#   Kevin Buhr <buhr@stat.wisc.edu>
-#
-# Alternatively, the contents of this file may be used under the terms of
-# either of the GNU General Public License Version 2 or later (the "GPL"),
-# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
-# in which case the provisions of the GPL or the LGPL are applicable instead
-# of those above. If you wish to allow use of your version of this file only
-# under the terms of either the GPL or the LGPL, and not to allow others to
-# use your version of this file under the terms of the MPL, indicate your
-# decision by deleting the provisions above and replace them with the notice
-# and other provisions required by the GPL or the LGPL. If you do not delete
-# the provisions above, a recipient may use your version of this file under
-# the terms of any one of the MPL, the GPL or the LGPL.
-#
-# ***** END LICENSE BLOCK *****
-
-#
-# JSRef GNUmake makefile.
-#
-# Note: dependency rules are missing for some files (some
-#       .h, all .msg, etc.)  Re-make clean if in doubt.
-#
-
-
-DEPTH = .
-
-include config.mk
-
-#NS_USE_NATIVE = 1
-
-ifndef NANOJIT_ARCH
-$(warning NANOJIT_ARCH not defined in config/$(OS_CONFIG).mk, JIT disabled)
-else
-ifdef DISABLE_JIT
-$(warning disabling JIT per build specification)
-else
-ENABLE_JIT=1
-endif
-endif
-
-ifdef ENABLE_JIT
-DEFINES += -DJS_TRACER
-DEFINES += -DFEATURE_NANOJIT
-INCLUDES += -Inanojit
-endif
-
-#ifndef BUILD_OPT
-#DEFINES += -Ivprof
-#endif
-
-# Look in OBJDIR to find jsautocfg.h, jsautokw.h, and js-config.h
-INCLUDES   += -I. -I$(OBJDIR)
-
-ifdef JS_THREADSAFE
-DEFINES += -DJS_THREADSAFE
-INCLUDES += -I$(DIST)/include/nspr
-ifdef USE_MSVC
-OTHER_LIBS += $(DIST)/lib/libnspr$(NSPR_LIBSUFFIX).lib
-else
-OTHER_LIBS += -L$(DIST)/lib -lnspr$(NSPR_LIBSUFFIX)
-endif
-endif
-
-ifdef JS_HAS_CTYPES
-DEFINES += -DJS_HAS_CTYPES
-INCLUDES += -I$(DIST)/include/nspr
-ifdef USE_MSVC
-OTHER_LIBS += $(DIST)/lib/libnspr$(NSPR_LIBSUFFIX).lib
-else
-OTHER_LIBS += -L$(DIST)/lib -lnspr$(NSPR_LIBSUFFIX)
-endif
-endif
-
-ifdef JS_NO_THIN_LOCKS
-DEFINES += -DJS_USE_ONLY_NSPR_LOCKS
-endif
-
-ifdef JS_GC_ZEAL
-DEFINES += -DJS_GC_ZEAL
-endif
-
-#
-# XCFLAGS may be set in the environment or on the gmake command line
-#
-#CFLAGS += -DDEBUG -DDEBUG_brendan -DJS_ARENAMETER -DJS_HASHMETER -DJS_DUMP_PROPTREE_STATS -DJS_DUMP_SCOPE_METERS -DJS_SCOPE_DEPTH_METER -DJS_BASIC_STATS
-CFLAGS          += $(OS_CFLAGS) $(DEFINES) $(INCLUDES) $(XCFLAGS)
-
-LDFLAGS		= $(XLDFLAGS)
-LDFLAGS += $(OS_LDFLAGS)
-
-ifdef MOZ_SHARK
-DEFINES += -DMOZ_SHARK
-CFLAGS += -F/System/Library/PrivateFrameworks
-LDFLAGS += -F/System/Library/PrivateFrameworks -framework CHUD
-endif
-ifdef MOZ_CALLGRIND
-DEFINES += -DMOZ_CALLGRIND
-endif
-ifdef MOZ_VTUNE
-DEFINES += -DMOZ_VTUNE
-CXXFLAGS += -IC:/Program\ Files/Intel/VTune/Analyzer/Include
-OTHER_LIBS += C:/Program\ Files/Intel/VTune/Analyzer/Lib/VtuneApi.lib
-endif
-
-ifndef NO_LIBM
-LDFLAGS += -lm
-endif
-
-# Prevent floating point errors caused by VC++ optimizations
-ifeq ($(OS_ARCH),WINNT)
-_MSC_VER = $(shell $(CXX) 2>&1 | sed -n 's/.*Compiler Version \([0-9]*\)\.\([0-9]*\).*/\1\2/p')
-ifeq (,$(filter-out 1200 1300 1310,$(_MSC_VER)))
-CFLAGS += -Op
-else
-CFLAGS += -fp:precise
-endif
-endif # WINNT
-
-#
-#	Server-related changes :
-#
-ifdef NES40
-DEFINES += -DNES40
-endif
-
-#
-# Line editing support.
-# Define JS_READLINE or JS_EDITLINE to enable line editing in the
-# js command-line interpreter.
-#
-ifdef JS_READLINE
-# For those platforms with the readline library installed.
-DEFINES += -DEDITLINE
-PROG_LIBS += -lreadline -ltermcap
-else
-ifdef JS_EDITLINE
-# Use the editline library, built locally.
-PREDIRS += editline
-DEFINES += -DEDITLINE
-PROG_LIBS += $(OBJDIR)/editline/libedit.a
-endif
-endif
-
-# For purify
-PURE_CFLAGS     = -DXP_UNIX $(OPTIMIZER) $(PURE_OS_CFLAGS) $(DEFINES) \
-                  $(INCLUDES) $(XCFLAGS)
-
-#
-# JS file lists
-#
-JS_HFILES =		\
-	jsarray.h	\
-	jsatom.h	\
-	jsbool.h	\
-	jscntxt.h	\
-	jsdate.h	\
-	jsemit.h	\
-	jsexn.h		\
-	jsfun.h		\
-	jsgc.h		\
-	jsinterp.h	\
-	jsiter.h	\
-	jslibmath.h	\
-	jslock.h	\
-	jsmath.h	\
-	jsnum.h		\
-	jsobj.h		\
-	json.h		\
-	jsopcode.h	\
-	jsparse.h	\
-	jsarena.h	\
-	jsclist.h	\
-	jsdhash.h	\
-	jsdtoa.h	\
-	jshash.h	\
-	jslong.h	\
-	jstypes.h	\
-	jsprvtd.h	\
-	jspubtd.h	\
-	jsregexp.h	\
-	jsscan.h	\
-	jsscope.h	\
-	jsscript.h	\
-	jsstr.h		\
-	jsversion.h	\
-	jsxdrapi.h	\
-	jsxml.h		\
-	$(NULL)
-
-ifdef ENABLE_JIT
-JS_HFILES +=			\
-	jstracer.h		\
-	nanojit/Assembler.h     \
-	nanojit/LIR.h		\
-	nanojit/Native$(NANOJIT_ARCH).h	\
-	nanojit/avmplus.h	\
-	nanojit/vm_fops.h	\
-	nanojit/Fragmento.h	\
-	nanojit/Native.h	\
-	nanojit/RegAlloc.h	\
-	nanojit/nanojit.h	\
-	$(NULL)
-endif
-
-ifndef BUILD_OPT
-#JS_HFILES +=            \
-#        vprof/vprof.h   \
-#        $(NULL)
-endif
-
-API_HFILES =		\
-	jsapi.h		\
-	jsdbgapi.h	\
-	$(NULL)
-
-OTHER_HFILES =		\
-	jsbit.h		\
-	jscompat.h	\
-	jscpucfg.h	\
-	jsotypes.h	\
-	prmjtime.h	\
-	resource.h	\
-	jsopcode.tbl	\
-	jsproto.tbl     \
-	js.msg		\
-	jsshell.msg	\
-	jskeyword.tbl	\
-	$(NULL)
-
-ifndef PREBUILT_CPUCFG
-OTHER_HFILES += $(OBJDIR)/jsautocfg.h
-endif
-OTHER_HFILES += $(OBJDIR)/jsautokw.h $(OBJDIR)/js-config.h
-
-HFILES = $(JS_HFILES) $(API_HFILES) $(OTHER_HFILES)
-
-JS_CPPFILES =		\
-	jsapi.cpp	\
-	jsarena.cpp	\
-	jsarray.cpp	\
-	jsatom.cpp	\
-	jsbool.cpp	\
-	jscntxt.cpp	\
-	jsdate.cpp	\
-	jsdbgapi.cpp	\
-	jsdhash.cpp	\
-	jsdtoa.cpp	\
-	jsemit.cpp	\
-	jsexn.cpp	\
-	jsfun.cpp	\
-	jsgc.cpp	\
-	jshash.cpp	\
-	jsinterp.cpp	\
-	jsinvoke.cpp    \
-	jsiter.cpp	\
-	jslock.cpp	\
-	jslog2.cpp	\
-	jslong.cpp	\
-	jsmath.cpp	\
-	jsnum.cpp	\
-	jsobj.cpp	\
-	json.cpp	\
-	jsopcode.cpp	\
-	jsparse.cpp	\
-	jsprf.cpp	\
-	jsregexp.cpp	\
-	jsscan.cpp	\
-	jsscope.cpp	\
-	jsscript.cpp	\
-	jsstr.cpp	\
-	jsutil.cpp	\
-	jsxdrapi.cpp	\
-	jsxml.cpp	\
-	prmjtime.cpp	\
-	$(NULL)
-
-ifdef ENABLE_JIT
-JS_CPPFILES +=		       \
-	jsbuiltins.cpp         \
-	jstracer.cpp	       \
-	nanojit/Assembler.cpp  \
-	nanojit/Fragmento.cpp  \
-	nanojit/LIR.cpp        \
-	nanojit/Native$(NANOJIT_ARCH).cpp \
-	nanojit/RegAlloc.cpp   \
-	nanojit/avmplus.cpp    \
-	$(NULL)
-
-endif
-
-ifndef BUILD_OPT
-#JS_CPPFILES +=                 \
-#        vprof/vprof.cpp        \
-#        $(NULL)
-endif
-
-LIB_CPPFILES  = $(JS_CPPFILES)
-LIB_ASFILES := $(wildcard *_$(OS_ARCH).s)
-PROG_CPPFILES = js.cpp
-
-ifdef USE_MSVC
-LIBRARY = $(OBJDIR)/js32.lib
-SHARED_LIBRARY = $(OBJDIR)/js32.dll
-PROGRAM = $(OBJDIR)/js.exe
-else
-LIBRARY = $(OBJDIR)/libjs.a
-SHARED_LIBRARY = $(OBJDIR)/libjs.$(SO_SUFFIX)
-PROGRAM = $(OBJDIR)/js
-endif
-
-include rules.mk
-
-MOZ_DEPTH = ../..
-include jsconfig.mk
-
-nsinstall-target:
-	cd ../../config; $(MAKE) OBJDIR=$(OBJDIR) OBJDIR_NAME=$(OBJDIR)
-
-#
-# Automatic header generation
-#
-
-AUTO_HEADERS =					\
-	$(OBJDIR)/jsautokw.h			\
-	$(OBJDIR)/jsautooplen.h			\
-	$(NULL)
-
-$(OBJDIR)/jsautokw.h: jskeyword.tbl
-
-$(OBJDIR)/jsautooplen.h: jsopcode.tbl
-
-GARBAGE += $(AUTO_HEADERS)
-GARBAGE	+= $(AUTO_HEADERS:$(OBJDIR)/jsauto%.h=$(OBJDIR)/js%gen$(HOST_BIN_SUFFIX))
-
-ifdef USE_MSVC
-
-GARBAGE	+= $(AUTO_HEADERS:$(OBJDIR)/jsauto%.h=$(OBJDIR)/js%gen.obj)
-
-$(AUTO_HEADERS): $(OBJDIR)/jsauto%.h: js%gen.cpp
-	@$(MAKE_OBJDIR)
-	$(CXX) -Fo$(OBJDIR)/ -c $(CFLAGS) $(OPTIMIZER) $<
-	link.exe -out:"$(OBJDIR)/js$*gen$(HOST_BIN_SUFFIX)" $(EXE_LINK_FLAGS) $(OBJDIR)/js$*gen.obj
-	$(OBJDIR)/js$*gen$(HOST_BIN_SUFFIX) $@
-else
-
-GARBAGE	+= $(AUTO_HEADERS:$(OBJDIR)/jsauto%.h=$(OBJDIR)/js%gen.d)
-$(AUTO_HEADERS): $(OBJDIR)/jsauto%.h: js%gen.cpp
-	@$(MAKE_OBJDIR)
-	$(CXX) -o $(OBJDIR)/js$*gen$(HOST_BIN_SUFFIX) $(CFLAGS) $(OPTIMIZER) $(LDFLAGS) $<
-	$(OBJDIR)/js$*gen$(HOST_BIN_SUFFIX) $@
-
-endif
-
-# force creation of autoheaders before compiling any source that may use them
-$(LIB_OBJS) : $(AUTO_HEADERS)
-
-#
-# An installed header file describing configuration options that affect
-# the API.
-#
-
-# Avoid rebuilding unless js-config.h's contents actually change.  The
-# timestamp on js-config.h.stamp corresponds to the last time we
-# checked that js-config.h was up to date.  If the stamp changes but
-# js-config.h does not, then make concludes that targets depending on
-# js-config.h don't need to be rebuilt.  The dummy '@true' rule here
-# keeps make from concluding that js-config.h never changes.
-$(OBJDIR)/js-config.h: $(OBJDIR)/js-config.h.stamp
-	@true
-
-js-config-switch=$(if $(value $($1)),-e 's/\#undef $1/\#define $1/')
-$(OBJDIR)/js-config.h.stamp: js-config.h.in Makefile.ref
-	sed < $< > $(@:.stamp=.tmp)			\
-	    $(call js-config-switch,JS_THREADSAFE)	\
-	    $(call js-config-switch,JS_HAS_CTYPES)		\
-	    $(call js-config-switch,JS_GC_ZEAL)		\
-	    -e :dummy
-	if ! [ -f $(@:.stamp=) ] || ! cmp $(@:.stamp=.tmp) $(@:.stamp=); then \
-	    mv $(@:.stamp=.tmp) $(@:.stamp=);				      \
-	fi
-	touch $@
-
-GARBAGE += $(OBJDIR)/js-config.h $(OBJDIR)/js-config.h.stamp
-
-# Force creation of js-config.h before compiling any source that may use it.
-$(LIB_OBJS) : $(OBJDIR)/js-config.h
-
-#
-# JS shell executable
-#
-
-ifdef USE_MSVC
-$(PROGRAM): $(PROG_OBJS) $(LIBRARY)
-	link.exe -out:"$@" $(EXE_LINK_FLAGS) $^
-else
-$(PROGRAM): $(PROG_OBJS) $(LIBRARY)
-	$(CXX) -o $@ $(CFLAGS) $(PROG_OBJS) $(LIBRARY) $(LDFLAGS) $(OTHER_LIBS) \
-	    $(PROG_LIBS)
-endif
-
-$(PROGRAM).pure: $(PROG_OBJS) $(LIBRARY)
-	purify $(PUREFLAGS) \
-	    $(CXX) -o $@ $(PURE_OS_CFLAGS) $(PROG_OBJS) $(LIBRARY) $(LDFLAGS) \
-		$(OTHER_LIBS) $(PROG_LIBS)
-
-ifndef PREBUILT_CPUCFG
-$(filter-out jscpucfg.h $(OBJDIR)/jsautocfg.h, $(HFILES)) $(CPPFILES): $(OBJDIR)/jsautocfg.h
-
-$(OBJDIR)/jsautocfg.h: $(OBJDIR)/jscpucfg
-	rm -f $@
-	$(OBJDIR)/jscpucfg > $@
-
-$(OBJDIR)/jscpucfg: $(OBJDIR)/jscpucfg.o
-	$(CXX) $(OS_LDFLAGS) -o $@ $(OBJDIR)/jscpucfg.o
-
-GARBAGE += $(OBJDIR)/jsautocfg.h $(OBJDIR)/jscpucfg \
-	   $(OBJDIR)/jscpucfg.o $(OBJDIR)/jscpucfg.d
-endif
-
-# Automatic make dependencies files
-DEPENDENCIES    = $(CPPFILES:%.cpp=$(OBJDIR)/%.d)
-
-#
-# Hardwire dependencies for some files 
-#
-ifdef USE_MSVC
-OBJ=obj
-else
-OBJ=o
-endif
-
-$(OBJDIR)/jsinvoke.$(OBJ): jsinterp.h jsinterp.cpp
-$(OBJDIR)/jsinvoke.obj : jsinterp.h jsinterp.cpp
-
--include $(DEPENDENCIES)
-
-TARNAME = jsref.tar
-TARFILES = files `cat files`
-
-SUFFIXES: .i
-%.i: %.cpp
-	$(CXX) -C -E $(CFLAGS) $< > $*.i
--- a/js/src/assembler/assembler/MacroAssembler.h
+++ b/js/src/assembler/assembler/MacroAssembler.h
@@ -49,16 +49,20 @@ namespace JSC { typedef MacroAssemblerMI
 #elif WTF_CPU_X86
 #include "MacroAssemblerX86.h"
 namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; }
 
 #elif WTF_CPU_X86_64
 #include "MacroAssemblerX86_64.h"
 namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; }
 
+#elif WTF_CPU_SPARC
+#include "MacroAssemblerSparc.h"
+namespace JSC { typedef MacroAssemblerSparc MacroAssemblerBase; }
+
 #else
 #error "The MacroAssembler is not supported on this platform."
 #endif
 
 
 namespace JSC {
 
 class MacroAssembler : public MacroAssemblerBase {
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/MacroAssemblerSparc.h
@@ -0,0 +1,1491 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
+ * May 28, 2008.
+ *
+ * The Initial Developer of the Original Code is
+ * Leon Sha <leon.sha@oracle.com>
+ * 
+ * Portions created by the Initial Developer are Copyright (C) 2010-2011
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef MacroAssemblerSparc_h
+#define MacroAssemblerSparc_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE_ASSEMBLER && WTF_CPU_SPARC
+
+#include "SparcAssembler.h"
+#include "AbstractMacroAssembler.h"
+
+namespace JSC {
+
+    class MacroAssemblerSparc : public AbstractMacroAssembler<SparcAssembler> {
+    public:
+        enum Condition {
+            Equal = SparcAssembler::ConditionE,
+            NotEqual = SparcAssembler::ConditionNE,
+            Above = SparcAssembler::ConditionGU,
+            AboveOrEqual = SparcAssembler::ConditionCC,
+            Below = SparcAssembler::ConditionCS,
+            BelowOrEqual = SparcAssembler::ConditionLEU,
+            GreaterThan = SparcAssembler::ConditionG,
+            GreaterThanOrEqual = SparcAssembler::ConditionGE,
+            LessThan = SparcAssembler::ConditionL,
+            LessThanOrEqual = SparcAssembler::ConditionLE,
+            Overflow = SparcAssembler::ConditionVS,
+            Signed = SparcAssembler::ConditionNEG,
+            Zero = SparcAssembler::ConditionE,
+            NonZero = SparcAssembler::ConditionNE
+        };
+
+        enum DoubleCondition {
+            // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+            DoubleEqual = SparcAssembler::DoubleConditionE,
+            DoubleNotEqual = SparcAssembler::DoubleConditionNE,
+            DoubleGreaterThan = SparcAssembler::DoubleConditionG,
+            DoubleGreaterThanOrEqual = SparcAssembler::DoubleConditionGE,
+            DoubleLessThan = SparcAssembler::DoubleConditionL,
+            DoubleLessThanOrEqual = SparcAssembler::DoubleConditionLE,
+            // If either operand is NaN, these conditions always evaluate to true.
+            DoubleEqualOrUnordered = SparcAssembler::DoubleConditionUE,
+            DoubleNotEqualOrUnordered = SparcAssembler::DoubleConditionNE,
+            DoubleGreaterThanOrUnordered = SparcAssembler::DoubleConditionUG,
+            DoubleGreaterThanOrEqualOrUnordered = SparcAssembler::DoubleConditionUGE,
+            DoubleLessThanOrUnordered = SparcAssembler::DoubleConditionUL,
+            DoubleLessThanOrEqualOrUnordered = SparcAssembler::DoubleConditionULE
+        };
+
+        static const RegisterID stackPointerRegister = SparcRegisters::sp;
+
+        static const Scale ScalePtr = TimesFour;
+        static const unsigned int TotalRegisters = 32;
+
+        void add32(RegisterID src, RegisterID dest)
+        {
+            m_assembler.addcc_r(dest, src, dest);
+        }
+
+        void add32(Imm32 imm, Address address)
+        {
+            load32(address, SparcRegisters::g2);
+            add32(imm, SparcRegisters::g2);
+            store32(SparcRegisters::g2, address);
+        }
+
+        void add32(Imm32 imm, RegisterID dest)
+        {
+            if (m_assembler.isimm13(imm.m_value))
+                m_assembler.addcc_imm(dest, imm.m_value, dest);
+            else {
+                m_assembler.move_nocheck(imm.m_value, SparcRegisters::g3);
+                m_assembler.addcc_r(dest, SparcRegisters::g3, dest);
+            }
+        }
+
+        void add32(Address src, RegisterID dest)
+        {
+            load32(src, SparcRegisters::g2);
+            m_assembler.addcc_r(dest, SparcRegisters::g2, dest);
+        }
+
+        void and32(Address src, RegisterID dest)
+        {
+            load32(src, SparcRegisters::g2);
+            m_assembler.andcc_r(dest, SparcRegisters::g2, dest);
+        }
+
+        void add32(Imm32 imm, RegisterID src, RegisterID dest)
+        {
+            if (m_assembler.isimm13(imm.m_value))
+                m_assembler.addcc_imm(src, imm.m_value, dest);
+            else {
+                m_assembler.move_nocheck(imm.m_value, SparcRegisters::g3);
+                m_assembler.addcc_r(src, SparcRegisters::g3, dest);
+            }
+        }
+
+        void and32(RegisterID src, RegisterID dest)
+        {
+            m_assembler.andcc_r(dest, src, dest);
+        }
+
+        void and32(Imm32 imm, RegisterID dest)
+        {
+            if (m_assembler.isimm13(imm.m_value))
+                m_assembler.andcc_imm(dest, imm.m_value, dest);
+            else {
+                m_assembler.move_nocheck(imm.m_value, SparcRegisters::g3);
+                m_assembler.andcc_r(dest, SparcRegisters::g3, dest);
+            }
+        }
+
+
+        void lshift32(RegisterID shift_amount, RegisterID dest)
+        {
+            m_assembler.sll_r(dest, shift_amount, dest);
+        }
+
+        void lshift32(Imm32 imm, RegisterID dest)
+        {
+            // No need to range-check imm.m_value:
+            // only the low 5 bits of imm.m_value are used by the shift anyway.
+            m_assembler.sll_imm(dest, imm.m_value, dest);
+        }
+
+        void mul32(RegisterID src, RegisterID dest)
+        {
+            m_assembler.smulcc_r(dest, src, dest);
+        }
+
+        void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+        {
+            if (m_assembler.isimm13(imm.m_value))
+                m_assembler.smulcc_imm(dest, imm.m_value, dest);
+            else {
+                m_assembler.move_nocheck(imm.m_value, SparcRegisters::g3);
+                m_assembler.smulcc_r(SparcRegisters::g3, dest, dest);
+            }
+        }
+
+        void neg32(RegisterID srcDest)
+        {
+            m_assembler.subcc_r(SparcRegisters::g0, srcDest, srcDest);
+        }
+
+        void not32(RegisterID dest)
+        {
+            m_assembler.xnorcc_r(dest, SparcRegisters::g0, dest);
+        }
+
+        void or32(RegisterID src, RegisterID dest)
+        {
+            m_assembler.orcc_r(dest, src, dest);
+        }
+
+        void or32(Imm32 imm, RegisterID dest)
+        {
+            if (m_assembler.isimm13(imm.m_value))
+                m_assembler.orcc_imm(dest, imm.m_value, dest);
+            else {
+                m_assembler.move_nocheck(imm.m_value, SparcRegisters::g3);
+                m_assembler.or_r(SparcRegisters::g3, dest, dest);
+            }
+        }
+
+
+        void or32(Address address, RegisterID dest)
+        {
+            load32(address, SparcRegisters::g2);
+            or32(SparcRegisters::g2, dest);
+        }
+
+        void rshift32(RegisterID shift_amount, RegisterID dest)
+        {
+            m_assembler.sra_r(dest, shift_amount, dest);
+        }
+
+        void rshift32(Imm32 imm, RegisterID dest)
+        {
+            // No need to range-check imm.m_value:
+            // only the low 5 bits of imm.m_value are used by the shift anyway.
+            m_assembler.sra_imm(dest, imm.m_value, dest);
+        }
+    
+        void urshift32(RegisterID shift_amount, RegisterID dest)
+        {
+            m_assembler.srl_r(dest, shift_amount, dest);
+        }
+    
+        void urshift32(Imm32 imm, RegisterID dest)
+        {
+            // No need to range-check imm.m_value:
+            // only the low 5 bits of imm.m_value are used by the shift anyway.
+            m_assembler.srl_imm(dest, imm.m_value, dest);
+        }
+
+        void sub32(RegisterID src, RegisterID dest)
+        {
+            m_assembler.subcc_r(dest, src, dest);
+        }
+
+        void sub32(Imm32 imm, RegisterID dest)
+        {
+            if (m_assembler.isimm13(imm.m_value))
+                m_assembler.subcc_imm(dest, imm.m_value, dest);
+            else {
+                m_assembler.move_nocheck(imm.m_value, SparcRegisters::g3);
+                m_assembler.subcc_r(dest, SparcRegisters::g3, dest);
+            }
+        }
+
+        void sub32(Imm32 imm, Address address)
+        {
+            load32(address, SparcRegisters::g2);
+            sub32(imm, SparcRegisters::g2);
+            store32(SparcRegisters::g2, address);
+        }
+
+        void sub32(Address src, RegisterID dest)
+        {
+            load32(src, SparcRegisters::g2);
+            sub32(SparcRegisters::g2, dest);
+        }
+
+        void xor32(RegisterID src, RegisterID dest)
+        {
+            m_assembler.xorcc_r(src, dest, dest);
+        }
+
+        void xor32(Imm32 imm, RegisterID dest)
+        {
+            if (m_assembler.isimm13(imm.m_value))
+                m_assembler.xorcc_imm(dest, imm.m_value, dest);
+            else {
+                m_assembler.move_nocheck(imm.m_value, SparcRegisters::g3);
+                m_assembler.xorcc_r(dest, SparcRegisters::g3, dest);
+            }
+        }
+
+        void xor32(Address src, RegisterID dest)
+        {
+            load32(src, SparcRegisters::g2);
+            xor32(SparcRegisters::g2, dest);
+        }
+
+        void load8(ImplicitAddress address, RegisterID dest)
+        {
+            if (m_assembler.isimm13(address.offset))
+                m_assembler.ldub_imm(address.base, address.offset, dest);
+            else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);
+                m_assembler.ldub_r(address.base, SparcRegisters::g3, dest);
+            }
+        }
+
+        void load32(ImplicitAddress address, RegisterID dest)
+        {
+            if (m_assembler.isimm13(address.offset))
+                m_assembler.lduw_imm(address.base, address.offset, dest);
+            else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);
+                m_assembler.lduw_r(address.base, SparcRegisters::g3, dest);
+            }
+        }
+
+        void load32(BaseIndex address, RegisterID dest)
+        {
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);
+            add32(Imm32(address.offset), SparcRegisters::g2);
+            m_assembler.lduw_r(address.base, SparcRegisters::g2, dest);
+        }
+
+        void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
+        {
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);
+            add32(Imm32(address.offset+3), SparcRegisters::g2);
+            m_assembler.ldub_r(address.base, SparcRegisters::g2, dest);
+            m_assembler.subcc_imm(SparcRegisters::g2, 1, SparcRegisters::g2);
+            m_assembler.ldub_r(address.base, SparcRegisters::g2, SparcRegisters::g3);
+            m_assembler.sll_imm(SparcRegisters::g3, 8, SparcRegisters::g3);
+            m_assembler.or_r(SparcRegisters::g3, dest, dest);
+            m_assembler.subcc_imm(SparcRegisters::g2, 1, SparcRegisters::g2);
+            m_assembler.ldub_r(address.base, SparcRegisters::g2, SparcRegisters::g3);
+            m_assembler.sll_imm(SparcRegisters::g3, 16, SparcRegisters::g3);
+            m_assembler.or_r(SparcRegisters::g3, dest, dest);
+            m_assembler.subcc_imm(SparcRegisters::g2, 1, SparcRegisters::g2);
+            m_assembler.ldub_r(address.base, SparcRegisters::g2, SparcRegisters::g3);
+            m_assembler.sll_imm(SparcRegisters::g3, 24, SparcRegisters::g3);
+            m_assembler.or_r(SparcRegisters::g3, dest, dest);
+        }
+
+        DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
+        {
+            DataLabel32 dataLabel(this);
+            m_assembler.move_nocheck(0, SparcRegisters::g3);
+            m_assembler.lduw_r(address.base, SparcRegisters::g3, dest);
+            return dataLabel;
+        }
+
+        DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID hi, RegisterID lo)
+        {
+            DataLabel32 dataLabel(this);
+            m_assembler.move_nocheck(0, SparcRegisters::g3);
+            m_assembler.add_imm(SparcRegisters::g3, 4, SparcRegisters::g2);
+            m_assembler.lduw_r(address.base, SparcRegisters::g3, hi);
+            m_assembler.lduw_r(address.base, SparcRegisters::g2, lo);
+            return dataLabel;
+        }
+
+        Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+        {
+            Label label(this);
+            load32(address, dest);
+            return label;
+        }
+
+        void load16(BaseIndex address, RegisterID dest)
+        {
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);
+            add32(Imm32(address.offset), SparcRegisters::g2);
+            m_assembler.lduh_r(address.base, SparcRegisters::g2, dest);
+        }
+    
+        void load16(ImplicitAddress address, RegisterID dest)
+        {
+            if (m_assembler.isimm13(address.offset))
+                m_assembler.lduh_imm(address.base, address.offset, dest);
+            else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);
+                m_assembler.lduh_r(address.base, SparcRegisters::g3, dest);
+            }
+        }
+
+        void store8(RegisterID src, ImplicitAddress address)
+        {
+            if (m_assembler.isimm13(address.offset))
+                m_assembler.stb_imm(src, address.base, address.offset);
+            else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);
+                m_assembler.stb_r(src, address.base, SparcRegisters::g3);
+            }
+        }
+
+        void store8(RegisterID src, BaseIndex address)
+        {
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);
+            add32(Imm32(address.offset), SparcRegisters::g2);
+            m_assembler.stb_r(src, address.base, SparcRegisters::g2);
+        }
+
+        void store8(Imm32 imm, ImplicitAddress address)
+        {
+            m_assembler.move_nocheck(imm.m_value, SparcRegisters::g2);
+            store8(SparcRegisters::g2, address);
+        }
+
+        void store8(Imm32 imm, BaseIndex address)
+        {
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);
+            add32(Imm32(address.offset), SparcRegisters::g2);
+            move(imm, SparcRegisters::g3);
+            m_assembler.stb_r(SparcRegisters::g3, SparcRegisters::g2, address.base);
+        }
+
+        void store16(RegisterID src, ImplicitAddress address)
+        {
+            if (m_assembler.isimm13(address.offset))
+                m_assembler.sth_imm(src, address.base, address.offset);
+            else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);
+                m_assembler.sth_r(src, address.base, SparcRegisters::g3);
+            }
+        }
+
+        void store16(RegisterID src, BaseIndex address)
+        {
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);
+            add32(Imm32(address.offset), SparcRegisters::g2);
+            m_assembler.sth_r(src, address.base, SparcRegisters::g2);
+        }
+
+        void store16(Imm32 imm, ImplicitAddress address)
+        {
+            m_assembler.move_nocheck(imm.m_value, SparcRegisters::g2);
+            store16(SparcRegisters::g2, address);
+        }
+
+        void store16(Imm32 imm, BaseIndex address)
+        {
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);
+            add32(Imm32(address.offset), SparcRegisters::g2);
+            move(imm, SparcRegisters::g3);
+            m_assembler.sth_r(SparcRegisters::g3, SparcRegisters::g2, address.base);
+        }
+
+        void load8ZeroExtend(BaseIndex address, RegisterID dest)
+        {
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);
+            add32(Imm32(address.offset), SparcRegisters::g2);
+            m_assembler.ldub_r(address.base, SparcRegisters::g2, dest);
+        }
+
+        void load8ZeroExtend(Address address, RegisterID dest)
+        {
+            if (m_assembler.isimm13(address.offset))
+                m_assembler.ldub_imm(address.base, address.offset, dest);
+            else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);
+                m_assembler.ldub_r(address.base, SparcRegisters::g3, dest);
+            }
+        }
+
+        void load8SignExtend(BaseIndex address, RegisterID dest)
+        {
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);
+            add32(Imm32(address.offset), SparcRegisters::g2);
+            m_assembler.ldsb_r(address.base, SparcRegisters::g2, dest);
+        }
+
+        void load8SignExtend(Address address, RegisterID dest)
+        {
+            if (m_assembler.isimm13(address.offset))
+                m_assembler.ldsb_imm(address.base, address.offset, dest);
+            else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);
+                m_assembler.ldsb_r(address.base, SparcRegisters::g3, dest);
+            }
+        }
+
+        void load16SignExtend(BaseIndex address, RegisterID dest)
+        {
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);
+            add32(Imm32(address.offset), SparcRegisters::g2);
+            m_assembler.ldsh_r(address.base, SparcRegisters::g2, dest);
+        }
+
+        void load16SignExtend(Address address, RegisterID dest)
+        {
+            if (m_assembler.isimm13(address.offset))
+                m_assembler.ldsh_imm(address.base, address.offset, dest);
+            else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);
+                m_assembler.ldsh_r(address.base, SparcRegisters::g3, dest);
+            }
+        }
+
+        DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
+        {
+            DataLabel32 dataLabel(this);
+            // Since this is for patching, we don't check whether offset fits in imm13.
+            m_assembler.move_nocheck(0, SparcRegisters::g3);
+            m_assembler.stw_r(src, address.base, SparcRegisters::g3);
+            return dataLabel;
+        }
+
+
+        DataLabel32 store64WithAddressOffsetPatch(RegisterID hi, RegisterID lo, Address address)
+        {
+            DataLabel32 dataLabel(this);
+            m_assembler.move_nocheck(address.offset, SparcRegisters::g3);
+            m_assembler.add_r(SparcRegisters::g3, address.base, SparcRegisters::g3);
+            m_assembler.stw_imm(lo, SparcRegisters::g3, 4);
+            m_assembler.stw_imm(hi, SparcRegisters::g3, 0);
+            return dataLabel;
+        }
+
+        DataLabel32 store64WithAddressOffsetPatch(Imm32 hi, RegisterID lo, Address address)
+        {
+            DataLabel32 dataLabel(this);
+            m_assembler.move_nocheck(address.offset, SparcRegisters::g3);
+            m_assembler.add_r(SparcRegisters::g3, address.base, SparcRegisters::g3);
+            m_assembler.stw_imm(lo, SparcRegisters::g3, 4);
+            move(hi, SparcRegisters::g2);
+            m_assembler.stw_imm(SparcRegisters::g2, SparcRegisters::g3, 0);
+
+            return dataLabel;
+        }
+
+        DataLabel32 store64WithAddressOffsetPatch(Imm32 hi, Imm32 lo, Address address)
+        {
+            DataLabel32 dataLabel(this);
+            m_assembler.move_nocheck(address.offset, SparcRegisters::g3);
+            m_assembler.add_r(SparcRegisters::g3, address.base, SparcRegisters::g3);
+            move(lo, SparcRegisters::g2);
+            m_assembler.stw_imm(SparcRegisters::g2, SparcRegisters::g3, 4);
+            move(hi, SparcRegisters::g2);
+            m_assembler.stw_imm(SparcRegisters::g2, SparcRegisters::g3, 0);
+
+            return dataLabel;
+        }
+
+
+        void store32(RegisterID src, ImplicitAddress address)
+        {
+            if (m_assembler.isimm13(address.offset))
+                m_assembler.stw_imm(src, address.base, address.offset);
+            else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);
+                m_assembler.stw_r(src, address.base, SparcRegisters::g3);
+            }
+        }
+
+        void store32(RegisterID src, BaseIndex address)
+        {
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);
+            add32(Imm32(address.offset), SparcRegisters::g2);
+            m_assembler.stw_r(src, address.base, SparcRegisters::g2);
+        }
+
+        void store32(Imm32 imm, BaseIndex address)
+        {
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);
+            add32(Imm32(address.offset), SparcRegisters::g2);
+            move(imm, SparcRegisters::g3);
+            m_assembler.stw_r(SparcRegisters::g3, SparcRegisters::g2, address.base);
+        }
+
+        void store32(Imm32 imm, ImplicitAddress address)
+        {
+            m_assembler.move_nocheck(imm.m_value, SparcRegisters::g2);
+            store32(SparcRegisters::g2, address);
+        }
+
+        void store32(RegisterID src, void* address)
+        {
+            m_assembler.move_nocheck((int)address, SparcRegisters::g3);
+            m_assembler.stw_r(src, SparcRegisters::g0, SparcRegisters::g3);
+        }
+
+        void store32(Imm32 imm, void* address)
+        {
+            move(imm, SparcRegisters::g2);
+            store32(SparcRegisters::g2, address);
+        }
+
+        void pop(RegisterID dest)
+        {
+            m_assembler.lduw_imm(SparcRegisters::sp, 0x68, dest);
+            m_assembler.addcc_imm(SparcRegisters::sp, 4, SparcRegisters::sp);
+        }
+
+        void push(RegisterID src)
+        {
+            m_assembler.subcc_imm(SparcRegisters::sp, 4, SparcRegisters::sp);
+            m_assembler.stw_imm(src, SparcRegisters::sp, 0x68);
+        }
+
+        void push(Address address)
+        {
+            load32(address, SparcRegisters::g2);
+            push(SparcRegisters::g2);
+        }
+
+        void push(Imm32 imm)
+        {
+            move(imm, SparcRegisters::g2);
+            push(SparcRegisters::g2);
+        }
+
+        void move(Imm32 imm, RegisterID dest)
+        {
+            if (m_assembler.isimm13(imm.m_value))
+                m_assembler.or_imm(SparcRegisters::g0, imm.m_value, dest);
+            else
+                m_assembler.move_nocheck(imm.m_value, dest);
+        }
+
+        void move(RegisterID src, RegisterID dest)
+        {
+            m_assembler.or_r(src, SparcRegisters::g0, dest);
+        }
+
+        void move(ImmPtr imm, RegisterID dest)
+        {
+            move(Imm32(imm), dest);
+        }
+
+        void swap(RegisterID reg1, RegisterID reg2)
+        {
+            m_assembler.or_r(reg1, SparcRegisters::g0, SparcRegisters::g3);
+            m_assembler.or_r(reg2, SparcRegisters::g0, reg1);
+            m_assembler.or_r(SparcRegisters::g3, SparcRegisters::g0, reg2);
+        }
+
+        void signExtend32ToPtr(RegisterID src, RegisterID dest)
+        {
+            if (src != dest)
+                move(src, dest);
+        }
+
+        void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
+        {
+            if (src != dest)
+                move(src, dest);
+        }
+
+        Jump branch8(Condition cond, Address left, Imm32 right)
+        {
+            load8(left, SparcRegisters::g2);
+            return branch32(cond, SparcRegisters::g2, right);
+        }
+
+        Jump branch32_force32(Condition cond, RegisterID left, Imm32 right)
+        {
+            m_assembler.move_nocheck(right.m_value, SparcRegisters::g3);
+            m_assembler.subcc_r(left, SparcRegisters::g3, SparcRegisters::g0);
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branch32FixedLength(Condition cond, RegisterID left, Imm32 right)
+        {
+            m_assembler.move_nocheck(right.m_value, SparcRegisters::g2);
+            return branch32(cond, left, SparcRegisters::g2);
+        }
+
+        Jump branch32WithPatch(Condition cond, RegisterID left, Imm32 right, DataLabel32 &dataLabel)
+        {
+            // Always use move_nocheck, since the value is to be patched.
+            dataLabel = DataLabel32(this);
+            m_assembler.move_nocheck(right.m_value, SparcRegisters::g3);
+            m_assembler.subcc_r(left, SparcRegisters::g3, SparcRegisters::g0);
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branch32(Condition cond, RegisterID left, RegisterID right)
+        {
+            m_assembler.subcc_r(left, right, SparcRegisters::g0);
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branch32(Condition cond, RegisterID left, Imm32 right)
+        {
+            if (m_assembler.isimm13(right.m_value))
+                m_assembler.subcc_imm(left, right.m_value, SparcRegisters::g0);
+            else {
+                m_assembler.move_nocheck(right.m_value, SparcRegisters::g3);
+                m_assembler.subcc_r(left, SparcRegisters::g3, SparcRegisters::g0);
+            }
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branch32(Condition cond, RegisterID left, Address right)
+        {
+            load32(right, SparcRegisters::g2);
+            return branch32(cond, left, SparcRegisters::g2);
+        }
+
+        Jump branch32(Condition cond, Address left, RegisterID right)
+        {
+            load32(left, SparcRegisters::g2);
+            return branch32(cond, SparcRegisters::g2, right);
+        }
+
+        Jump branch32(Condition cond, Address left, Imm32 right)
+        {
+            load32(left, SparcRegisters::g2);
+            return branch32(cond, SparcRegisters::g2, right);
+        }
+
+        Jump branch32(Condition cond, BaseIndex left, Imm32 right)
+        {
+
+            load32(left, SparcRegisters::g2);
+            return branch32(cond, SparcRegisters::g2, right);
+        }
+
+        Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
+        {
+            load32WithUnalignedHalfWords(left, SparcRegisters::g4);
+            return branch32(cond, SparcRegisters::g4, right);
+        }
+
+        Jump branch16(Condition cond, BaseIndex left, RegisterID right)
+        {
+            (void)(cond);
+            (void)(left);
+            (void)(right);
+            ASSERT_NOT_REACHED();
+            return jump();
+        }
+
+        Jump branch16(Condition cond, BaseIndex left, Imm32 right)
+        {
+            load16(left, SparcRegisters::g3);
+            move(right, SparcRegisters::g2);
+            m_assembler.subcc_r(SparcRegisters::g3, SparcRegisters::g2, SparcRegisters::g0);
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
+        {
+            load8(address, SparcRegisters::g2);
+            return branchTest32(cond, SparcRegisters::g2, mask);
+        }
+
+        Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
+        {
+            m_assembler.andcc_r(reg, mask, SparcRegisters::g0);
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+        {
+            if (m_assembler.isimm13(mask.m_value))
+                m_assembler.andcc_imm(reg, mask.m_value, SparcRegisters::g0);
+            else {
+                m_assembler.move_nocheck(mask.m_value, SparcRegisters::g3);
+                m_assembler.andcc_r(reg, SparcRegisters::g3, SparcRegisters::g0);
+            }
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
+        {
+            load32(address, SparcRegisters::g2);
+            return branchTest32(cond, SparcRegisters::g2, mask);
+        }
+
+        Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+        {
+            // FIXME. branchTest32 only used by PolyIC.
+            // PolyIC is not enabled for sparc now.
+            ASSERT(0);
+            return jump();
+        }
+
+        Jump jump()
+        {
+            return Jump(m_assembler.jmp());
+        }
+
+        void jump(RegisterID target)
+        {
+            m_assembler.jmpl_r(SparcRegisters::g0, target, SparcRegisters::g0);
+            m_assembler.nop();
+        }
+
+        void jump(Address address)
+        {
+            load32(address, SparcRegisters::g2);
+            m_assembler.jmpl_r(SparcRegisters::g2, SparcRegisters::g0, SparcRegisters::g0);
+            m_assembler.nop();
+        }
+
+        void jump(BaseIndex address)
+        {
+            load32(address, SparcRegisters::g2);
+            m_assembler.jmpl_r(SparcRegisters::g2, SparcRegisters::g0, SparcRegisters::g0);
+            m_assembler.nop();
+        }
+
+        Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
+        {
+            ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+            m_assembler.addcc_r(src, dest, dest);
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
+        {
+            ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+            if (m_assembler.isimm13(imm.m_value))
+                m_assembler.addcc_imm(dest, imm.m_value, dest);
+            else {
+                m_assembler.move_nocheck(imm.m_value, SparcRegisters::g3);
+                m_assembler.addcc_r(dest, SparcRegisters::g3, dest);
+            }
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branchAdd32(Condition cond, Address src, RegisterID dest)
+        {
+            ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+            load32(src, SparcRegisters::g2);
+            return branchAdd32(cond, SparcRegisters::g2, dest);
+        }
+
+        void mull32(RegisterID src1, RegisterID src2, RegisterID dest)
+        {
+            m_assembler.smulcc_r(src1, src2, dest);   // signed 32x32 multiply; low word -> dest, high word -> Y register
+        }
+
+        Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
+        {
+            ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+            m_assembler.smulcc_r(src, dest, dest);
+            if (cond == Overflow) {
+                // smulcc does not set the overflow flag for the 64-bit product; detect
+                // overflow by checking that the high word (Y) equals the sign extension
+                // of the low word.
+                m_assembler.rdy(SparcRegisters::g2);   // g2 = high 32 bits of the product
+                m_assembler.sra_imm(dest, 31, SparcRegisters::g3);   // g3 = sign extension of the low word
+                m_assembler.subcc_r(SparcRegisters::g2, SparcRegisters::g3, SparcRegisters::g2);
+                cond = NotEqual;   // mismatch means the product overflowed 32 bits
+            }
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
+        {
+            ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+            if (m_assembler.isimm13(imm.m_value))
+                m_assembler.smulcc_imm(src, imm.m_value, dest);
+            else {
+                m_assembler.move_nocheck(imm.m_value, SparcRegisters::g3);   // constant too wide for simm13
+                m_assembler.smulcc_r(src, SparcRegisters::g3, dest);
+            }
+            if (cond == Overflow) {
+                m_assembler.rdy(SparcRegisters::g2);   // same Y-vs-sign-extension overflow check as above
+                m_assembler.sra_imm(dest, 31, SparcRegisters::g3);
+                m_assembler.subcc_r(SparcRegisters::g2, SparcRegisters::g3, SparcRegisters::g2);
+                cond = NotEqual;
+            }
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branchMul32(Condition cond, Address src, RegisterID dest)
+        {
+            ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+            load32(src, SparcRegisters::g2);
+            return branchMul32(cond, SparcRegisters::g2, dest);   // delegate to the register/register form
+        }
+
+        Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
+        {
+            ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+            m_assembler.subcc_r(dest, src, dest);   // subcc: dest -= src and set condition codes
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
+        {
+            ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+            sub32(imm, dest);   // relies on sub32 setting the condition codes — see sub32 above
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branchSub32(Condition cond, Address src, RegisterID dest)
+        {
+            ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+            load32(src, SparcRegisters::g2);
+            return branchSub32(cond, SparcRegisters::g2, dest);   // delegate to the register/register form
+        }
+
+        Jump branchSub32(Condition cond, Imm32 imm, Address dest)
+        {
+            ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+            sub32(imm, dest);   // read-modify-write on memory; condition codes come from the subtract
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branchNeg32(Condition cond, RegisterID srcDest)
+        {
+            ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+            neg32(srcDest);   // relies on neg32 setting the condition codes
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
+        {
+            ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
+            m_assembler.orcc_r(src, dest, dest);   // orcc: dest |= src and set condition codes (or never overflows)
+            return Jump(m_assembler.branch(SparcCondition(cond)));
+        }
+
+        void breakpoint()
+        {
+            m_assembler.ta_imm(8);   // trap-always #8: software breakpoint trap
+        }
+
+        Call nearCall()
+        {
+            return Call(m_assembler.call(), Call::LinkableNear);   // pc-relative call, patched later by the linker
+        }
+
+        Call call(RegisterID target)
+        {
+            m_assembler.jmpl_r(target, SparcRegisters::g0, SparcRegisters::o7);   // indirect call: link register o7 = return address
+            m_assembler.nop();   // fill the branch delay slot
+            JmpSrc jmpSrc;   // default (invalid) source — this call is not relinkable, hence Call::None
+            return Call(jmpSrc, Call::None);
+        }
+
+        void call(Address address)
+        {
+            if (m_assembler.isimm13(address.offset)) {
+                m_assembler.jmpl_imm(address.base, address.offset, SparcRegisters::o7);   // call through [base + offset]
+                m_assembler.nop();   // delay slot
+            } else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);   // offset too wide for simm13
+                m_assembler.jmpl_r(address.base, SparcRegisters::g3, SparcRegisters::o7);
+                m_assembler.nop();   // delay slot
+            }
+        }
+
+        void ret()
+        {
+            m_assembler.jmpl_imm(SparcRegisters::i7, 8, SparcRegisters::g0);   // return to caller: i7 + 8 skips the call and its delay slot
+            m_assembler.nop();   // delay slot
+        }
+
+        void ret_and_restore()
+        {
+            m_assembler.jmpl_imm(SparcRegisters::i7, 8, SparcRegisters::g0);
+            m_assembler.restore_r(SparcRegisters::g0, SparcRegisters::g0, SparcRegisters::g0);   // pop the register window in the delay slot
+        }
+
+        void save(Imm32 size)
+        {
+            // Push a new register window and adjust sp by 'size' (expected negative to grow the stack downward — confirm at callers).
+            if (m_assembler.isimm13(size.m_value)) {
+                m_assembler.save_imm(SparcRegisters::sp, size.m_value, SparcRegisters::sp);
+            } else {
+                m_assembler.move_nocheck(size.m_value, SparcRegisters::g3);   // size too wide for simm13
+                m_assembler.save_r(SparcRegisters::sp, SparcRegisters::g3, SparcRegisters::sp);
+            }
+        }
+
+        void set32(Condition cond, Address left, RegisterID right, RegisterID dest)
+        {
+            load32(left, SparcRegisters::g2);   // bring the memory operand into scratch g2
+            set32(cond, SparcRegisters::g2, right, dest);
+        }
+
+        void set32(Condition cond, RegisterID left, Address right, RegisterID dest)
+        {
+            load32(right, SparcRegisters::g2);
+            set32(cond, left, SparcRegisters::g2, dest);
+        }
+
+        void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+        {
+            // dest = (left <cond> right) ? 1 : 0
+            m_assembler.subcc_r(left, right, SparcRegisters::g0);   // compare: subtract into g0, only the codes matter
+            m_assembler.or_imm(SparcRegisters::g0, 0, dest);   // dest = 0
+            m_assembler.movcc_imm(1, dest, SparcCondition(cond));   // conditional move: dest = 1 when cond holds
+        }
+
+        void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+        {
+            if (m_assembler.isimm13(right.m_value))
+                m_assembler.subcc_imm(left, right.m_value, SparcRegisters::g0);   // compare against the immediate
+            else {
+                m_assembler.move_nocheck(right.m_value, SparcRegisters::g3);   // constant too wide for simm13
+                m_assembler.subcc_r(left, SparcRegisters::g3, SparcRegisters::g0);
+            }
+            m_assembler.or_imm(SparcRegisters::g0, 0, dest);   // dest = 0
+            m_assembler.movcc_imm(1, dest, SparcCondition(cond));   // dest = 1 when cond holds
+        }
+
+        void set32(Condition cond, Address left, Imm32 right, RegisterID dest)
+        {
+            load32(left, SparcRegisters::g2);
+            set32(cond, SparcRegisters::g2, right, dest);
+        }
+
+        void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
+        {
+            // Sparc does not have byte registers; the full-word compare is equivalent here.
+            set32(cond, left, right, dest);
+        }
+
+        void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
+        {
+            // Sparc doesn't have byte registers. NOTE(review): this loads a full word
+            // from 'left' rather than a single byte — confirm callers only pass
+            // word-sized locations here.
+            load32(left, SparcRegisters::g2);
+            set32(cond, SparcRegisters::g2, right, dest);
+        }
+
+        void set8(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+        {
+            // Sparc does not have byte registers; forward to the 32-bit form.
+            set32(cond, left, right, dest);
+        }
+
+        void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
+        {
+            // dest = ((*address & mask) <cond>) ? 1 : 0
+            load32(address, SparcRegisters::g2);
+            if (m_assembler.isimm13(mask.m_value))
+                m_assembler.andcc_imm(SparcRegisters::g2, mask.m_value, SparcRegisters::g0);   // test bits, result discarded
+            else {
+                m_assembler.move_nocheck(mask.m_value, SparcRegisters::g3);   // mask too wide for simm13
+                m_assembler.andcc_r(SparcRegisters::g3, SparcRegisters::g2, SparcRegisters::g0);
+            }
+            m_assembler.or_imm(SparcRegisters::g0, 0, dest);   // dest = 0
+            m_assembler.movcc_imm(1, dest, SparcCondition(cond));   // dest = 1 when cond holds
+        }
+
+        void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
+        {
+            // Sparc does not have byte registers; forward to the 32-bit form.
+            setTest32(cond, address, mask, dest);
+        }
+
+        void lea(Address address, RegisterID dest)
+        {
+            // dest = base + offset (effective address only, no memory access).
+            if (m_assembler.isimm13(address.offset))
+                m_assembler.add_imm(address.base, address.offset, dest);
+            else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);   // offset too wide for simm13
+                m_assembler.add_r(address.base, SparcRegisters::g3, dest);
+            }
+        }
+
+        void lea(BaseIndex address, RegisterID dest)
+        {
+            // lea only used by PolyIC.
+            // PolyIC is not enabled for sparc now.
+            ASSERT(0);
+        }
+
+        void add32(Imm32 imm, AbsoluteAddress address)
+        {
+            // Read-modify-write on an absolute memory word.
+            load32(address.m_ptr, SparcRegisters::g2);
+            add32(imm, SparcRegisters::g2);
+            store32(SparcRegisters::g2, address.m_ptr);
+        }
+
+        void sub32(Imm32 imm, AbsoluteAddress address)
+        {
+            // Read-modify-write on an absolute memory word.
+            load32(address.m_ptr, SparcRegisters::g2);
+            sub32(imm, SparcRegisters::g2);
+            store32(SparcRegisters::g2, address.m_ptr);
+        }
+
+        void load32(void* address, RegisterID dest)
+        {
+            m_assembler.move_nocheck((int)address, SparcRegisters::g3);   // materialize the absolute address (32-bit pointers)
+            m_assembler.lduw_r(SparcRegisters::g3, SparcRegisters::g0, dest);
+        }
+
+        Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
+        {
+            load32(left.m_ptr, SparcRegisters::g2);
+            return branch32(cond, SparcRegisters::g2, right);   // delegate to the register form
+        }
+
+        Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
+        {
+            load32(left.m_ptr, SparcRegisters::g2);
+            return branch32(cond, SparcRegisters::g2, right);
+        }
+
+        Call call()
+        {
+            // Linkable call. The return address is computed manually (rdpc + 32 =
+            // the instruction following this 8-instruction sequence), parked at
+            // fp-8 across the call, then used to jump past the sequence on return.
+            m_assembler.rdpc(SparcRegisters::g2);
+            m_assembler.add_imm(SparcRegisters::g2, 32, SparcRegisters::g2);
+            m_assembler.stw_imm(SparcRegisters::g2, SparcRegisters::fp, -8);
+            Call cl = Call(m_assembler.call(), Call::Linkable);
+            m_assembler.lduw_imm(SparcRegisters::fp, -8, SparcRegisters::g2);   // reload the saved continuation address
+            m_assembler.jmpl_imm(SparcRegisters::g2, 0, SparcRegisters::g0);
+            m_assembler.nop();   // delay slot
+            return cl;
+        }
+
+        Call tailRecursiveCall()
+        {
+            return Call::fromTailJump(jump());   // a tail call is an unlinked jump reinterpreted as a call
+        }
+
+        Call makeTailRecursiveCall(Jump oldJump)
+        {
+            return Call::fromTailJump(oldJump);
+        }
+
+        DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
+        {
+            // Emit a fixed-length constant load so the pointer can be patched later.
+            DataLabelPtr dataLabel(this);
+            Imm32 imm = Imm32(initialValue);
+            m_assembler.move_nocheck(imm.m_value, dest);   // move_nocheck always uses the full-width form
+            return dataLabel;
+        }
+
+        DataLabel32 moveWithPatch(Imm32 initialValue, RegisterID dest)
+        {
+            DataLabel32 dataLabel(this);
+            m_assembler.move_nocheck(initialValue.m_value, dest);   // fixed-length, patchable constant load
+            return dataLabel;
+        }
+
+        Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+        {
+            dataLabel = moveWithPatch(initialRightValue, SparcRegisters::g2);   // patchable right-hand constant in g2
+            Jump jump = branch32(cond, left, SparcRegisters::g2);
+            return jump;
+        }
+
+        Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+        {
+            load32(left, SparcRegisters::g2);
+            dataLabel = moveWithPatch(initialRightValue, SparcRegisters::g3);
+            Jump jump = branch32(cond, SparcRegisters::g3, SparcRegisters::g2);
+            return jump;
+        }
+
+        DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
+        {
+            DataLabelPtr dataLabel = moveWithPatch(initialValue, SparcRegisters::g2);   // patchable pointer constant
+            store32(SparcRegisters::g2, address);
+            return dataLabel;
+        }
+
+        DataLabelPtr storePtrWithPatch(ImplicitAddress address)
+        {
+            return storePtrWithPatch(ImmPtr(0), address);   // placeholder pointer, patched later
+        }
+
+        // Floating point operators.
+        bool supportsFloatingPoint() const
+        {
+            return true;
+        }
+
+        bool supportsFloatingPointTruncate() const
+        {
+            return true;
+        }
+
+        bool supportsFloatingPointSqrt() const
+        {
+            return true;
+        }
+
+        void moveDouble(FPRegisterID src, FPRegisterID dest)
+        {
+            m_assembler.fmovd_r(src, dest);
+        }
+
+        void loadFloat(BaseIndex address, FPRegisterID dest)
+        {
+            // Load a single-precision value and widen it to double in place.
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);   // g2 = index << scale
+            add32(Imm32(address.offset), SparcRegisters::g2);   // g2 += offset
+            m_assembler.ldf_r(address.base, SparcRegisters::g2, dest);
+            m_assembler.fstod_r(dest, dest);   // single -> double
+        }
+
+        void loadFloat(ImplicitAddress address, FPRegisterID dest)
+        {
+            if (m_assembler.isimm13(address.offset))
+                m_assembler.ldf_imm(address.base, address.offset, dest);
+            else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);   // offset too wide for simm13
+                m_assembler.ldf_r(address.base, SparcRegisters::g3, dest);
+            }
+            m_assembler.fstod_r(dest, dest);   // single -> double
+        }
+
+        void loadFloat(const void* address, FPRegisterID dest)
+        {
+            m_assembler.move_nocheck((int)address, SparcRegisters::g3);   // absolute address (32-bit pointers)
+            m_assembler.ldf_r(SparcRegisters::g3, SparcRegisters::g0, dest);
+            m_assembler.fstod_r(dest, dest);   // single -> double
+        }
+
+        void loadDouble(BaseIndex address, FPRegisterID dest)
+        {
+            // Load the double as two 32-bit halves into the even/odd register pair
+            // (dest, dest + 1); avoids any 8-byte alignment requirement.
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);
+            add32(Imm32(address.offset), SparcRegisters::g2);
+            m_assembler.ldf_r(address.base, SparcRegisters::g2, dest);
+            m_assembler.add_imm(SparcRegisters::g2, 4, SparcRegisters::g2);
+            m_assembler.ldf_r(address.base, SparcRegisters::g2, dest + 1);
+        }
+
+        void loadDouble(ImplicitAddress address, FPRegisterID dest)
+        {
+            // NOTE(review): always materializes the offset, even when a simm13
+            // fast path (as in loadFloat above) would do — correct but suboptimal.
+            m_assembler.move_nocheck(address.offset, SparcRegisters::g3);
+            m_assembler.ldf_r(address.base, SparcRegisters::g3, dest);
+            m_assembler.add_imm(SparcRegisters::g3, 4, SparcRegisters::g3);
+            m_assembler.ldf_r(address.base, SparcRegisters::g3, dest + 1);
+        }
+
+        DataLabelPtr loadDouble(const void* address, FPRegisterID dest)
+        {
+            DataLabelPtr dataLabel(this);   // label the patchable address constant below
+            m_assembler.move_nocheck((int)address, SparcRegisters::g3);
+            m_assembler.ldf_imm(SparcRegisters::g3, 0, dest);
+            m_assembler.ldf_imm(SparcRegisters::g3, 4, dest + 1);   // second half of the double
+            return dataLabel;
+        }
+
+        void storeFloat(FPRegisterID src, BaseIndex address)
+        {
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);   // g2 = index << scale
+            add32(Imm32(address.offset), SparcRegisters::g2);   // g2 += offset
+            m_assembler.stf_r(src, address.base, SparcRegisters::g2);
+        }
+
+        void storeFloat(FPRegisterID src, ImplicitAddress address)
+        {
+            if (m_assembler.isimm13(address.offset))
+                m_assembler.stf_imm(src, address.base, address.offset);
+            else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);   // offset too wide for simm13
+                m_assembler.stf_r(src, address.base, SparcRegisters::g3);
+            }
+        }
+
+        void storeFloat(ImmDouble imm, Address address)
+        {
+            // Narrow the immediate double to single precision on the host, then
+            // store the resulting bit pattern as a 32-bit word.
+            union {
+                float f;
+                uint32 u32;
+            } u;
+            u.f = imm.u.d;
+            store32(Imm32(u.u32), address);
+        }
+
+        void storeFloat(ImmDouble imm, BaseIndex address)
+        {
+            // Same host-side narrowing as the Address overload above.
+            union {
+                float f;
+                uint32 u32;
+            } u;
+            u.f = imm.u.d;
+            store32(Imm32(u.u32), address);
+        }
+
+        void storeDouble(FPRegisterID src, BaseIndex address)
+        {
+            // Store the double as two 32-bit halves from the register pair (src, src + 1).
+            m_assembler.sll_imm(address.index, address.scale, SparcRegisters::g2);
+            add32(Imm32(address.offset), SparcRegisters::g2);
+            m_assembler.stf_r(src, address.base, SparcRegisters::g2);
+            m_assembler.add_imm(SparcRegisters::g2, 4, SparcRegisters::g2);
+            m_assembler.stf_r(src + 1, address.base, SparcRegisters::g2);
+        }
+
+        void storeDouble(FPRegisterID src, ImplicitAddress address)
+        {
+            if (m_assembler.isimm13(address.offset) && m_assembler.isimm13(address.offset + 4)) {   // both halves must encode as simm13; checking offset+4 alone misses offsets in [-4100, -4097]
+                m_assembler.stf_imm(src, address.base, address.offset);   // store the double as two 32-bit halves from the pair (src, src + 1)
+                m_assembler.stf_imm(src + 1, address.base, address.offset + 4);
+            } else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);   // general path: materialize the offset in g3
+                m_assembler.stf_r(src, address.base, SparcRegisters::g3);
+                m_assembler.add_imm(SparcRegisters::g3, 4, SparcRegisters::g3);
+                m_assembler.stf_r(src + 1, address.base, SparcRegisters::g3);
+            }
+        }
+
+        void storeDouble(ImmDouble imm, Address address)
+        {
+            // Store the immediate as two words: msb at the lower address (big-endian layout).
+            store32(Imm32(imm.u.s.msb), address);
+            store32(Imm32(imm.u.s.lsb), Address(address.base, address.offset + 4));
+        }
+
+        void storeDouble(ImmDouble imm, BaseIndex address)
+        {
+            store32(Imm32(imm.u.s.msb), address);   // high word first, as in the Address overload
+            store32(Imm32(imm.u.s.lsb),
+                    BaseIndex(address.base, address.index, address.scale, address.offset + 4));
+        }
+
+        void addDouble(FPRegisterID src, FPRegisterID dest)
+        {
+            m_assembler.faddd_r(src, dest, dest);   // dest += src
+        }
+
+        void addDouble(Address src, FPRegisterID dest)
+        {
+            loadDouble(src, SparcRegisters::f30);   // f30/f31 pair serves as the FP scratch
+            m_assembler.faddd_r(SparcRegisters::f30, dest, dest);
+        }
+
+        void divDouble(FPRegisterID src, FPRegisterID dest)
+        {
+            m_assembler.fdivd_r(dest, src, dest);   // dest /= src (first operand is the dividend)
+        }
+
+        void divDouble(Address src, FPRegisterID dest)
+        {
+            loadDouble(src, SparcRegisters::f30);
+            m_assembler.fdivd_r(dest, SparcRegisters::f30, dest);   // dest /= *src
+        }
+
+        void subDouble(FPRegisterID src, FPRegisterID dest)
+        {
+            m_assembler.fsubd_r(dest, src, dest);   // dest -= src (first operand is the minuend)
+        }
+
+        void subDouble(Address src, FPRegisterID dest)
+        {
+            loadDouble(src, SparcRegisters::f30);
+            m_assembler.fsubd_r(dest, SparcRegisters::f30, dest);   // dest -= *src
+        }
+
+        void mulDouble(FPRegisterID src, FPRegisterID dest)
+        {
+            m_assembler.fmuld_r(src, dest, dest);   // dest *= src
+        }
+
+        void mulDouble(Address src, FPRegisterID dest)
+        {
+            loadDouble(src, SparcRegisters::f30);
+            m_assembler.fmuld_r(SparcRegisters::f30, dest, dest);
+        }
+
+        void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+        {
+            m_assembler.fsqrtd_r(src, dest);
+        }
+
+        void negDouble(FPRegisterID src, FPRegisterID dest)
+        {
+            m_assembler.fnegd_r(src, dest);
+        }
+
+        void convertUInt32ToDouble(RegisterID src, FPRegisterID dest)
+        {
+            // Exponent-bias trick: build the doubles (2^52 | src) and 2^52 via the
+            // stack scratch area at sp+0x60, then subtract to recover src exactly.
+            m_assembler.move_nocheck(0x43300000, SparcRegisters::g1);   // 0x43300000 = high word of 2^52
+            m_assembler.stw_imm(SparcRegisters::g1, SparcRegisters::sp, 0x60);
+            m_assembler.stw_imm(src, SparcRegisters::sp, 0x64);   // low word = the unsigned value
+            m_assembler.ldf_imm(SparcRegisters::sp, 0x60, SparcRegisters::f30);
+            m_assembler.ldf_imm(SparcRegisters::sp, 0x64, SparcRegisters::f31);   // f30/f31 = 2^52 | src
+            m_assembler.stw_imm(SparcRegisters::g0, SparcRegisters::sp, 0x64);   // zero the low word
+            m_assembler.ldf_imm(SparcRegisters::sp, 0x60, dest);
+            m_assembler.ldf_imm(SparcRegisters::sp, 0x64, dest + 1);   // dest pair = 2^52
+            m_assembler.fsubd_r(SparcRegisters::f30, dest, dest);   // dest = (2^52 | src) - 2^52 = src
+            m_assembler.fabss_r(dest, dest);   // clear the sign bit — presumably defensive; confirm necessity
+        }
+
+        void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
+        {
+            // Move the integer through the stack scratch slot into an FP register, then convert.
+            m_assembler.stw_imm(src, SparcRegisters::sp, 0x60);
+            m_assembler.ldf_imm(SparcRegisters::sp, 0x60, dest);
+            m_assembler.fitod_r(dest, dest);   // int -> double
+        }
+
+        void convertInt32ToDouble(Address address, FPRegisterID dest)
+        {
+            if (m_assembler.isimm13(address.offset))
+                m_assembler.ldf_imm(address.base, address.offset, dest);
+            else {
+                m_assembler.move_nocheck(address.offset, SparcRegisters::g3);   // offset too wide for simm13
+                m_assembler.ldf_r(address.base, SparcRegisters::g3, dest);
+            }
+            m_assembler.fitod_r(dest, dest);   // int -> double
+        }
+
+        void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+        {
+            m_assembler.move_nocheck((int)src.m_ptr, SparcRegisters::g3);   // absolute address (32-bit pointers)
+            m_assembler.ldf_r(SparcRegisters::g3, SparcRegisters::g0, dest);
+            m_assembler.fitod_r(dest, dest);
+        }
+
+        void fastLoadDouble(RegisterID lo, RegisterID hi, FPRegisterID fpReg)
+        {
+            // Assemble a double from two integer registers via the stack scratch
+            // slots: hi goes to the lower address (big-endian word order).
+            m_assembler.stw_imm(lo, SparcRegisters::sp, 0x64);
+            m_assembler.stw_imm(hi, SparcRegisters::sp, 0x60);
+            m_assembler.ldf_imm(SparcRegisters::sp, 0x60, fpReg);
+            m_assembler.ldf_imm(SparcRegisters::sp, 0x64, fpReg + 1);
+        }
+
+        void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
+        {
+            m_assembler.fdtos_r(src, dest);   // double -> single
+        }
+
+        void breakDoubleTo32(FPRegisterID srcDest, RegisterID typeReg, RegisterID dataReg) {
+            // We don't assume stack is aligned to 8.
+            // Always using stf, ldf instead of stdf, lddf.
+            m_assembler.stf_imm(srcDest, SparcRegisters::sp, 0x60);
+            m_assembler.stf_imm(srcDest + 1, SparcRegisters::sp, 0x64);
+            m_assembler.lduw_imm(SparcRegisters::sp, 0x60, typeReg);   // high word
+            m_assembler.lduw_imm(SparcRegisters::sp, 0x64, dataReg);   // low word
+        }
+
+        Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+        {
+            m_assembler.fcmpd_r(left, right);   // set the FP condition codes
+            return Jump(m_assembler.fbranch(SparcDoubleCondition(cond)));
+        }
+
+        // Truncates 'src' to an integer, and places the result in 'dest'.
+        // If the result is not representable as a 32 bit value, branch.
+        // May also branch for some values that are representable in 32 bits
+        // (specifically, in this case, INT_MIN).
+        Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+        {
+            m_assembler.fdtoi_r(src, SparcRegisters::f30);   // truncating double -> int conversion
+            m_assembler.stf_imm(SparcRegisters::f30, SparcRegisters::sp, 0x60);
+            m_assembler.lduw_imm(SparcRegisters::sp, 0x60, dest);   // move the result to an integer register via the stack
+
+            // Out-of-range conversions produce the saturated values 0x80000000 /
+            // 0x7fffffff; set g2 = 1 if dest matches either sentinel, and fail then.
+            m_assembler.or_r(SparcRegisters::g0, SparcRegisters::g0, SparcRegisters::g2);   // g2 = 0
+            m_assembler.move_nocheck(0x80000000, SparcRegisters::g3);
+            m_assembler.subcc_r(SparcRegisters::g3, dest, SparcRegisters::g0);
+            m_assembler.movcc_imm(1, SparcRegisters::g2, SparcCondition(Equal));
+            m_assembler.move_nocheck(0x7fffffff, SparcRegisters::g3);
+            m_assembler.subcc_r(SparcRegisters::g3, dest, SparcRegisters::g0);
+            m_assembler.movcc_imm(1, SparcRegisters::g2, SparcCondition(Equal));
+
+            return branch32(Equal, SparcRegisters::g2, Imm32(1));
+        }
+
+        // Convert 'src' to an integer, and places the result in 'dest'.
+        // If the result is not representable as a 32 bit value, branch.
+        // May also branch for some values that are representable in 32 bits
+        // (specifically, in this case, 0).
+        void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+        {
+            m_assembler.fdtoi_r(src, SparcRegisters::f30);
+            m_assembler.stf_imm(SparcRegisters::f30, SparcRegisters::sp, 0x60);
+            m_assembler.lduw_imm(SparcRegisters::sp, 0x60, dest);   // integer result via the stack scratch slot
+
+            // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
+            m_assembler.fitod_r(SparcRegisters::f30, SparcRegisters::f30);
+            failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, SparcRegisters::f30));
+
+            // If the result is zero, it might have been -0.0, and 0.0 equals to -0.0
+            failureCases.append(branchTest32(Zero, dest));
+        }
+
+        void zeroDouble(FPRegisterID srcDest)
+        {
+            m_assembler.fsubd_r(srcDest, srcDest, srcDest);   // x - x = +0.0 for any finite x; NOTE(review): yields NaN if srcDest held NaN/Inf — confirm callers
+        }
+
+    protected:
+        // Reinterpret a platform-independent Condition as the SPARC encoding.
+        SparcAssembler::Condition SparcCondition(Condition cond)
+        {
+            return static_cast<SparcAssembler::Condition>(cond);
+        }
+
+        // Reinterpret a platform-independent DoubleCondition as the SPARC encoding.
+        SparcAssembler::DoubleCondition SparcDoubleCondition(DoubleCondition cond)
+        {
+            return static_cast<SparcAssembler::DoubleCondition>(cond);
+        }
+
+    private:
+        friend class LinkBuffer;
+        friend class RepatchBuffer;
+
+        static void linkCall(void* code, Call call, FunctionPtr function)
+        {
+            SparcAssembler::linkCall(code, call.m_jmp, function.value());   // resolve a linkable call emitted by call()/nearCall()
+        }
+
+        static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+        {
+            SparcAssembler::relinkCall(call.dataLocation(), destination.executableAddress());   // retarget an already-linked call in place
+        }
+
+        static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+        {
+            SparcAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+        }
+
+    };
+
+}
+
+
+#endif // ENABLE(ASSEMBLER) && CPU(SPARC)
+
+#endif // MacroAssemblerSparc_h
new file mode 100644
--- /dev/null
+++ b/js/src/assembler/assembler/SparcAssembler.h
@@ -0,0 +1,1256 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=99:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
+ * May 28, 2008.
+ *
+ * The Initial Developer of the Original Code is
+ * Leon Sha <leon.sha@oracle.com>
+ * 
+ * Portions created by the Initial Developer are Copyright (C) 2010-2011
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef SparcAssembler_h
+#define SparcAssembler_h
+
+#include <wtf/Platform.h>
+
+// Some debug code uses s(n)printf for instruction logging.
+#include <stdio.h>
+
+#if ENABLE_ASSEMBLER && WTF_CPU_SPARC
+
+#include "AssemblerBufferWithConstantPool.h"
+#include <wtf/Assertions.h>
+
+#include "methodjit/Logging.h"
+#define IPFX  "        %s"
+#define ISPFX "        "
+#ifdef JS_METHODJIT_SPEW
+# define MAYBE_PAD (isOOLPath ? ">  " : "")
+# define PRETTY_PRINT_OFFSET(os) (((os)<0)?"-":""), (((os)<0)?-(os):(os))
+# define FIXME_INSN_PRINTING                                \
+    do {                                                    \
+        js::JaegerSpew(js::JSpew_Insns,                     \
+                       ISPFX "FIXME insn printing %s:%d\n", \
+                       __FILE__, __LINE__);                 \
+    } while (0)
+#else
+# define MAYBE_PAD ""
+# define FIXME_INSN_PRINTING ((void) 0)
+# define PRETTY_PRINT_OFFSET(os) "", 0
+#endif
+
+namespace JSC {
+
+    typedef uint32_t SparcWord;
+
+    namespace SparcRegisters {
+        // Integer register numbering: globals (g), outs (o), locals (l), ins (i).
+        typedef enum {
+            g0 = 0, // g0 is always 0
+            g1 = 1, // g1 is a scratch register for v8
+            g2 = 2, // used as a scratch register by MacroAssemblerSparc
+            g3 = 3, // used as a scratch register by MacroAssemblerSparc
+            g4 = 4,
+            g5 = 5, // Reserved for system
+            g6 = 6, // Reserved for system
+            g7 = 7, // Reserved for system
+
+            o0 = 8,
+            o1 = 9,
+            o2 = 10,
+            o3 = 11,
+            o4 = 12,
+            o5 = 13,
+            o6 = 14, // SP
+            o7 = 15, // link register written by call/jmpl
+
+            l0 = 16,
+            l1 = 17,
+            l2 = 18,
+            l3 = 19,
+            l4 = 20,
+            l5 = 21,
+            l6 = 22,
+            l7 = 23,
+
+            i0 = 24,
+            i1 = 25,
+            i2 = 26,
+            i3 = 27,
+            i4 = 28,
+            i5 = 29,
+            i6 = 30, // FP
+            i7 = 31, // caller's return address (i7 + 8 on return)
+
+            sp = o6,
+            fp = i6
+        } RegisterID;
+
+        // Single-precision FP registers; doubles occupy even/odd pairs (f0/f1, ...).
+        typedef enum {
+            f0 = 0,
+            f1 = 1,
+            f2 = 2,
+            f3 = 3,
+            f4 = 4,
+            f5 = 5,
+            f6 = 6,
+            f7 = 7,
+            f8 = 8,
+            f9 = 9,
+            f10 = 10,
+            f11 = 11,
+            f12 = 12,
+            f13 = 13,
+            f14 = 14,
+            f15 = 15,
+            f16 = 16,
+            f17 = 17,
+            f18 = 18,
+            f19 = 19,
+            f20 = 20,
+            f21 = 21,
+            f22 = 22,
+            f23 = 23,
+            f24 = 24,
+            f25 = 25,
+            f26 = 26,
+            f27 = 27,
+            f28 = 28,
+            f29 = 29,
+            f30 = 30, // f30/f31 pair used as FP scratch by MacroAssemblerSparc
+            f31 = 31
+        } FPRegisterID;
+
+    } // namespace SparcRegisters
+
+    class SparcAssembler {
+    public:
+        typedef SparcRegisters::RegisterID RegisterID;
+        typedef SparcRegisters::FPRegisterID FPRegisterID;
+        AssemblerBuffer m_buffer;   // holds the emitted instruction words
+        bool oom() const { return m_buffer.oom(); }   // true if the buffer failed to allocate
+
+#ifdef JS_METHODJIT_SPEW
+        bool isOOLPath;   // marks out-of-line code paths for spew indentation (see MAYBE_PAD)
+        SparcAssembler() : isOOLPath(false) { }
+#else
+        SparcAssembler() { }
+#endif
+
+        // Sparc conditional constants — values are the integer branch condition
+        // field encodings consumed by branch() below.
+        typedef enum {
+            ConditionE   = 0x1, // Zero
+            ConditionLE  = 0x2,
+            ConditionL   = 0x3,
+            ConditionLEU = 0x4,
+            ConditionCS  = 0x5, // carry set
+            ConditionNEG = 0x6,
+            ConditionVS  = 0x7, // overflow set
+            ConditionA   = 0x8, // branch_always
+            ConditionNE  = 0x9, // Non-zero
+            ConditionG   = 0xa,
+            ConditionGE  = 0xb,
+            ConditionGU  = 0xc,
+            ConditionCC  = 0xd, // carry clear
+            ConditionVC  = 0xf  // overflow clear
+        } Condition;
+
+
+        // Floating-point branch condition field encodings (U = unordered).
+        typedef enum {
+            DoubleConditionNE  = 0x1,
+            DoubleConditionUL  = 0x3,
+            DoubleConditionL   = 0x4,
+            DoubleConditionUG  = 0x5,
+            DoubleConditionG   = 0x6,
+            DoubleConditionE   = 0x9,
+            DoubleConditionUE  = 0xa,
+            DoubleConditionGE  = 0xb,
+            DoubleConditionUGE = 0xc,
+            DoubleConditionLE  = 0xd,
+            DoubleConditionULE = 0xe
+        } DoubleCondition;
+
+        // Distinguishes integer branches from FP branches when relinking.
+        typedef enum {
+            BranchOnCondition,
+            BranchOnDoubleCondition
+        } BranchType;
+
+        // Handle for the source of a jump or call: records the byte offset
+        // into m_buffer where the jump sequence was emitted (-1 = unset).
+        class JmpSrc {
+            friend class SparcAssembler;
+        public:
+            JmpSrc()
+                : m_offset(-1)
+            {
+            }
+
+        private:
+            // Only the assembler itself constructs resolved sources.
+            JmpSrc(int offset)
+                : m_offset(offset)
+            {
+            }
+
+            int m_offset;
+        };
+
+        // Handle for a jump destination (label): a byte offset into m_buffer
+        // plus a flag recording whether any jump has been linked to it.
+        class JmpDst {
+            friend class SparcAssembler;
+        public:
+            JmpDst()
+                : m_offset(-1)
+                , m_used(false)
+            {
+            }
+
+            bool isUsed() const { return m_used; }
+            void used() { m_used = true; }
+            bool isValid() const { return m_offset != -1; }
+        private:
+            JmpDst(int offset)
+                : m_offset(offset)
+                , m_used(false)
+            {
+                // The offset must survive the 31-bit bitfield truncation.
+                ASSERT(m_offset == offset);
+            }
+
+            // Packed: one flag bit plus a 31-bit signed offset.
+            int m_used : 1;
+            signed int m_offset : 31;
+        };
+
+        // Instruction formatting: each helper packs one 32-bit SPARC
+        // instruction word in a particular format and appends it to
+        // m_buffer.  Field positions follow the SPARC instruction formats
+        // (op in bits 31:30, rd in 29:25, op3 in 24:19, i bit in 13).
+
+        // Format 2 (op = 00): rd, op2, 22-bit immediate (sethi, branches).
+        void format_2_1(int rd, int op2, int imm22)
+        {
+            m_buffer.putInt(rd << 25 | op2 << 22 | (imm22 & 0x3FFFFF));
+        }
+
+        // Format 2 branch: annul bit and 4-bit condition packed where rd sits.
+        void format_2_2(int a, int cond, int op2, int disp22)
+        {
+            format_2_1((a & 0x1) << 4 | (cond & 0xF), op2, disp22);
+        }
+
+        // Format 2 predicted branch (BPcc): cc selectors, p bit, disp19.
+        void format_2_3(int a, int cond, int op2, int cc1, int cc0, int p, int disp19)
+        {
+            format_2_2(a, cond, op2, (cc1 & 0x1) << 21 | (cc0 & 0x1) << 20 | (p & 0x1) << 19 | (disp19 & 0x7FFFF));
+        }
+
+        // Format 2 register branch (BPr): rcond, split 16-bit displacement.
+        void format_2_4(int a, int rcond, int op2, int d16hi, int p, int rs1, int d16lo)
+        {
+            format_2_2(a, (rcond & 0x7), op2, (d16hi & 0x3) << 20 | (p & 0x1) << 19 | rs1 << 14 | (d16lo & 0x3FFF));
+        }
+
+        // Format 3 (op = 10/11): op, rd, op3, then 19 caller-packed low bits.
+        void format_3(int op1, int rd, int op3, int bits19)
+        {
+            m_buffer.putInt(op1 << 30 | rd << 25 | op3 << 19 | (bits19 & 0x7FFFF));
+        }
+
+        // Register-register form: rs1, an 8-bit asi/zero field, rs2.
+        void format_3_1(int op1, int rd, int op3, int rs1, int bit8, int rs2)
+        {
+            format_3(op1, rd, op3, rs1 << 14 | (bit8 & 0xFF) << 5 | rs2);
+        }
+
+        // Register-immediate form: i bit set, signed 13-bit immediate.
+        void format_3_1_imm(int op1, int rd, int op3, int rs1, int simm13)
+        {
+            format_3(op1, rd, op3, rs1 << 14 | 1 << 13 | (simm13 & 0x1FFF));
+        }
+
+        // MOVr-style: 2-bit rcond at bits 11:10, register operand.
+        void format_3_2(int op1, int rd, int op3, int rs1, int rcond, int rs2)
+        {
+            format_3(op1, rd, op3, rs1 << 14 | (rcond & 0x3) << 10 | rs2);
+        }
+
+        // MOVr-style with immediate (i bit set, simm10 operand).
+        void format_3_2_imm(int op1, int rd, int op3, int rs1, int rcond, int simm10)
+        {
+            format_3(op1, rd, op3, rs1 << 14 | 1 << 13 | (rcond & 0x3) << 10 | (simm10 & 0x1FFF));
+        }
+
+        // MEMBAR-style: cmask/mmask fields.
+        void format_3_3(int op1, int rd, int op3, int rs1, int cmask, int mmask)
+        {
+            format_3(op1, rd, op3, rs1 << 14 | 1 << 13 | (cmask & 0x7) << 5 | (mmask & 0xF));
+        }
+        // Pass-through: caller supplies the fully packed low 19 bits.
+        void format_3_4(int op1, int rd, int op3, int bits19)
+        {
+            format_3(op1, rd, op3, bits19);
+        }
+
+        // Shift, register count: x bit selects 64-bit shifts.
+        void format_3_5(int op1, int rd, int op3, int rs1, int x, int rs2)
+        {
+            format_3(op1, rd, op3, rs1 << 14 | (x & 0x1) << 12 | rs2);
+        }
+
+        // Shift, 5-bit immediate count (32-bit shift).
+        void format_3_6(int op1, int rd, int op3, int rs1, int shcnt32)
+        {
+            format_3(op1, rd, op3, rs1 << 14 | 1 << 13 | (shcnt32 & 0x1F));
+        }
+
+        // Shift, 6-bit immediate count (64-bit shift; x bit set).
+        void format_3_7(int op1, int rd, int op3, int rs1, int shcnt64)
+        {
+            format_3(op1, rd, op3, rs1 << 14 | 1 << 13 | 1 << 12 | (shcnt64 & 0x3F));
+        }
+
+        // FPop form: 9-bit opf field between rs1 and rs2.
+        void format_3_8(int op1, int rd, int op3, int rs1, int bits9, int rs2)
+        {
+            format_3(op1, rd, op3, rs1 << 14 | (bits9 & 0x1FF) << 5 | rs2);
+        }
+
+        // FP compare form: cc selectors occupy the rd field.
+        void format_3_9(int op1, int cc1, int cc0, int op3, int rs1, int bits9, int rs2)
+        {
+            format_3(op1, (cc1 & 0x1) << 1 | (cc0 & 0x1), op3, rs1 << 14 | (bits9 & 0x1FF) << 5 | rs2);
+        }
+
+        // Format 4 (op = 10): conditional moves / traps; cc bits at 12:11.
+        void format_4_1(int rd, int op3, int rs1, int cc1, int cc0, int rs2)
+        {
+            format_3(2, rd, op3, rs1 << 14 | (cc1 & 0x1) << 12 | (cc0 & 0x1) << 11 | rs2);
+        }
+
+        void format_4_1_imm(int rd, int op3, int rs1, int cc1, int cc0, int simm11)
+        {
+            format_3(2, rd, op3, rs1 << 14 | (cc1 & 0x1) << 12 | 1 << 13 |(cc0 & 0x1) << 11 | (simm11 & 0x7FF));
+        }
+
+        // MOVcc register form: cc2 selects icc/fcc group, cond at 17:14.
+        void format_4_2(int rd, int op3, int cc2, int cond, int cc1, int cc0, int rs2)
+        {
+            format_3(2, rd, op3, (cc2 & 0x1) << 18 | (cond & 0xF) << 14 | (cc1 & 0x1) << 12 | (cc0 & 0x1) << 11 | rs2);
+        }
+
+        // MOVcc immediate form (i bit set, simm11 operand).
+        void format_4_2_imm(int rd, int op3, int cc2, int cond, int cc1, int cc0, int simm11)
+        {
+            format_3(2, rd, op3, (cc2 & 0x1) << 18 | (cond & 0xF) << 14 | 1 << 13 | (cc1 & 0x1) << 12 | (cc0 & 0x1) << 11 | (simm11 & 0x7FF));
+        }
+
+        // Tcc immediate form: 7-bit software trap number.
+        void format_4_3(int rd, int op3, int rs1, int cc1, int cc0, int swap_trap)
+        {
+            format_3(2, rd, op3, rs1 << 14 | 1 << 13 | (cc1 & 0x1) << 12 | (cc0 & 0x1) << 11 | (swap_trap & 0x7F));
+        }
+
+        // FMOVr form: rcond plus low opf bits.
+        void format_4_4(int rd, int op3, int rs1, int rcond, int opf_low, int rs2)
+        {
+            format_3(2, rd, op3, rs1 << 14 | (rcond & 0x7) << 10 | (opf_low & 0x1F) << 5 | rs2);
+        }
+
+        // FMOVcc form: cond, opf_cc selector, low opf bits.
+        void format_4_5(int rd, int op3, int cond, int opf_cc, int opf_low, int rs2)
+        {
+            format_3(2, rd, op3, (cond & 0xF) << 14 | (opf_cc & 0x7) << 11 | (opf_low & 0x3F) << 5 | rs2);
+        }
+
+        // --- Integer ALU emitters (op = 10) ---
+        // Each *_r form takes two register operands and each *_imm form a
+        // register plus a signed 13-bit immediate; the "cc" variants also
+        // update the integer condition codes.  The hex constant passed to
+        // format_3_1* is the SPARC op3 opcode for the operation.  All
+        // emitters spew a disassembly line before encoding.
+
+        void addcc_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "addcc       %s, %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(2, rd, 0x10, rs1, 0, rs2);
+        }
+
+        void addcc_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "addcc       %s, %d, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(2, rd, 0x10, rs1, simm13);
+        }
+
+        void add_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "add         %s, %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(2, rd, 0, rs1, 0, rs2);
+        }
+
+        void add_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "add         %s, %d, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(2, rd, 0, rs1, simm13);
+        }
+
+        void andcc_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "andcc       %s, %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(2, rd, 0x11, rs1, 0, rs2);
+        }
+
+        void andcc_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "andcc       %s, %d, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(2, rd, 0x11, rs1, simm13);
+        }
+
+        void or_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "or          %s, %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(2, rd, 0x2, rs1, 0, rs2);
+        }
+
+        void or_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "or          %s, %d, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(2, rd, 0x2, rs1, simm13);
+        }
+
+        // sethi %hi(imm22) rd
+        // Note: callers pass the full 32-bit constant; the top 22 bits are
+        // extracted here by the >> 10.
+        void sethi(int imm22, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "sethi       %%hi(0x%x), %s\n", MAYBE_PAD,
+                           imm22, nameGpReg(rd));
+            format_2_1(rd, 0x4, (imm22 >> 10));
+        }
+
+        void sll_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "sll         %s, %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_5(2, rd, 0x25, rs1, 0, rs2);
+        }
+
+        void sll_imm(int rs1, int shcnt32, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "sll         %s, %d, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), shcnt32, nameGpReg(rd));
+            format_3_6(2, rd, 0x25, rs1, shcnt32);
+        }
+
+        void sra_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "sra         %s, %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_5(2, rd, 0x27, rs1, 0, rs2);
+        }
+
+        void sra_imm(int rs1, int shcnt32, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "sra         %s, %d, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), shcnt32, nameGpReg(rd));
+            format_3_6(2, rd, 0x27, rs1, shcnt32);
+        }
+
+        void srl_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "srl         %s, %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_5(2, rd, 0x26, rs1, 0, rs2);
+        }
+
+        void srl_imm(int rs1, int shcnt32, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "srl         %s, %d, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), shcnt32, nameGpReg(rd));
+            format_3_6(2, rd, 0x26, rs1, shcnt32);
+        }
+
+        void subcc_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "subcc       %s, %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(2, rd, 0x14, rs1, 0, rs2);
+        }
+
+        void subcc_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "subcc       %s, %d, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(2, rd, 0x14, rs1, simm13);
+        }
+
+        void orcc_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "orcc        %s, %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(2, rd, 0x12, rs1, 0, rs2);
+        }
+
+        void orcc_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "orcc        %s, %d, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(2, rd, 0x12, rs1, simm13);
+        }
+
+        void xorcc_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "xorcc       %s, %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(2, rd, 0x13, rs1, 0, rs2);
+        }
+
+        void xorcc_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "xorcc       %s, %d, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(2, rd, 0x13, rs1, simm13);
+        }
+
+        void xnorcc_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "xnorcc      %s, %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(2, rd, 0x17, rs1, 0, rs2);
+        }
+
+        void xnorcc_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "xnorcc      %s, %d, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(2, rd, 0x17, rs1, simm13);
+        }
+
+        // 32x32 -> 64 signed multiply; high 32 bits land in the Y register
+        // (read back with rdy), low 32 bits in rd.
+        void smulcc_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "smulcc      %s, %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(2, rd, 0x1b, rs1, 0, rs2);
+        }
+
+        void smulcc_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "smulcc      %s, %d, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(2, rd, 0x1b, rs1, simm13);
+        }
+
+        // --- Memory emitters (op = 11) ---
+        // Loads: [rs1 + rs2] or [rs1 + simm13] -> rd.  Stores: rd -> memory;
+        // note the store helpers take their operands as (rd, rs2, rs1) /
+        // (rd, rs1, simm13).  The hex constant is the SPARC load/store op3.
+
+        void ldsb_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "ldsb        [%s + %s], %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(3, rd, 0x9, rs1, 0, rs2);
+        }
+
+        void ldsb_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "ldsb        [%s + %d], %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(3, rd, 0x9, rs1, simm13);
+        }
+
+        void ldub_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "ldub        [%s + %s], %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(3, rd, 0x1, rs1, 0, rs2);
+        }
+
+        void ldub_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "ldub        [%s + %d], %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(3, rd, 0x1, rs1, simm13);
+        }
+
+        void lduw_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "lduw        [%s + %s], %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(3, rd, 0x0, rs1, 0, rs2);
+        }
+
+        // Load word from alternate space: the 0x82 in the asi field is the
+        // explicit ASI used for this access.
+        void lduwa_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "lduwa       [%s + %s], %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(3, rd, 0x10, rs1, 0x82, rs2);
+        }
+
+        void lduw_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "lduw        [%s + %d], %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(3, rd, 0x0, rs1, simm13);
+        }
+
+        void ldsh_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "ldsh        [%s + %s], %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(3, rd, 0xa, rs1, 0, rs2);
+        }
+
+        void ldsh_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "ldsh        [%s + %d], %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(3, rd, 0xa, rs1, simm13);
+        }
+
+        void lduh_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "lduh        [%s + %s], %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(3, rd, 0x2, rs1, 0, rs2);
+        }
+
+        void lduh_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "lduh        [%s + %d], %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(3, rd, 0x2, rs1, simm13);
+        }
+
+        void stb_r(int rd, int rs2, int rs1)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "stb         %s, [%s + %s]\n", MAYBE_PAD,
+                           nameGpReg(rd), nameGpReg(rs1), nameGpReg(rs2));
+            format_3_1(3, rd, 0x5, rs1, 0, rs2);
+        }
+
+        void stb_imm(int rd, int rs1, int simm13)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "stb         %s, [%s + %d]\n", MAYBE_PAD,
+                           nameGpReg(rd), nameGpReg(rs1), simm13);
+            format_3_1_imm(3, rd, 0x5, rs1, simm13);
+        }
+
+        void sth_r(int rd, int rs2, int rs1)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "sth         %s, [%s + %s]\n", MAYBE_PAD,
+                           nameGpReg(rd), nameGpReg(rs1), nameGpReg(rs2));
+            format_3_1(3, rd, 0x6, rs1, 0, rs2);
+        }
+
+        void sth_imm(int rd, int rs1, int simm13)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "sth         %s, [%s + %d]\n", MAYBE_PAD,
+                           nameGpReg(rd), nameGpReg(rs1), simm13);
+            format_3_1_imm(3, rd, 0x6, rs1, simm13);
+        }
+
+        void stw_r(int rd, int rs2, int rs1)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "stw         %s, [%s + %s]\n", MAYBE_PAD,
+                           nameGpReg(rd), nameGpReg(rs1), nameGpReg(rs2));
+            format_3_1(3, rd, 0x4, rs1, 0, rs2);
+        }
+
+        void stw_imm(int rd, int rs1, int simm13)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "stw         %s, [%s + %d]\n", MAYBE_PAD,
+                           nameGpReg(rd), nameGpReg(rs1), simm13);
+            format_3_1_imm(3, rd, 0x4, rs1, simm13);
+        }
+
+        // Single-precision FP load/store: rd names an %f register.
+        void ldf_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "ld          [%s + %s], %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameFpReg(rd));
+            format_3_1(3, rd, 0x20, rs1, 0, rs2);
+        }
+
+        void ldf_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "ld          [%s + %d], %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameFpReg(rd));
+            format_3_1_imm(3, rd, 0x20, rs1, simm13);
+        }
+
+        void stf_r(int rd, int rs2, int rs1)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "st          %s, [%s + %s]\n", MAYBE_PAD,
+                           nameFpReg(rd), nameGpReg(rs1), nameGpReg(rs2));
+            format_3_1(3, rd, 0x24, rs1, 0, rs2);
+        }
+
+        void stf_imm(int rd, int rs1, int simm13)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "st          %s, [%s + %d]\n", MAYBE_PAD,
+                           nameFpReg(rd), nameGpReg(rs1), simm13);
+            format_3_1_imm(3, rd, 0x24, rs1, simm13);
+        }
+
+        // Double-precision register move (FPop1, opf 0x2).
+        void fmovd_r(int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "fmovd       %s, %s\n", MAYBE_PAD,
+                           nameFpReg(rs2), nameFpReg(rd));
+            format_3_8(2, rd, 0x34, 0, 0x2, rs2);
+        }
+
+        // Double-precision compare (FPop2, opf 0x52): sets the FP condition
+        // codes read by fbranch_con.
+        void fcmpd_r(int rs1, int rs2)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "fcmpd       %s, %s\n", MAYBE_PAD,
+                           nameFpReg(rs1), nameFpReg(rs2));
+            format_3_9(2, 0, 0, 0x35, rs1, 0x52, rs2);
+        }
+
+        // nop is encoded as "sethi 0, %g0".
+        void nop()
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "nop\n", MAYBE_PAD);
+            format_2_1(0, 0x4, 0);
+        }
+
+        // Conditional branch on the integer condition codes (Bicc, op2 0x2).
+        // 'target' is a displacement in instructions, not bytes; it may be 0
+        // here and patched later by patchbranch/relinkJump.
+        void branch_con(Condition cond, int target)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "b%s         0x%x\n", MAYBE_PAD,
+                           nameICC(cond), target);
+            format_2_2(0, cond, 0x2, target);
+        }
+
+        // Conditional branch on the FP condition codes (FBfcc, op2 0x6).
+        void fbranch_con(DoubleCondition cond, int target)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "fb%s        0x%x\n", MAYBE_PAD,
+                           nameFCC(cond), target);
+            format_2_2(0, cond, 0x6, target);
+        }
+
+        // Read the Y register (multiply high bits) into integer register rd
+        // (RDASR, op3 0x28, rs1 = 0).  The destination is a general-purpose
+        // register, so spew it with nameGpReg (the old code wrongly used
+        // nameFpReg, printing an %f name for an integer register).
+        void rdy(int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "rdy         %s\n", MAYBE_PAD,
+                           nameGpReg(rd));
+            format_3_1(2, rd, 0x28, 0, 0, 0);
+        }
+
+        // Read the program counter into integer register rd
+        // (RDASR, op3 0x28, rs1 = 5 selects %pc).
+        void rdpc(int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "rdpc        %s\n", MAYBE_PAD,
+                           nameGpReg(rd));
+            format_3_1(2, rd, 0x28, 5, 0, 0);
+        }
+        // Jump-and-link to rs1 + rs2, writing the return address into rd
+        // (rd = %g0 discards it, giving a plain indirect jump).
+        void jmpl_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "jmpl        %s + %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(2, rd, 0x38, rs1, 0, rs2);
+        }
+
+        void jmpl_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "jmpl        %s + %d, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(2, rd, 0x38, rs1, simm13);
+        }
+
+        // SAVE: rotate to a new register window and set the new %sp.
+        void save_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "save        %s, %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(2, rd, 0x3c, rs1, 0, rs2);
+        }
+
+        void save_imm(int rs1, int simm13, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "save        %s, %d, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), simm13, nameGpReg(rd));
+            format_3_1_imm(2, rd, 0x3c, rs1, simm13);
+        }
+
+        // RESTORE: rotate back to the previous register window.
+        void restore_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "restore     %s, %s, %s\n", MAYBE_PAD,
+                           nameGpReg(rs1), nameGpReg(rs2), nameGpReg(rd));
+            format_3_1(2, rd, 0x3d, rs1, 0, rs2);
+        }
+
+        // Trap-always: Tcc with cond = 0x8 ("always") in the rd field and
+        // software trap number swap_trap.  The Tcc op3 is 0x3a; the previous
+        // value (0xa) encoded a different op3 entirely and would not trap.
+        void ta_imm(int swap_trap)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "ta          %d\n", MAYBE_PAD,
+                           swap_trap);
+            format_4_3(0x8, 0x3a, 0, 0, 0, swap_trap);
+        }
+
+        // Conditional move of an immediate (MOVcc, op3 0x2c) on the integer
+        // condition codes (cc2 = 1 selects icc): rd = simm11 if cond holds.
+        void movcc_imm(int simm11, int rd, Condition cond)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "mov%s       %d, %s\n", MAYBE_PAD,
+                           nameICC(cond), simm11, nameGpReg(rd));
+            format_4_2_imm(rd, 0x2c, 1, cond, 0, 0, simm11);
+        }
+
+        // --- FP arithmetic emitters (FPop1, op3 0x34) ---
+        // The hex constant passed as bits9 is the SPARC opf sub-opcode.
+
+        void fabss_r(int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "fabss       %s, %s\n", MAYBE_PAD,
+                           nameFpReg(rs2), nameFpReg(rd));
+            format_3_8(2, rd, 0x34, 0, 0x9, rs2);
+        }
+
+        void faddd_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "faddd       %s, %s, %s\n", MAYBE_PAD,
+                           nameFpReg(rs1), nameFpReg(rs2), nameFpReg(rd));
+            format_3_8(2, rd, 0x34, rs1, 0x42, rs2);
+        }
+
+        void fsubd_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "fsubd       %s, %s, %s\n", MAYBE_PAD,
+                           nameFpReg(rs1), nameFpReg(rs2), nameFpReg(rd));
+            format_3_8(2, rd, 0x34, rs1, 0x46, rs2);
+        }
+
+        void fmuld_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "fmuld       %s, %s, %s\n", MAYBE_PAD,
+                           nameFpReg(rs1), nameFpReg(rs2), nameFpReg(rd));
+            format_3_8(2, rd, 0x34, rs1, 0x4a, rs2);
+        }
+
+        void fdivd_r(int rs1, int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "fdivd       %s, %s, %s\n", MAYBE_PAD,
+                           nameFpReg(rs1), nameFpReg(rs2), nameFpReg(rd));
+            format_3_8(2, rd, 0x34, rs1, 0x4e, rs2);
+        }
+
+        // Double-precision square root (FPop1, opf 0x2a).  Fixes the
+        // misspelled spew mnemonic ("fsqartd" -> "fsqrtd").
+        void fsqrtd_r(int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "fsqrtd      %s, %s\n", MAYBE_PAD,
+                           nameFpReg(rs2), nameFpReg(rd));
+            format_3_8(2, rd, 0x34, 0, 0x2a, rs2);
+        }
+
+        // FP negate and int/float conversions (FPop1, op3 0x34; the hex
+        // constant is the opf sub-opcode).
+
+        void fnegd_r(int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "fnegd       %s, %s\n", MAYBE_PAD,
+                           nameFpReg(rs2), nameFpReg(rd));
+            format_3_8(2, rd, 0x34, 0, 0x06, rs2);
+        }
+
+        // Convert 32-bit integer (in an FP register) to double.
+        void fitod_r(int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "fitod       %s, %s\n", MAYBE_PAD,
+                           nameFpReg(rs2), nameFpReg(rd));
+            format_3_8(2, rd, 0x34, 0, 0xc8, rs2);
+        }
+
+        // Convert double to 32-bit integer (result in an FP register).
+        void fdtoi_r(int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "fdtoi       %s, %s\n", MAYBE_PAD,
+                           nameFpReg(rs2), nameFpReg(rd));
+            format_3_8(2, rd, 0x34, 0, 0xd2, rs2);
+        }
+
+        // Convert double to single precision.
+        void fdtos_r(int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "fdtos       %s, %s\n", MAYBE_PAD,
+                           nameFpReg(rs2), nameFpReg(rd));
+            format_3_8(2, rd, 0x34, 0, 0xc6, rs2);
+        }
+
+        // Convert single to double precision.
+        void fstod_r(int rs2, int rd)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "fstod       %s, %s\n", MAYBE_PAD,
+                           nameFpReg(rs2), nameFpReg(rd));
+            format_3_8(2, rd, 0x34, 0, 0xc9, rs2);
+        }
+
+        static bool isimm13(int imm)
+        {
+            // True when imm fits a signed 13-bit immediate: [-4096, 4095].
+            return -0x1000 <= imm && imm <= 0xfff;
+        }
+
+        static bool isimm22(int imm)
+        {
+            // True when imm fits a signed 22-bit field: [-0x200000, 0x1fffff].
+            return -0x200000 <= imm && imm <= 0x1fffff;
+        }
+
+        // Load a full 32-bit constant with a fixed-length sethi/or pair
+        // (always exactly two instructions, so patchPointerInternal can
+        // rewrite the constant in place later).
+        void move_nocheck(int imm_v, RegisterID dest)
+        {
+            sethi(imm_v, dest);
+            or_imm(dest, imm_v & 0x3FF, dest);
+        }
+
+        // Emit a call placeholder (bare CALL opcode 0x40000000 with a zero
+        // disp30, retargeted later by relinkCall) plus its delay-slot nop.
+        // The returned JmpSrc records the offset of the call word itself.
+        JmpSrc call()
+        {
+            JmpSrc r = JmpSrc(m_buffer.size());
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "call        %d\n", MAYBE_PAD,
+                           r.m_offset);
+            m_buffer.putInt(0x40000000);
+            nop();
+            return r;
+        }
+
+        // Emit a patchable jump: a conditional short branch followed by a
+        // far-jump stub, so relinkJump() can later retarget either form.
+        // Layout (9 words, 36 bytes):
+        //   b<cond>/fb<cond> 0 ; nop   -- patched to the target (near case)
+        //                                 or to the stub below (far case)
+        //   ba +7 ; nop                -- fall-through path skips the stub
+        //   sethi/or %g2               -- 32-bit offset (patched)
+        //   rdpc %g3 ; jmpl %g2+%g3, %g0 ; nop   -- pc-relative far jump
+        // The returned JmpSrc records the offset of the END of the sequence;
+        // relinkJump subtracts 36 to find its first word.
+        JmpSrc jump_common(BranchType branchtype, int cond)
+        {
+            if (branchtype == BranchOnCondition)
+                branch_con(Condition(cond), 0);
+            else
+                fbranch_con(DoubleCondition(cond), 0);
+
+            nop();
+            branch_con(ConditionA, 7);
+            nop();
+            move_nocheck(0, SparcRegisters::g2);
+            rdpc(SparcRegisters::g3);
+            jmpl_r(SparcRegisters::g2, SparcRegisters::g3, SparcRegisters::g0);
+            nop();
+            return JmpSrc(m_buffer.size());
+        }
+
+        // Conditional jump on the integer condition codes.
+        JmpSrc branch(Condition cond)
+        {
+            return jump_common(BranchOnCondition, cond);
+        }
+
+        // Conditional jump on the FP condition codes.
+        JmpSrc fbranch(DoubleCondition cond)
+        {
+            return jump_common(BranchOnDoubleCondition, cond);
+        }
+
+        // Unconditional jump ("branch always").
+        JmpSrc jmp()
+        {
+            return jump_common(BranchOnCondition, ConditionA);
+        }
+
+        // Assembler admin methods:
+
+        // Mark the current buffer position as a jump destination.
+        JmpDst label()
+        {
+            JmpDst r = JmpDst(m_buffer.size());
+            js::JaegerSpew(js::JSpew_Insns,
+                           IPFX "#label     ((%d))\n", MAYBE_PAD, r.m_offset);
+            return r;
+        }
+
+        // General helpers
+
+        size_t size() const { return m_buffer.size(); }
+        unsigned char *buffer() const { return m_buffer.buffer(); }
+
+        // Byte distance between two recorded offsets (dst - src).
+        static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
+        {
+            return dst.m_offset - src.m_offset;
+        }
+
+        static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
+        {
+            return dst.m_offset - src.m_offset;
+        }
+
+        static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
+        {
+            return dst.m_offset - src.m_offset;
+        }
+
+        // Offset of the instruction following a call sequence.  The +20 is
+        // larger than the 8 bytes call() itself emits — presumably it
+        // accounts for setup code emitted around calls elsewhere; confirm
+        // against the callers before changing.
+        static unsigned getCallReturnOffset(JmpSrc call)
+        {
+            return call.m_offset + 20;
+        }
+
+        // Translate a buffer-relative jump source offset into an absolute
+        // address within the copied-out code.
+        static void* getRelocatedAddress(void* code, JmpSrc jump)
+        {
+            ASSERT(jump.m_offset != -1);
+
+            return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
+        }
+
+        // Same, for a jump destination (label) offset.
+        static void* getRelocatedAddress(void* code, JmpDst destination)
+        {
+            ASSERT(destination.m_offset != -1);
+
+            return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
+        }
+
+        // Allocate executable memory and copy the assembled code into it.
+        void* executableAllocAndCopy(ExecutableAllocator* allocator, ExecutablePool **poolp)
+        {
+            return m_buffer.executableAllocAndCopy(allocator, poolp);
+        }
+
+        // Copy the assembled code into caller-provided memory.
+        void* executableCopy(void* buffer)
+        {
+            return memcpy(buffer, m_buffer.buffer(), size());
+        }
+
+        // Rewrite the 32-bit constant loaded by a sethi/or pair emitted by
+        // move_nocheck(): the sethi word keeps its top 10 opcode bits and
+        // receives the high 22 bits of 'value'; the or word keeps everything
+        // except its low 10 immediate bits.
+        static void patchPointerInternal(void* where, int value)
+        {
+            // Patch move_nocheck.
+            uint32_t *branch = (uint32_t*) where;
+            branch[0] &= 0xFFC00000;
+            branch[0] |= (value >> 10) & 0x3FFFFF;
+            branch[1] &= 0xFFFFFC00;
+            branch[1] |= value & 0x3FF;
+            ExecutableAllocator::cacheFlush(where, 8);
+        }
+
+        // Rewrite a branch word's 22-bit instruction displacement, keeping
+        // its annul/cond/op2 bits intact.
+        static void patchbranch(void* where, int value)
+        {
+            uint32_t *branch = (uint32_t*) where;
+            branch[0] &= 0xFFC00000;
+            branch[0] |= value & 0x3FFFFF;
+            ExecutableAllocator::cacheFlush(where, 4);
+        }
+
+        static bool canRelinkJump(void* from, void* to)
+        {
+            return true;
+        }
+
+        static void relinkJump(void* from, void* to)
+        {
+            from = (void *)((int)from - 36);
+            js::JaegerSpew(js::JSpew_Insns,
+                           ISPFX "##link     ((%p)) jumps to ((%p))\n",
+                           from, to);
+
+            int value = ((int)to - (int)from) / 4;
+            if (isimm22(value)) 
+                patchbranch(from, value);
+            else {
+                patchbranch(from, 4);
+                from = (void *)((intptr_t)from + 16);
+                patchPointerInternal(from, (int)(value * 4 - 24));
+            }
+        }
+
+        void linkJump(JmpSrc from, JmpDst to)
+        {
+            ASSERT(from.m_offset != -1);
+            ASSERT(to.m_offset != -1);
+            intptr_t code = (intptr_t)(m_buffer.data());
+            void *where = (void *)((intptr_t)code + from.m_offset);
+            void *target = (void *)((intptr_t)code + to.m_offset);
+            relinkJump(where, target);
+        }
+
+        static void linkJump(void* code, JmpSrc from, void* to)
+        {
+            ASSERT(from.m_offset != -1);
+            void *where = (void *)((intptr_t)code + from.m_offset);
+            relinkJump(where, to);
+        }
+
+        static void relinkCall(void* from, void* to)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           ISPFX "##linkCall ((from=%p)) ((to=%p))\n",
+                           from, to);
+
+            int disp = ((int)to - (int)from)/4;
+            *(uint32_t *)((int)from) &= 0x40000000;
+            *(uint32_t *)((int)from) |= disp & 0x3fffffff;
+            ExecutableAllocator::cacheFlush(from, 4);
+        }
+
+        static void linkCall(void* code, JmpSrc where, void* to)
+        {
+            void *from = (void *)((intptr_t)code + where.m_offset);
+            relinkCall(from, to);
+        }
+
+        static void linkPointer(void* code, JmpDst where, void* value)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           ISPFX "##linkPointer     ((%p + %#x)) points to ((%p))\n",
+                           code, where.m_offset, value);
+
+            void *from = (void *)((intptr_t)code + where.m_offset);
+            patchPointerInternal(from, (int)value);
+        }
+
+        static void repatchInt32(void* where, int value)
+        {
+            js::JaegerSpew(js::JSpew_Insns,
+                           ISPFX "##repatchInt32 ((where=%p)) holds ((value=%d))\n",
+                           where, value);
+
+            patchPointerInternal(where, value);
+        }
+
+        static void repatchPointer(void* where, void* value)
+        { 
+            js::JaegerSpew(js::JSpew_Insns,
+                           ISPFX "##repatchPointer ((where = %p)) points to ((%p))\n",
+                           where, value);
+
+            patchPointerInternal(where, (int)value);
+        }
+
+        static void repatchLoadPtrToLEA(void* where)
+        {
+            // sethi is used. The offset is in a register
+            if (*(uint32_t *)((int)where) & 0x01000000)
+                where = (void *)((intptr_t)where + 8);
+
+            *(uint32_t *)((int)where) &= 0x3fffffff;
+            *(uint32_t *)((int)where) |= 0x80000000;
+            ExecutableAllocator::cacheFlush(where, 4);
+        }
+
+        static void repatchLEAToLoadPtr(void* where)
+        {
+            // sethi is used. The offset is in a register
+            if (*(uint32_t *)((int)where) & 0x01000000)
+                where = (void *)((intptr_t)where + 8);
+
+            *(uint32_t *)((int)where) &= 0x3fffffff;
+            *(uint32_t *)((int)where) |= 0xc0000000;
+            ExecutableAllocator::cacheFlush(where, 4);
+        }
+
+    private:
+        static char const * nameGpReg(int reg)
+        {
+            ASSERT(reg <= 31);
+            ASSERT(reg >= 0);
+            static char const * names[] = {
+                "%g0", "%g1", "%g2", "%g3",
+                "%g4", "%g5", "%g6", "%g7",
+                "%o0", "%o1", "%o2", "%o3",
+                "%o4", "%o5", "%sp", "%o7",
+                "%l0", "%l1", "%l2", "%l3",
+                "%l4", "%l5", "%l6", "%l7",
+                "%i0", "%i1", "%i2", "%i3",
+                "%i4", "%i5", "%fp", "%i7"
+            };
+            return names[reg];
+        }
+
+        static char const * nameFpReg(int reg)
+        {
+            ASSERT(reg <= 31);
+            ASSERT(reg >= 0);
+            static char const * names[] = {
+                "%f0",   "%f1",   "%f2",   "%f3",
+                "%f4",   "%f5",   "%f6",   "%f7",
+                "%f8",   "%f9",  "%f10",  "%f11",
+                "%f12",  "%f13",  "%f14",  "%f15",
+                "%f16",  "%f17",  "%f18",  "%f19",
+                "%f20",  "%f21",  "%f22",  "%f23",
+                "%f24",  "%f25",  "%f26",  "%f27",
+                "%f28",  "%f29",  "%f30",  "%f31"
+            };
+            return names[reg];
+        }
+
+        static char const * nameICC(Condition cc)
+        {
+            ASSERT(cc <= ConditionVC);
+            ASSERT(cc >= 0);
+
+            uint32_t    ccIndex = cc;
+            static char const * inames[] = {
+                "   ", "e  ",
+                "le ", "l  ",
+                "leu", "cs ",
+                "neg", "vs ",
+                "a  ", "ne ",
+                "g  ", "ge ",
+                "gu ", "cc ",
+                "   ", "vc "
+            };
+            return inames[ccIndex];
+        }
+
+        static char const * nameFCC(DoubleCondition cc)
+        {
+            ASSERT(cc <= DoubleConditionULE);
+            ASSERT(cc >= 0);
+
+            uint32_t    ccIndex = cc;
+            static char const * fnames[] = {
+                "   ", "ne ",
+                "   ", "ul ",
+                "l  ", "ug ",
+                "g  ", "   ",
+                "   ", "e  ",
+                "ue ", "ge ",
+                "ugu", "le ",
+                "ule", "   "
+            };
+            return fnames[ccIndex];
+        }
+
+
+    };
+
+} // namespace JSC
+
+#endif // ENABLE(ASSEMBLER) && CPU(SPARC)
+
+#endif // SparcAssembler_h
--- a/js/src/assembler/jit/ExecutableAllocator.h
+++ b/js/src/assembler/jit/ExecutableAllocator.h
@@ -30,16 +30,32 @@
 #include <limits>
 #include "assembler/wtf/Assertions.h"
 
 #include "jsapi.h"
 #include "jsprvtd.h"
 #include "jsvector.h"
 #include "jslock.h"
 
+#if WTF_CPU_SPARC
+#ifdef linux  // bugzilla 502369
+static void sync_instruction_memory(caddr_t v, u_int len)
+{
+    caddr_t end = v + len;
+    caddr_t p = v;
+    while (p < end) {
+        asm("flush %0" : : "r" (p));
+        p += 32;
+    }
+}
+#else
+extern  "C" void sync_instruction_memory(caddr_t v, u_int len);
+#endif
+#endif
+
 #if WTF_PLATFORM_IPHONE
 #include <libkern/OSCacheControl.h>
 #include <sys/mman.h>
 #endif
 
 #if WTF_PLATFORM_SYMBIAN
 #include <e32std.h>
 #endif
@@ -389,16 +405,21 @@ public:
             : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
             : "r0", "r1", "r2");
     }
 #elif WTF_PLATFORM_WINCE
     static void cacheFlush(void* code, size_t size)
     {
         CacheRangeFlush(code, size, CACHE_SYNC_ALL);
     }
+#elif WTF_CPU_SPARC
+    static void cacheFlush(void* code, size_t size)
+    {
+        sync_instruction_memory((caddr_t)code, size);
+    }
 #else
     #error "The cacheFlush support is missing on this platform."
 #endif
 
 private:
 
 #if ENABLE_ASSEMBLER_WX_EXCLUSIVE
     static void reprotectRegion(void*, size_t, ProtectionSeting);
--- a/js/src/assembler/wtf/Platform.h
+++ b/js/src/assembler/wtf/Platform.h
@@ -154,17 +154,17 @@
 /* CPU(SPARC64) - SPARC 64-bit */
 #if defined(__sparc__) && defined(__arch64__) || defined (__sparcv9)
 #define WTF_CPU_SPARC64 1
 #define WTF_CPU_BIG_ENDIAN 1
 #endif
 
 /* CPU(SPARC) - any SPARC, true for CPU(SPARC32) and CPU(SPARC64) */
 #if WTF_CPU_SPARC32 || WTF_CPU_SPARC64
-#define WTF_CPU_SPARC
+#define WTF_CPU_SPARC 1
 #endif
 
 /* CPU(X86) - i386 / x86 32-bit */
 #if   defined(__i386__) \
    || defined(i386)     \
    || defined(_M_IX86)  \
    || defined(_X86_)    \
    || defined(__THW_INTEL)
@@ -852,16 +852,18 @@ on MinGW. See https://bugs.webkit.org/sh
 #elif WTF_CPU_ARM_THUMB2 && WTF_PLATFORM_IPHONE
     #define ENABLE_JIT 1
 /* The JIT is tested & working on x86 OS/2 */
 #elif WTF_CPU_X86 && WTF_PLATFORM_OS2
     #define ENABLE_JIT 1
 /* The JIT is tested & working on x86 Windows */
 #elif WTF_CPU_X86 && WTF_PLATFORM_WIN
     #define ENABLE_JIT 1
+#elif WTF_CPU_SPARC
+    #define ENABLE_JIT 1
 #endif
 
 #if WTF_PLATFORM_QT
 #if WTF_CPU_X86_64 && WTF_PLATFORM_DARWIN
     #define ENABLE_JIT 1
 #elif WTF_CPU_X86 && WTF_PLATFORM_DARWIN
     #define ENABLE_JIT 1
     #define WTF_USE_JIT_STUB_ARGUMENT_VA_LIST 1
@@ -915,16 +917,17 @@ on MinGW. See https://bugs.webkit.org/sh
 #endif
 
 /* Yet Another Regex Runtime. */
 #if !defined(ENABLE_YARR_JIT)
 
 /* YARR supports x86 & x86-64, and has been tested on Mac and Windows. */
 #if (WTF_CPU_X86 \
  || WTF_CPU_X86_64 \
+ || WTF_CPU_SPARC \
  || WTF_CPU_ARM_TRADITIONAL \
  || WTF_CPU_ARM_THUMB2 \
  || WTF_CPU_X86)
 #define ENABLE_YARR_JIT 1
 #else
 #define ENABLE_YARR_JIT 0
 #endif
 
deleted file mode 100644
--- a/js/src/config.mk
+++ /dev/null
@@ -1,186 +0,0 @@
-# -*- Mode: makefile -*-
-# 
-# ***** BEGIN LICENSE BLOCK *****
-# Version: MPL 1.1/GPL 2.0/LGPL 2.1
-#
-# The contents of this file are subject to the Mozilla Public License Version
-# 1.1 (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-# http://www.mozilla.org/MPL/
-#
-# Software distributed under the License is distributed on an "AS IS" basis,
-# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
-# for the specific language governing rights and limitations under the
-# License.
-# 
-# The Original Code is Mozilla Communicator client code, released
-# March 31, 1998.
-# 
-# The Initial Developer of the Original Code is
-# Netscape Communications Corporation.
-# Portions created by the Initial Developer are Copyright (C) 1998-1999
-# the Initial Developer. All Rights Reserved.
-# 
-# Contributor(s):
-# 
-# Alternatively, the contents of this file may be used under the terms of
-# either of the GNU General Public License Version 2 or later (the "GPL"),
-# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
-# in which case the provisions of the GPL or the LGPL are applicable instead
-# of those above. If you wish to allow use of your version of this file only
-# under the terms of either the GPL or the LGPL, and not to allow others to
-# use your version of this file under the terms of the MPL, indicate your
-# decision by deleting the provisions above and replace them with the notice
-# and other provisions required by the GPL or the LGPL. If you do not delete
-# the provisions above, a recipient may use your version of this file under
-# the terms of any one of the MPL, the GPL or the LGPL.
-# 
-# ***** END LICENSE BLOCK *****
-
-ifdef JS_DIST
-DIST = $(JS_DIST)
-else
-DIST = $(DEPTH)/../../dist
-endif
-
-# Set os+release dependent make variables
-OS_ARCH         := $(subst /,_,$(shell uname -s | sed /\ /s//_/))
-
-# Attempt to differentiate between SunOS 5.4 and x86 5.4
-OS_CPUARCH      := $(shell uname -m)
-ifeq ($(OS_CPUARCH),i86pc)
-OS_RELEASE      := $(shell uname -r)_$(OS_CPUARCH)
-else
-ifeq ($(OS_ARCH),AIX)
-OS_RELEASE      := $(shell uname -v).$(shell uname -r)
-else
-OS_RELEASE      := $(shell uname -r)
-endif
-endif
-ifeq ($(OS_ARCH),IRIX64)
-OS_ARCH         := IRIX
-endif
-
-# Handle output from win32 unames other than Netscape's version
-ifeq (,$(filter-out Windows_95 Windows_98, $(OS_ARCH)))
-	OS_ARCH   := WIN95
-endif
-ifeq ($(OS_ARCH),WIN95)
-	OS_ARCH	   := WINNT
-	OS_RELEASE := 4.0
-endif
-ifeq ($(OS_ARCH), Windows_NT)
-	OS_ARCH    := WINNT
-	OS_MINOR_RELEASE := $(shell uname -v)
-	ifeq ($(OS_MINOR_RELEASE),00)
-		OS_MINOR_RELEASE = 0
-	endif
-	OS_RELEASE := $(OS_RELEASE).$(OS_MINOR_RELEASE)
-endif
-ifeq (MINGW32_NT,$(findstring MINGW32_NT,$(OS_ARCH)))
-	OS_RELEASE := $(patsubst MINGW32_NT-%,%,$(OS_ARCH))
-	OS_ARCH    := WINNT
-endif
-
-# Virtually all Linux versions are identical.
-# Any distinctions are handled in linux.h
-ifeq ($(OS_ARCH),Linux)
-OS_CONFIG      := Linux_All
-else
-ifeq ($(OS_ARCH),dgux)
-OS_CONFIG      := dgux
-else
-ifeq ($(OS_ARCH),Darwin)
-OS_CONFIG      := Darwin
-else
-ifeq ($(OS_ARCH),Darwin64)
-OS_CONFIG       := Darwin64
-else
-OS_CONFIG       := $(OS_ARCH)$(OS_OBJTYPE)$(OS_RELEASE)
-endif
-endif
-endif
-endif
-
-ASFLAGS         =
-DEFINES         =
-
-ifeq ($(OS_ARCH), WINNT)
-INSTALL = nsinstall
-CP = cp
-else
-INSTALL	= $(DIST)/bin/nsinstall
-CP = cp
-endif
-
-ifdef BUILD_OPT
-ifdef USE_MSVC
-OPTIMIZER  = -O2 -GL
-INTERP_OPTIMIZER = -O2 -GL
-BUILTINS_OPTIMIZER = -O2 -GL
-LDFLAGS    += -LTCG
-else
-OPTIMIZER           = -Os -fno-exceptions -fno-rtti -fstrict-aliasing -Wstrict-aliasing=3
-BUILTINS_OPTIMIZER  = -O9 -fno-exceptions -fno-rtti -fstrict-aliasing
-INTERP_OPTIMIZER    = -O3 -fno-exceptions -fno-rtti -fstrict-aliasing
-endif
-DEFINES    += -UDEBUG -DNDEBUG -UDEBUG_$(USER)
-OBJDIR_TAG = _OPT
-else
-ifdef USE_MSVC
-OPTIMIZER  = -Zi
-INTERP_OPTIMIZER = -Zi
-BUILTINS_OPTIMIZER = $(INTERP_OPTIMIZER)
-else
-OPTIMIZER          = -g3 -fstrict-aliasing -fno-exceptions -fno-rtti -Wstrict-aliasing=3
-INTERP_OPTIMIZER   = -g3 -fstrict-aliasing -fno-exceptions -fno-rtti
-BUILTINS_OPTIMIZER = $(INTERP_OPTIMIZER)
-endif
-DEFINES    += -DDEBUG -DDEBUG_$(USER)
-OBJDIR_TAG = _DBG
-endif
-
-SO_SUFFIX = so
-
-NS_USE_NATIVE = 1
-
-include $(DEPTH)/ref-config/$(OS_CONFIG).mk
-
-ifndef OBJ_SUFFIX
-ifdef USE_MSVC
-OBJ_SUFFIX = obj
-else
-OBJ_SUFFIX = o
-endif
-endif
-
-ifndef HOST_BIN_SUFFIX
-ifeq ($(OS_ARCH),WINNT)
-HOST_BIN_SUFFIX = .exe
-else
-HOST_BIN_SUFFIX =
-endif
-endif
-
-# Name of the binary code directories
-ifdef OBJROOT
-# prepend $(DEPTH) to the root unless it is an absolute path
-OBJDIR = $(if $(filter /%,$(OBJROOT)),$(OBJROOT),$(DEPTH)/$(OBJROOT))
-else
-ifeq ($(DEPTH),.)
-OBJDIR = $(OS_CONFIG)$(OBJDIR_TAG).$(if $(BUILD_IDG),OBJD,OBJ)
-else
-OBJDIR = $(DEPTH)/$(OS_CONFIG)$(OBJDIR_TAG).$(if $(BUILD_IDG),OBJD,OBJ)
-endif
-endif
-
-VPATH = $(OBJDIR)
-
-LCJAR = js15lc30.jar
-
-# Library name
-LIBDIR := lib
-ifeq ($(CPU_ARCH), x86_64)
-LIBDIR := lib64
-endif
-
--- a/js/src/configure.in
+++ b/js/src/configure.in
@@ -2867,17 +2867,22 @@ arm*-*)
     ENABLE_MONOIC=1
     ENABLE_POLYIC=1
     AC_DEFINE(JS_CPU_ARM)
     AC_DEFINE(JS_NUNBOX32)
     ;;
 sparc*-*)
     ENABLE_TRACEJIT=1
     NANOJIT_ARCH=Sparc
+    ENABLE_METHODJIT=1
+    ENABLE_MONOIC=1
+    ENABLE_POLYIC=1
+    ENABLE_POLYIC_TYPED_ARRAY=1
     AC_DEFINE(JS_CPU_SPARC)
+    AC_DEFINE(JS_NUNBOX32)
     ;;
 esac
 
 MOZ_ARG_DISABLE_BOOL(methodjit,
 [  --disable-methodjit           Disable method JIT support],
   ENABLE_METHODJIT= )
 
 MOZ_ARG_DISABLE_BOOL(monoic,
new file mode 100644
--- /dev/null
+++ b/js/src/jsalloc.cpp
@@ -0,0 +1,56 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=99 ft=cpp:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
+ * July 16, 2009.
+ *
+ * The Initial Developer of the Original Code is
+ *   the Mozilla Corporation.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "jsalloc.h"
+#include "jscntxt.h"
+
+namespace js {
+
+void *
+ContextAllocPolicy::onOutOfMemory(void *p, size_t nbytes)
+{
+    return cx->runtime->onOutOfMemory(p, nbytes, cx);
+}
+
+void
+ContextAllocPolicy::reportAllocOverflow() const
+{
+    js_ReportAllocationOverflow(cx);
+}
+
+} /* namespace js */
new file mode 100644
--- /dev/null
+++ b/js/src/jsalloc.h
@@ -0,0 +1,119 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=99 ft=cpp:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
+ * July 16, 2009.
+ *
+ * The Initial Developer of the Original Code is
+ *   the Mozilla Corporation.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsalloc_h_
+#define jsalloc_h_
+
+#include "jspubtd.h"
+#include "jsutil.h"
+#include "jsstaticcheck.h"
+
+namespace js {
+
+/*
+ * Allocation policies.  These model the concept:
+ *  - public copy constructor, assignment, destructor
+ *  - void *malloc_(size_t)
+ *      Responsible for OOM reporting on NULL return value.
+ *  - void *realloc_(size_t)
+ *      Responsible for OOM reporting on NULL return value.
+ *  - void free_(void *)
+ *  - reportAllocOverflow()
+ *      Called on overflow before the container returns NULL.
+ */
+
+/* Policy for using system memory functions and doing no error reporting. */
+class SystemAllocPolicy
+{
+  public:
+    void *malloc_(size_t bytes) { return js_malloc(bytes); }
+    void *realloc_(void *p, size_t bytes) { return js_realloc(p, bytes); }
+    void free_(void *p) { js_free(p); }
+    void reportAllocOverflow() const {}
+};
+
+/*
+ * Allocation policy that calls the system memory functions and reports errors
+ * to the context. Since the JSContext given on construction is stored for
+ * the lifetime of the container, this policy may only be used for containers
+ * whose lifetime is a shorter than the given JSContext.
+ *
+ * FIXME bug 647103 - rewrite this in terms of temporary allocation functions,
+ * not the system ones.
+ */
+class ContextAllocPolicy
+{
+    JSContext *const cx;
+
+    /*
+     * Non-inline helper to call JSRuntime::onOutOfMemory with minimal
+     * code bloat.
+     */
+    JS_FRIEND_API(void *) onOutOfMemory(void *p, size_t nbytes);
+
+  public:
+    ContextAllocPolicy(JSContext *cx) : cx(cx) {}
+
+    JSContext *context() const {
+        return cx;
+    }
+
+    void *malloc_(size_t bytes) {
+        void *p = js_malloc(bytes);
+        if (JS_UNLIKELY(!p))
+            p = onOutOfMemory(NULL, bytes);
+        return p;
+    }
+
+    void *realloc_(void *p, size_t bytes) {
+        void *p2 = js_realloc(p, bytes);
+        if (JS_UNLIKELY(!p2))
+            p2 = onOutOfMemory(p2, bytes);
+        return p2;
+    }
+
+    void free_(void *p) {
+        js_free(p);
+    }
+
+    JS_FRIEND_API(void) reportAllocOverflow() const;
+};
+
+} /* namespace js */
+
+#endif /* jsalloc_h_ */
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -79,16 +79,17 @@
 #include "jsregexp.h"
 #include "jsscope.h"
 #include "jsscript.h"
 #include "jsstr.h"
 #include "jstracer.h"
 #include "prmjtime.h"
 #include "jsstaticcheck.h"
 #include "jsvector.h"
+#include "jsweakmap.h"
 #include "jswrapper.h"
 #include "jstypedarray.h"
 
 #include "jsatominlines.h"
 #include "jscntxtinlines.h"
 #include "jsinferinlines.h"
 #include "jsinterpinlines.h"
 #include "jsobjinlines.h"
@@ -1171,17 +1172,17 @@ JS_ToggleOptions(JSContext *cx, uint32 o
     uintN oldopts = cx->allOptions();
     uintN newopts = oldopts ^ options;
     return SetOptionsCommon(cx, newopts);
 }
 
 JS_PUBLIC_API(const char *)
 JS_GetImplementationVersion(void)
 {
-    return "JavaScript-C 1.8.0 pre-release 1 2007-10-03";
+    return "JavaScript-C 1.8.5+ 2011-04-16";
 }
 
 JS_PUBLIC_API(JSCompartmentCallback)
 JS_SetCompartmentCallback(JSRuntime *rt, JSCompartmentCallback callback)
 {
     JSCompartmentCallback old = rt->compartmentCallback;
     rt->compartmentCallback = callback;
     return old;
@@ -1678,16 +1679,17 @@ static JSStdName standard_class_atoms[] 
     {js_InitNamespaceClass,             EAGER_ATOM_AND_CLASP(Namespace)},
     {js_InitQNameClass,                 EAGER_ATOM_AND_CLASP(QName)},
 #endif
 #if JS_HAS_GENERATORS
     {js_InitIteratorClasses,            EAGER_ATOM_AND_CLASP(StopIteration)},
 #endif
     {js_InitJSONClass,                  EAGER_ATOM_AND_CLASP(JSON)},
     {js_InitTypedArrayClasses,          EAGER_CLASS_ATOM(ArrayBuffer), &js::ArrayBuffer::jsclass},
+    {js_InitWeakMapClass,               EAGER_CLASS_ATOM(WeakMap), &WeakMap::jsclass},
     {NULL,                              0, NULL, NULL}
 };
 
 /*
  * Table of top-level function and constant names and their init functions.
  * If you add a "standard" global function or property, remember to update
  * this table.
  */
@@ -2059,17 +2061,20 @@ JS_GetGlobalForScopeChain(JSContext *cx)
     CHECK_REQUEST(cx);
     return GetGlobalForScopeChain(cx);
 }
 
 JS_PUBLIC_API(jsval)
 JS_ComputeThis(JSContext *cx, jsval *vp)
 {
     assertSameCompartment(cx, JSValueArray(vp, 2));
-    return BoxThisForVp(cx, Valueify(vp)) ? vp[1] : JSVAL_NULL;
+    CallReceiver call = CallReceiverFromVp(Valueify(vp));
+    if (!BoxNonStrictThis(cx, call))
+        return JSVAL_NULL;
+    return Jsvalify(call.thisv());
 }
 
 JS_PUBLIC_API(void *)
 JS_malloc(JSContext *cx, size_t nbytes)
 {
     return cx->malloc_(nbytes);
 }
 
@@ -4053,17 +4058,17 @@ JS_PUBLIC_API(JSIdArray *)
 JS_Enumerate(JSContext *cx, JSObject *obj)
 {
     CHECK_REQUEST(cx);
     assertSameCompartment(cx, obj);
 
     AutoIdVector props(cx);
     JSIdArray *ida;
     if (!GetPropertyNames(cx, obj, JSITER_OWNONLY, &props) || !VectorToIdArray(cx, props, &ida))
-        return false;
+        return NULL;
     for (size_t n = 0; n < size_t(ida->length); ++n)
         JS_ASSERT(js_CheckForStringIndex(ida->vector[n]) == ida->vector[n]);
     return ida;
 }
 
 /*
  * XXX reverse iterator for properties, unreverse and meld with jsinterp.c's
  *     prop_iterator_class somehow...
@@ -4835,26 +4840,29 @@ JS_PUBLIC_API(JSObject *)
 JS_CompileScript(JSContext *cx, JSObject *obj, const char *bytes, size_t length,
                  const char *filename, uintN lineno)
 {
     JS_THREADSAFE_ASSERT(cx->compartment != cx->runtime->atomsCompartment);
     return JS_CompileScriptForPrincipals(cx, obj, NULL, bytes, length, filename, lineno);
 }
 
 JS_PUBLIC_API(JSBool)
-JS_BufferIsCompilableUnit(JSContext *cx, JSObject *obj, const char *bytes, size_t length)
+JS_BufferIsCompilableUnit(JSContext *cx, JSBool bytes_are_utf8, JSObject *obj, const char *bytes, size_t length)
 {
     jschar *chars;
     JSBool result;
     JSExceptionState *exnState;
     JSErrorReporter older;
 
     CHECK_REQUEST(cx);
     assertSameCompartment(cx, obj);
-    chars = js_InflateString(cx, bytes, &length);
+    if (bytes_are_utf8)
+        chars = js_InflateString(cx, bytes, &length, JS_TRUE);
+    else
+        chars = js_InflateString(cx, bytes, &length);
     if (!chars)
         return JS_TRUE;
 
     /*
      * Return true on any out-of-memory error, so our caller doesn't try to
      * collect more buffered source.
      */
     result = JS_TRUE;
@@ -5218,17 +5226,17 @@ JS_DecompileFunctionBody(JSContext *cx, 
 JS_PUBLIC_API(JSBool)
 JS_ExecuteScript(JSContext *cx, JSObject *obj, JSObject *scriptObj, jsval *rval)
 {
     JS_THREADSAFE_ASSERT(cx->compartment != cx->runtime->atomsCompartment);
 
     CHECK_REQUEST(cx);
     assertSameCompartment(cx, obj, scriptObj);
 
-    JSBool ok = Execute(cx, obj, scriptObj->getScript(), NULL, 0, Valueify(rval));
+    JSBool ok = Execute(cx, *obj, scriptObj->getScript(), NULL, 0, Valueify(rval));
     LAST_FRAME_CHECKS(cx, ok);
     return ok;
 }
 
 JS_PUBLIC_API(JSBool)
 JS_ExecuteScriptVersion(JSContext *cx, JSObject *obj, JSObject *scriptObj, jsval *rval,
                         JSVersion version)
 {
@@ -5253,17 +5261,17 @@ EvaluateUCScriptForPrincipalsCommon(JSCo
                                                chars, length, filename, lineno, compileVersion);
     if (!script) {
         LAST_FRAME_CHECKS(cx, script);
         return false;
     }
     script->isUncachedEval = true;
 
     JS_ASSERT(script->getVersion() == compileVersion);
-    bool ok = Execute(cx, obj, script, NULL, 0, Valueify(rval));
+    bool ok = Execute(cx, *obj, script, NULL, 0, Valueify(rval));
     LAST_FRAME_CHECKS(cx, ok);
 
     js_DestroyScript(cx, script);
     return ok;
 }
 
 JS_PUBLIC_API(JSBool)
 JS_EvaluateUCScriptForPrincipalsVersion(JSContext *cx, JSObject *obj,
@@ -5402,17 +5410,17 @@ JS_New(JSContext *cx, JSObject *ctor, ui
     // This is not a simple variation of JS_CallFunctionValue because JSOP_NEW
     // is not a simple variation of JSOP_CALL. We have to determine what class
     // of object to create, create it, and clamp the return value to an object,
     // among other details. js_InvokeConstructor does the hard work.
     InvokeArgsGuard args;
     if (!cx->stack().pushInvokeArgs(cx, argc, &args))
         return NULL;
 
-    args.callee().setObject(*ctor);
+    args.calleev().setObject(*ctor);
     args.thisv().setNull();
     memcpy(args.argv(), argv, argc * sizeof(jsval));
 
     bool ok = InvokeConstructor(cx, args);
 
     JSObject *obj = NULL;
     if (ok) {
         if (args.rval().isObject()) {
@@ -5751,16 +5759,23 @@ JS_EncodeCharacters(JSContext *cx, const
 }
 
 JS_PUBLIC_API(JSBool)
 JS_DecodeBytes(JSContext *cx, const char *src, size_t srclen, jschar *dst, size_t *dstlenp)
 {
     return js_InflateStringToBuffer(cx, src, srclen, dst, dstlenp);
 }
 
+JS_PUBLIC_API(JSBool)
+JS_DecodeUTF8(JSContext *cx, const char *src, size_t srclen, jschar *dst,
+              size_t *dstlenp)
+{
+    return js_InflateUTF8StringToBuffer(cx, src, srclen, dst, dstlenp);
+}
+
 JS_PUBLIC_API(char *)
 JS_EncodeString(JSContext *cx, JSString *str)
 {
     const jschar *chars = str->getChars(cx);
     if (!chars)
         return NULL;
     return js_DeflateString(cx, chars, str->length());
 }
@@ -5817,38 +5832,16 @@ JS_Stringify(JSContext *cx, jsval *vp, J
 JS_PUBLIC_API(JSBool)
 JS_TryJSON(JSContext *cx, jsval *vp)
 {
     CHECK_REQUEST(cx);
     assertSameCompartment(cx, *vp);
     return js_TryJSON(cx, Valueify(vp));
 }
 
-JS_PUBLIC_API(JSONParser *)
-JS_BeginJSONParse(JSContext *cx, jsval *vp)
-{
-    CHECK_REQUEST(cx);
-    return js_BeginJSONParse(cx, Valueify(vp));
-}
-
-JS_PUBLIC_API(JSBool)
-JS_ConsumeJSONText(JSContext *cx, JSONParser *jp, const jschar *data, uint32 len)
-{
-    CHECK_REQUEST(cx);
-    return js_ConsumeJSONText(cx, jp, data, len);
-}
-
-JS_PUBLIC_API(JSBool)
-JS_FinishJSONParse(JSContext *cx, JSONParser *jp, jsval reviver)
-{
-    CHECK_REQUEST(cx);
-    assertSameCompartment(cx, reviver);
-    return js_FinishJSONParse(cx, jp, Valueify(reviver));
-}
-
 JS_PUBLIC_API(JSBool)
 JS_ParseJSON(JSContext *cx, const jschar *chars, uint32 len, jsval *vp)
 {
     CHECK_REQUEST(cx);
 
     return ParseJSONWithReviver(cx, chars, len, NullValue(), Valueify(vp));
 }
 
--- a/js/src/jsapi.h
+++ b/js/src/jsapi.h
@@ -1401,16 +1401,17 @@ template<> class AnchorPermitted<jsval> 
 
 template<typename T>
 class Anchor: AnchorPermitted<T> {
   public:
     Anchor() { }
     explicit Anchor(T t) { hold = t; }
     inline ~Anchor();
     T &get() { return hold; }
+    const T &get() const { return hold; }
     void set(const T &t) { hold = t; }
     void clear() { hold = 0; }
   private:
     T hold;
     /* Anchors should not be assigned or passed to functions. */
     Anchor(const Anchor &);
     const Anchor &operator=(const Anchor &);
 };
@@ -1944,16 +1945,17 @@ struct JSClass {
     JSClassInternal     reserved1;
     void                *reserved[19];
 };
 
 #define JSCLASS_HAS_PRIVATE             (1<<0)  /* objects have private slot */
 #define JSCLASS_NEW_ENUMERATE           (1<<1)  /* has JSNewEnumerateOp hook */
 #define JSCLASS_NEW_RESOLVE             (1<<2)  /* has JSNewResolveOp hook */
 #define JSCLASS_PRIVATE_IS_NSISUPPORTS  (1<<3)  /* private is (nsISupports *) */
+#define JSCLASS_CONCURRENT_FINALIZER    (1<<4)  /* finalize is called on background thread */
 #define JSCLASS_NEW_RESOLVE_GETS_START  (1<<5)  /* JSNewResolveOp gets starting
                                                    object in prototype chain
                                                    passed in via *objp in/out
                                                    parameter */
 #define JSCLASS_CONSTRUCT_PROTOTYPE     (1<<6)  /* call constructor on class
                                                    prototype */
 #define JSCLASS_DOCUMENT_OBSERVER       (1<<7)  /* DOM document observer */
 
@@ -2763,18 +2765,18 @@ JS_TypeHandlerThis(JSContext*, JSTypeFun
 /*
  * Given a buffer, return JS_FALSE if the buffer might become a valid
  * javascript statement with the addition of more lines.  Otherwise return
  * JS_TRUE.  The intent is to support interactive compilation - accumulate
  * lines in a buffer until JS_BufferIsCompilableUnit is true, then pass it to
  * the compiler.
  */
 extern JS_PUBLIC_API(JSBool)
-JS_BufferIsCompilableUnit(JSContext *cx, JSObject *obj,
-                          const char *bytes, size_t length);
+JS_BufferIsCompilableUnit(JSContext *cx, JSBool bytes_are_utf8,
+                          JSObject *obj, const char *bytes, size_t length);
 
 extern JS_PUBLIC_API(JSObject *)
 JS_CompileScript(JSContext *cx, JSObject *obj,
                  const char *bytes, size_t length,
                  const char *filename, uintN lineno);
 
 extern JS_PUBLIC_API(JSObject *)
 JS_CompileScriptForPrincipals(JSContext *cx, JSObject *obj,
@@ -3285,25 +3287,31 @@ JS_SetCStringsAreUTF8(void);
  *
  * NB: Neither function stores an additional zero byte or jschar after the
  * transcoded string.
  *
  * If JS_CStringsAreUTF8() is true then JS_EncodeCharacters encodes to
  * UTF-8, and JS_DecodeBytes decodes from UTF-8, which may create additional
  * errors if the character sequence is malformed.  If UTF-8 support is
  * disabled, the functions deflate and inflate, respectively.
+ *
+ * JS_DecodeUTF8() always decodes from UTF-8, regardless of JS_CStringsAreUTF8().
  */
 JS_PUBLIC_API(JSBool)
 JS_EncodeCharacters(JSContext *cx, const jschar *src, size_t srclen, char *dst,
                     size_t *dstlenp);
 
 JS_PUBLIC_API(JSBool)
 JS_DecodeBytes(JSContext *cx, const char *src, size_t srclen, jschar *dst,
                size_t *dstlenp);
 
+JS_PUBLIC_API(JSBool)
+JS_DecodeUTF8(JSContext *cx, const char *src, size_t srclen, jschar *dst,
+              size_t *dstlenp);
+
 /*
  * A variation on JS_EncodeCharacters where a null terminated string is
  * returned that you are expected to call JS_free on when done.
  */
 JS_PUBLIC_API(char *)
 JS_EncodeString(JSContext *cx, JSString *str);
 
 /*
@@ -3388,40 +3396,31 @@ class JSAutoByteString {
 
 /************************************************************************/
 /*
  * JSON functions
  */
 typedef JSBool (* JSONWriteCallback)(const jschar *buf, uint32 len, void *data);
 
 /*
- * JSON.stringify as specified by ES3.1 (draft)
+ * JSON.stringify as specified by ES5.
  */
 JS_PUBLIC_API(JSBool)
 JS_Stringify(JSContext *cx, jsval *vp, JSObject *replacer, jsval space,
              JSONWriteCallback callback, void *data);
 
 /*
  * Retrieve a toJSON function. If found, set vp to its result.
  */
 JS_PUBLIC_API(JSBool)
 JS_TryJSON(JSContext *cx, jsval *vp);
 
 /*
- * JSON.parse as specified by ES3.1 (draft)
+ * JSON.parse as specified by ES5.
  */
-JS_PUBLIC_API(JSONParser *)
-JS_BeginJSONParse(JSContext *cx, jsval *vp);
-
-JS_PUBLIC_API(JSBool)
-JS_ConsumeJSONText(JSContext *cx, JSONParser *jp, const jschar *data, uint32 len);
-
-JS_PUBLIC_API(JSBool)
-JS_FinishJSONParse(JSContext *cx, JSONParser *jp, jsval reviver);
-
 JS_PUBLIC_API(JSBool)
 JS_ParseJSON(JSContext *cx, const jschar *chars, uint32 len, jsval *vp);
 
 JS_PUBLIC_API(JSBool)
 JS_ParseJSONWithReviver(JSContext *cx, const jschar *chars, uint32 len, jsval reviver, jsval *vp);
 
 /************************************************************************/
 
--- a/js/src/jsarena.cpp
+++ b/js/src/jsarena.cpp
@@ -39,16 +39,17 @@
 
 /*
  * Lifetime-based fast allocation, inspired by much prior art, including
  * "Fast Allocation and Deallocation of Memory Based on Object Lifetimes"
  * David R. Hanson, Software -- Practice and Experience, Vol. 20(1).
  */
 #include <stdlib.h>
 #include <string.h>
+#include "jsalloc.h"
 #include "jstypes.h"
 #include "jsstdint.h"
 #include "jsbit.h"
 #include "jsarena.h"
 #include "jsprvtd.h"
 
 using namespace js;
 
--- a/js/src/jsarray.cpp
+++ b/js/src/jsarray.cpp
@@ -1454,17 +1454,17 @@ array_toString(JSContext *cx, uintN argc
         return true;
     }
 
     LeaveTrace(cx);
     InvokeArgsGuard args;
     if (!cx->stack().pushInvokeArgs(cx, 0, &args))
         return false;
 
-    args.callee() = join;
+    args.calleev() = join;
     args.thisv().setObject(*obj);
 
     /* Do the call. */
     if (!Invoke(cx, args, 0))
         return false;
     *vp = args.rval();
     return true;
 }
--- a/js/src/jsatom.cpp
+++ b/js/src/jsatom.cpp
@@ -206,17 +206,19 @@ const char *const js_common_atom_names[]
     "delete",                   /* deleteAtom                   */
     "getOwnPropertyNames",      /* getOwnPropertyNames          */
     "enumerate",                /* enumerateAtom                */
     "fix",                      /* fixAtom                      */
 
     "has",                      /* hasAtom                      */
     "hasOwn",                   /* hasOwnAtom                   */
     "keys",                     /* keysAtom                     */
-    "iterate"                   /* iterateAtom                  */
+    "iterate",                  /* iterateAtom                  */
+
+    "WeakMap"                   /* WeakMapAtom                  */
 };
 
 JS_STATIC_ASSERT(JS_ARRAY_LENGTH(js_common_atom_names) * sizeof(JSAtom *) ==
                  LAZY_ATOM_OFFSET_START - ATOM_OFFSET_START);
 
 /*
  * Interpreter macros called by the trace recorder assume common atom indexes
  * fit in one byte of immediate operand.
--- a/js/src/jsatom.h
+++ b/js/src/jsatom.h
@@ -417,16 +417,18 @@ struct JSAtomState
     JSAtom              *enumerateAtom;
     JSAtom              *fixAtom;
 
     JSAtom              *hasAtom;
     JSAtom              *hasOwnAtom;
     JSAtom              *keysAtom;
     JSAtom              *iterateAtom;
 
+    JSAtom              *WeakMapAtom;
+
     /* Less frequently used atoms, pinned lazily by JS_ResolveStandardClass. */
     struct {
         JSAtom          *XMLListAtom;
         JSAtom          *decodeURIAtom;
         JSAtom          *decodeURIComponentAtom;
         JSAtom          *defineGetterAtom;
         JSAtom          *defineSetterAtom;
         JSAtom          *encodeURIAtom;
--- a/js/src/jsclone.cpp
+++ b/js/src/jsclone.cpp
@@ -463,17 +463,17 @@ JSStructuredCloneWriter::writeArrayBuffe
 }
 
 bool
 JSStructuredCloneWriter::startObject(JSObject *obj)
 {
     JS_ASSERT(obj->isArray() || obj->isObject());
 
     /* Fail if obj is already on the stack. */
-    HashSet<JSObject *>::AddPtr p = memory.lookupForAdd(obj);
+    MemorySet::AddPtr p = memory.lookupForAdd(obj);
     if (p) {
         JSContext *cx = context();
         if (callbacks && callbacks->reportError)
             callbacks->reportError(cx, JS_SCERR_RECURSION);
         else
             JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_SC_RECURSION);
         return false;
     }
--- a/js/src/jsclone.h
+++ b/js/src/jsclone.h
@@ -174,17 +174,18 @@ struct JSStructuredCloneWriter {
     // counts.length() == objs.length() and sum(counts) == ids.length().
     js::Vector<size_t> counts;
 
     // Ids of properties remaining to be written.
     js::AutoIdVector ids;
 
     // The "memory" list described in the HTML5 internal structured cloning algorithm.
     // memory has the same elements as objs.
-    js::HashSet<JSObject *> memory;
+    typedef js::HashSet<JSObject *> MemorySet;
+    MemorySet memory;
 
     // The user defined callbacks that will be used for cloning.
     const JSStructuredCloneCallbacks *callbacks;
 
     // Any value passed to JS_WriteStructuredClone.
     void *closure;
 };
 
--- a/js/src/jscntxt.cpp
+++ b/js/src/jscntxt.cpp
@@ -296,18 +296,17 @@ StackSpace::pushSegmentForInvoke(JSConte
         return false;
 
     StackSegment *seg = new(start) StackSegment;
     seg->setPreviousInMemory(currentSegment);
     currentSegment = seg;
 
     ag->cx = cx;
     ag->seg = seg;
-    ag->argv_ = seg->valueRangeBegin() + 2;
-    ag->argc_ = argc;
+    ImplicitCast<CallArgs>(*ag) = CallArgsFromVp(argc, seg->valueRangeBegin());
 
     /* Use invokeArgEnd to root [vp, vpend) until the frame is pushed. */
 #ifdef DEBUG
     ag->prevInvokeSegment = invokeSegment;
     invokeSegment = seg;
     ag->prevInvokeFrame = invokeFrame;
     invokeFrame = NULL;
 #endif
@@ -1027,16 +1026,19 @@ js_DestroyContext(JSContext *cx, JSDestr
     if (last || mode == JSDCM_FORCE_GC || mode == JSDCM_MAYBE_GC
 #ifdef JS_THREADSAFE
         || cx->outstandingRequests != 0
 #endif
         ) {
         JS_ASSERT(!rt->gcRunning);
 
         JS_UNLOCK_GC(rt);
+#ifdef JS_THREADSAFE
+        rt->gcHelperThread.waitBackgroundSweepEnd(rt);
+#endif
 
         if (last) {
 #ifdef JS_THREADSAFE
             /*
              * If this thread is not in a request already, begin one now so
              * that we wait for any racing GC started on a not-last context to
              * finish, before we plow ahead and unpin atoms. Note that even
              * though we begin a request here if necessary, we end all
@@ -1107,16 +1109,19 @@ js_DestroyContext(JSContext *cx, JSDestr
 #endif
     js_ClearContextThread(cx);
     JS_ASSERT_IF(JS_CLIST_IS_EMPTY(&t->contextList), !t->data.requestDepth);
 #endif
 #ifdef JS_METER_DST_OFFSET_CACHING
     cx->dstOffsetCache.dumpStats();
 #endif
     JS_UNLOCK_GC(rt);
+#ifdef JS_THREADSAFE
+    rt->gcHelperThread.waitBackgroundSweepEnd(rt);
+#endif
     Foreground::delete_(cx);
 }
 
 JSContext *
 js_ContextIterator(JSRuntime *rt, JSBool unlocked, JSContext **iterp)
 {
     JSContext *cx = *iterp;
 
@@ -1717,17 +1722,17 @@ js_InvokeOperationCallback(JSContext *cx
         js_GC(cx, rt->gcTriggerCompartment, GC_NORMAL);
 
         /*
          * On trace we can exceed the GC quota, see comments in NewGCArena. So
          * we check the quota and report OOM here when we are off trace.
          */
         bool delayedOutOfMemory;
         JS_LOCK_GC(rt);
-        delayedOutOfMemory = (rt->gcBytes > rt->gcMaxBytes);
+        delayedOutOfMemory = rt->gcBytes > rt->gcMaxBytes;
         JS_UNLOCK_GC(rt);
         if (delayedOutOfMemory) {
             js_ReportOutOfMemory(cx);
             return false;
         }
     }
     
 #ifdef JS_THREADSAFE
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -114,37 +114,16 @@ struct FragPI;
 typedef nanojit::HashMap<uint32, FragPI, nanojit::DefaultHash<uint32> > FragStatsMap;
 #endif
 
 namespace mjit {
 class JaegerCompartment;
 }
 
 /*
- * Allocation policy that calls JSContext memory functions and reports errors
- * to the context. Since the JSContext given on construction is stored for
- * the lifetime of the container, this policy may only be used for containers
- * whose lifetime is a shorter than the given JSContext.
- */
-class ContextAllocPolicy
-{
-    JSContext *cx;
-
-  public:
-    ContextAllocPolicy(JSContext *cx) : cx(cx) {}
-    JSContext *context() const { return cx; }
-
-    /* Inline definitions below. */
-    void *malloc_(size_t bytes);
-    void free_(void *p);
-    void *realloc_(void *p, size_t bytes);
-    void reportAllocOverflow() const;
-};
-
-/*
  * A StackSegment (referred to as just a 'segment') contains a prev-linked set
  * of stack frames and the slots associated with each frame. A segment and its
  * contained frames/slots also have a precise memory layout that is described
  * in the js::StackSpace comment. A key layout invariant for segments is that
  * prev-linked frames are adjacent in memory, separated only by the values that
  * constitute the locals and expression stack of the prev-frame.
  *
  * The set of stack frames in a non-empty segment start at the segment's
@@ -406,17 +385,17 @@ class InvokeArgsGuard : public CallArgs
 };
 
 /*
  * This type can be used to call Invoke when the arguments have already been
  * pushed onto the stack as part of normal execution.
  */
 struct InvokeArgsAlreadyOnTheStack : CallArgs
 {
-    InvokeArgsAlreadyOnTheStack(Value *vp, uintN argc) : CallArgs(vp + 2, argc) {}
+    InvokeArgsAlreadyOnTheStack(Value *vp, uintN argc) : CallArgs(argc, vp + 2) {}
 };
 
 /* See StackSpace::pushInvokeFrame. */
 class InvokeFrameGuard
 {
     friend class StackSpace;
     JSContext        *cx_;  /* null implies nothing pushed */
     JSFrameRegs      regs_;
@@ -1075,29 +1054,30 @@ struct JSRuntime {
     uint32              protoHazardShape;
 
     /* Garbage collector state, used by jsgc.c. */
     js::GCChunkSet      gcChunkSet;
 
     js::RootedValueMap  gcRootsHash;
     js::GCLocks         gcLocksHash;
     jsrefcount          gcKeepAtoms;
-    size_t              gcBytes;
-    size_t              gcTriggerBytes;
+    uint32              gcBytes;
+    uint32              gcTriggerBytes;
     size_t              gcLastBytes;
     size_t              gcMaxBytes;
     size_t              gcMaxMallocBytes;
     size_t              gcChunksWaitingToExpire;
     uint32              gcEmptyArenaPoolLifespan;
     uint32              gcNumber;
     js::GCMarker        *gcMarkingTracer;
     uint32              gcTriggerFactor;
     int64               gcJitReleaseTime;
     JSGCMode            gcMode;
     volatile bool       gcIsNeeded;
+    JSObject           *gcWeakMapList;
 
     /*
      * Compartment that triggered GC. If more than one Compatment need GC,
      * gcTriggerCompartment is reset to NULL and a global GC is performed.
      */
     JSCompartment       *gcTriggerCompartment;
 
     /* Compartment that is currently involved in per-compartment GC */
@@ -1286,19 +1266,16 @@ struct JSRuntime {
 
 #ifdef DEBUG
     /* Function invocation metering. */
     jsrefcount          inlineCalls;
     jsrefcount          nativeCalls;
     jsrefcount          nonInlineCalls;
     jsrefcount          constructs;
 
-    jsrefcount          liveObjectProps;
-    jsrefcount          liveObjectPropsPreSweep;
-
     /*
      * NB: emptyShapes (in JSCompartment) is init'ed iff at least one
      * of these envars is set:
      *
      *  JS_PROPTREE_STATFILE  statistics on the property tree forest
      *  JS_PROPTREE_DUMPFILE  all paths in the property tree forest
      */
     const char          *propTreeStatFilename;
@@ -1374,16 +1351,17 @@ struct JSRuntime {
 
     JSRuntime();
     ~JSRuntime();
 
     bool init(uint32 maxbytes);
 
     void setGCTriggerFactor(uint32 factor);
     void setGCLastBytes(size_t lastBytes);
+    void reduceGCTriggerBytes(uint32 amount);
 
     /*
      * Call the system malloc while checking for GC memory pressure and
      * reporting OOM error when cx is not null.
      */
     void* malloc_(size_t bytes, JSContext *cx = NULL) {
         updateMallocCounter(bytes);
         void *p = ::js_malloc(bytes);
@@ -1450,17 +1428,16 @@ struct JSRuntime {
     void updateMallocCounter(size_t nbytes) {
         /* We tolerate any thread races when updating gcMallocBytes. */
         ptrdiff_t newCount = gcMallocBytes - ptrdiff_t(nbytes);
         gcMallocBytes = newCount;
         if (JS_UNLIKELY(newCount <= 0))
             onTooMuchMalloc();
     }
 
-  private:
     /*
      * The function must be called outside the GC lock.
      */
     JS_FRIEND_API(void) onTooMuchMalloc();
 
     /*
      * This should be called after system malloc/realloc returns NULL to try
      * to recove some memory or to report an error. Failures in malloc and
@@ -3274,40 +3251,16 @@ js_RegenerateShapeForGC(JSRuntime *rt)
     uint32 shape = rt->shapeGen;
     shape = (shape + 1) | (shape & js::SHAPE_OVERFLOW_BIT);
     rt->shapeGen = shape;
     return shape;
 }
 
 namespace js {
 
-inline void *
-ContextAllocPolicy::malloc_(size_t bytes)
-{
-    return cx->malloc_(bytes);
-}
-
-inline void
-ContextAllocPolicy::free_(void *p)
-{
-    cx->free_(p);
-}
-
-inline void *
-ContextAllocPolicy::realloc_(void *p, size_t bytes)
-{
-    return cx->realloc_(p, bytes);
-}
-
-inline void
-ContextAllocPolicy::reportAllocOverflow() const
-{
-    js_ReportAllocationOverflow(cx);
-}
-
 template<class T>
 class AutoVectorRooter : protected AutoGCRooter
 {
   public:
     explicit AutoVectorRooter(JSContext *cx, ptrdiff_t tag
                               JS_GUARD_OBJECT_NOTIFIER_PARAM)
         : AutoGCRooter(cx, tag), vector(cx)
     {
@@ -3357,17 +3310,18 @@ class AutoVectorRooter : protected AutoG
     const T *end() const { return vector.end(); }
     T *end() { return vector.end(); }
 
     const T &back() const { return vector.back(); }
 
     friend void AutoGCRooter::trace(JSTracer *trc);
 
   private:
-    Vector<T, 8> vector;
+    typedef Vector<T, 8> VectorImpl;
+    VectorImpl vector;
     JS_DECL_USE_GUARD_OBJECT_NOTIFIER
 };
 
 class AutoValueVector : public AutoVectorRooter<Value>
 {
   public:
     explicit AutoValueVector(JSContext *cx
                              JS_GUARD_OBJECT_NOTIFIER_PARAM)
--- a/js/src/jscntxtinlines.h
+++ b/js/src/jscntxtinlines.h
@@ -257,18 +257,17 @@ StackSpace::pushInvokeArgs(JSContext *cx
 #ifdef DEBUG
     ag->prevInvokeSegment = invokeSegment;
     invokeSegment = currentSegment;
     ag->prevInvokeFrame = invokeFrame;
     invokeFrame = cx->maybefp();
 #endif
 
     ag->cx = cx;
-    ag->argv_ = vp + 2;
-    ag->argc_ = argc;
+    ImplicitCast<CallArgs>(*ag) = CallArgsFromVp(argc, vp);
     return true;
 }
 
 JS_REQUIRES_STACK JS_ALWAYS_INLINE void
 StackSpace::popInvokeArgs(const InvokeArgsGuard &ag)
 {
     if (JS_UNLIKELY(ag.seg != NULL)) {
         popSegmentForInvoke(ag);
--- a/js/src/jscompartment.cpp
+++ b/js/src/jscompartment.cpp
@@ -164,17 +164,17 @@ JSCompartment::init(JSContext *cx)
     return true;
 #endif
 }
 
 bool
 JSCompartment::arenaListsAreEmpty()
 {
   for (unsigned i = 0; i < FINALIZE_LIMIT; i++) {
-       if (!arenas[i].isEmpty())
+       if (!arenas[i].isEmpty() || arenas[i].hasToBeFinalized)
            return false;
   }
   return true;
 }
 
 static bool
 IsCrossCompartmentWrapper(JSObject *wrapper)
 {
--- a/js/src/jscompartment.h
+++ b/js/src/jscompartment.h
@@ -372,18 +372,18 @@ class DtoaCache {
 struct JS_FRIEND_API(JSCompartment) {
     JSRuntime                    *rt;
     JSPrincipals                 *principals;
     js::gc::Chunk                *chunk;
 
     js::gc::ArenaList            arenas[js::gc::FINALIZE_LIMIT];
     js::gc::FreeLists            freeLists;
 
-    size_t                       gcBytes;
-    size_t                       gcTriggerBytes;
+    uint32                       gcBytes;
+    uint32                       gcTriggerBytes;
     size_t                       gcLastBytes;
 
     bool                         hold;
 
 #ifdef JS_GCMETER
     js::gc::JSGCArenaStats       compartmentStats[js::gc::FINALIZE_LIMIT];
 #endif
 
@@ -477,22 +477,23 @@ struct JS_FRIEND_API(JSCompartment) {
     bool wrap(JSContext *cx, js::StrictPropertyOp *op);
     bool wrap(JSContext *cx, js::PropertyDescriptor *desc);
     bool wrap(JSContext *cx, js::AutoIdVector &props);
 
     void markTypes(JSTracer *trc);
     void sweep(JSContext *cx, uint32 releaseInterval);
     void purge(JSContext *cx);
     void finishArenaLists();
-    void finalizeObjectArenaLists(JSContext *cx);
-    void finalizeShapeArenaLists(JSContext *cx);
-    void finalizeStringArenaLists(JSContext *cx);
+    void finalizeObjectArenaLists(JSContext *cx, JSGCInvocationKind gckind);
+    void finalizeStringArenaLists(JSContext *cx, JSGCInvocationKind gckind);
+    void finalizeShapeArenaLists(JSContext *cx, JSGCInvocationKind gckind);
     bool arenaListsAreEmpty();
 
     void setGCLastBytes(size_t lastBytes);
+    void reduceGCTriggerBytes(uint32 amount);
 
     js::DtoaCache dtoaCache;
 
   private:
     js::MathCache                *mathCache;
 
     js::MathCache *allocMathCache(JSContext *cx);
 
deleted file mode 100644
--- a/js/src/jsconfig.mk
+++ /dev/null
@@ -1,169 +0,0 @@
-# -*- Mode: makefile -*-
-# 
-# ***** BEGIN LICENSE BLOCK *****
-# Version: MPL 1.1/GPL 2.0/LGPL 2.1
-#
-# The contents of this file are subject to the Mozilla Public License Version
-# 1.1 (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-# http://www.mozilla.org/MPL/
-#
-# Software distributed under the License is distributed on an "AS IS" basis,
-# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
-# for the specific language governing rights and limitations under the
-# License.
-# 
-# The Original Code is Mozilla Communicator client code, released
-# March 31, 1998.
-# 
-# The Initial Developer of the Original Code is
-# Netscape Communications Corporation.
-# Portions created by the Initial Developer are Copyright (C) 1998-1999
-# the Initial Developer. All Rights Reserved.
-# 
-# Contributor(s):
-# 
-# Alternatively, the contents of this file may be used under the terms of
-# either of the GNU General Public License Version 2 or later (the "GPL"),
-# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
-# in which case the provisions of the GPL or the LGPL are applicable instead
-# of those above. If you wish to allow use of your version of this file only
-# under the terms of either the GPL or the LGPL, and not to allow others to
-# use your version of this file under the terms of the MPL, indicate your
-# decision by deleting the provisions above and replace them with the notice
-# and other provisions required by the GPL or the LGPL. If you do not delete
-# the provisions above, a recipient may use your version of this file under
-# the terms of any one of the MPL, the GPL or the LGPL.
-# 
-# ***** END LICENSE BLOCK *****
-
-ifndef OBJDIR
-  ifdef OBJDIR_NAME
-    OBJDIR = $(OBJDIR_NAME)
-  endif
-endif
-
-NSPR_VERSION = v4.0
-NSPR_LIBSUFFIX = 4
-
-NSPR_LOCAL       = $(MOZ_DEPTH)/dist/$(OBJDIR)/nspr
-NSPR_DIST        = $(MOZ_DEPTH)/dist/$(OBJDIR)
-NSPR_OBJDIR      = $(OBJDIR)
-ifeq ($(OS_ARCH), SunOS)
-  NSPR_OBJDIR   := $(subst _sparc,,$(NSPR_OBJDIR))
-endif
-ifeq ($(OS_ARCH), Linux)
-  LINUX_REL     := $(shell uname -r)
-  ifneq (,$(findstring 2.0,$(LINUX_REL)))
-    NSPR_OBJDIR := $(subst _All,2.0_x86_glibc_PTH,$(NSPR_OBJDIR))
-  else
-    NSPR_OBJDIR := $(subst _All,2.2_x86_glibc_PTH,$(NSPR_OBJDIR))
-  endif
-endif
-ifeq ($(OS_ARCH), AIX)
-  NSPR_OBJDIR   := $(subst 4.1,4.2,$(NSPR_OBJDIR))
-endif
-ifeq ($(OS_CONFIG), IRIX6.2)
-  NSPR_OBJDIR   := $(subst 6.2,6.2_n32_PTH,$(NSPR_OBJDIR))
-endif
-ifeq ($(OS_CONFIG), IRIX6.5)
-  NSPR_OBJDIR   := $(subst 6.5,6.5_n32_PTH,$(NSPR_OBJDIR))
-endif
-ifeq ($(OS_ARCH), WINNT)
-  ifeq ($(OBJDIR), WIN32_D.OBJ)
-    NSPR_OBJDIR  = WINNT4.0_DBG.OBJ
-  endif
-  ifeq ($(OBJDIR), WIN32_O.OBJ)
-    NSPR_OBJDIR  = WINNT4.0_OPT.OBJ
-  endif
-endif
-NSPR_SHARED      = /share/builds/components/nspr20/$(NSPR_VERSION)/$(NSPR_OBJDIR)
-ifeq ($(OS_ARCH), WINNT)
-  NSPR_SHARED    = nspr20/$(NSPR_VERSION)/$(NSPR_OBJDIR)
-endif
-NSPR_VERSIONFILE = $(NSPR_LOCAL)/Version
-NSPR_CURVERSION := $(shell cat $(NSPR_VERSIONFILE) 2>/dev/null)
-
-get_nspr:
-	@echo "Grabbing NSPR component..."
-ifeq ($(NSPR_VERSION), $(NSPR_CURVERSION))
-	@echo "No need, NSPR is up to date in this tree (ver=$(NSPR_VERSION))."
-else
-	mkdir -p $(NSPR_LOCAL)
-	mkdir -p $(NSPR_DIST)
-  ifneq ($(OS_ARCH), WINNT)
-	cp       $(NSPR_SHARED)/*.jar $(NSPR_LOCAL)
-  else
-	sh       $(MOZ_DEPTH)/../reltools/compftp.sh $(NSPR_SHARED) $(NSPR_LOCAL) *.jar
-  endif
-	unzip -o $(NSPR_LOCAL)/mdbinary.jar -d $(NSPR_DIST)
-	mkdir -p $(NSPR_DIST)/include
-	unzip -o $(NSPR_LOCAL)/mdheader.jar -d $(NSPR_DIST)/include
-	rm -rf   $(NSPR_DIST)/META-INF
-	rm -rf   $(NSPR_DIST)/include/META-INF
-	echo $(NSPR_VERSION) > $(NSPR_VERSIONFILE)
-endif
-
-SHIP_DIST  = $(MOZ_DEPTH)/dist/$(OBJDIR)
-SHIP_DIR   = $(SHIP_DIST)/SHIP
-
-SHIP_LIBS      = libjs.$(SO_SUFFIX) libjs.a
-ifeq ($(OS_ARCH), WINNT)
-  SHIP_LIBS    = js32.dll js32.lib
-endif
-SHIP_LIBS     += $(LCJAR)
-SHIP_LIBS     := $(addprefix $(SHIP_DIST)/lib/, $(SHIP_LIBS))
-
-SHIP_INCS      = js*.h prmjtime.h resource.h *.msg *.tbl
-SHIP_INCS     := $(addprefix $(SHIP_DIST)/include/, $(SHIP_INCS))
-
-SHIP_BINS      = js
-ifeq ($(OS_ARCH), WINNT)
-  SHIP_BINS   := $(addsuffix .exe, $(SHIP_BINS))
-endif
-SHIP_BINS     := $(addprefix $(SHIP_DIST)/bin/, $(SHIP_BINS))
-
-ifdef BUILD_OPT
-  JSREFJAR = jsref_opt.jar
-else
-ifdef BUILD_IDG
-  JSREFJAR = jsref_idg.jar
-else
-  JSREFJAR = jsref_dbg.jar
-endif
-endif
-
-ship:
-	mkdir -p $(SHIP_DIR)/$(LIBDIR)
-	mkdir -p $(SHIP_DIR)/include
-	mkdir -p $(SHIP_DIR)/bin
-	cp $(SHIP_LIBS) $(SHIP_DIR)/$(LIBDIR)
-	cp $(SHIP_INCS) $(SHIP_DIR)/include
-	cp $(SHIP_BINS) $(SHIP_DIR)/bin
-	cd $(SHIP_DIR); \
-	  zip -r $(JSREFJAR) bin lib include
-ifdef BUILD_SHIP
-	cp $(SHIP_DIR)/$(JSREFJAR) $(BUILD_SHIP)
-endif
-
-CWD = $(shell pwd)
-shipSource: $(SHIP_DIR)/jsref_src.lst .FORCE
-	mkdir -p $(SHIP_DIR)
-	cd $(MOZ_DEPTH)/.. ; \
-	  zip $(CWD)/$(SHIP_DIR)/jsref_src.jar -@ < $(CWD)/$(SHIP_DIR)/jsref_src.lst
-ifdef BUILD_SHIP
-	cp $(SHIP_DIR)/jsref_src.jar $(BUILD_SHIP)
-endif
-
-JSREFSRCDIRS := $(shell cat $(DEPTH)/SpiderMonkey.rsp)
-$(SHIP_DIR)/jsref_src.lst: .FORCE
-	mkdir -p $(SHIP_DIR)
-	rm -f $@
-	touch $@
-	for d in $(JSREFSRCDIRS); do                                \
-	  cd $(MOZ_DEPTH)/..;                                       \
-	  ls -1 -d $$d | grep -v CVS | grep -v \.OBJ >> $(CWD)/$@;  \
-	  cd $(CWD);                                                \
-	done
-
-.FORCE:
--- a/js/src/jsdate.cpp
+++ b/js/src/jsdate.cpp
@@ -2139,17 +2139,17 @@ date_toJSON(JSContext *cx, uintN argc, V
     }
 
     /* Step 6. */
     LeaveTrace(cx);
     InvokeArgsGuard args;
     if (!cx->stack().pushInvokeArgs(cx, 0, &args))
         return false;
 
-    args.callee() = toISO;
+    args.calleev() = toISO;
     args.thisv().setObject(*obj);
 
     if (!Invoke(cx, args, 0))
         return false;
     *vp = args.rval();
     return true;
 }
 
--- a/js/src/jsdbgapi.cpp
+++ b/js/src/jsdbgapi.cpp
@@ -1394,59 +1394,21 @@ JS_GetFramePC(JSContext *cx, JSStackFram
 }
 
 JS_PUBLIC_API(JSStackFrame *)
 JS_GetScriptedCaller(JSContext *cx, JSStackFrame *fp)
 {
     return js_GetScriptedCaller(cx, fp);
 }
 
-JSPrincipals *
-js_StackFramePrincipals(JSContext *cx, JSStackFrame *fp)
-{
-    JSSecurityCallbacks *callbacks;
-
-    if (fp->isFunctionFrame()) {
-        callbacks = JS_GetSecurityCallbacks(cx);
-        if (callbacks && callbacks->findObjectPrincipals) {
-            if (&fp->fun()->compiledFunObj() != &fp->callee())
-                return callbacks->findObjectPrincipals(cx, &fp->callee());
-            /* FALL THROUGH */
-        }
-    }
-    if (fp->isScriptFrame())
-        return fp->script()->principals;
-    return NULL;
-}
-
-JSPrincipals *
-js_EvalFramePrincipals(JSContext *cx, JSObject *callee, JSStackFrame *caller)
-{
-    JSPrincipals *principals, *callerPrincipals;
-    JSSecurityCallbacks *callbacks;
-
-    callbacks = JS_GetSecurityCallbacks(cx);
-    if (callbacks && callbacks->findObjectPrincipals)
-        principals = callbacks->findObjectPrincipals(cx, callee);
-    else
-        principals = NULL;
-    if (!caller)
-        return principals;
-    callerPrincipals = js_StackFramePrincipals(cx, caller);
-    return (callerPrincipals && principals &&
-            callerPrincipals->subsume(callerPrincipals, principals))
-           ? principals
-           : callerPrincipals;
-}
-
 JS_PUBLIC_API(void *)
 JS_GetFrameAnnotation(JSContext *cx, JSStackFrame *fp)
 {
     if (fp->annotation() && fp->isScriptFrame()) {
-        JSPrincipals *principals = js_StackFramePrincipals(cx, fp);
+        JSPrincipals *principals = fp->principals(cx);
 
         if (principals && principals->globalPrivilegesEnabled(cx, principals)) {
             /*
              * Give out an annotation only if privileges have not been revoked
              * or disabled globally.
              */
             return fp->annotation();
         }
@@ -1461,17 +1423,17 @@ JS_SetFrameAnnotation(JSContext *cx, JSS
     fp->setAnnotation(annotation);
 }
 
 JS_PUBLIC_API(void *)
 JS_GetFramePrincipalArray(JSContext *cx, JSStackFrame *fp)
 {
     JSPrincipals *principals;
 
-    principals = js_StackFramePrincipals(cx, fp);
+    principals = fp->principals(cx);
     if (!principals)
         return NULL;
     return principals->getPrincipalArray(cx, principals);
 }
 
 JS_PUBLIC_API(JSBool)
 JS_IsScriptFrame(JSContext *cx, JSStackFrame *fp)
 {
@@ -1525,17 +1487,17 @@ JS_GetFrameThis(JSContext *cx, JSStackFr
 {
     if (fp->isDummyFrame())
         return false;
 
     js::AutoCompartment ac(cx, &fp->scopeChain());
     if (!ac.enter())
         return false;
 
-    if (!fp->computeThis(cx))
+    if (!ComputeThis(cx, fp))
         return false;
     *thisv = Jsvalify(fp->thisValue());
     return true;
 }
 
 JS_PUBLIC_API(JSFunction *)
 JS_GetFrameFunction(JSContext *cx, JSStackFrame *fp)
 {
@@ -1663,26 +1625,26 @@ JS_EvaluateUCInStackFrame(JSContext *cx,
         return false;
 
     /*
      * NB: This function breaks the assumption that the compiler can see all
      * calls and properly compute a static level. In order to get around this,
      * we use a static level that will cause us not to attempt to optimize
      * variable references made by this frame.
      */
-    JSScript *script = Compiler::compileScript(cx, scobj, fp, js_StackFramePrincipals(cx, fp),
+    JSScript *script = Compiler::compileScript(cx, scobj, fp, fp->principals(cx),
                                                TCF_COMPILE_N_GO, chars, length,
                                                filename, lineno, cx->findVersion(),
                                                NULL, UpvarCookie::UPVAR_LEVEL_LIMIT);
 
     if (!script)
         return false;
 
     script->isUncachedEval = true;
-    bool ok = Execute(cx, scobj, script, fp, JSFRAME_DEBUGGER | JSFRAME_EVAL, Valueify(rval));
+    bool ok = Execute(cx, *scobj, script, fp, JSFRAME_DEBUGGER | JSFRAME_EVAL, Valueify(rval));
 
     js_DestroyScript(cx, script);
     return ok;
 }
 
 JS_PUBLIC_API(JSBool)
 JS_EvaluateInStackFrame(JSContext *cx, JSStackFrame *fp,
                         const char *bytes, uintN length,
--- a/js/src/jsdbgapi.h
+++ b/js/src/jsdbgapi.h
@@ -266,26 +266,16 @@ extern JS_PUBLIC_API(jsbytecode *)
 JS_GetFramePC(JSContext *cx, JSStackFrame *fp);
 
 /*
  * Get the closest scripted frame below fp.  If fp is null, start from cx->fp.
  */
 extern JS_PUBLIC_API(JSStackFrame *)
 JS_GetScriptedCaller(JSContext *cx, JSStackFrame *fp);
 
-/*
- * Return a weak reference to fp's principals.  A null return does not denote
- * an error, it means there are no principals.
- */
-extern JSPrincipals *
-js_StackFramePrincipals(JSContext *cx, JSStackFrame *fp);
-
-JSPrincipals *
-js_EvalFramePrincipals(JSContext *cx, JSObject *callee, JSStackFrame *caller);
-
 extern JS_PUBLIC_API(void *)
 JS_GetFrameAnnotation(JSContext *cx, JSStackFrame *fp);
 
 extern JS_PUBLIC_API(void)
 JS_SetFrameAnnotation(JSContext *cx, JSStackFrame *fp, void *annotation);
 
 extern JS_PUBLIC_API(void *)
 JS_GetFramePrincipalArray(JSContext *cx, JSStackFrame *fp);
--- a/js/src/jsexn.cpp
+++ b/js/src/jsexn.cpp
@@ -292,17 +292,17 @@ InitExnPrivate(JSContext *cx, JSObject *
                   : NULL;
     older = JS_SetErrorReporter(cx, NULL);
     state = JS_SaveExceptionState(cx);
 
     callerid = ATOM_TO_JSID(cx->runtime->atomState.callerAtom);
     stackDepth = 0;
     valueCount = 0;
     for (fp = js_GetTopStackFrame(cx, FRAME_EXPAND_NONE); fp; fp = fp->prev()) {
-        if (fp->scopeChain().compartment() != cx->compartment)
+        if (fp->compartment() != cx->compartment)
             break;
         if (fp->isNonEvalFunctionFrame()) {
             Value v = NullValue();
             if (checkAccess &&
                 !checkAccess(cx, &fp->callee(), callerid, JSACC_READ, &v)) {
                 break;
             }
             valueCount += fp->numActualArgs();
@@ -335,17 +335,17 @@ InitExnPrivate(JSContext *cx, JSObject *
     priv->message = message;
     priv->filename = filename;
     priv->lineno = lineno;
     priv->stackDepth = stackDepth;
 
     values = GetStackTraceValueBuffer(priv);
     elem = priv->stackElems;
     for (fp = js_GetTopStackFrame(cx, FRAME_EXPAND_NONE); fp != fpstop; fp = fp->prev()) {
-        if (fp->scopeChain().compartment() != cx->compartment)
+        if (fp->compartment() != cx->compartment)
             break;
         if (!fp->isFunctionFrame() || fp->isEvalFrame()) {
             elem->funName = NULL;
             elem->argc = 0;
         } else {
             elem->funName = fp->fun()->atom
                             ? fp->fun()->atom
                             : cx->runtime->emptyString;
--- a/js/src/jsfun.cpp
+++ b/js/src/jsfun.cpp
@@ -1057,17 +1057,17 @@ CreateFunCallObject(JSContext *cx, JSSta
     return callobj;
 }
 
 JSObject *
 CreateEvalCallObject(JSContext *cx, JSStackFrame *fp)
 {
     JSObject *callobj = NewCallObject(cx, fp->script(), fp->scopeChain(), NULL);
     if (!callobj)
-        return false;
+        return NULL;
 
     callobj->setPrivate(fp);
     fp->setScopeChainWithOwnCallObj(*callobj);
     return callobj;
 }
 
 } // namespace js
 
@@ -1467,17 +1467,17 @@ JSStackFrame::getValidCalleeObject(JSCon
      * Check for an escape attempt by a joined function object, which must go
      * through the frame's |this| object's method read barrier for the method
      * atom by which it was uniquely associated with a property.
      */
     const Value &thisv = functionThis();
     if (thisv.isObject()) {
         JS_ASSERT(funobj.getFunctionPrivate() == fun);
 
-        if (&fun->compiledFunObj() == &funobj && fun->methodAtom()) {
+        if (fun->compiledFunObj() == funobj && fun->methodAtom()) {
             JSObject *thisp = &thisv.toObject();
             JSObject *first_barriered_thisp = NULL;
 
             do {
                 /*
                  * While a non-native object is responsible for handling its
                  * entire prototype chain, notable non-natives including dense
                  * and typed arrays have native prototypes, so keep going.
@@ -1493,33 +1493,33 @@ JSStackFrame::getValidCalleeObject(JSCon
                          * yet, so we cross it here; the method barrier *was*
                          * crossed but after the call, in which case we fetch
                          * and validate the cloned (unjoined) funobj from the
                          * method property's slot.
                          *
                          * In either case we must allow for the method property
                          * to have been replaced, or its value overwritten.
                          */
-                        if (shape->isMethod() && &shape->methodObject() == &funobj) {
+                        if (shape->isMethod() && shape->methodObject() == funobj) {
                             if (!thisp->methodReadBarrier(cx, *shape, vp))
                                 return false;
-                            calleeValue().setObject(vp->toObject());
+                            calleev().setObject(vp->toObject());
                             return true;
                         }
 
                         if (shape->hasSlot()) {
                             Value v = thisp->getSlot(shape->slot);
                             JSObject *clone;
 
                             if (IsFunctionObject(v, &clone) &&
                                 GET_FUNCTION_PRIVATE(cx, clone) == fun &&
                                 clone->hasMethodObj(*thisp)) {
                                 JS_ASSERT_IF(!clone->getType()->singleton, clone != &funobj);
                                 *vp = v;
-                                calleeValue().setObject(*clone);
+                                calleev().setObject(*clone);
                                 return true;
                             }
                         }
                     }
 
                     if (!first_barriered_thisp)
                         first_barriered_thisp = thisp;
                 }
@@ -1538,17 +1538,17 @@ JSStackFrame::getValidCalleeObject(JSCon
              * stack frame on the stack when the method was deleted. We've lost
              * track of the method, so we associate it with the first barriered
              * object found starting from thisp on the prototype chain.
              */
             JSObject *newfunobj = CloneFunctionObject(cx, fun, fun->getParent());
             if (!newfunobj)
                 return false;
             newfunobj->setMethodObj(*first_barriered_thisp);
-            calleeValue().setObject(*newfunobj);
+            calleev().setObject(*newfunobj);
             vp->setObject(*newfunobj);
             return true;
         }
     }
 
     return true;
 }
 
@@ -2196,17 +2196,17 @@ js_fun_call(JSContext *cx, uintN argc, V
     }
 
     /* Allocate stack space for fval, obj, and the args. */
     InvokeArgsGuard args;
     if (!cx->stack().pushInvokeArgs(cx, argc, &args))
         return JS_FALSE;
 
     /* Push fval, thisv, and the args. */
-    args.callee() = fval;
+    args.calleev() = fval;
     args.thisv() = thisv;
     memcpy(args.argv(), argv, argc * sizeof *argv);
 
     bool ok = Invoke(cx, args, 0);
     *vp = args.rval();
     return ok;
 }
 
@@ -2247,17 +2247,17 @@ js_fun_apply(JSContext *cx, uintN argc, 
     /* Step 6. */
     uintN n = uintN(JS_MIN(length, JS_ARGS_LENGTH_MAX));
 
     InvokeArgsGuard args;
     if (!cx->stack().pushInvokeArgs(cx, n, &args))
         return false;
 
     /* Push fval, obj, and aobj's elements as args. */
-    args.callee() = fval;
+    args.calleev() = fval;
     args.thisv() = vp[2];
 
     /* Steps 7-8. */
     if (!GetElements(cx, aobj, n, args.argv()))
         return false;
 
     /* Step 9. */
     if (!Invoke(cx, args, 0))
@@ -2371,17 +2371,17 @@ CallOrConstructBoundFunction(JSContext *
         return false;
 
     /* 15.3.4.5.1, 15.3.4.5.2 step 4. */
     for (uintN i = 0; i < argslen; i++)
         args[i] = obj->getBoundFunctionArgument(i);
     memcpy(args.argv() + argslen, vp + 2, argc * sizeof(Value));
 
     /* 15.3.4.5.1, 15.3.4.5.2 step 5. */
-    args.callee().setObject(*target);
+    args.calleev().setObject(*target);
 
     if (!constructing)
         args.thisv() = boundThis;
 
     if (constructing ? !InvokeConstructor(cx, args) : !Invoke(cx, args, 0))
         return false;
 
     *vp = args.rval();
@@ -2508,22 +2508,23 @@ OnBadFormal(JSContext *cx, TokenKind tt)
     else
         JS_ASSERT(cx->isExceptionPending());
     return false;
 }
 
 static JSBool
 Function(JSContext *cx, uintN argc, Value *vp)
 {
+    CallArgs call = CallArgsFromVp(argc, vp);
+
     JS::Anchor<JSObject *> obj(NewFunction(cx, NULL));
     if (!obj.get())
         return false;
 
-    JSObject &callee = JS_CALLEE(cx, vp).toObject();
-    JSObject &calleeParent = *callee.getParent();
+    JSObject &calleeParent = *call.callee().getParent();
 
     /*
      * NB: (new Function) is not lexically closed by its caller, it's just an
      * anonymous function in the top-level scope that its constructor inhabits.
      * Thus 'var x = 42; f = new Function("return x"); print(f())' prints 42,
      * and so would a call to f from another top-level's script or function.
      *
      * In older versions, before call objects, a new Function was adopted by
@@ -2531,55 +2532,32 @@ Function(JSContext *cx, uintN argc, Valu
      * top-level reachable from scopeChain (in HTML frames, e.g.).
      */
     JSFunction *fun = js_NewFunction(cx, obj.get(), NULL, 0, JSFUN_LAMBDA | JSFUN_INTERPRETED,
                                      &calleeParent, cx->runtime->atomState.anonymousAtom, NULL, NULL);
     if (!fun)
         return false;
 
     /*
-     * Function is static and not called directly by other functions in this
-     * file, therefore it is callable only as a native function by js_Invoke.
-     * Find the scripted caller, possibly skipping other native frames such as
-     * are built for Function.prototype.call or .apply activations that invoke
-     * Function indirectly from a script.
-     */
-    JSStackFrame *caller = js_GetScriptedCaller(cx, NULL);
-    uintN lineno;
-    const char *filename;
-    JSPrincipals *principals;
-    if (caller) {
-        principals = js_EvalFramePrincipals(cx, &callee, caller);
-        filename = js_ComputeFilename(cx, caller, principals, &lineno);
-    } else {
-        filename = NULL;
-        lineno = 0;
-        principals = NULL;
-    }
-
-    /* Belt-and-braces: check that the caller has access to parent. */
-    if (!js_CheckPrincipalsAccess(cx, &calleeParent, principals,
-                                  CLASS_ATOM(cx, Function))) {
-        return false;
-    }
-
-    /*
      * CSP check: whether new Function() is allowed at all.
      * Report errors via CSP is done in the script security manager.
      * js_CheckContentSecurityPolicy is defined in jsobj.cpp
      */
     if (!js_CheckContentSecurityPolicy(cx, &calleeParent)) {
         JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CSP_BLOCKED_FUNCTION);
         return false;
     }
 
     Bindings bindings(cx);
     AutoBindingsRooter root(cx, bindings);
 
-    Value *argv = JS_ARGV(cx, vp);
+    uintN lineno;
+    const char *filename = CurrentScriptFileAndLine(cx, &lineno);
+
+    Value *argv = call.argv();
     uintN n = argc ? argc - 1 : 0;
     if (n > 0) {
         /*
          * Collect the function-argument arguments into one string, separated
          * by commas, then make a tokenstream from that string, and scan it to
          * get the arguments.  We need to throw the full scanner at the
          * problem, because the argument string can legitimately contain
          * comments and linefeeds.  XXX It might be better to concatenate
@@ -2709,20 +2687,22 @@ Function(JSContext *cx, uintN argc, Valu
         strAnchor.set(str);
         chars = str->getChars(cx);
         length = str->length();
     } else {
         chars = cx->runtime->emptyString->chars();
         length = 0;
     }
 
-    JS_SET_RVAL(cx, vp, ObjectValue(*obj.get()));
-    return Compiler::compileFunctionBody(cx, fun, principals, &bindings,
-                                         chars, length, filename, lineno,
-                                         cx->findVersion());
+    JSPrincipals *principals = PrincipalsForCompiledCode(call, cx);
+    bool ok = Compiler::compileFunctionBody(cx, fun, principals, &bindings,
+                                            chars, length, filename, lineno,
+                                            cx->findVersion());
+    call.rval().setObject(obj);
+    return ok;
 }
 
 namespace js {
 
 bool
 IsBuiltinFunctionConstructor(JSFunction *fun)
 {
     return fun->maybeNative() == Function;
@@ -2737,17 +2717,17 @@ LookupInterpretedFunctionPrototype(JSCon
     JS_ASSERT(!fun->isFunctionPrototype());
     JS_ASSERT(!funobj->isBoundFunction());
 #endif
 
     jsid id = ATOM_TO_JSID(cx->runtime->atomState.classPrototypeAtom);
     const Shape *shape = funobj->nativeLookup(id);
     if (!shape) {
         if (!ResolveInterpretedFunctionPrototype(cx, funobj))
-            return false;
+            return NULL;
         shape = funobj->nativeLookup(id);
     }
     JS_ASSERT(!shape->configurable());
     JS_ASSERT(shape->isDataDescriptor());
     JS_ASSERT(shape->hasSlot());
     JS_ASSERT(!shape->isMethod());
     return shape;
 }
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -83,16 +83,17 @@
 #include "jsxml.h"
 #endif
 
 #include "jsprobes.h"
 #include "jscntxtinlines.h"
 #include "jsinterpinlines.h"
 #include "jsobjinlines.h"
 #include "jshashtable.h"
+#include "jsweakmap.h"
 
 #include "jsstrinlines.h"
 #include "jscompartment.h"
 
 #ifdef MOZ_VALGRIND
 # define JS_VALGRIND
 #endif
 #ifdef JS_VALGRIND
@@ -227,26 +228,118 @@ Arena<T>::mark(T *thing, JSTracer *trc)
 
 #ifdef JS_DUMP_CONSERVATIVE_GC_ROOTS
     if (alignedThing != thing)
         return CGCT_VALIDWITHOFFSET;
 #endif
     return CGCT_VALID;
 }
 
+template<typename T>
+inline bool
+Arena<T>::finalize(JSContext *cx)
+{
+    JS_ASSERT_IF(header()->hasFreeThings, header()->freeList);
+    JS_ASSERT(!getMarkingDelay()->link);
+    JS_ASSERT(getMarkingDelay()->unmarkedChildren == 0);
+    JS_ASSERT(header()->isUsed);
+
+    FreeCell *nextFree = header()->freeList;
+    FreeCell *freeList = NULL;
+    FreeCell **tailp = &freeList;
+    bool allClear = true;
+
+    T *thingsEnd = &t.things[ThingsPerArena-1];
+    T *thing = &t.things[0];
+    thingsEnd++;
+
+    if (!nextFree) {
+        nextFree = thingsEnd->asFreeCell();
+    } else {
+        JS_ASSERT(thing->asFreeCell() <= nextFree);
+        JS_ASSERT(nextFree < thingsEnd->asFreeCell());
+    }
+
+    for (;; thing++) {
+        if (thing->asFreeCell() == nextFree) {
+            if (thing == thingsEnd)
+                break;
+            nextFree = nextFree->link;
+            if (!nextFree) {
+                nextFree = thingsEnd->asFreeCell();
+            } else {
+                JS_ASSERT(thing->asFreeCell() < nextFree);
+                JS_ASSERT(nextFree < thingsEnd->asFreeCell());
+            }
+        } else if (thing->asFreeCell()->isMarked()) {
+            allClear = false;
+            continue;
+        } else {
+            thing->finalize(cx);
+#ifdef DEBUG
+            memset(thing, JS_FREE_PATTERN, sizeof(T));
+#endif
+        }
+        FreeCell *t = thing->asFreeCell();
+        *tailp = t;
+        tailp = &t->link;
+    }
+
+#ifdef DEBUG
+    /* Check that the free list is consistent. */
+    unsigned nfree = 0;
+    if (freeList) {
+        JS_ASSERT(tailp != &freeList);
+        FreeCell *t = freeList;
+        for (;;) {
+            ++nfree;
+            if (&t->link == tailp)
+                break;
+            JS_ASSERT(t < t->link);
+            t = t->link;
+        }
+    }
+#endif
+
+    if (allClear) {
+        JS_ASSERT(nfree == ThingsPerArena);
+        JS_ASSERT((T *)tailp == &t.things[ThingsPerArena-1]);
+        *tailp = NULL;
+        header()->freeList = freeList;
+#ifdef DEBUG
+        header()->hasFreeThings = true;
+#endif
+        JS_ASSERT((T *)header()->freeList == &t.things[0]);
+    } else {
+        JS_ASSERT(nfree < ThingsPerArena);
+        *tailp = NULL;
+        header()->freeList = freeList;
+#ifdef DEBUG
+        header()->hasFreeThings = (nfree == 0) ? false : true;
+#endif
+    }
+    return allClear;
+}
+
 #ifdef DEBUG
 bool
 checkArenaListsForThing(JSCompartment *comp, void *thing)
 {
     if (comp->arenas[FINALIZE_OBJECT0].arenasContainThing<JSObject>(thing) ||
+        comp->arenas[FINALIZE_OBJECT0_BACKGROUND].arenasContainThing<JSObject>(thing) ||
         comp->arenas[FINALIZE_OBJECT2].arenasContainThing<JSObject_Slots2>(thing) ||
+        comp->arenas[FINALIZE_OBJECT2_BACKGROUND].arenasContainThing<JSObject_Slots2>(thing) ||
         comp->arenas[FINALIZE_OBJECT4].arenasContainThing<JSObject_Slots4>(thing) ||
+        comp->arenas[FINALIZE_OBJECT4_BACKGROUND].arenasContainThing<JSObject_Slots4>(thing) ||
         comp->arenas[FINALIZE_OBJECT8].arenasContainThing<JSObject_Slots8>(thing) ||
+        comp->arenas[FINALIZE_OBJECT8_BACKGROUND].arenasContainThing<JSObject_Slots8>(thing) ||
         comp->arenas[FINALIZE_OBJECT12].arenasContainThing<JSObject_Slots12>(thing) ||
+        comp->arenas[FINALIZE_OBJECT12_BACKGROUND].arenasContainThing<JSObject_Slots12>(thing) ||
         comp->arenas[FINALIZE_OBJECT16].arenasContainThing<JSObject_Slots16>(thing) ||
+        comp->arenas[FINALIZE_OBJECT16_BACKGROUND].arenasContainThing<JSObject_Slots16>(thing) ||
         comp->arenas[FINALIZE_FUNCTION].arenasContainThing<JSFunction>(thing) ||
         comp->arenas[FINALIZE_SHAPE].arenasContainThing<Shape>(thing) ||
 #if JS_HAS_XML_SUPPORT
         comp->arenas[FINALIZE_XML].arenasContainThing<JSXML>(thing) ||
 #endif
         comp->arenas[FINALIZE_STRING].arenasContainThing<JSString>(thing) ||
         comp->arenas[FINALIZE_EXTERNAL_STRING].arenasContainThing<JSExternalString>(thing) ||
         comp->arenas[FINALIZE_SHORT_STRING].arenasContainThing<JSShortString>(thing))
@@ -279,39 +372,45 @@ JSCompartment::finishArenaLists()
 }
 
 void
 Chunk::clearMarkBitmap()
 {
     PodZero(&bitmaps[0], ArenasPerChunk);
 }
 
-void
+bool
 Chunk::init(JSRuntime *rt)
 {
     info.runtime = rt;
     info.age = 0;
     info.emptyArenaLists.init();
     info.emptyArenaLists.cellFreeList = &arenas[0];
+#ifdef JS_THREADSAFE
+    info.chunkLock = JS_NEW_LOCK();
+    if (!info.chunkLock)
+        return false;
+#endif
     Arena<FreeCell> *arena = &arenas[0];
     Arena<FreeCell> *last = &arenas[JS_ARRAY_LENGTH(arenas) - 1];
     while (arena < last) {
         arena->header()->next = arena + 1;
         arena->header()->compartment = NULL;
 #ifdef DEBUG
         arena->header()->isUsed = false;
 #endif
         ++arena;
     }
     last->header()->next = NULL;
     last->header()->compartment = NULL;
 #ifdef DEBUG
     last->header()->isUsed = false;
 #endif
     info.numFree = ArenasPerChunk;
+    return true;
 }
 
 bool
 Chunk::unused()
 {
     return info.numFree == ArenasPerChunk;
 }
 
@@ -327,47 +426,61 @@ Chunk::withinArenasRange(Cell *cell)
     uintptr_t addr = uintptr_t(cell);
     if (addr >= uintptr_t(&arenas[0]) && addr < uintptr_t(&arenas[ArenasPerChunk]))
         return true;
     return false;
 }
 
 template <typename T>
 Arena<T> *
-Chunk::allocateArena(JSCompartment *comp, unsigned thingKind)
+Chunk::allocateArena(JSContext *cx, unsigned thingKind)
 {
+#ifdef JS_THREADSAFE
+    Conditionally<AutoLock> lockIf(cx->runtime->gcHelperThread.sweeping, info.chunkLock);
+#endif
+    JSCompartment *comp = cx->compartment;
     JS_ASSERT(hasAvailableArenas());
     Arena<T> *arena = info.emptyArenaLists.getNext<T>(comp, thingKind);
     JS_ASSERT(arena);
     JS_ASSERT(arena->header()->isUsed);
     --info.numFree;
-
     JSRuntime *rt = info.runtime;
-    rt->gcBytes += sizeof(Arena<T>);
-    comp->gcBytes += sizeof(Arena<T>);
+
+    JS_ATOMIC_ADD(&rt->gcBytes, sizeof(Arena<T>));
+    JS_ATOMIC_ADD(&comp->gcBytes, sizeof(Arena<T>));
+    METER(JS_ATOMIC_INCREMENT(&rt->gcStats.nallarenas));
     if (comp->gcBytes >= comp->gcTriggerBytes)
         TriggerCompartmentGC(comp);
-    METER(rt->gcStats.nallarenas++);
+
     return arena;
 }
 
 template <typename T>
 void
 Chunk::releaseArena(Arena<T> *arena)
 {
     JSRuntime *rt = info.runtime;
+#ifdef JS_THREADSAFE
+    Conditionally<AutoLock> lockIf(rt->gcHelperThread.sweeping, info.chunkLock);
+#endif
     JSCompartment *comp = arena->header()->compartment;
     METER(rt->gcStats.afree++);
     JS_ASSERT(rt->gcStats.nallarenas != 0);
-    METER(rt->gcStats.nallarenas--);
-    JS_ASSERT(rt->gcBytes >= sizeof(Arena<T>));
-    JS_ASSERT(comp->gcBytes >= sizeof(Arena<T>));
-
-    rt->gcBytes -= sizeof(Arena<T>);
-    comp->gcBytes -= sizeof(Arena<T>);
+    METER(JS_ATOMIC_DECREMENT(&rt->gcStats.nallarenas));
+
+    JS_ASSERT(size_t(rt->gcBytes) >= sizeof(Arena<T>));
+    JS_ASSERT(size_t(comp->gcBytes) >= sizeof(Arena<T>));
+#ifdef JS_THREADSAFE
+    if (rt->gcHelperThread.sweeping) {
+        rt->reduceGCTriggerBytes(GC_HEAP_GROWTH_FACTOR * sizeof(Arena<T>));
+        comp->reduceGCTriggerBytes(GC_HEAP_GROWTH_FACTOR * sizeof(Arena<T>));
+    }
+#endif
+    JS_ATOMIC_ADD(&rt->gcBytes, -sizeof(Arena<T>));
+    JS_ATOMIC_ADD(&comp->gcBytes, -sizeof(Arena<T>));
     info.emptyArenaLists.insert((Arena<Cell> *)arena);
 #ifdef DEBUG
     arena->header()->isUsed = false;
 #endif
     arena->header()->compartment = NULL;
     ++info.numFree;
     if (unused())
         info.age = 0;
@@ -395,16 +508,19 @@ GetGCChunk(JSRuntime *rt)
 inline void
 ReleaseGCChunk(JSRuntime *rt, jsuword chunk)
 {
     void *p = reinterpret_cast<void *>(chunk);
     JS_ASSERT(p);
 #ifdef MOZ_GCTIMER
     JS_ATOMIC_INCREMENT(&destroyChunkCount);
 #endif
+#ifdef JS_THREADSAFE
+    JS_DESTROY_LOCK(((Chunk *)chunk)->info.chunkLock);
+#endif
     JS_ASSERT(rt->gcStats.nchunks != 0);
     METER(rt->gcStats.nchunks--);
     rt->gcChunkAllocator->free_(p);
 }
 
 inline Chunk *
 AllocateGCChunk(JSRuntime *rt)
 {
@@ -419,16 +535,19 @@ AllocateGCChunk(JSRuntime *rt)
 
 inline void
 ReleaseGCChunk(JSRuntime *rt, Chunk *p)
 {
     JS_ASSERT(p);
 #ifdef MOZ_GCTIMER
     JS_ATOMIC_INCREMENT(&destroyChunkCount);
 #endif
+#ifdef JS_THREADSAFE
+    JS_DESTROY_LOCK(p->info.chunkLock);
+#endif
     JS_ASSERT(rt->gcStats.nchunks != 0);
     METER(rt->gcStats.nchunks--);
     rt->gcChunkAllocator->free_(p);
 }
 
 static Chunk *
 PickChunk(JSRuntime *rt)
 {
@@ -448,17 +567,20 @@ PickChunk(JSRuntime *rt)
      */
     GCChunkSet::AddPtr p = rt->gcChunkSet.lookupForAdd(chunk);
     JS_ASSERT(!p);
     if (!rt->gcChunkSet.add(p, chunk)) {
         ReleaseGCChunk(rt, chunk);
         return NULL;
     }
 
-    chunk->init(rt);
+    if (!chunk->init(rt)) {
+        ReleaseGCChunk(rt, chunk);
+        return NULL;
+    }
 
     return chunk;
 }
 
 static void
 ExpireGCChunks(JSRuntime *rt)
 {
     static const size_t MaxAge = 3;
@@ -491,17 +613,17 @@ AllocateArena(JSContext *cx, unsigned th
     if (!chunk || !chunk->hasAvailableArenas()) {
         chunk = PickChunk(rt);
         if (!chunk) {
             TriggerGC(rt);
             return NULL;
         }
         cx->compartment->chunk = chunk;
     }
-    return chunk->allocateArena<T>(cx->compartment, thingKind);
+    return chunk->allocateArena<T>(cx, thingKind);
 }
 
 JS_FRIEND_API(bool)
 IsAboutToBeFinalized(JSContext *cx, const void *thing)
 {
     if (JSAtom::isStatic(thing))
         return false;
     JS_ASSERT(cx);
@@ -647,31 +769,37 @@ MarkIfGCThingWord(JSTracer *trc, jsuword
     if (!aheader->compartment)
         return CGCT_NOTLIVE;
 
     ConservativeGCTest test;
     thingKind = aheader->thingKind;
 
     switch (thingKind) {
       case FINALIZE_OBJECT0:
+      case FINALIZE_OBJECT0_BACKGROUND:
         test = MarkCell<JSObject>(cell, trc);
         break;
       case FINALIZE_OBJECT2:
+      case FINALIZE_OBJECT2_BACKGROUND:
         test = MarkCell<JSObject_Slots2>(cell, trc);
         break;
       case FINALIZE_OBJECT4:
+      case FINALIZE_OBJECT4_BACKGROUND:
         test = MarkCell<JSObject_Slots4>(cell, trc);
         break;
       case FINALIZE_OBJECT8:
+      case FINALIZE_OBJECT8_BACKGROUND:
         test = MarkCell<JSObject_Slots8>(cell, trc);
         break;
       case FINALIZE_OBJECT12:
+      case FINALIZE_OBJECT12_BACKGROUND:
         test = MarkCell<JSObject_Slots12>(cell, trc);
         break;
       case FINALIZE_OBJECT16:
+      case FINALIZE_OBJECT16_BACKGROUND:
         test = MarkCell<JSObject_Slots16>(cell, trc);
         break;
       case FINALIZE_STRING:
         test = MarkCell<JSString>(cell, trc);
         break;
       case FINALIZE_EXTERNAL_STRING:
         test = MarkCell<JSExternalString>(cell, trc);
         break;
@@ -869,16 +997,18 @@ js_FinishGC(JSRuntime *rt)
     for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); ++c) {
         JSCompartment *comp = *c;
         comp->finishArenaLists();
         Foreground::delete_(comp);
     }
     rt->compartments.clear();
     rt->atomsCompartment = NULL;
 
+    rt->gcWeakMapList = NULL;
+
     for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront())
         ReleaseGCChunk(rt, r.front());
     rt->gcChunkSet.clear();
 
 #ifdef JS_THREADSAFE
     rt->gcHelperThread.finish(rt);
 #endif
 
@@ -1048,29 +1178,47 @@ JSRuntime::setGCLastBytes(size_t lastByt
     float trigger1 = float(lastBytes) * float(gcTriggerFactor) / 100.0f;
     float trigger2 = float(Max(lastBytes, GC_ARENA_ALLOCATION_TRIGGER)) *
                      GC_HEAP_GROWTH_FACTOR;
     float maxtrigger = Max(trigger1, trigger2);
     gcTriggerBytes = (float(gcMaxBytes) < maxtrigger) ? gcMaxBytes : size_t(maxtrigger);
 }
 
 void
+JSRuntime::reduceGCTriggerBytes(uint32 amount) {
+    JS_ASSERT(amount > 0);
+    JS_ASSERT((gcTriggerBytes - amount) > 0);
+    if (gcTriggerBytes - amount < GC_ARENA_ALLOCATION_TRIGGER * GC_HEAP_GROWTH_FACTOR)
+        return;
+    gcTriggerBytes -= amount;
+}
+
+void
 JSCompartment::setGCLastBytes(size_t lastBytes)
 {
     gcLastBytes = lastBytes;
 
     /* FIXME bug 603916 - we should unify the triggers here. */
     float trigger1 = float(lastBytes) * float(rt->gcTriggerFactor) / 100.0f;
     float trigger2 = float(Max(lastBytes, GC_ARENA_ALLOCATION_TRIGGER)) *
                      GC_HEAP_GROWTH_FACTOR;
     float maxtrigger = Max(trigger1, trigger2);
     gcTriggerBytes = (float(rt->gcMaxBytes) < maxtrigger) ? rt->gcMaxBytes : size_t(maxtrigger);
 }
 
 void
+JSCompartment::reduceGCTriggerBytes(uint32 amount) {
+    JS_ASSERT(amount > 0);
+    JS_ASSERT((gcTriggerBytes - amount) > 0);
+    if (gcTriggerBytes - amount < GC_ARENA_ALLOCATION_TRIGGER * GC_HEAP_GROWTH_FACTOR)
+        return;
+    gcTriggerBytes -= amount;
+}
+
+void
 FreeLists::purge()
 {
     /*
      * Return the free list back to the arena so the GC finalization will not
      * run the finalizers over unitialized bytes from free things.
      */
     for (FreeCell ***p = finalizables; p != JS_ARRAY_END(finalizables); ++p)
         *p = NULL;
@@ -1118,89 +1266,118 @@ RunLastDitchGC(JSContext *cx)
     Conditionally<AutoUnlockAtomsCompartment>
         unlockAtomsCompartmenIf(cx->compartment == rt->atomsCompartment &&
                                   rt->atomsCompartmentIsLocked, cx);
 #endif
     /* The last ditch GC preserves all atoms. */
     AutoKeepAtoms keep(rt);
     js_GC(cx, rt->gcTriggerCompartment, GC_NORMAL);
 
+#ifdef JS_THREADSAFE
+    if (rt->gcBytes >= rt->gcMaxBytes)
+        cx->runtime->gcHelperThread.waitBackgroundSweepEnd(cx->runtime);
+#endif
+
     return rt->gcBytes < rt->gcMaxBytes;
 }
 
 template <typename T>
 inline bool
 RefillTypedFreeList(JSContext *cx, unsigned thingKind)
 {
     JSCompartment *compartment = cx->compartment;
     JS_ASSERT_IF(compartment->freeLists.finalizables[thingKind],
                  !*compartment->freeLists.finalizables[thingKind]);
 
     JS_ASSERT(!cx->runtime->gcRunning);
     if (cx->runtime->gcRunning)
         return false;
 
     bool canGC = !JS_ON_TRACE(cx) && !JS_THREAD_DATA(cx)->waiveGCQuota;
+#ifdef JS_THREADSAFE
+    bool waited = false;
+#endif
+
     do {
         if (canGC && JS_UNLIKELY(NeedLastDitchGC(cx))) {
             if (!RunLastDitchGC(cx))
                 break;
 
             /*
              * The JSGC_END callback can legitimately allocate new GC
              * things and populate the free list. If that happens, just
              * return that list head.
              */
             if (compartment->freeLists.finalizables[thingKind])
                 return true;
             canGC = false;
         }
 
         ArenaList *arenaList = GetFinalizableArenaList(compartment, thingKind);
-        Arena<T> *a = reinterpret_cast<Arena<T> *>(arenaList->getNextWithFreeList());
-        if (a) {
-            JS_ASSERT(a->header()->freeList);
-            JS_ASSERT(sizeof(T) == a->header()->thingSize);
-            compartment->freeLists.populate(a, thingKind);
-            return true;
+#ifdef JS_THREADSAFE
+try_again:
+#endif
+        Arena<T> *a = NULL;
+        if (!arenaList->hasToBeFinalized) {
+            a = reinterpret_cast<Arena<T> *>(arenaList->getNextWithFreeList(cx));
+            if (a) {
+                JS_ASSERT(a->header()->freeList);
+                JS_ASSERT(sizeof(T) == a->header()->thingSize);
+                compartment->freeLists.populate(a, thingKind);
+                return true;
+            }
         }
 
         /*
          * If the allocation fails rt->gcIsNeeded will be set and we will run
          * the GC on the next loop iteration if the last ditch GC is allowed.
          */
         a = AllocateArena<T>(cx, thingKind);
         if (a) {
             compartment->freeLists.populate(a, thingKind);
             arenaList->insert((Arena<FreeCell> *) a);
             a->getMarkingDelay()->init();
             return true;
         }
+#ifdef JS_THREADSAFE
+        if (!waited) {
+            /* The background thread can still free arenas during the finalization phase. */
+            cx->runtime->gcHelperThread.waitBackgroundSweepEnd(cx->runtime);
+            waited = true;
+            goto try_again;
+        }
+#endif
     } while (canGC);
 
     METER(cx->runtime->gcStats.fail++);
     js_ReportOutOfMemory(cx);
     return false;
 }
 
 bool
 RefillFinalizableFreeList(JSContext *cx, unsigned thingKind)
 {
     switch (thingKind) {
       case FINALIZE_OBJECT0:
+      case FINALIZE_OBJECT0_BACKGROUND:
         return RefillTypedFreeList<JSObject>(cx, thingKind);
       case FINALIZE_OBJECT2:
+      case FINALIZE_OBJECT2_BACKGROUND:
         return RefillTypedFreeList<JSObject_Slots2>(cx, thingKind);
       case FINALIZE_OBJECT4:
+      case FINALIZE_OBJECT4_BACKGROUND:
         return RefillTypedFreeList<JSObject_Slots4>(cx, thingKind);
       case FINALIZE_OBJECT8:
+      case FINALIZE_OBJECT8_BACKGROUND:
         return RefillTypedFreeList<JSObject_Slots8>(cx, thingKind);
       case FINALIZE_OBJECT12:
+      case FINALIZE_OBJECT12_BACKGROUND:
         return RefillTypedFreeList<JSObject_Slots12>(cx, thingKind);
       case FINALIZE_OBJECT16:
+      case FINALIZE_OBJECT16_BACKGROUND:
         return RefillTypedFreeList<JSObject_Slots16>(cx, thingKind);
       case FINALIZE_STRING:
         return RefillTypedFreeList<JSString>(cx, thingKind);
       case FINALIZE_EXTERNAL_STRING:
         return RefillTypedFreeList<JSExternalString>(cx, thingKind);
       case FINALIZE_SHORT_STRING:
         return RefillTypedFreeList<JSShortString>(cx, thingKind);
       case FINALIZE_FUNCTION:
@@ -1381,41 +1558,47 @@ GCMarker::markDelayedChildren()
             : NULL;
         markingDelay->link = NULL;
 #ifdef DEBUG
         markLaterCount -= Arena<FreeCell>::ThingsPerArena;
 #endif
 
         switch (a->header()->thingKind) {
           case FINALIZE_OBJECT0:
+          case FINALIZE_OBJECT0_BACKGROUND:
             reinterpret_cast<Arena<JSObject> *>(a)->markDelayedChildren(this);
             break;
           case FINALIZE_OBJECT2:
+          case FINALIZE_OBJECT2_BACKGROUND:
             reinterpret_cast<Arena<JSObject_Slots2> *>(a)->markDelayedChildren(this);
             break;
           case FINALIZE_OBJECT4:
+          case FINALIZE_OBJECT4_BACKGROUND:
             reinterpret_cast<Arena<JSObject_Slots4> *>(a)->markDelayedChildren(this);
             break;
           case FINALIZE_OBJECT8:
+          case FINALIZE_OBJECT8_BACKGROUND:
             reinterpret_cast<Arena<JSObject_Slots8> *>(a)->markDelayedChildren(this);
             break;
           case FINALIZE_OBJECT12:
+          case FINALIZE_OBJECT12_BACKGROUND:
             reinterpret_cast<Arena<JSObject_Slots12> *>(a)->markDelayedChildren(this);
             break;
           case FINALIZE_OBJECT16:
+          case FINALIZE_OBJECT16_BACKGROUND:
             reinterpret_cast<Arena<JSObject_Slots16> *>(a)->markDelayedChildren(this);
             break;
           case FINALIZE_STRING:
             reinterpret_cast<Arena<JSString> *>(a)->markDelayedChildren(this);
             break;
           case FINALIZE_EXTERNAL_STRING:
             reinterpret_cast<Arena<JSExternalString> *>(a)->markDelayedChildren(this);
             break;
           case FINALIZE_SHORT_STRING:
-            JS_ASSERT(false);
+            JS_NOT_REACHED("no delayed marking");
             break;
           case FINALIZE_FUNCTION:
             reinterpret_cast<Arena<JSFunction> *>(a)->markDelayedChildren(this);
             break;
           case FINALIZE_SHAPE:
             reinterpret_cast<Arena<Shape> *>(a)->markDelayedChildren(this);
             break;
 #if JS_HAS_XML_SUPPORT
@@ -1499,17 +1682,17 @@ AutoIdArray::trace(JSTracer *trc)
 {
     JS_ASSERT(tag == IDARRAY);
     gc::MarkIdRange(trc, idArray->length, idArray->vector, "JSAutoIdArray.idArray");
 }
 
 void
 AutoEnumStateRooter::trace(JSTracer *trc)
 {
-    js::gc::MarkObject(trc, *obj, "js::AutoEnumStateRooter.obj");
+    gc::MarkObject(trc, *obj, "js::AutoEnumStateRooter.obj");
 }
 
 inline void
 AutoGCRooter::trace(JSTracer *trc)
 {
     switch (tag) {
       case JSVAL:
         MarkValue(trc, static_cast<AutoValueRooter *>(this)->val, "js::AutoValueRooter.val");
@@ -1580,34 +1763,34 @@ AutoGCRooter::trace(JSTracer *trc)
             MarkObject(trc, *obj, "js::AutoObjectRooter.obj");
         return;
 
       case ID:
         MarkId(trc, static_cast<AutoIdRooter *>(this)->id_, "js::AutoIdRooter.val");
         return;
 
       case VALVECTOR: {
-        Vector<Value, 8> &vector = static_cast<js::AutoValueVector *>(this)->vector;
+        AutoValueVector::VectorImpl &vector = static_cast<AutoValueVector *>(this)->vector;
         MarkValueRange(trc, vector.length(), vector.begin(), "js::AutoValueVector.vector");
         return;
       }
 
       case STRING:
-        if (JSString *str = static_cast<js::AutoStringRooter *>(this)->str)
+        if (JSString *str = static_cast<AutoStringRooter *>(this)->str)
             MarkString(trc, str, "js::AutoStringRooter.str");
         return;
 
       case IDVECTOR: {
-        Vector<jsid, 8> &vector = static_cast<js::AutoIdVector *>(this)->vector;
+        AutoIdVector::VectorImpl &vector = static_cast<AutoIdVector *>(this)->vector;
         MarkIdRange(trc, vector.length(), vector.begin(), "js::AutoIdVector.vector");
         return;
       }
 
       case SHAPEVECTOR: {
-        Vector<const Shape *, 8> &vector = static_cast<js::AutoShapeVector *>(this)->vector;
+        AutoShapeVector::VectorImpl &vector = static_cast<js::AutoShapeVector *>(this)->vector;
         MarkShapeRange(trc, vector.length(), vector.begin(), "js::AutoShapeVector.vector");
         return;
       }
 
       case BINDINGS: {
         static_cast<js::AutoBindingsRooter *>(this)->bindings.trace(trc);
         return;
       }
@@ -1793,154 +1976,214 @@ js_DestroyScriptsToGC(JSContext *cx, JSC
 
             js_DestroyCachedScript(cx, script);
         }
     }
 }
 
 template<typename T>
 static void
-FinalizeArenaList(JSCompartment *comp, JSContext *cx, unsigned thingKind)
+FinalizeArenaList(JSCompartment *comp, JSContext *cx, JSGCInvocationKind gckind, unsigned thingKind)
 {
     JS_STATIC_ASSERT(!(sizeof(T) & Cell::CellMask));
     ArenaList *arenaList = GetFinalizableArenaList(comp, thingKind);
     Arena<FreeCell> **ap = &arenaList->head;
     Arena<T> *a = (Arena<T> *) *ap;
     if (!a)
         return;
     JS_ASSERT(sizeof(T) == arenaList->head->header()->thingSize);
 
 #ifdef JS_GCMETER
     uint32 nlivearenas = 0, nkilledarenas = 0, nthings = 0;
 #endif
     for (;;) {
-        ArenaHeader *header = a->header();
-        JS_ASSERT_IF(header->hasFreeThings, header->freeList);
-        JS_ASSERT(header->thingKind == thingKind);
-        JS_ASSERT(!a->getMarkingDelay()->link);
-        JS_ASSERT(a->getMarkingDelay()->unmarkedChildren == 0);
-        JS_ASSERT(a->header()->isUsed);
-
-        FreeCell *nextFree = header->freeList;
-        FreeCell *freeList = NULL;
-        FreeCell **tailp = &freeList;
-        bool allClear = true;
-
-        T *thingsEnd = &a->t.things[a->ThingsPerArena-1];
-        T *thing = &a->t.things[0];
-        thingsEnd++;
-
-        if (!nextFree) {
-            nextFree = thingsEnd->asFreeCell();
-        } else {
-            JS_ASSERT(thing->asFreeCell() <= nextFree);
-            JS_ASSERT(nextFree < thingsEnd->asFreeCell());
-        }
-
-        for (;; thing++) {
-            if (thing->asFreeCell() == nextFree) {
-                if (thing == thingsEnd)
-                    break;
-                nextFree = nextFree->link;
-                if (!nextFree) {
-                    nextFree = thingsEnd->asFreeCell();
-                } else {
-                    JS_ASSERT(thing->asFreeCell() < nextFree);
-                    JS_ASSERT(nextFree < thingsEnd->asFreeCell());
-                }
-            } else if (thing->isMarked()) {
-                allClear = false;
-                METER(nthings++);
-                continue;
-            } else {
-                thing->finalize(cx);
-#ifdef DEBUG
-                memset(thing, JS_FREE_PATTERN, sizeof(T));
-#endif
-            }
-            FreeCell *t = thing->asFreeCell();
-            *tailp = t;
-            tailp = &t->link;
-        }
-
-#ifdef DEBUG
-        /* Check that the free list is consistent. */
-        unsigned nfree = 0;
-        if (freeList) {
-            JS_ASSERT(tailp != &freeList);
-            FreeCell *t = freeList;
-            for (;;) {
-                ++nfree;
-                if (&t->link == tailp)
-                    break;
-                JS_ASSERT(t < t->link);
-                t = t->link;
-            }
-        }
-#endif
+        JS_ASSERT(a->header()->thingKind == thingKind);
+        bool allClear = a->finalize(cx);
         if (allClear) {
-            /*
-             * Forget just assembled free list head for the arena and
-             * add the arena itself to the destroy list.
-             */
-            JS_ASSERT(nfree == a->ThingsPerArena);
-            JS_ASSERT((T *)tailp == &a->t.things[a->ThingsPerArena-1]);
-            *tailp = NULL;
-            header->freeList = freeList;
-#ifdef DEBUG
-            header->hasFreeThings = true;
-#endif
-            *ap = (header->next);
-            JS_ASSERT((T *)header->freeList == &a->t.things[0]);
+            *ap = a->header()->next;
             a->chunk()->releaseArena(a);
             METER(nkilledarenas++);
         } else {
-            JS_ASSERT(nfree < a->ThingsPerArena);
-            *tailp = NULL;
-            header->freeList = freeList;
-#ifdef DEBUG
-            header->hasFreeThings = (nfree == 0) ? false : true;
-#endif
-            ap = &header->next;
+            ap = &a->header()->next;
             METER(nlivearenas++);
         }
         if (!(a = (Arena<T> *) *ap))
             break;
     }
     arenaList->cursor = arenaList->head;
     METER(UpdateCompartmentStats(comp, thingKind, nlivearenas, nkilledarenas, nthings));
 }
 
+template<typename T>
+static void
+FinalizeArenaListLater(JSContext *cx, ArenaList *arenaList, Arena<FreeCell> *head)
+{
+    JS_STATIC_ASSERT(!(sizeof(T) & Cell::CellMask));
+    JS_ASSERT(arenaList->hasToBeFinalized);
+    Arena<FreeCell> **ap = &head;
+    Arena<T> *a = (Arena<T> *) *ap;
+    JS_ASSERT(a);
+#ifdef DEBUG
+    int thingKind = head->header()->thingKind;
+    JSCompartment *comp = head->header()->compartment;
+#endif
+    JS_ASSERT(sizeof(T) == head->header()->thingSize);
+
+#ifdef JS_GCMETER
+    uint32 nlivearenas = 0, nkilledarenas = 0, nthings = 0;
+#endif
+    for (;;) {
+        bool allClear = a->finalize(cx);
+        /* We don't delete the head because the next allocated arena has to link to it. */
+        if (allClear && (a != (Arena<T> *)head)) {
+            *ap = a->header()->next;
+            a->chunk()->releaseArena(a);
+            METER(nkilledarenas++);
+        } else {
+            ap = &a->header()->next;
+            METER(nlivearenas++);
+        }
+        if (!(a = (Arena<T> *) *ap))
+            break;
+    }
+    arenaList->cursor = (Arena<FreeCell> *)head;
+    arenaList->hasToBeFinalized = false;
+    METER(UpdateCompartmentStats(comp, thingKind, nlivearenas, nkilledarenas, nthings));
+}
+
 void
-JSCompartment::finalizeObjectArenaLists(JSContext *cx)
+FinalizeArenaList(JSContext *cx, ArenaList *list, Arena<FreeCell> *head)
 {
-    FinalizeArenaList<JSObject>(this, cx, FINALIZE_OBJECT0);
-    FinalizeArenaList<JSObject_Slots2>(this, cx, FINALIZE_OBJECT2);
-    FinalizeArenaList<JSObject_Slots4>(this, cx, FINALIZE_OBJECT4);
-    FinalizeArenaList<JSObject_Slots8>(this, cx, FINALIZE_OBJECT8);
-    FinalizeArenaList<JSObject_Slots12>(this, cx, FINALIZE_OBJECT12);
-    FinalizeArenaList<JSObject_Slots16>(this, cx, FINALIZE_OBJECT16);
-    FinalizeArenaList<JSFunction>(this, cx, FINALIZE_FUNCTION);
+    JS_ASSERT(list->head);
+    JS_ASSERT(head);
+    js::gc::FinalizeKind kind = js::gc::FinalizeKind(head->header()->thingKind);
+
+    switch (kind) {
+      case FINALIZE_OBJECT0:
+      case FINALIZE_OBJECT2:
+      case FINALIZE_OBJECT4:
+      case FINALIZE_OBJECT8:
+      case FINALIZE_OBJECT12:
+      case FINALIZE_OBJECT16:
+      case FINALIZE_FUNCTION:
+      case FINALIZE_SHAPE:
+      case FINALIZE_EXTERNAL_STRING:
+        JS_NOT_REACHED("no background finalization");
+        break;
+      case FINALIZE_OBJECT0_BACKGROUND:
+        FinalizeArenaListLater<JSObject>(cx, list, (Arena<FreeCell> *)head);
+        break;
+      case FINALIZE_OBJECT2_BACKGROUND:
+        FinalizeArenaListLater<JSObject_Slots2>(cx, list, (Arena<FreeCell> *)head);
+        break;
+      case FINALIZE_OBJECT4_BACKGROUND:
+        FinalizeArenaListLater<JSObject_Slots4>(cx, list, (Arena<FreeCell> *)head);
+        break;
+      case FINALIZE_OBJECT8_BACKGROUND:
+        FinalizeArenaListLater<JSObject_Slots8>(cx, list, (Arena<FreeCell> *)head);
+        break;
+      case FINALIZE_OBJECT12_BACKGROUND:
+        FinalizeArenaListLater<JSObject_Slots12>(cx, list, (Arena<FreeCell> *)head);
+        break;
+      case FINALIZE_OBJECT16_BACKGROUND:
+        FinalizeArenaListLater<JSObject_Slots16>(cx, list, (Arena<FreeCell> *)head);
+        break;
+      case FINALIZE_STRING:
+        FinalizeArenaListLater<JSString>(cx, list, (Arena<FreeCell> *)head);
+        break;
+      case FINALIZE_SHORT_STRING:
+        FinalizeArenaListLater<JSShortString>(cx, list, (Arena<FreeCell> *)head);
+        break;
+#if JS_HAS_XML_SUPPORT
+      case FINALIZE_XML:
+        JS_NOT_REACHED("no background finalization");
+        break;
+#endif
+      default:
+        JS_NOT_REACHED("wrong kind");
+    }
+}
+
+#ifdef JS_THREADSAFE
+template<typename T>
+void BackgroundFinalize(JSCompartment *comp, JSContext *cx, JSGCInvocationKind gckind, unsigned thingKind)
+{
+    ArenaList *list = GetFinalizableArenaList(comp, thingKind);
+    if (list->head && list->head->header()->next) {
+        cx->gcBackgroundFree->finalizeLater(list);
+    } else {
+        FinalizeArenaList<T>(comp, cx, gckind, thingKind);
+    }
+}
+#endif
+
+void
+JSCompartment::finalizeObjectArenaLists(JSContext *cx, JSGCInvocationKind gckind)
+{
+    FinalizeArenaList<JSObject>(this, cx, gckind, FINALIZE_OBJECT0);
+    FinalizeArenaList<JSObject_Slots2>(this, cx, gckind, FINALIZE_OBJECT2);
+    FinalizeArenaList<JSObject_Slots4>(this, cx, gckind, FINALIZE_OBJECT4);
+    FinalizeArenaList<JSObject_Slots8>(this, cx, gckind, FINALIZE_OBJECT8);
+    FinalizeArenaList<JSObject_Slots12>(this, cx, gckind, FINALIZE_OBJECT12);
+    FinalizeArenaList<JSObject_Slots16>(this, cx, gckind, FINALIZE_OBJECT16);
+    FinalizeArenaList<JSFunction>(this, cx, gckind, FINALIZE_FUNCTION);
+
+#ifdef JS_THREADSAFE
+    if (cx->gcBackgroundFree && gckind != GC_LAST_CONTEXT && cx->runtime->state != JSRTS_LANDING) {
+        BackgroundFinalize<JSObject>(this, cx, gckind, FINALIZE_OBJECT0_BACKGROUND);
+        BackgroundFinalize<JSObject_Slots2>(this, cx, gckind, FINALIZE_OBJECT2_BACKGROUND);
+        BackgroundFinalize<JSObject_Slots4>(this, cx, gckind, FINALIZE_OBJECT4_BACKGROUND);
+        BackgroundFinalize<JSObject_Slots8>(this, cx, gckind, FINALIZE_OBJECT8_BACKGROUND);
+        BackgroundFinalize<JSObject_Slots12>(this, cx, gckind, FINALIZE_OBJECT12_BACKGROUND);
+        BackgroundFinalize<JSObject_Slots16>(this, cx, gckind, FINALIZE_OBJECT16_BACKGROUND);
+    } else {
+        FinalizeArenaList<JSObject>(this, cx, gckind, FINALIZE_OBJECT0_BACKGROUND);
+        FinalizeArenaList<JSObject_Slots2>(this, cx, gckind, FINALIZE_OBJECT2_BACKGROUND);
+        FinalizeArenaList<JSObject_Slots4>(this, cx, gckind, FINALIZE_OBJECT4_BACKGROUND);
+        FinalizeArenaList<JSObject_Slots8>(this, cx, gckind, FINALIZE_OBJECT8_BACKGROUND);
+        FinalizeArenaList<JSObject_Slots12>(this, cx, gckind, FINALIZE_OBJECT12_BACKGROUND);
+        FinalizeArenaList<JSObject_Slots16>(this, cx, gckind, FINALIZE_OBJECT16_BACKGROUND);
+    }
+#else
+    FinalizeArenaList<JSObject>(this, cx, gckind, FINALIZE_OBJECT0_BACKGROUND);
+    FinalizeArenaList<JSObject_Slots2>(this, cx, gckind, FINALIZE_OBJECT2_BACKGROUND);
+    FinalizeArenaList<JSObject_Slots4>(this, cx, gckind, FINALIZE_OBJECT4_BACKGROUND);
+    FinalizeArenaList<JSObject_Slots8>(this, cx, gckind, FINALIZE_OBJECT8_BACKGROUND);
+    FinalizeArenaList<JSObject_Slots12>(this, cx, gckind, FINALIZE_OBJECT12_BACKGROUND);
+    FinalizeArenaList<JSObject_Slots16>(this, cx, gckind, FINALIZE_OBJECT16_BACKGROUND);
+#endif
+
 #if JS_HAS_XML_SUPPORT
-    FinalizeArenaList<JSXML>(this, cx, FINALIZE_XML);
+    FinalizeArenaList<JSXML>(this, cx, gckind, FINALIZE_XML);
 #endif
 }
 
 void
-JSCompartment::finalizeStringArenaLists(JSContext *cx)
+JSCompartment::finalizeStringArenaLists(JSContext *cx, JSGCInvocationKind gckind)
 {
-    FinalizeArenaList<JSShortString>(this, cx, FINALIZE_SHORT_STRING);
-    FinalizeArenaList<JSString>(this, cx, FINALIZE_STRING);
-    FinalizeArenaList<JSExternalString>(this, cx, FINALIZE_EXTERNAL_STRING);
+#ifdef JS_THREADSAFE
+    if (cx->gcBackgroundFree && gckind != GC_LAST_CONTEXT && cx->runtime->state != JSRTS_LANDING) {
+        BackgroundFinalize<JSShortString>(this, cx, gckind, FINALIZE_SHORT_STRING);
+        BackgroundFinalize<JSString>(this, cx, gckind, FINALIZE_STRING);
+    } else {
+        FinalizeArenaList<JSShortString>(this, cx, gckind, FINALIZE_SHORT_STRING);
+        FinalizeArenaList<JSString>(this, cx, gckind, FINALIZE_STRING);
+    }
+    FinalizeArenaList<JSExternalString>(this, cx, gckind, FINALIZE_EXTERNAL_STRING);
+#else
+    FinalizeArenaList<JSShortString>(this, cx, gckind, FINALIZE_SHORT_STRING);
+    FinalizeArenaList<JSString>(this, cx, gckind, FINALIZE_STRING);
+    FinalizeArenaList<JSExternalString>(this, cx, gckind, FINALIZE_EXTERNAL_STRING);
+#endif
 }
 
 void
-JSCompartment::finalizeShapeArenaLists(JSContext *cx)
+JSCompartment::finalizeShapeArenaLists(JSContext *cx, JSGCInvocationKind gckind)
 {
-    FinalizeArenaList<Shape>(this, cx, FINALIZE_SHAPE);
+    FinalizeArenaList<Shape>(this, cx, gckind, FINALIZE_SHAPE);
 }
 
 #ifdef JS_THREADSAFE
 
 namespace js {
 
 bool
 GCHelperThread::init(JSRuntime *rt)
@@ -2039,18 +2282,53 @@ GCHelperThread::replenishAndFreeLater(vo
         freeCursorEnd = freeCursor + FREE_ARRAY_LENGTH;
         *freeCursor++ = ptr;
         return;
     } while (false);
     Foreground::free_(ptr);
 }
 
 void
+GCHelperThread::replenishAndFinalizeLater(ArenaList *list)
+{
+    JS_ASSERT(cx);
+    JS_ASSERT(finalizeCursor == finalizeCursorEnd);
+    do {
+        if (finalizeCursor && !finalizeVector.append(finalizeCursorEnd - FREE_ARRAY_LENGTH))
+            break;
+        finalizeCursor = (void **) OffTheBooks::malloc_(FREE_ARRAY_SIZE);
+        if (!finalizeCursor) {
+            finalizeCursorEnd = NULL;
+            break;
+        }
+        finalizeCursorEnd = finalizeCursor + FREE_ARRAY_LENGTH;
+        *finalizeCursor++ = list;
+        *finalizeCursor++ = list->head;
+        return;
+    } while (false);
+    FinalizeArenaList(cx, list, list->head);
+}
+
+void
 GCHelperThread::doSweep()
 {
+    JS_ASSERT(cx);
+    if (finalizeCursor) {
+        void **array = finalizeCursorEnd - FREE_ARRAY_LENGTH;
+        finalizeElementsAndArray(array, finalizeCursor);
+        finalizeCursor = finalizeCursorEnd = NULL;
+    } else {
+        JS_ASSERT(!finalizeCursorEnd);
+    }
+    for (void ***iter = finalizeVector.begin(); iter != finalizeVector.end(); ++iter) {
+        void **array = *iter;
+        finalizeElementsAndArray(array, array + FREE_ARRAY_LENGTH);
+    }
+    finalizeVector.resize(0);
+    cx = NULL;
     if (freeCursor) {
         void **array = freeCursorEnd - FREE_ARRAY_LENGTH;
         freeElementsAndArray(array, freeCursor);
         freeCursor = freeCursorEnd = NULL;
     } else {
         JS_ASSERT(!freeCursorEnd);
     }
     for (void ***iter = freeVector.begin(); iter != freeVector.end(); ++iter) {
@@ -2195,17 +2473,31 @@ MarkAndSweep(JSContext *cx, JSCompartmen
     /*
      * Mark phase.
      */
     GCMarker gcmarker(cx);
     JS_ASSERT(IS_GC_MARKING_TRACER(&gcmarker));
     JS_ASSERT(gcmarker.getMarkColor() == BLACK);
     rt->gcMarkingTracer = &gcmarker;
     gcmarker.stackLimit = cx->stackLimit;
-
+#ifdef JS_THREADSAFE
+    /*
+     * cx->gcBackgroundFree is set if we need several mark-and-sweep loops to
+     * finish the GC.
+     */
+    if (!cx->gcBackgroundFree) {
+        /* Wait until the sweeping from the previous GC finishes. */
+        rt->gcHelperThread.waitBackgroundSweepEnd(rt);
+        cx->gcBackgroundFree = &rt->gcHelperThread;
+    } else {
+        rt->gcHelperThread.waitBackgroundSweepEnd(rt);
+    }
+    JS_ASSERT(!rt->gcHelperThread.sweeping);
+    cx->gcBackgroundFree->setContext(cx);
+#endif
     for (GCChunkSet::Range r(rt->gcChunkSet.all()); !r.empty(); r.popFront())
          r.front()->clearMarkBitmap();
 
     if (comp) {
         for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); ++c)
             (*c)->markCrossCompartmentWrappers(&gcmarker);
     } else {
         js_MarkScriptFilenames(rt);
@@ -2218,37 +2510,28 @@ MarkAndSweep(JSContext *cx, JSCompartmen
      * tracing.
      */
     gcmarker.markDelayedChildren();
 
     /*
      * Mark weak roots.
      */
     while (true) {
-        if (!js_TraceWatchPoints(&gcmarker))
+        if (!js_TraceWatchPoints(&gcmarker) &&
+            !WeakMap::markIteratively(&gcmarker)) {
             break;
+        }
         gcmarker.markDelayedChildren();
     }
 
     rt->gcMarkingTracer = NULL;
 
     if (rt->gcCallback)
         (void) rt->gcCallback(cx, JSGC_MARK_END);
 
-#ifdef JS_THREADSAFE
-    /*
-     * cx->gcBackgroundFree is set if we need several mark-and-sweep loops to
-     * finish the GC.
-     */
-    if (!cx->gcBackgroundFree) {
-        /* Wait until the sweeping from the previois GC finishes. */
-        rt->gcHelperThread.waitBackgroundSweepEnd(rt);
-        cx->gcBackgroundFree = &rt->gcHelperThread;
-    }
-#endif
 #ifdef DEBUG
     /* Make sure that we didn't mark an object in another compartment */
     if (comp) {
         for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); ++c)
             JS_ASSERT_IF(*c != comp && *c != rt->atomsCompartment, checkArenaListAllUnmarked(*c));
     }
 #endif
 
@@ -2262,55 +2545,54 @@ MarkAndSweep(JSContext *cx, JSCompartmen
      * We first sweep atom state so we can use IsAboutToBeFinalized on
      * JSString held in a hashtable to check if the hashtable entry can be
      * freed. Note that even after the entry is freed, JSObject finalizers can
      * continue to access the corresponding JSString* assuming that they are
      * unique. This works since the atomization API must not be called during
      * the GC.
      */
     TIMESTAMP(startSweep);
+
+    /* Finalize unreachable (key,value) pairs in all weak maps. */
+    WeakMap::sweep(cx);
+
     js_SweepAtomState(cx);
 
     /* Finalize watch points associated with unreachable objects. */
     js_SweepWatchPoints(cx);
 
-#ifdef DEBUG
-    /* Save the pre-sweep count of scope-mapped properties. */
-    rt->liveObjectPropsPreSweep = rt->liveObjectProps;
-#endif
-
     /*
      * We finalize objects before other GC things to ensure that object's finalizer 
      * can access them even if they will be freed. Sweep the runtime's property trees 
      * after finalizing objects, in case any had watchpoints referencing tree nodes.
      * Do this before sweeping compartments, so that we sweep all shapes in
      * unreachable compartments.
      */
     if (comp) {
         comp->sweep(cx, 0);
-        comp->finalizeObjectArenaLists(cx);
+        comp->finalizeObjectArenaLists(cx, gckind);
         TIMESTAMP(sweepObjectEnd);
-        comp->finalizeStringArenaLists(cx);
+        comp->finalizeStringArenaLists(cx, gckind);
         TIMESTAMP(sweepStringEnd);
-        comp->finalizeShapeArenaLists(cx);
+        comp->finalizeShapeArenaLists(cx, gckind);
         TIMESTAMP(sweepShapeEnd);
     } else {
         SweepCrossCompartmentWrappers(cx);
         for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); c++)
-            (*c)->finalizeObjectArenaLists(cx);
+            (*c)->finalizeObjectArenaLists(cx, gckind);
 
         TIMESTAMP(sweepObjectEnd);
 
         for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); c++)
-            (*c)->finalizeStringArenaLists(cx);
+            (*c)->finalizeStringArenaLists(cx, gckind);
 
         TIMESTAMP(sweepStringEnd);
 
         for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); c++)
-            (*c)->finalizeShapeArenaLists(cx);
+            (*c)->finalizeShapeArenaLists(cx, gckind);
 
         TIMESTAMP(sweepShapeEnd);
 
         for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); ++c)
             (*c)->propertyTree.dumpShapeStats();
     }
 
     PropertyTree::dumpShapes(cx);
@@ -2559,16 +2841,25 @@ GCUntilDone(JSContext *cx, JSCompartment
         if (firstRun) {
             PreGCCleanup(cx, gckind);
             TIMESTAMP(startMark);
             firstRun = false;
         }
 
         MarkAndSweep(cx, comp, gckind  GCTIMER_ARG);
 
+#ifdef JS_THREADSAFE
+        JS_ASSERT(cx->gcBackgroundFree == &rt->gcHelperThread);
+        if (rt->gcPoke) {
+            AutoLockGC lock(rt);
+            cx->gcBackgroundFree = NULL;
+            rt->gcHelperThread.startBackgroundSweep(rt);
+        }
+#endif
+
         // GC again if:
         //   - another thread, not in a request, called js_GC
         //   - js_GC was called recursively
         //   - a finalizer called js_RemoveRoot or js_UnlockGCThingRT.
     } while (rt->gcPoke);
 
 #ifdef JS_THREADSAFE
     JS_ASSERT(cx->gcBackgroundFree == &rt->gcHelperThread);
@@ -2623,16 +2914,19 @@ js_GC(JSContext *cx, JSCompartment *comp
          * on another thread.
          */
         if (JSGCCallback callback = rt->gcCallback) {
             if (!callback(cx, JSGC_BEGIN) && gckind != GC_LAST_CONTEXT)
                 return;
         }
 
         {
+#ifdef JS_THREADSAFE
+            rt->gcHelperThread.waitBackgroundSweepEnd(rt);
+#endif
             /* Lock out other GC allocator and collector invocations. */
             AutoLockGC lock(rt);
 
             GCUntilDone(cx, comp, gckind  GCTIMER_ARG);
         }
 
         /* We re-sample the callback again as the finalizers can change it. */
         if (JSGCCallback callback = rt->gcCallback)
--- a/js/src/jsgc.h
+++ b/js/src/jsgc.h
@@ -74,34 +74,40 @@ namespace js {
 
 struct Shape;
 
 namespace gc {
 
 /* The kind of GC thing with a finalizer. */
 enum FinalizeKind {
     FINALIZE_OBJECT0,
+    FINALIZE_OBJECT0_BACKGROUND,
     FINALIZE_OBJECT2,
+    FINALIZE_OBJECT2_BACKGROUND,
     FINALIZE_OBJECT4,
+    FINALIZE_OBJECT4_BACKGROUND,
     FINALIZE_OBJECT8,
+    FINALIZE_OBJECT8_BACKGROUND,
     FINALIZE_OBJECT12,
+    FINALIZE_OBJECT12_BACKGROUND,
     FINALIZE_OBJECT16,
-    FINALIZE_OBJECT_LAST = FINALIZE_OBJECT16,
+    FINALIZE_OBJECT16_BACKGROUND,
+    FINALIZE_OBJECT_LAST = FINALIZE_OBJECT16_BACKGROUND,
     FINALIZE_FUNCTION,
     FINALIZE_SHAPE,
 #if JS_HAS_XML_SUPPORT
     FINALIZE_XML,
 #endif
     FINALIZE_SHORT_STRING,
     FINALIZE_STRING,
     FINALIZE_EXTERNAL_STRING,
     FINALIZE_LIMIT
 };
 
-const uintN JS_FINALIZE_OBJECT_LIMIT = 6;
+const uintN JS_FINALIZE_OBJECT_LIMIT = 12;
 
 /* Every arena has a header. */
 struct ArenaHeader {
     JSCompartment   *compartment;
     Arena<FreeCell> *next;
     FreeCell        *freeList;
     unsigned        thingKind;
 #ifdef DEBUG
@@ -185,18 +191,21 @@ struct Arena {
     void markDelayedChildren(JSTracer *trc);
     inline bool inFreeList(void *thing) const;
     inline T *getAlignedThing(const void *thing);
 #ifdef DEBUG
     inline bool assureThingIsAligned(void *thing);
 #endif
 
     void init(JSCompartment *compartment, unsigned thingKind);
+    bool finalize(JSContext *cx);
 };
 
+void FinalizeArena(Arena<FreeCell> *a);
+
 /*
  * Live objects are marked black. How many other additional colors are available
  * depends on the size of the GCThing.
  */
 static const uint32 BLACK = 0;
 
 /* An arena bitmap contains enough mark bits for all the cells in an arena. */
 struct ArenaBitmap {
@@ -345,16 +354,19 @@ EmptyArenaLists::insert(Arena<T> *arena)
 
 /* The chunk header (located at the end of the chunk to preserve arena alignment). */
 struct ChunkInfo {
     Chunk           *link;
     JSRuntime       *runtime;
     EmptyArenaLists emptyArenaLists;
     size_t          age;
     size_t          numFree;
+#ifdef JS_THREADSAFE
+    PRLock          *chunkLock;
+#endif
 };
 
 /* Chunks contain arenas and associated data structures (mark bitmap, delayed marking state). */
 struct Chunk {
     static const size_t BytesPerArena = sizeof(Arena<FreeCell>) +
                                         sizeof(ArenaBitmap) +
                                         sizeof(MarkingDelay);
 
@@ -362,24 +374,24 @@ struct Chunk {
 
     Arena<FreeCell> arenas[ArenasPerChunk];
     ArenaBitmap     bitmaps[ArenasPerChunk];
     MarkingDelay    markingDelay[ArenasPerChunk];
 
     ChunkInfo       info;
 
     void clearMarkBitmap();
-    void init(JSRuntime *rt);
+    bool init(JSRuntime *rt);
 
     bool unused();
     bool hasAvailableArenas();
     bool withinArenasRange(Cell *cell);
 
     template <typename T>
-    Arena<T> *allocateArena(JSCompartment *comp, unsigned thingKind);
+    Arena<T> *allocateArena(JSContext *cx, unsigned thingKind);
 
     template <typename T>
     void releaseArena(Arena<T> *a);
 
     JSRuntime *getRuntime();
 };
 JS_STATIC_ASSERT(sizeof(Chunk) <= GC_CHUNK_SIZE);
 JS_STATIC_ASSERT(sizeof(Chunk) + Chunk::BytesPerArena > GC_CHUNK_SIZE);
@@ -529,21 +541,27 @@ const float GC_HEAP_GROWTH_FACTOR = 3.0f
 
 static inline size_t
 GetFinalizableTraceKind(size_t thingKind)
 {
     JS_STATIC_ASSERT(JSExternalString::TYPE_LIMIT == 8);
 
     static const uint8 map[FINALIZE_LIMIT] = {
         JSTRACE_OBJECT,     /* FINALIZE_OBJECT0 */
+        JSTRACE_OBJECT,     /* FINALIZE_OBJECT0_BACKGROUND */
         JSTRACE_OBJECT,     /* FINALIZE_OBJECT2 */
+        JSTRACE_OBJECT,     /* FINALIZE_OBJECT2_BACKGROUND */
         JSTRACE_OBJECT,     /* FINALIZE_OBJECT4 */
+        JSTRACE_OBJECT,     /* FINALIZE_OBJECT4_BACKGROUND */
         JSTRACE_OBJECT,     /* FINALIZE_OBJECT8 */
+        JSTRACE_OBJECT,     /* FINALIZE_OBJECT8_BACKGROUND */
         JSTRACE_OBJECT,     /* FINALIZE_OBJECT12 */
+        JSTRACE_OBJECT,     /* FINALIZE_OBJECT12_BACKGROUND */
         JSTRACE_OBJECT,     /* FINALIZE_OBJECT16 */
+        JSTRACE_OBJECT,     /* FINALIZE_OBJECT16_BACKGROUND */
         JSTRACE_OBJECT,     /* FINALIZE_FUNCTION */
         JSTRACE_SHAPE,      /* FINALIZE_SHAPE */
 #if JS_HAS_XML_SUPPORT      /* FINALIZE_XML */
         JSTRACE_XML,
 #endif
         JSTRACE_STRING,     /* FINALIZE_SHORT_STRING */
         JSTRACE_STRING,     /* FINALIZE_STRING */
         JSTRACE_STRING,     /* FINALIZE_EXTERNAL_STRING */
@@ -566,23 +584,26 @@ GetGCThingRuntime(void *thing)
 extern bool
 checkArenaListsForThing(JSCompartment *comp, jsuword thing);
 #endif
 
 /* The arenas in a list have uniform kind. */
 struct ArenaList {
     Arena<FreeCell>       *head;          /* list start */
     Arena<FreeCell>       *cursor;        /* arena with free things */
+    volatile bool         hasToBeFinalized;
 
     inline void init() {
         head = NULL;
         cursor = NULL;
+        hasToBeFinalized = false;
     }
 
-    inline Arena<FreeCell> *getNextWithFreeList() {
+    inline Arena<FreeCell> *getNextWithFreeList(JSContext *cx) {
+        JS_ASSERT(!hasToBeFinalized);
         Arena<FreeCell> *a;
         while (cursor != NULL) {
             ArenaHeader *aheader = cursor->header();
             a = cursor;
             cursor = aheader->next;
             if (aheader->freeList)
                 return a;
         }
@@ -858,16 +879,19 @@ js_WaitForGC(JSRuntime *rt);
 
 # define js_WaitForGC(rt)    ((void) 0)
 
 #endif
 
 extern void
 js_DestroyScriptsToGC(JSContext *cx, JSCompartment *comp);
 
+extern void
+FinalizeArenaList(JSContext *cx, js::gc::ArenaList *arenaList, js::gc::Arena<js::gc::FreeCell> *head);
+
 namespace js {
 
 #ifdef JS_THREADSAFE
 
 /*
  * During the finalization we do not free immediately. Rather we add the
  * corresponding pointers to a buffer which we later release on a separated
  * thread.
@@ -876,67 +900,103 @@ namespace js {
  * simple vector, to avoid realloc calls during the vector growth and to not
  * bloat the binary size of the inlined freeLater method. Any OOM during
  * buffer growth results in the pointer being freed immediately.
  */
 class GCHelperThread {
     static const size_t FREE_ARRAY_SIZE = size_t(1) << 16;
     static const size_t FREE_ARRAY_LENGTH = FREE_ARRAY_SIZE / sizeof(void *);
 
+    JSContext         *cx;
     PRThread*         thread;
     PRCondVar*        wakeup;
     PRCondVar*        sweepingDone;
     bool              shutdown;
-    bool              sweeping;
 
     Vector<void **, 16, js::SystemAllocPolicy> freeVector;
     void            **freeCursor;
     void            **freeCursorEnd;
+    Vector<void **, 16, js::SystemAllocPolicy> finalizeVector;
+    void            **finalizeCursor;
+    void            **finalizeCursorEnd;
 
     JS_FRIEND_API(void)
     replenishAndFreeLater(void *ptr);
 
+    void replenishAndFinalizeLater(js::gc::ArenaList *list);
+
     static void freeElementsAndArray(void **array, void **end) {
         JS_ASSERT(array <= end);
         for (void **p = array; p != end; ++p)
             js::Foreground::free_(*p);
         js::Foreground::free_(array);
     }
 
+    void finalizeElementsAndArray(void **array, void **end) {
+        JS_ASSERT(array <= end);
+        for (void **p = array; p != end; p += 2) {
+            js::gc::ArenaList *list = (js::gc::ArenaList *)*p;
+            js::gc::Arena<js::gc::FreeCell> *head = (js::gc::Arena<js::gc::FreeCell> *)*(p+1);
+            
+            FinalizeArenaList(cx, list, head);
+        }
+        js::Foreground::free_(array);
+    }
+
     static void threadMain(void* arg);
 
     void threadLoop(JSRuntime *rt);
     void doSweep();
 
   public:
     GCHelperThread()
       : thread(NULL),
         wakeup(NULL),
         sweepingDone(NULL),
         shutdown(false),
-        sweeping(false),
         freeCursor(NULL),
-        freeCursorEnd(NULL) { }
+        freeCursorEnd(NULL),
+        finalizeCursor(NULL),
+        finalizeCursorEnd(NULL),
+        sweeping(false) { }
 
+    volatile bool     sweeping;
     bool init(JSRuntime *rt);
     void finish(JSRuntime *rt);
 
     /* Must be called with GC lock taken. */
     void startBackgroundSweep(JSRuntime *rt);
 
     /* Must be called outside the GC lock. */
     void waitBackgroundSweepEnd(JSRuntime *rt);
 
     void freeLater(void *ptr) {
         JS_ASSERT(!sweeping);
         if (freeCursor != freeCursorEnd)
             *freeCursor++ = ptr;
         else
             replenishAndFreeLater(ptr);
     }
+
+    void finalizeLater(js::gc::ArenaList *list) {
+        JS_ASSERT(!list->hasToBeFinalized);
+        if (!list->head)
+            return;
+
+        list->hasToBeFinalized = true;
+        JS_ASSERT(!sweeping);
+        if (finalizeCursor + 1 < finalizeCursorEnd) {
+            *finalizeCursor++ = list;
+            *finalizeCursor++ = list->head;
+        } else {
+            replenishAndFinalizeLater(list);
+        }
+    }
+
+    void setContext(JSContext *context) { cx = context; }
 };
 
 #endif /* JS_THREADSAFE */
 
 struct GCChunkHasher {
     typedef gc::Chunk *Lookup;
 
     /*
--- a/js/src/jsgcinlines.h
+++ b/js/src/jsgcinlines.h
@@ -137,26 +137,32 @@ GetGCObjectKind(size_t numSlots, Finaliz
 
 /* Get the number of fixed slots and initial capacity associated with a kind. */
 static inline size_t
 GetGCKindSlots(FinalizeKind thingKind)
 {
     /* Using a switch in hopes that thingKind will usually be a compile-time constant. */
     switch (thingKind) {
       case FINALIZE_OBJECT0:
+      case FINALIZE_OBJECT0_BACKGROUND:
         return 0;
       case FINALIZE_OBJECT2:
+      case FINALIZE_OBJECT2_BACKGROUND:
         return 2;
       case FINALIZE_OBJECT4:
+      case FINALIZE_OBJECT4_BACKGROUND:
         return 4;
       case FINALIZE_OBJECT8:
+      case FINALIZE_OBJECT8_BACKGROUND:
         return 8;
       case FINALIZE_OBJECT12:
+      case FINALIZE_OBJECT12_BACKGROUND:
         return 12;
       case FINALIZE_OBJECT16:
+      case FINALIZE_OBJECT16_BACKGROUND:
         return 16;
       default:
         JS_NOT_REACHED("Bad object finalize kind");
         return 0;
     }
 }
 
 } /* namespace gc */
@@ -281,17 +287,18 @@ Mark(JSTracer *trc, T *thing)
     JS_ASSERT(thing);
     JS_ASSERT(JS_IS_VALID_TRACE_KIND(js::gc::GetGCThingTraceKind(thing)));
     JS_ASSERT(trc->debugPrinter || trc->debugPrintArg);
 
     /* Per-Compartment GC only with GCMarker and no custom JSTracer */
     JS_ASSERT_IF(trc->context->runtime->gcCurrentCompartment, IS_GC_MARKING_TRACER(trc));
 
     JSRuntime *rt = trc->context->runtime;
-
+    JS_ASSERT(thing->arena()->header()->compartment);
+    JS_ASSERT(thing->arena()->header()->compartment->rt == rt);
     /* Don't mark things outside a compartment if we are in a per-compartment GC */
     if (rt->gcCurrentCompartment && thing->compartment() != rt->gcCurrentCompartment)
         goto out;
 
     if (!IS_GC_MARKING_TRACER(trc)) {
         uint32 kind = js::gc::GetGCThingTraceKind(thing);
         trc->callback(trc, (void *)thing, kind);
         goto out;
--- a/js/src/jsgcstats.cpp
+++ b/js/src/jsgcstats.cpp
@@ -108,21 +108,27 @@ UpdateCompartmentStats(JSCompartment *co
     if (globSt->maxarenas < compSt->maxarenas)
         globSt->maxarenas = compSt->maxarenas;
     if (globSt->maxthings < compSt->maxthings)
         globSt->maxthings = compSt->maxthings;
 }
 
 static const char *const GC_ARENA_NAMES[] = {
     "object_0",
+    "object_0_background",
     "object_2",
+    "object_2_background",
     "object_4",
+    "object_4_background",
     "object_8",
+    "object_8_background",
     "object_12",
+    "object_12_background",
     "object_16",
+    "object_16_background",
     "function",
     "shape",
 #if JS_HAS_XML_SUPPORT
     "xml",
 #endif
     "short string",
     "string",
     "external_string",
@@ -139,65 +145,77 @@ GetSizeAndThings(size_t &thingSize, size
 
 #if defined JS_DUMP_CONSERVATIVE_GC_ROOTS
 void *
 GetAlignedThing(void *thing, int thingKind)
 {
     Cell *cell = (Cell *)thing;
     switch (thingKind) {
         case FINALIZE_OBJECT0:
+        case FINALIZE_OBJECT0_BACKGROUND:
             return (void *)GetArena<JSObject>(cell)->getAlignedThing(thing);
         case FINALIZE_OBJECT2:
+        case FINALIZE_OBJECT2_BACKGROUND:
             return (void *)GetArena<JSObject_Slots2>(cell)->getAlignedThing(thing);
         case FINALIZE_OBJECT4:
+        case FINALIZE_OBJECT4_BACKGROUND:
             return (void *)GetArena<JSObject_Slots4>(cell)->getAlignedThing(thing);
         case FINALIZE_OBJECT8:
+        case FINALIZE_OBJECT8_BACKGROUND:
             return (void *)GetArena<JSObject_Slots8>(cell)->getAlignedThing(thing);
         case FINALIZE_OBJECT12:
+        case FINALIZE_OBJECT12_BACKGROUND:
             return (void *)GetArena<JSObject_Slots12>(cell)->getAlignedThing(thing);
         case FINALIZE_OBJECT16:
+        case FINALIZE_OBJECT16_BACKGROUND:
             return (void *)GetArena<JSObject_Slots16>(cell)->getAlignedThing(thing);
         case FINALIZE_STRING:
             return (void *)GetArena<JSString>(cell)->getAlignedThing(thing);
         case FINALIZE_EXTERNAL_STRING:
             return (void *)GetArena<JSExternalString>(cell)->getAlignedThing(thing);
         case FINALIZE_SHORT_STRING:
             return (void *)GetArena<JSShortString>(cell)->getAlignedThing(thing);
         case FINALIZE_FUNCTION:
             return (void *)GetArena<JSFunction>(cell)->getAlignedThing(thing);
 #if JS_HAS_XML_SUPPORT
         case FINALIZE_XML:
             return (void *)GetArena<JSXML>(cell)->getAlignedThing(thing);
 #endif
         default:
-            JS_ASSERT(false);
+            JS_NOT_REACHED("wrong kind");
             return NULL;
     }
 }
 #endif
 
 void GetSizeAndThingsPerArena(int thingKind, size_t &thingSize, size_t &thingsPerArena)
 {
     switch (thingKind) {
         case FINALIZE_OBJECT0:
+        case FINALIZE_OBJECT0_BACKGROUND:
             GetSizeAndThings<JSObject>(thingSize, thingsPerArena);
             break;
         case FINALIZE_OBJECT2:
+        case FINALIZE_OBJECT2_BACKGROUND:
             GetSizeAndThings<JSObject_Slots2>(thingSize, thingsPerArena);
             break;
         case FINALIZE_OBJECT4:
+        case FINALIZE_OBJECT4_BACKGROUND:
             GetSizeAndThings<JSObject_Slots4>(thingSize, thingsPerArena);
             break;
         case FINALIZE_OBJECT8:
+        case FINALIZE_OBJECT8_BACKGROUND:
             GetSizeAndThings<JSObject_Slots8>(thingSize, thingsPerArena);
             break;
         case FINALIZE_OBJECT12:
+        case FINALIZE_OBJECT12_BACKGROUND:
             GetSizeAndThings<JSObject_Slots12>(thingSize, thingsPerArena);
             break;
         case FINALIZE_OBJECT16:
+        case FINALIZE_OBJECT16_BACKGROUND:
             GetSizeAndThings<JSObject_Slots16>(thingSize, thingsPerArena);
             break;
         case FINALIZE_EXTERNAL_STRING:
         case FINALIZE_STRING:
             GetSizeAndThings<JSString>(thingSize, thingsPerArena);
             break;
         case FINALIZE_SHORT_STRING:
             GetSizeAndThings<JSShortString>(thingSize, thingsPerArena);
@@ -206,17 +224,17 @@ void GetSizeAndThingsPerArena(int thingK
             GetSizeAndThings<JSFunction>(thingSize, thingsPerArena);
             break;
 #if JS_HAS_XML_SUPPORT
         case FINALIZE_XML:
             GetSizeAndThings<JSXML>(thingSize, thingsPerArena);
             break;
 #endif
         default:
-            JS_ASSERT(false);
+            JS_NOT_REACHED("wrong kind");
     }
 }
 
 void
 DumpArenaStats(JSGCArenaStats *stp, FILE *fp)
 {
     size_t sumArenas = 0, sumTotalArenas = 0, sumThings =0,  sumMaxThings = 0;
     size_t sumThingSize = 0, sumTotalThingSize = 0, sumArenaCapacity = 0;
@@ -437,17 +455,17 @@ GCTimer::finish(bool lastGC) {
                     TIMEDIFF(getFirstEnter(), enter),
                     TIMEDIFF(enter, end),
                     TIMEDIFF(startMark, startSweep),
                     TIMEDIFF(startSweep, sweepDestroyEnd),
                     TIMEDIFF(startSweep, sweepObjectEnd),
                     TIMEDIFF(sweepObjectEnd, sweepStringEnd),
                     TIMEDIFF(sweepStringEnd, sweepShapeEnd),
                     TIMEDIFF(sweepShapeEnd, sweepDestroyEnd));
-            fprintf(gcFile, "%7d, %7d \n", newChunkCount, destroyChunkCount);
+            fprintf(gcFile, "%7d, %7d\n", newChunkCount, destroyChunkCount);
             fflush(gcFile);
 
             if (lastGC) {
                 fclose(gcFile);
                 gcFile = NULL;
             }
         }
     }
--- a/js/src/jshashtable.h
+++ b/js/src/jshashtable.h
@@ -38,16 +38,17 @@
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #ifndef jshashtable_h_
 #define jshashtable_h_
 
+#include "jsalloc.h"
 #include "jstl.h"
 
 /* Gross special case for Gecko, which defines malloc/calloc/free. */
 #ifdef mozilla_mozalloc_macro_wrappers_h
 #  define JSHASHTABLE_UNDEFD_MOZALLOC_WRAPPERS
 #  include "mozilla/mozalloc_undef_macro_wrappers.h"
 #endif
 
--- a/js/src/jsinferinlines.h
+++ b/js/src/jsinferinlines.h
@@ -500,20 +500,20 @@ JSContext::typeMonitorAssign(JSObject *o
     if (typeInferenceEnabled())
         return compartment->types.dynamicAssign(this, obj, id, rval);
     return true;
 }
 
 inline bool
 JSContext::typeMonitorCall(const js::CallArgs &args, bool constructing)
 {
-    if (!typeInferenceEnabled() || !args.callee().isObject())
+    if (!typeInferenceEnabled())
         return true;
 
-    JSObject *callee = &args.callee().toObject();
+    JSObject *callee = &args.callee();
     if (!callee->isFunction() || !callee->getFunctionPrivate()->isInterpreted())
         return true;
 
     return compartment->types.dynamicCall(this, callee, args, constructing);
 }
 
 inline bool
 JSContext::fixArrayType(JSObject *obj)
--- a/js/src/jsinterp.cpp
+++ b/js/src/jsinterp.cpp
@@ -69,20 +69,21 @@
 #include "jsemit.h"
 #include "jsscope.h"
 #include "jsscript.h"
 #include "jsstr.h"
 #include "jsstaticcheck.h"
 #include "jstracer.h"
 #include "jslibmath.h"
 #include "jsvector.h"
+#ifdef JS_METHODJIT
 #include "methodjit/MethodJIT.h"
 #include "methodjit/MethodJIT-inl.h"
 #include "methodjit/Logging.h"
-
+#endif
 #include "jsatominlines.h"
 #include "jscntxtinlines.h"
 #include "jsinferinlines.h"
 #include "jsinterpinlines.h"
 #include "jsobjinlines.h"
 #include "jsprobes.h"
 #include "jspropertycacheinlines.h"
 #include "jsscopeinlines.h"
@@ -446,41 +447,16 @@ CallThisObjectHook(JSContext *cx, JSObje
 {
     JSObject *thisp = obj->thisObject(cx);
     if (!thisp)
         return NULL;
     argv[-1].setObject(*thisp);
     return thisp;
 }
 
-/*
- * ECMA requires "the global object", but in embeddings such as the browser,
- * which have multiple top-level objects (windows, frames, etc. in the DOM),
- * we prefer fun's parent.  An example that causes this code to run:
- *
- *   // in window w1
- *   function f() { return this }
- *   function g() { return f }
- *
- *   // in window w2
- *   var h = w1.g()
- *   alert(h() == w1)
- *
- * The alert should display "true".
- */
-JS_STATIC_INTERPRET bool
-ComputeGlobalThis(JSContext *cx, Value *vp)
-{
-    JSObject *thisp = vp[0].toObject().getGlobal()->thisObject(cx);
-    if (!thisp)
-        return false;
-    vp[1].setObject(*thisp);
-    return true;
-}
-
 namespace js {
 
 void
 ReportIncompatibleMethod(JSContext *cx, Value *vp, Class *clasp)
 {
     Value &thisv = vp[1];
 
 #ifdef DEBUG
@@ -514,34 +490,56 @@ ReportIncompatibleMethod(JSContext *cx, 
         JSAutoByteString funNameBytes;
         if (const char *funName = GetFunctionNameBytes(cx, fun, &funNameBytes)) {
             JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_INCOMPATIBLE_PROTO,
                                  clasp->name, funName, name);
         }
     }
 }
 
+/*
+ * ECMA requires "the global object", but in embeddings such as the browser,
+ * which have multiple top-level objects (windows, frames, etc. in the DOM),
+ * we prefer fun's parent.  An example that causes this code to run:
+ *
+ *   // in window w1
+ *   function f() { return this }
+ *   function g() { return f }
+ *
+ *   // in window w2
+ *   var h = w1.g()
+ *   alert(h() == w1)
+ *
+ * The alert should display "true".
+ */
 bool
-BoxThisForVp(JSContext *cx, Value *vp)
+BoxNonStrictThis(JSContext *cx, const CallReceiver &call)
 {
     /*
      * Check for SynthesizeFrame poisoning and fast constructors which
-     * didn't check their vp properly.
+     * didn't check their callee properly.
      */
-    JS_ASSERT(!vp[1].isMagic());
+    Value &thisv = call.thisv();
+    JS_ASSERT(!thisv.isMagic());
+
 #ifdef DEBUG
-    JSFunction *fun = vp[0].toObject().isFunction() ? vp[0].toObject().getFunctionPrivate() : NULL;
+    JSFunction *fun = call.callee().isFunction() ? call.callee().getFunctionPrivate() : NULL;
     JS_ASSERT_IF(fun && fun->isInterpreted(), !fun->inStrictMode());
 #endif
 
-    if (vp[1].isNullOrUndefined())
-        return ComputeGlobalThis(cx, vp);
-
-    if (!vp[1].isObject())
-        return !!js_PrimitiveToObject(cx, &vp[1]);
+    if (thisv.isNullOrUndefined()) {
+        JSObject *thisp = call.callee().getGlobal()->thisObject(cx);
+        if (!thisp)
+            return false;
+        call.thisv().setObject(*thisp);
+        return true;
+    }
+
+    if (!thisv.isObject())
+        return !!js_PrimitiveToObject(cx, &thisv);
 
     return true;
 }
 
 }
 
 #if JS_HAS_NO_SUCH_METHOD
 
@@ -626,17 +624,17 @@ NoSuchMethod(JSContext *cx, uintN argc, 
     if (!cx->stack().pushInvokeArgs(cx, 2, &args))
         return JS_FALSE;
 
     JS_ASSERT(vp[0].isObject());
     JS_ASSERT(vp[1].isObject());
     JSObject *obj = &vp[0].toObject();
     JS_ASSERT(obj->getClass() == &js_NoSuchMethodClass);
 
-    args.callee() = obj->getSlot(JSSLOT_FOUND_FUNCTION);
+    args.calleev() = obj->getSlot(JSSLOT_FOUND_FUNCTION);
     args.thisv() = vp[1];
     args[0] = obj->getSlot(JSSLOT_SAVED_ID);
     JSObject *argsobj = NewDenseCopiedArray(cx, argc, vp + 2);
     if (!argsobj)
         return JS_FALSE;
     args[1].setObject(*argsobj);
     JSBool ok = (flags & JSINVOKE_CONSTRUCT)
                 ? InvokeConstructor(cx, args)
@@ -690,33 +688,33 @@ RunScript(JSContext *cx, JSScript *scrip
 JS_REQUIRES_STACK bool
 Invoke(JSContext *cx, const CallArgs &argsRef, uint32 flags)
 {
     /* N.B. Must be kept in sync with InvokeSessionGuard::start/invoke */
 
     CallArgs args = argsRef;
     JS_ASSERT(args.argc() <= JS_ARGS_LENGTH_MAX);
 
-    if (args.callee().isPrimitive()) {
-        js_ReportIsNotFunction(cx, &args.callee(), flags & JSINVOKE_FUNFLAGS);
+    if (args.calleev().isPrimitive()) {
+        js_ReportIsNotFunction(cx, &args.calleev(), flags & JSINVOKE_FUNFLAGS);
         return false;
     }
 
-    JSObject &callee = args.callee().toObject();
+    JSObject &callee = args.callee();
     Class *clasp = callee.getClass();
 
     /* Invoke non-functions. */
     if (JS_UNLIKELY(clasp != &js_FunctionClass)) {
 #if JS_HAS_NO_SUCH_METHOD
         if (JS_UNLIKELY(clasp == &js_NoSuchMethodClass))
             return NoSuchMethod(cx, args.argc(), args.base(), 0);
 #endif
         JS_ASSERT_IF(flags & JSINVOKE_CONSTRUCT, !clasp->construct);
         if (!clasp->call) {
-            js_ReportIsNotFunction(cx, &args.callee(), flags);
+            js_ReportIsNotFunction(cx, &args.calleev(), flags);
             return false;
         }
         if (!cx->markTypeCallerUnexpected(types::TYPE_UNKNOWN))
             return false;
         return CallJSNative(cx, clasp->call, args.argc(), args.base());
     }
 
     /* Invoke native functions. */
@@ -784,17 +782,17 @@ InvokeSessionGuard::start(JSContext *cx,
 #endif
 
     /* Always push arguments, regardless of optimized/normal invoke. */
     StackSpace &stack = cx->stack();
     if (!stack.pushInvokeArgs(cx, argc, &args_))
         return false;
 
     /* Callees may clobber 'this' or 'callee'. */
-    savedCallee_ = args_.callee() = calleev;
+    savedCallee_ = args_.calleev() = calleev;
     savedThis_ = args_.thisv() = thisv;
 
     do {
         /* Hoist dynamic checks from scripted Invoke. */
         if (!calleev.isObject())
             break;
         JSObject &callee = calleev.toObject();
         if (callee.getClass() != &js_FunctionClass)
@@ -884,17 +882,17 @@ ExternalInvoke(JSContext *cx, const Valu
                uintN argc, Value *argv, Value *rval)
 {
     LeaveTrace(cx);
 
     InvokeArgsGuard args;
     if (!cx->stack().pushInvokeArgs(cx, argc, &args))
         return false;
 
-    args.callee() = fval;
+    args.calleev() = fval;
     args.thisv() = thisv;
     memcpy(args.argv(), argv, argc * sizeof(Value));
 
     if (args.thisv().isObject()) {
         /*
          * We must call the thisObject hook in case we are not called from the
          * interpreter, where a prior bytecode has computed an appropriate
          * |this| already.
@@ -917,17 +915,17 @@ ExternalInvokeConstructor(JSContext *cx,
                           Value *rval)
 {
     LeaveTrace(cx);
 
     InvokeArgsGuard args;
     if (!cx->stack().pushInvokeArgs(cx, argc, &args))
         return false;
 
-    args.callee() = fval;
+    args.calleev() = fval;
     args.thisv().setMagic(JS_THIS_POISON);
     memcpy(args.argv(), argv, argc * sizeof(Value));
 
     if (!InvokeConstructor(cx, args))
         return false;
 
     *rval = args.rval();
     return true;
@@ -972,21 +970,22 @@ InitSharpSlots(JSContext *cx, JSStackFra
         sharps[0].setUndefined();
         sharps[1].setUndefined();
     }
     return true;
 }
 #endif
 
 bool
-Execute(JSContext *cx, JSObject *chain, JSScript *script,
+Execute(JSContext *cx, JSObject &chain, JSScript *script,
         JSStackFrame *prev, uintN flags, Value *result)
 {
-    JS_ASSERT(chain);
     JS_ASSERT_IF(prev, !prev->isDummyFrame());
+    JS_ASSERT_IF(prev, prev->compartment() == cx->compartment);
+    JS_ASSERT(script->compartment == cx->compartment);
 
     if (script->isEmpty()) {
         if (result)
             result->setUndefined();
         return true;
     }
 
     LeaveTrace(cx);
@@ -1002,48 +1001,48 @@ Execute(JSContext *cx, JSObject *chain, 
         return false;
 
     /* Initialize fixed slots (GVAR ops expect NULL). */
     SetValueRangeToNull(frame.fp()->slots(), script->nfixed);
 
     /* Initialize frame and locals. */
     JSObject *initialVarObj;
     if (prev) {
-        JS_ASSERT(chain == &prev->scopeChain());
+        JS_ASSERT(chain == prev->scopeChain());
         frame.fp()->initEvalFrame(cx, script, prev, flags);
 
         /* NB: prev may not be in cx->currentSegment. */
         initialVarObj = (prev == cx->maybefp())
                         ? &prev->varobj(cx)
                         : &prev->varobj(cx->stack().containingSegment(prev));
     } else {
         /* The scope chain could be anything, so innerize just in case. */
-        JSObject *innerizedChain = chain;
+        JSObject *innerizedChain = &chain;
         OBJ_TO_INNER_OBJECT(cx, innerizedChain);
         if (!innerizedChain)
             return false;
 
         /* If we were handed a non-native object, complain bitterly. */
         if (!innerizedChain->isNative()) {
             JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
                                  JSMSG_NON_NATIVE_SCOPE);
             return false;
         }
 
         frame.fp()->initGlobalFrame(script, *innerizedChain, flags);
 
         /* If scope chain is an inner window, outerize for 'this'. */
-        JSObject *thisp = chain->thisObject(cx);
+        JSObject *thisp = chain.thisObject(cx);
         if (!thisp)
             return false;
         frame.fp()->globalThis().setObject(*thisp);
 
         initialVarObj = cx->hasRunOption(JSOPTION_VAROBJFIX)
-                        ? chain->getGlobal()
-                        : chain;
+                        ? chain.getGlobal()
+                        : &chain;
     }
     JS_ASSERT(!initialVarObj->getOps()->defineProperty);
 
     if (frame.fp()->isStrictEvalFrame()) {
         /* Give strict mode eval its own fresh lexical environment. */
         initialVarObj = CreateEvalCallObject(cx, frame.fp());
         if (!initialVarObj)
             return false;
@@ -1240,17 +1239,17 @@ StrictlyEqual(JSContext *cx, const Value
     if (SameType(lval, rval)) {
         if (lval.isString())
             return EqualStrings(cx, lval.toString(), rval.toString(), equal);
         if (lval.isDouble()) {
             *equal = JSDOUBLE_COMPARE(lval.toDouble(), ==, rval.toDouble(), JS_FALSE);
             return true;
         }
         if (lval.isObject()) {
-            *equal = &lval.toObject() == &rval.toObject();
+            *equal = lval.toObject() == rval.toObject();
             return true;
         }
         if (lval.isUndefined()) {
             *equal = true;
             return true;
         }
         *equal = lval.payloadAsRawUint32() == rval.payloadAsRawUint32();
         return true;
@@ -1323,18 +1322,18 @@ TypeOfValue(JSContext *cx, const Value &
 
 JS_REQUIRES_STACK bool
 InvokeConstructor(JSContext *cx, const CallArgs &argsRef)
 {
     JS_ASSERT(!js_FunctionClass.construct);
     CallArgs args = argsRef;
 
     JSObject *callee;
-    if (args.callee().isPrimitive() || !(callee = &args.callee().toObject())->getParent()) {
-        js_ReportIsNotFunction(cx, &args.callee(), JSV2F_CONSTRUCT);
+    if (args.calleev().isPrimitive() || !(callee = &args.callee())->getParent()) {
+        js_ReportIsNotFunction(cx, &args.calleev(), JSV2F_CONSTRUCT);
         return false;
     }
 
     /* Handle the fast-constructors cases before falling into the general case . */
     Class *clasp = callee->getClass();
     JSFunction *fun = NULL;
     if (clasp == &js_FunctionClass) {
         fun = callee->getFunctionPrivate();
@@ -1386,17 +1385,17 @@ InvokeConstructorWithGivenThis(JSContext
                                uintN argc, Value *argv, Value *rval)
 {
     LeaveTrace(cx);
 
     InvokeArgsGuard args;
     if (!cx->stack().pushInvokeArgs(cx, argc, &args))
         return JS_FALSE;
 
-    args.callee() = fval;
+    args.calleev() = fval;
     /* Initialize args.thisv on all paths below. */
     memcpy(args.argv(), argv, argc * sizeof(Value));
 
     /* Handle the fast-constructor cases before calling the general case. */
     JSObject &callee = fval.toObject();
     Class *clasp = callee.getClass();
     JSFunction *fun;
     bool ok;
@@ -1411,36 +1410,16 @@ InvokeConstructorWithGivenThis(JSContext
         ok = Invoke(cx, args, JSINVOKE_CONSTRUCT);
     }
 
     *rval = args.rval();
     return ok;
 }
 
 bool
-DirectEval(JSContext *cx, uint32 argc, Value *vp)
-{
-    JS_ASSERT(vp == cx->regs->sp - argc - 2);
-    JS_ASSERT(vp[0].isObject());
-    JS_ASSERT(vp[0].toObject().isFunction());
-
-    JSStackFrame *caller = cx->fp();
-    JS_ASSERT(caller->isScriptFrame());
-    JS_ASSERT(IsBuiltinEvalForScope(&caller->scopeChain(), vp[0]));
-    AutoFunctionCallProbe callProbe(cx, vp[0].toObject().getFunctionPrivate(), caller->script());
-
-    JSObject *scopeChain =
-        GetScopeChainFast(cx, caller, JSOP_EVAL, JSOP_EVAL_LENGTH + JSOP_LINENO_LENGTH);
-    if (!scopeChain || !EvalKernel(cx, argc, vp, DIRECT_EVAL, caller, scopeChain))
-        return false;
-    cx->regs->sp = vp + 1;
-    return true;
-}
-
-bool
 ValueToId(JSContext *cx, const Value &v, jsid *idp)
 {
     int32_t i;
     if (ValueFitsInInt32(v, &i) && INT_FITS_IN_JSID(i)) {
         *idp = INT_TO_JSID(i);
         return true;
     }
 
@@ -1587,17 +1566,17 @@ js::GetUpvar(JSContext *cx, uintN closur
     uintN slot = cookie.slot();
     Value *vp;
 
     if (!fp->isFunctionFrame() || fp->isEvalFrame()) {
         vp = fp->slots() + fp->numFixed();
     } else if (slot < fp->numFormalArgs()) {
         vp = fp->formalArgs();
     } else if (slot == UpvarCookie::CALLEE_SLOT) {
-        vp = &fp->calleeValue();
+        vp = &fp->calleev();
         slot = 0;
     } else {
         slot -= fp->numFormalArgs();
         JS_ASSERT(slot < fp->numSlots());
         vp = fp->slots();
     }
 
     return vp[slot];
@@ -2168,30 +2147,30 @@ AssertValidPropertyCacheHit(JSContext *c
 
     const Shape *shape = (Shape *) prop;
     if (entry->vword.isSlot()) {
         JS_ASSERT(entry->vword.toSlot() == shape->slot);
         JS_ASSERT(!shape->isMethod());
     } else if (entry->vword.isShape()) {
         JS_ASSERT(entry->vword.toShape() == shape);
         JS_ASSERT_IF(shape->isMethod(),
-                     &shape->methodObject() == &pobj->nativeGetSlot(shape->slot).toObject());
+                     shape->methodObject() == pobj->nativeGetSlot(shape->slot).toObject());
     } else {
         Value v;
         JS_ASSERT(entry->vword.isFunObj());
         JS_ASSERT(!entry->vword.isNull());
         JS_ASSERT(pobj->brandedOrHasMethodBarrier());
         JS_ASSERT(shape->hasDefaultGetterOrIsMethod());
         JS_ASSERT(pobj->containsSlot(shape->slot));
         v = pobj->nativeGetSlot(shape->slot);
-        JS_ASSERT(&entry->vword.toFunObj() == &v.toObject());
+        JS_ASSERT(entry->vword.toFunObj() == v.toObject());
 
         if (shape->isMethod()) {
             JS_ASSERT(js_CodeSpec[*regs.pc].format & JOF_CALLOP);
-            JS_ASSERT(&shape->methodObject() == &v.toObject());
+            JS_ASSERT(shape->methodObject() == v.toObject());
         }
     }
 
     return true;
 }
 
 #else
 # define ASSERT_VALID_PROPERTY_CACHE_HIT(pcoff,obj,pobj,entry) ((void) 0)
@@ -2937,17 +2916,17 @@ BEGIN_CASE(JSOP_ENTERWITH)
      *
      * We set sp[-1] to the current "with" object to help asserting the
      * enter/leave balance in [leavewith].
      */
     regs.sp[-1].setObject(regs.fp->scopeChain());
 END_CASE(JSOP_ENTERWITH)
 
 BEGIN_CASE(JSOP_LEAVEWITH)
-    JS_ASSERT(&regs.sp[-1].toObject() == &regs.fp->scopeChain());
+    JS_ASSERT(regs.sp[-1].toObject() == regs.fp->scopeChain());
     regs.sp--;
     js_LeaveWith(cx);
 END_CASE(JSOP_LEAVEWITH)
 
 BEGIN_CASE(JSOP_RETURN)
     POP_RETURN_VALUE();
     /* FALL THROUGH */
 
@@ -4080,49 +4059,59 @@ do_incop:
     if (regs.sp[-1].isUndefined() &&
         !cx->addTypePropertyId(obj->getType(), id, types::TYPE_UNDEFINED)) {
         goto error;
     }
 
     const JSCodeSpec *cs = &js_CodeSpec[op];
     JS_ASSERT(cs->ndefs == 1);
     JS_ASSERT((cs->format & JOF_TMPSLOT_MASK) >= JOF_TMPSLOT2);
+
+    uint32 format = cs->format;
+    uint32 setPropFlags = (JOF_MODE(format) == JOF_NAME)
+                          ? JSRESOLVE_ASSIGNING
+                          : JSRESOLVE_ASSIGNING | JSRESOLVE_QUALIFIED;
+
     Value &ref = regs.sp[-1];
     int32_t tmp;
     if (JS_LIKELY(ref.isInt32() && CanIncDecWithoutOverflow(tmp = ref.toInt32()))) {
-        int incr = (cs->format & JOF_INC) ? 1 : -1;
-        if (cs->format & JOF_POST)
+        int incr = (format & JOF_INC) ? 1 : -1;
+        if (format & JOF_POST)
             ref.getInt32Ref() = tmp + incr;
         else
             ref.getInt32Ref() = tmp += incr;
-        regs.fp->setAssigning();
-        JSBool ok = obj->setProperty(cx, id, &ref, script->strictModeCode);
-        regs.fp->clearAssigning();
-        if (!ok)
-            goto error;
+
+        {
+            JSAutoResolveFlags rf(cx, setPropFlags);
+            if (!obj->setProperty(cx, id, &ref, script->strictModeCode))
+                goto error;
+        }
 
         /*
          * We must set regs.sp[-1] to tmp for both post and pre increments
          * as the setter overwrites regs.sp[-1].
          */
         ref.setInt32(tmp);
     } else {
         /* We need an extra root for the result. */
         PUSH_NULL();
         if (!js_DoIncDec(cx, cs, &regs.sp[-2], &regs.sp[-1]))
             goto error;
+
         if (!cx->typeMonitorAssign(obj, id, regs.sp[-1]))
             goto error;
         if (!script->typeMonitorOverflow(cx, regs.pc))
             goto error;
-        regs.fp->setAssigning();
-        JSBool ok = obj->setProperty(cx, id, &regs.sp[-1], script->strictModeCode);
-        regs.fp->clearAssigning();
-        if (!ok)
-            goto error;
+
+        {
+            JSAutoResolveFlags rf(cx, setPropFlags);
+            if (!obj->setProperty(cx, id, &regs.sp[-1], script->strictModeCode))
+                goto error;
+        }
+
         regs.sp--;
     }
 
     if (cs->nuses == 0) {
         /* regs.sp[-1] already contains the result of name increment. */
     } else {
         regs.sp[-1 - cs->nuses] = regs.sp[-1];
         regs.sp -= cs->nuses;
@@ -4189,41 +4178,41 @@ BEGIN_CASE(JSOP_LOCALINC)
             goto error;
     }
     len = JSOP_INCARG_LENGTH;
     JS_ASSERT(len == js_CodeSpec[op].length);
     DO_NEXT_OP(len);
 }
 
 BEGIN_CASE(JSOP_THIS)
-    if (!regs.fp->computeThis(cx))
+    if (!ComputeThis(cx, regs.fp))
         goto error;
     PUSH_COPY(regs.fp->thisValue());
 END_CASE(JSOP_THIS)
 
 BEGIN_CASE(JSOP_UNBRANDTHIS)
 {
-    if (!regs.fp->computeThis(cx))
+    if (!ComputeThis(cx, regs.fp))
         goto error;
     Value &thisv = regs.fp->thisValue();
     if (thisv.isObject()) {
         JSObject *obj = &thisv.toObject();
         if (obj->isNative())
             obj->unbrand(cx);
     }
 }
 END_CASE(JSOP_UNBRANDTHIS)
 
 {
     JSObject *obj;
     Value *vp;
     jsint i;
 
 BEGIN_CASE(JSOP_GETTHISPROP)
-    if (!regs.fp->computeThis(cx))
+    if (!ComputeThis(cx, regs.fp))
         goto error;
     i = 0;
     PUSH_COPY(regs.fp->thisValue());
     goto do_getprop_body;
 
 BEGIN_CASE(JSOP_GETARGPROP)
 {
     i = ARGNO_LEN;
@@ -4805,18 +4794,20 @@ END_CASE(JSOP_NEW)
 BEGIN_CASE(JSOP_EVAL)
 {
     argc = GET_ARGC(regs.pc);
     vp = regs.sp - (argc + 2);
 
     if (!IsBuiltinEvalForScope(&regs.fp->scopeChain(), *vp))
         goto call_using_invoke;
 
-    if (!DirectEval(cx, argc, vp))
+    if (!DirectEval(cx, CallArgsFromVp(argc, vp)))
         goto error;
+
+    regs.sp = vp + 1;
 }
 END_CASE(JSOP_EVAL)
 
 BEGIN_CASE(JSOP_CALL)
 BEGIN_CASE(JSOP_FUNAPPLY)
 BEGIN_CASE(JSOP_FUNCALL)
 {
     argc = GET_ARGC(regs.pc);
@@ -4838,17 +4829,17 @@ BEGIN_CASE(JSOP_FUNCALL)
             }
 
             /* Restrict recursion of lightweight functions. */
             if (JS_UNLIKELY(inlineCallCount >= JS_MAX_INLINE_CALL_COUNT)) {
                 js_ReportOverRecursed(cx);
                 goto error;
             }
 
-            if (!cx->typeMonitorCall(CallArgs(vp + 2, argc), flags & JSFRAME_CONSTRUCTING))
+            if (!cx->typeMonitorCall(CallArgsFromVp(argc, vp), flags & JSFRAME_CONSTRUCTING))
                 goto error;
 
             bool newType = (flags & JSFRAME_CONSTRUCTING) &&
                 cx->typeInferenceEnabled() && UseNewType(cx, script, regs.pc);
 
             /* Get pointer to new frame/slots, prepare arguments. */
             StackSpace &stack = cx->stack();
             JSStackFrame *newfp = stack.getInlineFrame(cx, regs.sp, argc, newfun,
--- a/js/src/jsinterp.h
+++ b/js/src/jsinterp.h
@@ -74,36 +74,35 @@ enum JSInterpMode
     JSINTERP_SAFEPOINT         =     2, /* interpreter should leave on a method JIT safe point */
     JSINTERP_PROFILE           =     3  /* interpreter should profile a loop */
 };
 
 /* Flags used in JSStackFrame::flags_ */
 enum JSFrameFlags
 {
     /* Primary frame type */
-    JSFRAME_GLOBAL             =     0x1, /* frame pushed for a global script */
-    JSFRAME_FUNCTION           =     0x2, /* frame pushed for a scripted call */
-    JSFRAME_DUMMY              =     0x4, /* frame pushed for bookkeeping */
+    JSFRAME_GLOBAL             =      0x1, /* frame pushed for a global script */
+    JSFRAME_FUNCTION           =      0x2, /* frame pushed for a scripted call */
+    JSFRAME_DUMMY              =      0x4, /* frame pushed for bookkeeping */
 
     /* Frame subtypes */
-    JSFRAME_EVAL               =     0x8, /* frame pushed for eval() or debugger eval */
-    JSFRAME_DEBUGGER           =    0x10, /* frame pushed for debugger eval */
-    JSFRAME_GENERATOR          =    0x20, /* frame is associated with a generator */
-    JSFRAME_FLOATING_GENERATOR =    0x40, /* frame is is in generator obj, not on stack */
-    JSFRAME_CONSTRUCTING       =    0x80, /* frame is for a constructor invocation */
+    JSFRAME_EVAL               =      0x8, /* frame pushed for eval() or debugger eval */
+    JSFRAME_DEBUGGER           =     0x10, /* frame pushed for debugger eval */
+    JSFRAME_GENERATOR          =     0x20, /* frame is associated with a generator */
+    JSFRAME_FLOATING_GENERATOR =     0x40, /* frame is is in generator obj, not on stack */
+    JSFRAME_CONSTRUCTING       =     0x80, /* frame is for a constructor invocation */
 
     /* Temporary frame states */
-    JSFRAME_ASSIGNING          =   0x100, /* not-JOF_ASSIGNING op is assigning */
-    JSFRAME_YIELDING           =   0x200, /* js::Interpret dispatched JSOP_YIELD */
-    JSFRAME_FINISHED_IN_INTERPRETER = 0x400, /* set if frame finished in Interpret() */
+    JSFRAME_YIELDING           =    0x200, /* js::Interpret dispatched JSOP_YIELD */
+    JSFRAME_FINISHED_IN_INTERP =    0x400, /* set if frame finished in Interpret() */
 
     /* Concerning function arguments */
-    JSFRAME_OVERRIDE_ARGS      =  0x1000, /* overridden arguments local variable */
-    JSFRAME_OVERFLOW_ARGS      =  0x2000, /* numActualArgs > numFormalArgs */
-    JSFRAME_UNDERFLOW_ARGS     =  0x4000, /* numActualArgs < numFormalArgs */
+    JSFRAME_OVERRIDE_ARGS      =   0x1000, /* overridden arguments local variable */
+    JSFRAME_OVERFLOW_ARGS      =   0x2000, /* numActualArgs > numFormalArgs */
+    JSFRAME_UNDERFLOW_ARGS     =   0x4000, /* numActualArgs < numFormalArgs */
 
     /* Lazy frame initialization */
     JSFRAME_HAS_IMACRO_PC      =   0x8000, /* frame has imacpc value available */
     JSFRAME_HAS_CALL_OBJ       =  0x10000, /* frame has a callobj reachable from scopeChain_ */
     JSFRAME_HAS_ARGS_OBJ       =  0x20000, /* frame has an argsobj in JSStackFrame::args */
     JSFRAME_HAS_HOOK_DATA      =  0x40000, /* frame has hookData_ set */
     JSFRAME_HAS_ANNOTATION     =  0x80000, /* frame has annotation_ set */
     JSFRAME_HAS_RVAL           = 0x100000, /* frame has rval_ set */
@@ -488,46 +487,48 @@ struct JSStackFrame
     }
 
     js::Value &thisValue() const {
         if (flags_ & (JSFRAME_EVAL | JSFRAME_GLOBAL))
             return ((js::Value *)this)[-1];
         return formalArgs()[-1];
     }
 
-    inline bool computeThis(JSContext *cx);
-
     /*
      * Callee
      *
      * Only function frames have a callee. An eval frame in a function has the
      * same caller as its containing function frame.
      */
 
-    js::Value &calleeValue() const {
+    js::Value &calleev() const {
         JS_ASSERT(isFunctionFrame());
         if (isEvalFrame())
             return ((js::Value *)this)[-2];
         return formalArgs()[-2];
     }
 
     JSObject &callee() const {
         JS_ASSERT(isFunctionFrame());
-        return calleeValue().toObject();
+        return calleev().toObject();
     }
 
     JSObject *maybeCallee() const {
         return isFunctionFrame() ? &callee() : NULL;
     }
 
+    js::CallReceiver callReceiver() const {
+        return js::CallReceiverFromArgv(formalArgs());
+    }
+
     /*
      * getValidCalleeObject is a fallible getter to compute the correct callee
      * function object, which may require deferred cloning due to the JSObject
      * methodReadBarrier. For a non-function frame, return true with *vp set
-     * from calleeValue, which may not be an object (it could be undefined).
+     * from calleev, which may not be an object (it could be undefined).
      */
     bool getValidCalleeObject(JSContext *cx, js::Value *vp);
 
     /*
      * Scope chain
      *
      * Every frame has a scopeChain which, when traversed via the 'parent' link
      * to the root, indicates the current global object. A 'call object' is a
@@ -567,16 +568,30 @@ struct JSStackFrame
 
     inline JSObject &callObj() const;
     inline void setScopeChainNoCallObj(JSObject &obj);
     inline void setScopeChainWithOwnCallObj(JSObject &obj);
 
     inline void markActivationObjectsAsPut();
 
     /*
+     * Frame compartment
+     *
+     * A stack frame's compartment is the frame's containing context's
+     * compartment when the frame was pushed.
+     */
+
+    JSCompartment *compartment() const {
+        JS_ASSERT_IF(isScriptFrame(), scopeChain().compartment() == script()->compartment);
+        return scopeChain().compartment();
+    }
+
+    inline JSPrincipals *principals(JSContext *cx) const;
+
+    /*
      * Imacropc
      *
      * A frame's IMacro pc is the bytecode address when an imacro started
      * executing (guaranteed non-null). An imacro does not push a frame, so
      * when the imacro finishes, the frame's IMacro pc becomes the current pc.
      */
 
     bool hasImacropc() const {
@@ -740,46 +755,34 @@ struct JSStackFrame
     bool hasOverflowArgs() const {
         return !!(flags_ & JSFRAME_OVERFLOW_ARGS);
     }
 
     void setOverriddenArgs() {
         flags_ |= JSFRAME_OVERRIDE_ARGS;
     }
 
-    bool isAssigning() const {
-        return !!(flags_ & JSFRAME_ASSIGNING);
-    }
-
-    void setAssigning() {
-        flags_ |= JSFRAME_ASSIGNING;
-    }
-
-    void clearAssigning() {
-        flags_ &= ~JSFRAME_ASSIGNING;
-    }
-
     bool isYielding() {
         return !!(flags_ & JSFRAME_YIELDING);
     }
 
     void setYielding() {
         flags_ |= JSFRAME_YIELDING;
     }
 
     void clearYielding() {
         flags_ &= ~JSFRAME_YIELDING;
     }
 
     void setFinishedInInterpreter() {
-        flags_ |= JSFRAME_FINISHED_IN_INTERPRETER;
+        flags_ |= JSFRAME_FINISHED_IN_INTERP;
     }
 
     bool finishedInInterpreter() const {
-        return !!(flags_ & JSFRAME_FINISHED_IN_INTERPRETER);
+        return !!(flags_ & JSFRAME_FINISHED_IN_INTERP);
     }
 
     /*
      * Variables object accessors
      *
      * A stack frame's 'varobj' refers to the 'variables object' (ES3 term)
      * associated with the Execution Context's VariableEnvironment (ES5 10.3).
      *
@@ -951,43 +954,32 @@ ScriptEpilogueOrGeneratorYield(JSContext
 
 extern void
 ScriptDebugPrologue(JSContext *cx, JSStackFrame *fp);
 
 extern bool
 ScriptDebugEpilogue(JSContext *cx, JSStackFrame *fp, bool ok);
 
 /*
- * For a call's vp (which necessarily includes callee at vp[0] and the original
- * specified |this| at vp[1]), convert null/undefined |this| into the global
- * object for the callee and replace other primitives with boxed versions. The
- * callee must not be strict mode code.
+ * For a given |call|, convert null/undefined |this| into the global object for
+ * the callee and replace other primitives with boxed versions. This assumes
+ * that call.callee() is not strict mode code. This is the special/slow case of
+ * ComputeThis.
  */
 extern bool
-BoxThisForVp(JSContext *cx, js::Value *vp);
+BoxNonStrictThis(JSContext *cx, const CallReceiver &call);
 
 /*
- * Abstracts the layout of the stack passed to natives from the engine and from
- * natives to js::Invoke.
+ * Ensure that fp->thisValue() is the correct value of |this| for the scripted
+ * call represented by |fp|. ComputeThis is necessary because fp->thisValue()
+ * may be set to 'undefined' when 'this' should really be the global object (as
+ * an optimization to avoid global-this computation).
  */
-struct CallArgs
-{
-    Value *argv_;
-    uintN argc_;
-    CallArgs() {}
-    CallArgs(Value *argv, uintN argc) : argv_(argv), argc_(argc) {}
-  public:
-    Value *base() const { return argv_ - 2; }
-    Value &callee() const { return argv_[-2]; }
-    Value &thisv() const { return argv_[-1]; }
-    Value &operator[](unsigned i) const { JS_ASSERT(i < argc_); return argv_[i]; }
-    Value *argv() const { return argv_; }
-    uintN argc() const { return argc_; }
-    Value &rval() const { return argv_[-2]; }
-};
+inline bool
+ComputeThis(JSContext *cx, JSStackFrame *fp);
 
 /*
  * The js::InvokeArgumentsGuard passed to js_Invoke must come from an
  * immediately-enclosing successful call to js::StackSpace::pushInvokeArgs,
  * i.e., there must have been no un-popped pushes to cx->stack(). Furthermore,
  * |args.getvp()[0]| should be the callee, |args.getvp()[1]| should be |this|,
  * and the range [args.getvp() + 2, args.getvp() + 2 + args.getArgc()) should
  * be initialized actual arguments.
@@ -1069,39 +1061,21 @@ extern JS_REQUIRES_STACK bool
 InvokeConstructorWithGivenThis(JSContext *cx, JSObject *thisobj, const Value &fval,
                                uintN argc, Value *argv, Value *rval);
 
 extern bool
 ExternalInvokeConstructor(JSContext *cx, const Value &fval, uintN argc, Value *argv,
                           Value *rval);
 
 /*
- * Performs a direct eval for the given arguments, which must correspond to the
- * currently-executing stack frame, which must be a script frame. On completion
- * the result is returned in *vp and the JS stack pointer is adjusted.
- */
-extern JS_REQUIRES_STACK bool
-DirectEval(JSContext *cx, uint32 argc, Value *vp);
-
-/*
- * Performs a direct eval for the given arguments, which must correspond to the
- * currently-executing stack frame, which must be a script frame.  evalfun must
- * be the built-in eval function and must correspond to the callee in vp[0].
- * When this function succeeds it returns the result in *vp, adjusts the JS
- * stack pointer, and returns true.
- */
-extern JS_REQUIRES_STACK bool
-DirectEval(JSContext *cx, JSFunction *evalfun, uint32 argc, Value *vp);
-
-/*
  * Executes a script with the given scope chain in the context of the given
  * frame.
  */
 extern JS_FORCES_STACK bool
-Execute(JSContext *cx, JSObject *chain, JSScript *script,
+Execute(JSContext *cx, JSObject &chain, JSScript *script,
         JSStackFrame *prev, uintN flags, Value *result);
 
 /*
  * Execute the caller-initialized frame for a user-defined script or function
  * pointed to by cx->fp until completion or error.
  */
 extern JS_REQUIRES_STACK JS_NEVER_INLINE bool
 Interpret(JSContext *cx, JSStackFrame *stopFp, uintN inlineCallCount = 0, JSInterpMode mode = JSINTERP_NORMAL);
--- a/js/src/jsinterpinlines.h
+++ b/js/src/jsinterpinlines.h
@@ -129,17 +129,17 @@ JSStackFrame::resetInvokeCallFrame()
                            JSFRAME_OVERRIDE_ARGS |
                            JSFRAME_HAS_PREVPC |
                            JSFRAME_HAS_RVAL |
                            JSFRAME_HAS_SCOPECHAIN |
                            JSFRAME_HAS_ANNOTATION |
                            JSFRAME_HAS_HOOK_DATA |
                            JSFRAME_HAS_CALL_OBJ |
                            JSFRAME_HAS_ARGS_OBJ |
-                           JSFRAME_FINISHED_IN_INTERPRETER)));
+                           JSFRAME_FINISHED_IN_INTERP)));
 
     /*
      * Since the stack frame is usually popped after PutActivationObjects,
      * these bits aren't cleared. The activation objects must have actually
      * been put, though.
      */
     JS_ASSERT_IF(flags_ & JSFRAME_HAS_CALL_OBJ, callObj().getPrivate() == NULL);
     JS_ASSERT_IF(flags_ & JSFRAME_HAS_ARGS_OBJ, argsObj().getPrivate() == NULL);
@@ -368,39 +368,16 @@ struct CopyTo
 
 JS_ALWAYS_INLINE void
 JSStackFrame::clearMissingArgs()
 {
     if (flags_ & JSFRAME_UNDERFLOW_ARGS)
         SetValueRangeToUndefined(formalArgs() + numActualArgs(), formalArgsEnd());
 }
 
-inline bool
-JSStackFrame::computeThis(JSContext *cx)
-{
-    js::Value &thisv = thisValue();
-    if (thisv.isObject())
-        return true;
-    if (isFunctionFrame()) {
-        if (fun()->inStrictMode())
-            return true;
-        /*
-         * Eval function frames have their own |this| slot, which is a copy of the function's
-         * |this| slot. If we lazily wrap a primitive |this| in an eval function frame, the
-         * eval's frame will get the wrapper, but the function's frame will not. To prevent
-         * this, we always wrap a function's |this| before pushing an eval frame, and should
-         * thus never see an unwrapped primitive in a non-strict eval function frame.
-         */
-        JS_ASSERT(!isEvalFrame());
-    }
-    if (!js::BoxThisForVp(cx, &thisv - 1))
-        return false;
-    return true;
-}
-
 inline JSObject &
 JSStackFrame::varobj(js::StackSegment *seg) const
 {
     JS_ASSERT(seg->contains(this));
     return isFunctionFrame() ? callObj() : seg->getInitialVarObj();
 }
 
 inline JSObject &
@@ -578,16 +555,19 @@ inline bool
 InvokeSessionGuard::invoke(JSContext *cx) const
 {
     /* N.B. Must be kept in sync with Invoke */
 
     /* Refer to canonical (callee, this) for optimized() sessions. */
     formals_[-2] = savedCallee_;
     formals_[-1] = savedThis_;
 
+    /* Prevent spurious accessing-callee-after-rval assert. */
+    args_.calleeHasBeenReset();
+
     if (!optimized())
         return Invoke(cx, args_, 0);
 
 #ifdef JS_METHODJIT
     mjit::JITScript *jit = script_->getJIT(false /* !constructing */);
     if (!jit) {
         /* Watch in case the code was thrown away due a recompile. */
         mjit::CompileStatus status = mjit::TryCompile(cx, frame_.fp());
@@ -651,16 +631,37 @@ class PrimitiveBehavior<double> {
   public:
     static inline bool isType(const Value &v) { return v.isNumber(); }
     static inline double extract(const Value &v) { return v.toNumber(); }
     static inline Class *getClass() { return &js_NumberClass; }
 };
 
 } // namespace detail
 
+template <typename T>
+bool
+GetPrimitiveThis(JSContext *cx, Value *vp, T *v)
+{
+    typedef detail::PrimitiveBehavior<T> Behavior;
+
+    const Value &thisv = vp[1];
+    if (Behavior::isType(thisv)) {
+        *v = Behavior::extract(thisv);
+        return true;
+    }
+
+    if (thisv.isObject() && thisv.toObject().getClass() == Behavior::getClass()) {
+        *v = Behavior::extract(thisv.toObject().getPrimitiveThis());
+        return true;
+    }
+
+    ReportIncompatibleMethod(cx, vp, Behavior::getClass());
+    return false;
+}
+
 /*
  * Compute the implicit |this| parameter for a call expression where the callee
  * is an unqualified name reference.
  *
  * We can avoid computing |this| eagerly and push the implicit callee-coerced
  * |this| value, undefined, according to this decision tree:
  *
  * 1. If the called value, funval, is not an object, bind |this| to undefined.
@@ -728,35 +729,35 @@ ComputeImplicitThis(JSContext *cx, JSObj
     obj = obj->thisObject(cx);
     if (!obj)
         return false;
 
     vp->setObject(*obj);
     return true;
 }
 
-template <typename T>
-bool
-GetPrimitiveThis(JSContext *cx, Value *vp, T *v)
+inline bool
+ComputeThis(JSContext *cx, JSStackFrame *fp)
 {
-    typedef detail::PrimitiveBehavior<T> Behavior;
-
-    const Value &thisv = vp[1];
-    if (Behavior::isType(thisv)) {
-        *v = Behavior::extract(thisv);
+    Value &thisv = fp->thisValue();
+    if (thisv.isObject())
         return true;
+    if (fp->isFunctionFrame()) {
+        if (fp->fun()->inStrictMode())
+            return true;
+        /*
+         * Eval function frames have their own |this| slot, which is a copy of the function's
+         * |this| slot. If we lazily wrap a primitive |this| in an eval function frame, the
+         * eval's frame will get the wrapper, but the function's frame will not. To prevent
+         * this, we always wrap a function's |this| before pushing an eval frame, and should
+         * thus never see an unwrapped primitive in a non-strict eval function frame.
+         */
+        JS_ASSERT(!fp->isEvalFrame());
     }
-
-    if (thisv.isObject() && thisv.toObject().getClass() == Behavior::getClass()) {
-        *v = Behavior::extract(thisv.toObject().getPrimitiveThis());
-        return true;
-    }
-
-    ReportIncompatibleMethod(cx, vp, Behavior::getClass());
-    return false;
+    return BoxNonStrictThis(cx, fp->callReceiver());
 }
 
 /*
  * Return an object on which we should look for the properties of |value|.
  * This helps us implement the custom [[Get]] method that ES5's GetValue
  * algorithm uses for primitive values, without actually constructing the
  * temporary object that the specification does.
  * 
--- a/js/src/jsiter.cpp
+++ b/js/src/jsiter.cpp
@@ -85,17 +85,19 @@ using namespace js::gc;
 using namespace js::types;
 
 static void iterator_finalize(JSContext *cx, JSObject *obj);
 static void iterator_trace(JSTracer *trc, JSObject *obj);
 static JSObject *iterator_iterator(JSContext *cx, JSObject *obj, JSBool keysonly);
 
 Class js_IteratorClass = {
     "Iterator",
-    JSCLASS_HAS_PRIVATE | JSCLASS_HAS_CACHED_PROTO(JSProto_Iterator),
+    JSCLASS_HAS_PRIVATE |
+    JSCLASS_CONCURRENT_FINALIZER |
+    JSCLASS_HAS_CACHED_PROTO(JSProto_Iterator),
     PropertyStub,         /* addProperty */
     PropertyStub,         /* delProperty */
     PropertyStub,         /* getProperty */
     StrictPropertyStub,   /* setProperty */
     EnumerateStub,
     ResolveStub,
     ConvertStub,
     iterator_finalize,
@@ -430,17 +432,17 @@ NewIteratorObject(JSContext *cx, uintN f
          * parent. However, code in jstracer.cpp and elsewhere may find such a
          * native enumerator object via the stack and (as for all objects that
          * are not stillborn, with the exception of "NoSuchMethod" internal
          * helper objects) expect it to have a non-null map pointer, so we
          * share an empty Enumerator scope in the runtime.
          */
         JSObject *obj = js_NewGCObject(cx, FINALIZE_OBJECT0);
         if (!obj)
-            return false;
+            return NULL;
 
         EmptyShape *emptyEnumeratorShape = EmptyShape::getEmptyEnumeratorShape(cx);
         if (!emptyEnumeratorShape)
             return NULL;
         obj->init(cx, &js_IteratorClass, cx->getTypeEmpty(), NULL, NULL, false);
         obj->setMap(emptyEnumeratorShape);
         return obj;
     }
@@ -559,18 +561,16 @@ bool
 EnumeratedIdVectorToIterator(JSContext *cx, JSObject *obj, uintN flags, AutoIdVector &props, Value *vp)
 {
     if (!(flags & JSITER_FOREACH))
         return VectorToKeyIterator(cx, obj, flags, props, vp);
 
     return VectorToValueIterator(cx, obj, flags, props, vp);
 }
 
-typedef Vector<uint32, 8> ShapeVector;
-
 static inline void
 UpdateNativeIterator(NativeIterator *ni, JSObject *obj)
 {
     // Update the object for which the native iterator is associated, so
     // SuppressDeletedPropertyHelper will recognize the iterator as a match.
     ni->obj = obj;
 }
 
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -843,16 +843,26 @@ obj_toStringHelper(JSContext *cx, JSObje
     chars[nchars] = 0;
 
     JSString *str = js_NewString(cx, chars, nchars);
     if (!str)
         cx->free_(chars);
     return str;
 }
 
+JSObject *
+NonNullObject(JSContext *cx, const Value &v)
+{
+    if (v.isPrimitive()) {
+        JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NOT_NONNULL_OBJECT);
+        return NULL;
+    }
+    return &v.toObject();
+}
+
 }
 
 /* ES5 15.2.4.2.  Note steps 1 and 2 are errata. */
 static JSBool
 obj_toString(JSContext *cx, uintN argc, Value *vp)
 {
     Value &thisv = vp[1];
 
@@ -926,79 +936,29 @@ js_CheckContentSecurityPolicy(JSContext 
                      callbacks->contentSecurityPolicyAllows(cx));
 
         // update the cache in the global object for the result of the security check
         js_SetReservedSlot(cx, global, JSRESERVED_GLOBAL_EVAL_ALLOWED, v);
     }
     return !v.isFalse();
 }
 
-/*
- * Check whether principals subsumes scopeobj's principals, and return true
- * if so (or if scopeobj has no principals, for backward compatibility with
- * the JS API, which does not require principals), and false otherwise.
- */
-JSBool
-js_CheckPrincipalsAccess(JSContext *cx, JSObject *scopeobj,
-                         JSPrincipals *principals, JSAtom *caller)
-{
-    JSSecurityCallbacks *callbacks;
-    JSPrincipals *scopePrincipals;
-
-    callbacks = JS_GetSecurityCallbacks(cx);
-    if (callbacks && callbacks->findObjectPrincipals) {
-        scopePrincipals = callbacks->findObjectPrincipals(cx, scopeobj);
-        if (!principals || !scopePrincipals ||
-            !principals->subsume(principals, scopePrincipals)) {
-            JSAutoByteString callerstr;
-            if (!js_AtomToPrintableString(cx, caller, &callerstr))
-                return JS_FALSE;
-            JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
-                                 JSMSG_BAD_INDIRECT_CALL, callerstr.ptr());
-            return JS_FALSE;
-        }
-    }
-    return JS_TRUE;
-}
-
-static bool
-CheckScopeChainValidity(JSContext *cx, JSObject *scopeobj)
-{
-    JSObject *inner = scopeobj;
+static void
+AssertScopeChainValidity(JSContext *cx, JSObject &scopeobj)
+{
+#ifdef DEBUG
+    JSObject *inner = &scopeobj;
     OBJ_TO_INNER_OBJECT(cx, inner);
-    if (!inner)
-        return false;
-    JS_ASSERT(inner == scopeobj);
-
-    /* XXX This is an awful gross hack. */
-    while (scopeobj) {
-        JSObjectOp op = scopeobj->getClass()->ext.innerObject;
-        if (op && op(cx, scopeobj) != scopeobj) {
-            JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_INDIRECT_CALL,
-                                 js_eval_str);
-            return false;
-        }
-        scopeobj = scopeobj->getParent();
-    }
-
-    return true;
-}
-
-const char *
-js_ComputeFilename(JSContext *cx, JSStackFrame *caller,
-                   JSPrincipals *principals, uintN *linenop)
-{
-    jsbytecode *pc = caller->pc(cx);
-    if (pc && js_GetOpcode(cx, caller->script(), pc) == JSOP_EVAL) {
-        JS_ASSERT(js_GetOpcode(cx, caller->script(), pc + JSOP_EVAL_LENGTH) == JSOP_LINENO);
-        *linenop = GET_UINT16(pc + JSOP_EVAL_LENGTH);
-    } else {
-        *linenop = js_FramePCToLineNumber(cx, caller);
-    }
-    return caller->script()->filename;
+    JS_ASSERT(inner && inner == &scopeobj);
+
+    for (JSObject *o = &scopeobj; o; o = o->getParent()) {
+        if (JSObjectOp op = o->getClass()->ext.innerObject)
+            JS_ASSERT(op(cx, o) == &scopeobj);
+    }
+#endif
 }
 
 #ifndef EVAL_CACHE_CHAIN_LIMIT
 # define EVAL_CACHE_CHAIN_LIMIT 4
 #endif
 
 static inline JSScript **
 EvalCacheHash(JSContext *cx, JSLinearString *str)
@@ -1014,17 +974,17 @@ EvalCacheHash(JSContext *cx, JSLinearStr
 
     h *= JS_GOLDEN_RATIO;
     h >>= 32 - JS_EVAL_CACHE_SHIFT;
     return &JS_SCRIPTS_TO_GC(cx)[h];
 }
 
 static JS_ALWAYS_INLINE JSScript *
 EvalCacheLookup(JSContext *cx, JSLinearString *str, JSStackFrame *caller, uintN staticLevel,
-                JSPrincipals *principals, JSObject *scopeobj, JSScript **bucket)
+                JSPrincipals *principals, JSObject &scopeobj, JSScript **bucket)
 {
     /*
      * Cache local eval scripts indexed by source qualified by scope.
      *
      * An eval cache entry should never be considered a hit unless its
      * strictness matches that of the new eval code. The existing code takes
      * care of this, because hits are qualified by the function from which
      * eval was called, whose strictness doesn't change. (We don't cache evals
@@ -1080,17 +1040,17 @@ EvalCacheLookup(JSContext *cx, JSLinearS
                             objarray = script->regexps();
                             i = 0;
                         } else {
                             EVAL_CACHE_METER(noscope);
                             i = -1;
                         }
                     }
                     if (i < 0 ||
-                        objarray->vector[i]->getParent() == scopeobj) {
+                        objarray->vector[i]->getParent() == &scopeobj) {
                         JS_ASSERT(staticLevel == script->staticLevel);
                         EVAL_CACHE_METER(hit);
                         *scriptp = script->u.nextToGC;
                         script->u.nextToGC = NULL;
                         return script;
                     }
                 }
             }
@@ -1099,254 +1059,340 @@ EvalCacheLookup(JSContext *cx, JSLinearS
         if (++count == EVAL_CACHE_CHAIN_LIMIT)
             return NULL;
         EVAL_CACHE_METER(step);
         scriptp = &script->u.nextToGC;
     }
     return NULL;
 }
 
-/* ES5 15.1.2.1. */
-static JSBool
-eval(JSContext *cx, uintN argc, Value *vp)
-{
-    /*
-     * NB: This method handles only indirect eval: direct eval is handled by
-     *     JSOP_EVAL.
-     */
-
-    JSStackFrame *caller = js_GetScriptedCaller(cx, NULL);
-
-    /* FIXME Bug 602994: This really should be perfectly cromulent. */
-    if (!caller) {
-        /* Eval code needs to inherit principals from the caller. */
-        JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
-                             JSMSG_BAD_INDIRECT_CALL, js_eval_str);
-        return false;
-    }
-
-    return EvalKernel(cx, argc, vp, INDIRECT_EVAL, caller, vp[0].toObject().getGlobal());
-}
-
-namespace js {
-
-bool
-EvalKernel(JSContext *cx, uintN argc, Value *vp, EvalType evalType, JSStackFrame *caller,
-           JSObject *scopeobj)
-{
-    /*
-     * FIXME Bug 602994: Calls with no scripted caller should be permitted and
-     *       should be implemented as indirect calls.
-     */
-    JS_ASSERT(caller);
-    JS_ASSERT(scopeobj);
-
-    /*
-     * We once supported a second argument to eval to use as the scope chain
-     * when evaluating the code string.  Warn when such uses are seen so that
-     * authors will know that support for eval(s, o) has been removed.
-     */
-    JSScript *callerScript = caller->script();
-    if (argc > 1 && !callerScript->warnedAboutTwoArgumentEval) {
-        static const char TWO_ARGUMENT_WARNING[] =
-            "Support for eval(code, scopeObject) has been removed. "
-            "Use |with (scopeObject) eval(code);| instead.";
-        if (!JS_ReportWarning(cx, TWO_ARGUMENT_WARNING))
-            return false;
-        callerScript->warnedAboutTwoArgumentEval = true;
-    }
+/*
+ * There are two things we want to do with each script executed in EvalKernel:
+ *  1. notify jsdbgapi about script creation/destruction
+ *  2. add the script to the eval cache when EvalKernel is finished
+ *
+ * NB: Although the eval cache keeps a script alive wrt to the JS engine, from
+ * a jsdbgapi user's perspective, we want each eval() to create and destroy a
+ * script. This hides implementation details and means we don't have to deal
+ * with calls to JS_GetScriptObject for scripts in the eval cache (currently,
+ * script->u.object aliases script->u.nextToGC).
+ */
+class EvalScriptGuard
+{
+    JSContext *cx_;
+    JSLinearString *str_;
+    JSScript **bucket_;
+    JSScript *script_;
+
+  public:
+    EvalScriptGuard(JSContext *cx, JSLinearString *str)
+      : cx_(cx),
+        str_(str),
+        script_(NULL) {
+        bucket_ = EvalCacheHash(cx, str);
+    }
+
+    ~EvalScriptGuard() {
+        if (script_) {
+            js_CallDestroyScriptHook(cx_, script_);
+            script_->isCachedEval = true;
+            script_->u.nextToGC = *bucket_;
+            *bucket_ = script_;
+#ifdef CHECK_SCRIPT_OWNER
+            script_->owner = NULL;
+#endif
+        }
+    }
+
+    void lookupInEvalCache(JSStackFrame *caller, uintN staticLevel,
+                           JSPrincipals *principals, JSObject &scopeobj) {
+        if (JSScript *found = EvalCacheLookup(cx_, str_, caller, staticLevel,
+                                              principals, scopeobj, bucket_)) {
+            js_CallNewScriptHook(cx_, found, NULL);
+            script_ = found;
+        }
+    }
+
+    void setNewScript(JSScript *script) {
+        /* NewScriptFromCG has already called js_CallNewScriptHook. */
+        JS_ASSERT(!script_ && script);
+        script_ = script;
+    }
+
+    bool foundScript() {
+        return !!script_;
+    }
+
+    JSScript *script() const {
+        JS_ASSERT(script_);
+        return script_;
+    }
+};
+
+/*
+ * Common code implementing direct and indirect eval.
+ *
+ * Evaluate call.argv[2], if it is a string, in the context of the given calling
+ * frame, with the provided scope chain, with the semantics of either a direct
+ * or indirect eval (see ES5 10.4.2).  If this is an indirect eval, scopeobj
+ * must be a global object.
+ *
+ * On success, store the completion value in call.rval and return true.
+ */
+enum EvalType { DIRECT_EVAL, INDIRECT_EVAL };
+
+static bool
+EvalKernel(JSContext *cx, const CallArgs &call, EvalType evalType, JSStackFrame *caller,
+           JSObject &scopeobj)
+{
+    JS_ASSERT((evalType == INDIRECT_EVAL) == (caller == NULL));
+    AssertScopeChainValidity(cx, scopeobj);
 
     /*
      * CSP check: Is eval() allowed at all?
      * Report errors via CSP is done in the script security mgr.
      */
-    if (!js_CheckContentSecurityPolicy(cx, scopeobj)) {
+    if (!js_CheckContentSecurityPolicy(cx, &scopeobj)) {
         JS_ReportError(cx, "call to eval() blocked by CSP");
         return false;
     }
 
     /* ES5 15.1.2.1 step 1. */
-    if (argc < 1) {
-        vp->setUndefined();
+    if (call.argc() < 1) {
+        call.rval().setUndefined();
         return true;
     }
-    if (!vp[2].isString()) {
-        *vp = vp[2];
+    if (!call[0].isString()) {
+        call.rval() = call[0];
         return true;
     }
-    JSString *str = vp[2].toString();
+    JSString *str = call[0].toString();
 
     /* ES5 15.1.2.1 steps 2-8. */
-    JSObject *callee = JSVAL_TO_OBJECT(JS_CALLEE(cx, Jsvalify(vp)));
-    JS_ASSERT(IsAnyBuiltinEval(callee->getFunctionPrivate()));
-    JSPrincipals *principals = js_EvalFramePrincipals(cx, callee, caller);
 
     /*
      * Per ES5, indirect eval runs in the global scope. (eval is specified this
      * way so that the compiler can make assumptions about what bindings may or
      * may not exist in the current frame if it doesn't see 'eval'.)
      */
     uintN staticLevel;
     if (evalType == DIRECT_EVAL) {
         staticLevel = caller->script()->staticLevel + 1;
 
 #ifdef DEBUG
         jsbytecode *callerPC = caller->pc(cx);
         JS_ASSERT_IF(caller->isFunctionFrame(), caller->fun()->isHeavyweight());
         JS_ASSERT(callerPC && js_GetOpcode(cx, caller->script(), callerPC) == JSOP_EVAL);
 #endif
     } else {
-        /* Pretend that we're top level. */
+        JS_ASSERT(call.callee().getGlobal() == &scopeobj);
         staticLevel = 0;
-
-        JS_ASSERT(scopeobj == scopeobj->getGlobal());
-        JS_ASSERT(scopeobj->isGlobal());
-    }
-
-    /* Ensure we compile this eval with the right object in the scope chain. */
-    if (!CheckScopeChainValidity(cx, scopeobj))
-        return false;
+    }
 
     JSLinearString *linearStr = str->ensureLinear(cx);
     if (!linearStr)
         return false;
     const jschar *chars = linearStr->chars();
     size_t length = linearStr->length();
 
     /*
      * If the eval string starts with '(' and ends with ')', it may be JSON.
      * Try the JSON parser first because it's much faster.  If the eval string
      * isn't JSON, JSON parsing will probably fail quickly, so little time
      * will be lost.
      */
     if (length > 2 && chars[0] == '(' && chars[length - 1] == ')') {
 #if USE_OLD_AND_BUSTED_JSON_PARSER
-        JSONParser *jp = js_BeginJSONParse(cx, vp, /* suppressErrors = */true);
+        Value tmp;
+        JSONParser *jp = js_BeginJSONParse(cx, &tmp, /* suppressErrors = */true);
         if (jp != NULL) {
             /* Run JSON-parser on string inside ( and ). */
             bool ok = js_ConsumeJSONText(cx, jp, chars + 1, length - 2);
             ok &= js_FinishJSONParse(cx, jp, NullValue());
-            if (ok)
+            if (ok) {
+                call.rval() = tmp;
                 return true;
-        }
+            }
 #else
         JSONSourceParser parser(cx, chars + 1, length - 2, JSONSourceParser::StrictJSON,
                                 JSONSourceParser::NoError);
-        if (!parser.parse(vp))
+        Value tmp;
+        if (!parser.parse(&tmp))
             return false;
-        if (!vp->isUndefined())
+        if (!tmp.isUndefined()) {
+            call.rval() = tmp;
             return true;
+        }
 #endif
     }
 
     /*
      * Direct calls to eval are supposed to see the caller's |this|. If we
      * haven't wrapped that yet, do so now, before we make a copy of it for
      * the eval code to use.
      */
-    if (evalType == DIRECT_EVAL && !caller->computeThis(cx))
+    if (evalType == DIRECT_EVAL && !ComputeThis(cx, caller))
         return false;
 
-    JSScript *script = NULL;
-    JSScript **bucket = EvalCacheHash(cx, linearStr);
-    if (evalType == DIRECT_EVAL && caller->isNonEvalFunctionFrame()) {
-        script = EvalCacheLookup(cx, linearStr, caller, staticLevel, principals, scopeobj, bucket);
-
-        /*
-         * Although the eval cache keeps a script alive from the perspective of
-         * the JS engine, from a jsdbgapi user's perspective each eval()
-         * creates and destroys a script. This hides implementation details and
-         * allows jsdbgapi clients to avoid calling JS_GetScriptObject after a
-         * script has been returned to the eval cache, which is invalid since
-         * script->u.object aliases script->u.nextToGC.
-         */
-        if (script) {
-            js_CallNewScriptHook(cx, script, NULL);
-            MUST_FLOW_THROUGH("destroy");
-        }
-    }
-
-    /*
-     * We can't have a callerFrame (down in js::Execute's terms) if we're in
-     * global code (or if we're an indirect eval).
-     */
-    JSStackFrame *callerFrame = (staticLevel != 0) ? caller : NULL;
-    if (!script) {
+    EvalScriptGuard esg(cx, linearStr);
+
+    JSPrincipals *principals = PrincipalsForCompiledCode(call, cx);
+
+    if (evalType == DIRECT_EVAL && caller->isNonEvalFunctionFrame())
+        esg.lookupInEvalCache(caller, staticLevel, principals, scopeobj);
+
+    if (!esg.foundScript()) {
         uintN lineno;
-        const char *filename = js_ComputeFilename(cx, caller, principals, &lineno);
-
+        const char *filename = CurrentScriptFileAndLine(cx, &lineno,
+                                                        evalType == DIRECT_EVAL
+                                                        ? CALLED_FROM_JSOP_EVAL
+                                                        : NOT_CALLED_FROM_JSOP_EVAL);
         uint32 tcflags = TCF_COMPILE_N_GO | TCF_NEED_MUTABLE_SCRIPT | TCF_COMPILE_FOR_EVAL;
-        script = Compiler::compileScript(cx, scopeobj, callerFrame,
-                                         principals, tcflags, chars, length,
-                                         filename, lineno, cx->findVersion(),
-                                         linearStr, staticLevel);
-        if (!script)
+        JSScript *compiled = Compiler::compileScript(cx, &scopeobj, caller, principals, tcflags,
+                                                     chars, length, filename, lineno,
+                                                     cx->findVersion(), linearStr, staticLevel);
+        if (!compiled)
             return false;
-    }
-
-    assertSameCompartment(cx, scopeobj, script);
-
-    /*
-     * Belt-and-braces: check that the lesser of eval's principals and the
-     * caller's principals has access to scopeobj.
-     */
-    JSBool ok = js_CheckPrincipalsAccess(cx, scopeobj, principals,
-                                         cx->runtime->atomState.evalAtom) &&
-                Execute(cx, scopeobj, script, callerFrame, JSFRAME_EVAL, vp);
-
-    MUST_FLOW_LABEL(destroy);
-    js_CallDestroyScriptHook(cx, script);
-
-    script->isCachedEval = true;
-    script->u.nextToGC = *bucket;
-    *bucket = script;
-#ifdef CHECK_SCRIPT_OWNER
-    script->owner = NULL;
-#endif
-
-    return ok;
+
+        esg.setNewScript(compiled);
+    }
+
+    return Execute(cx, scopeobj, esg.script(), caller, JSFRAME_EVAL, &call.rval());
+}
+
+/*
+ * We once supported a second argument to eval to use as the scope chain
+ * when evaluating the code string.  Warn when such uses are seen so that
+ * authors will know that support for eval(s, o) has been removed.
+ */
+static inline bool
+WarnOnTooManyArgs(JSContext *cx, const CallArgs &call)
+{
+    if (call.argc() > 1) {
+        if (JSStackFrame *caller = js_GetScriptedCaller(cx, NULL)) {
+            if (!caller->script()->warnedAboutTwoArgumentEval) {
+                static const char TWO_ARGUMENT_WARNING[] =
+                    "Support for eval(code, scopeObject) has been removed. "
+                    "Use |with (scopeObject) eval(code);| instead.";
+                if (!JS_ReportWarning(cx, TWO_ARGUMENT_WARNING))
+                    return false;
+                caller->script()->warnedAboutTwoArgumentEval = true;
+            }
+        } else {
+            /*
+             * In the case of an indirect call without a caller frame, avoid a
+             * potential warning-flood by doing nothing.
+             */
+        }
+    }
+
+    return true;
+}
+
+/*
+ * ES5 15.1.2.1.
+ *
+ * NB: This method handles only indirect eval.
+ */
+static JSBool
+eval(JSContext *cx, uintN argc, Value *vp)
+{
+    CallArgs call = CallArgsFromVp(argc, vp);
+    return WarnOnTooManyArgs(cx, call) &&
+           EvalKernel(cx, call, INDIRECT_EVAL, NULL, *call.callee().getGlobal());
+}
+
+namespace js {
+
+bool
+DirectEval(JSContext *cx, const CallArgs &call)
+{
+    /* Direct eval can assume it was called from an interpreted frame. */
+    JSStackFrame *caller = cx->fp();
+    JS_ASSERT(caller->isScriptFrame());
+    JS_ASSERT(IsBuiltinEvalForScope(&caller->scopeChain(), call.calleev()));
+    JS_ASSERT(*cx->regs->pc == JSOP_EVAL);
+
+    AutoFunctionCallProbe callProbe(cx, call.callee().getFunctionPrivate(), caller->script());
+
+    JSObject *scopeChain =
+        GetScopeChainFast(cx, caller, JSOP_EVAL, JSOP_EVAL_LENGTH + JSOP_LINENO_LENGTH);
+
+    return scopeChain &&
+           WarnOnTooManyArgs(cx, call) &&
+           EvalKernel(cx, call, DIRECT_EVAL, caller, *scopeChain);
 }
 
 bool
 IsBuiltinEvalForScope(JSObject *scopeChain, const Value &v)
 {
     JSObject *global = scopeChain->getGlobal();
     JS_ASSERT((global->getClass()->flags & JSCLASS_GLOBAL_FLAGS) == JSCLASS_GLOBAL_FLAGS);
     return global->getReservedSlot(JSRESERVED_GLOBAL_EVAL) == v;
 }
 
 bool
 IsAnyBuiltinEval(JSFunction *fun)
 {
     return fun->maybeNative() == eval;
 }
 
-}
+JSPrincipals *
+PrincipalsForCompiledCode(const CallArgs &call, JSContext *cx)
+{
+    JS_ASSERT(IsAnyBuiltinEval(call.callee().getFunctionPrivate()) ||
+              IsBuiltinFunctionConstructor(call.callee().getFunctionPrivate()));
+
+    /*
+     * To compute the principals of the compiled eval/Function code, we simply
+     * use the callee's principals. To see why the caller's principals are
+     * ignored, consider first that, in the capability-model we assume, the
+     * high-privileged eval/Function should never have escaped to the
+     * low-privileged caller. (For the Mozilla embedding, this is brute-enforced
+     * by explicit filtering by wrappers.) Thus, the caller's privileges should
+     * subsume the callee's.
+     *
+     * In the converse situation, where the callee has lower privileges than the
+     * caller, we might initially guess that the caller would want to retain
+     * their higher privileges in the generated code. However, since the
+     * compiled code will be run with the callee's scope chain, this would make
+     * fp->script()->compartment() != fp->compartment().
+     */
+
+    JSPrincipals *calleePrincipals = call.callee().compartment()->principals;
+
+#ifdef DEBUG
+    if (calleePrincipals) {
+        if (JSStackFrame *caller = js_GetScriptedCaller(cx, NULL)) {
+            if (JSPrincipals *callerPrincipals = caller->principals(cx)) {
+                JS_ASSERT(callerPrincipals->subsume(callerPrincipals, calleePrincipals));
+            }
+        }
+    }
+#endif
+
+    return calleePrincipals;
+}
+
+}  /* namespace js */
 
 #if JS_HAS_OBJ_WATCHPOINT
 
 static JSBool
 obj_watch_handler(JSContext *cx, JSObject *obj, jsid id, jsval old,
                   jsval *nvp, void *closure)
 {
     JSObject *callable = (JSObject *) closure;
-    JSSecurityCallbacks *callbacks = JS_GetSecurityCallbacks(cx);
-    if (callbacks && callbacks->findObjectPrincipals) {
-        /* Skip over any obj_watch_* frames between us and the real subject. */
+    if (JSPrincipals *watcher = callable->principals(cx)) {
         if (JSStackFrame *caller = js_GetScriptedCaller(cx, NULL)) {
-            /*
-             * Only call the watch handler if the watcher is allowed to watch
-             * the currently executing script.
-             */
-            JSPrincipals *watcher = callbacks->findObjectPrincipals(cx, callable);
-            JSPrincipals *subject = js_StackFramePrincipals(cx, caller);
-
-            if (watcher && subject && !watcher->subsume(watcher, subject)) {
-                /* Silently don't call the watch handler. */
-                return true;
+            if (JSPrincipals *subject = caller->principals(cx)) {
+                if (!watcher->subsume(watcher, subject)) {
+                    /* Silently don't call the watch handler. */
+                    return JS_TRUE;
+                }
             }
         }
     }
 
     /* Avoid recursion on (obj, id) already being watched on cx. */
     AutoResolving resolving(cx, obj, id, AutoResolving::WATCH);
     if (resolving.alreadyStarted())
         return true;
@@ -1585,30 +1631,31 @@ js_PropertyIsEnumerable(JSContext *cx, J
 const char js_defineGetter_str[] = "__defineGetter__";
 const char js_defineSetter_str[] = "__defineSetter__";
 const char js_lookupGetter_str[] = "__lookupGetter__";
 const char js_lookupSetter_str[] = "__lookupSetter__";
 
 JS_FRIEND_API(JSBool)
 js_obj_defineGetter(JSContext *cx, uintN argc, Value *vp)
 {
-    if (!BoxThisForVp(cx, vp))
+    CallArgs call = CallArgsFromVp(argc, vp);
+    if (!BoxNonStrictThis(cx, call))
         return false;
-    JSObject *obj = &vp[1].toObject();
-
-    if (argc <= 1 || !js_IsCallable(vp[3])) {
+    JSObject *obj = &call.thisv().toObject();
+
+    if (argc <= 1 || !js_IsCallable(call[1])) {
         JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
                              JSMSG_BAD_GETTER_OR_SETTER,
                              js_getter_str);
         return JS_FALSE;
     }
-    PropertyOp getter = CastAsPropertyOp(&vp[3].toObject());
+    PropertyOp getter = CastAsPropertyOp(&call[1].toObject());
 
     jsid id;
-    if (!ValueToId(cx, vp[2], &id))
+    if (!ValueToId(cx, call[0], &id))
         return JS_FALSE;
     if (!CheckRedeclaration(cx, obj, id, JSPROP_GETTER))
         return JS_FALSE;
     /*
      * Getters and setters are just like watchpoints from an access
      * control point of view.
      */
     Value junk;
@@ -1616,38 +1663,39 @@ js_obj_defineGetter(JSContext *cx, uintN
     if (!CheckAccess(cx, obj, id, JSACC_WATCH, &junk, &attrs))
         return JS_FALSE;
 
     if (!cx->addTypePropertyId(obj->getType(), id, TYPE_UNKNOWN))
         return JS_FALSE;
     if (!cx->markTypePropertyConfigured(obj->getType(), id))
         return false;
 
-    vp->setUndefined();
+    call.rval().setUndefined();
     return obj->defineProperty(cx, id, UndefinedValue(), getter, StrictPropertyStub,
                                JSPROP_ENUMERATE | JSPROP_GETTER | JSPROP_SHARED);
 }
 
 JS_FRIEND_API(JSBool)
 js_obj_defineSetter(JSContext *cx, uintN argc, Value *vp)
 {
-    if (!BoxThisForVp(cx, vp))
+    CallArgs call = CallArgsFromVp(argc, vp);
+    if (!BoxNonStrictThis(cx, call))
         return false;
-    JSObject *obj = &vp[1].toObject();
-
-    if (argc <= 1 || !js_IsCallable(vp[3])) {
+    JSObject *obj = &call.thisv().toObject();
+
+    if (argc <= 1 || !js_IsCallable(call[1])) {
         JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
                              JSMSG_BAD_GETTER_OR_SETTER,
                              js_setter_str);
         return JS_FALSE;
     }
-    StrictPropertyOp setter = CastAsStrictPropertyOp(&vp[3].toObject());
+    StrictPropertyOp setter = CastAsStrictPropertyOp(&call[1].toObject());
 
     jsid id;
-    if (!ValueToId(cx, vp[2], &id))
+    if (!ValueToId(cx, call[0], &id))
         return JS_FALSE;
     if (!CheckRedeclaration(cx, obj, id, JSPROP_SETTER))
         return JS_FALSE;
     /*
      * Getters and setters are just like watchpoints from an access
      * control point of view.
      */
     Value junk;
@@ -1655,17 +1703,17 @@ js_obj_defineSetter(JSContext *cx, uintN
     if (!CheckAccess(cx, obj, id, JSACC_WATCH, &junk, &attrs))
         return JS_FALSE;
 
     if (!cx->addTypePropertyId(obj->getType(), id, TYPE_UNKNOWN))
         return JS_FALSE;
     if (!cx->markTypePropertyConfigured(obj->getType(), id))
         return false;
 
-    vp->setUndefined();
+    call.rval().setUndefined();
     return obj->defineProperty(cx, id, UndefinedValue(), PropertyStub, setter,
                                JSPROP_ENUMERATE | JSPROP_SETTER | JSPROP_SHARED);
 }
 
 static JSBool
 obj_lookupGetter(JSContext *cx, uintN argc, Value *vp)
 {
     jsid id;
@@ -3022,16 +3070,19 @@ NewObjectWithClassProto(JSContext *cx, C
 {
     JS_ASSERT(clasp->isNative());
     gc::FinalizeKind kind = gc::FinalizeKind(_kind);
 
     TypeObject *type = proto->getNewType(cx);
     if (!type)
         return NULL;
 
+    if (CanBeFinalizedInBackground(kind, clasp))
+        kind = (gc::FinalizeKind)(kind + 1);
+
     JSObject* obj = js_NewGCObject(cx, kind);
     if (!obj)
         return NULL;
 
     if (!obj->initSharingEmptyShape(cx, clasp, type, proto->getParent(), NULL, kind))
         return NULL;
     return obj;
 }
@@ -3208,17 +3259,17 @@ js_InferFlags(JSContext *cx, uintN defau
 
     JSScript *script;
     pc = fp->inlinepc(cx, &script);
 
     cs = &js_CodeSpec[js_GetOpcode(cx, script, pc)];
     format = cs->format;
     if (JOF_MODE(format) != JOF_NAME)
         flags |= JSRESOLVE_QUALIFIED;
-    if ((format & (JOF_SET | JOF_FOR)) || fp->isAssigning()) {
+    if (format & (JOF_SET | JOF_FOR)) {
         flags |= JSRESOLVE_ASSIGNING;
     } else if (cs->length >= 0) {
         pc += cs->length;
         if (pc < script->code + script->length && Detecting(cx, pc))
             flags |= JSRESOLVE_DETECTING;
     }
     if (format & JOF_DECLARING)
         flags |= JSRESOLVE_DECLARING;
@@ -5516,17 +5567,17 @@ js_NativeGetInline(JSContext *cx, JSObje
         JS_ASSERT(!vp->isMagic());
     } else {
         vp->setUndefined();
     }
     if (shape->hasDefaultGetter())
         return true;
 
     if (JS_UNLIKELY(shape->isMethod()) && (getHow & JSGET_NO_METHOD_BARRIER)) {
-        JS_ASSERT(&shape->methodObject() == &vp->toObject());
+        JS_ASSERT(shape->methodObject() == vp->toObject());
         return true;
     }
 
     sample = cx->runtime->propertyRemovals;
     {
         AutoShapeRooter tvr(cx, shape);
         AutoObjectRooter tvr2(cx, pobj);
         if (!shape->get(cx, receiver, obj, pobj, vp))
@@ -5985,26 +6036,26 @@ js_SetPropertyHelper(JSContext *cx, JSOb
              * Forget we found the proto-property now that we've copied any
              * needed member values.
              */
             shape = NULL;
         }
 
         JS_ASSERT_IF(shape && shape->isMethod(), pobj->hasMethodBarrier());
         JS_ASSERT_IF(shape && shape->isMethod(),
-                     &pobj->getSlot(shape->slot).toObject() == &shape->methodObject());
+                     pobj->getSlot(shape->slot).toObject() == shape->methodObject());
         if (shape && (defineHow & JSDNP_SET_METHOD)) {
             /*
              * JSOP_SETMETHOD is assigning to an existing own property. If it
              * is an identical method property, do nothing. Otherwise downgrade
              * to ordinary assignment. Either way, do not fill the property
              * cache, as the interpreter has no fast path for these unusual
              * cases.
              */
-            bool identical = shape->isMethod() && &shape->methodObject() == &vp->toObject();
+            bool identical = shape->isMethod() && shape->methodObject() == vp->toObject();
             if (!identical) {
                 shape = obj->methodShapeChange(cx, *shape);
                 if (!shape)
                     return false;
 
                 JSObject *funobj = &vp->toObject();
                 JSFunction *fun = funobj->getFunctionPrivate();
                 if (fun == funobj) {
@@ -6204,23 +6255,23 @@ js_DeleteProperty(JSContext *cx, JSObjec
             JSObject *funobj;
 
             if (IsFunctionObject(v, &funobj)) {
                 JSFunction *fun = GET_FUNCTION_PRIVATE(cx, funobj);
 
                 if (fun != funobj) {
                     for (JSStackFrame *fp = cx->maybefp(); fp; fp = fp->prev()) {
                         if (fp->isFunctionFrame() &&
-                            &fp->callee() == &fun->compiledFunObj() &&
+                            fp->callee() == fun->compiledFunObj() &&
                             fp->thisValue().isObject())
                         {
                             JSObject *tmp = &fp->thisValue().toObject();
                             do {
                                 if (tmp == obj) {
-                                    fp->calleeValue().setObject(*funobj);
+                                    fp->calleev().setObject(*funobj);
                                     break;
                                 }
                             } while ((tmp = tmp->getProto()) != NULL);
                         }
                     }
                 }
             }
         }
@@ -6807,33 +6858,47 @@ js_PrintObjectSlotName(JSTracer *trc, ch
             PutEscapedString(buf, bufsize, JSID_TO_ATOM(id), 0);
         } else {
             JS_snprintf(buf, bufsize, "**FINALIZED ATOM KEY**");
         }
     }
 }
 #endif
 
-void
+static const Shape *
+LastConfigurableShape(JSObject *obj)
+{
+    for (Shape::Range r(obj->lastProperty()->all()); !r.empty(); r.popFront()) {
+        const Shape *shape = &r.front();
+        if (shape->configurable())
+            return shape;
+    }
+    return NULL;
+}
+
+bool
 js_ClearNative(JSContext *cx, JSObject *obj)
 {
-    /*
-     * Clear obj of all obj's properties. FIXME: we do not clear reserved slots
-     * lying below JSSLOT_FREE(clasp). JS_ClearScope does that.
-     */
-    if (!obj->nativeEmpty()) {
-        /* Now that we're done using real properties, clear obj. */
-        obj->clear(cx);
-
-        /* Clear slot values since obj->clear reset our shape to empty. */
-        uint32 freeslot = JSSLOT_FREE(obj->getClass());
-        uint32 n = obj->numSlots();
-        for (uint32 i = freeslot; i < n; ++i)
-            obj->setSlot(i, UndefinedValue());
-    }
+    /* Remove all configurable properties from obj. */
+    while (const Shape *shape = LastConfigurableShape(obj)) {
+        if (!obj->removeProperty(cx, shape->id))
+            return false;
+    }
+
+    /* Set all remaining writable plain data properties to undefined. */
+    for (Shape::Range r(obj->lastProperty()->all()); !r.empty(); r.popFront()) {
+        const Shape *shape = &r.front();
+        if (shape->isDataDescriptor() &&
+            shape->writable() &&
+            shape->hasDefaultSetter() &&
+            obj->containsSlot(shape->slot)) {
+            obj->setSlot(shape->slot, UndefinedValue());
+        }
+    }
+    return true;
 }
 
 bool
 js_GetReservedSlot(JSContext *cx, JSObject *obj, uint32 slot, Value *vp)
 {
     if (!obj->isNative()) {
         vp->setUndefined();
         return true;
@@ -6867,16 +6932,17 @@ js_SetReservedSlot(JSContext *cx, JSObje
 }
 
 JSObject *
 JSObject::getGlobal() const
 {
     JSObject *obj = const_cast<JSObject *>(this);
     while (JSObject *parent = obj->getParent())
         obj = parent;
+    JS_ASSERT(obj->isGlobal());
     return obj;
 }
 
 JSBool
 js_ReportGetterOnlyAssignment(JSContext *cx)
 {
     return JS_ReportErrorFlagsAndNumber(cx,
                                         JSREPORT_WARNING | JSREPORT_STRICT |
@@ -7259,18 +7325,16 @@ js_DumpStackFrame(JSContext *cx, JSStack
         }
         fputc('\n', stderr);
 
         fprintf(stderr, "  flags:");
         if (fp->isConstructing())
             fprintf(stderr, " constructing");
         if (fp->hasOverriddenArgs())
             fprintf(stderr, " overridden_args");
-        if (fp->isAssigning())
-            fprintf(stderr, " assigning");
         if (fp->isDebuggerFrame())
             fprintf(stderr, " debugger");
         if (fp->isEvalFrame())
             fprintf(stderr, " eval");
         if (fp->isYielding())
             fprintf(stderr, " yielding");
         if (fp->isGeneratorFrame())
             fprintf(stderr, " generator");
--- a/js/src/jsobj.h
+++ b/js/src/jsobj.h
@@ -821,16 +821,19 @@ struct JSObject : js::gc::Cell {
         return privateData;
     }
 
     void setPrivate(void *data) {
         JS_ASSERT(getClass()->flags & JSCLASS_HAS_PRIVATE);
         privateData = data;
     }
 
+    /* N.B. Infallible: NULL means 'no principal', not an error. */
+    inline JSPrincipals *principals(JSContext *cx);
+
     /*
      * ES5 meta-object properties and operations.
      */
 
   private:
     enum ImmutabilityType { SEAL, FREEZE };
 
     /*
@@ -1430,30 +1433,42 @@ struct JSObject : js::gc::Cell {
     inline bool isClonedBlock() const;
     inline bool isCall() const;
     inline bool isRegExp() const;
     inline bool isScript() const;
     inline bool isXML() const;
     inline bool isXMLId() const;
     inline bool isNamespace() const;
     inline bool isQName() const;
+    inline bool isWeakMap() const;
 
     inline bool isProxy() const;
     inline bool isObjectProxy() const;
     inline bool isFunctionProxy() const;
 
     JS_FRIEND_API(bool) isWrapper() const;
     JS_FRIEND_API(JSObject *) unwrap(uintN *flagsp = NULL);
 
     inline void initArrayClass();
 };
 
 /* Check alignment for any fixed slots allocated after the object. */
 JS_STATIC_ASSERT(sizeof(JSObject) % sizeof(js::Value) == 0);
 
+/*
+ * The only sensible way to compare JSObject with == is by identity. We use
+ * const& instead of * as a syntactic way to assert non-null. This leads to an
+ * abundance of address-of operators to identity. Hence this overload.
+ */
+static JS_ALWAYS_INLINE bool
+operator==(const JSObject &lhs, const JSObject &rhs)
+{
+    return &lhs == &rhs;
+}
+
 inline js::Value*
 JSObject::fixedSlots() const {
     return (js::Value*) (jsuword(this) + sizeof(JSObject));
 }
 
 inline size_t
 JSObject::numFixedSlots() const
 {
@@ -2007,38 +2022,29 @@ extern JSBool
 js_TryValueOf(JSContext *cx, JSObject *obj, JSType type, js::Value *rval);
 
 extern JSBool
 js_XDRObject(JSXDRState *xdr, JSObject **objp);
 
 extern void
 js_PrintObjectSlotName(JSTracer *trc, char *buf, size_t bufsize);
 
-extern void
+extern bool
 js_ClearNative(JSContext *cx, JSObject *obj);
 
 extern bool
 js_GetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, js::Value *vp);
 
 extern bool
 js_SetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, const js::Value &v);
 
-extern JSBool
-js_CheckPrincipalsAccess(JSContext *cx, JSObject *scopeobj,
-                         JSPrincipals *principals, JSAtom *caller);
-
 /* For CSP -- checks if eval() and friends are allowed to run. */
 extern JSBool
 js_CheckContentSecurityPolicy(JSContext *cx, JSObject *scopeObj);
 
-/* NB: Infallible. */
-extern const char *
-js_ComputeFilename(JSContext *cx, JSStackFrame *caller,
-                   JSPrincipals *principals, uintN *linenop);
-
 extern JSBool
 js_ReportGetterOnlyAssignment(JSContext *cx);
 
 extern JS_FRIEND_API(JSBool)
 js_GetterOnlyPropertyStub(JSContext *cx, JSObject *obj, jsid id, JSBool strict, jsval *vp);
 
 #ifdef DEBUG
 JS_FRIEND_API(void) js_DumpChars(const jschar *s, size_t n);
@@ -2061,38 +2067,37 @@ js_Object(JSContext *cx, uintN argc, js:
 namespace js {
 
 extern bool
 SetProto(JSContext *cx, JSObject *obj, JSObject *proto, bool checkForCycles);
 
 extern JSString *
 obj_toStringHelper(JSContext *cx, JSObject *obj);
 
-enum EvalType { INDIRECT_EVAL, DIRECT_EVAL };
-
 /*
- * Common code implementing direct and indirect eval.
- *
- * Evaluate vp[2], if it is a string, in the context of the given calling
- * frame, with the provided scope chain, with the semantics of either a direct
- * or indirect eval (see ES5 10.4.2).  If this is an indirect eval, scopeobj
- * must be a global object.
- *
- * On success, store the completion value in *vp and return true.
+ * Performs a direct eval for the given arguments, which must correspond to the
+ * currently-executing stack frame, which must be a script frame. On completion
+ * the result is returned in call.rval.
  */
-extern bool
-EvalKernel(JSContext *cx, uintN argc, js::Value *vp, EvalType evalType, JSStackFrame *caller,
-           JSObject *scopeobj);
+extern JS_REQUIRES_STACK bool
+DirectEval(JSContext *cx, const CallArgs &call);
 
 /*
  * True iff |v| is the built-in eval function for the global object that
  * corresponds to |scopeChain|.
  */
 extern bool
 IsBuiltinEvalForScope(JSObject *scopeChain, const js::Value &v);
 
 /* True iff fun is a built-in eval function. */
 extern bool
 IsAnyBuiltinEval(JSFunction *fun);
 
+/* 'call' should be for the eval/Function native invocation. */
+extern JSPrincipals *
+PrincipalsForCompiledCode(const CallArgs &call, JSContext *cx);
+
+extern JSObject *
+NonNullObject(JSContext *cx, const Value &v);
+
 }
 
 #endif /* jsobj_h___ */
--- a/js/src/jsobjinlines.h
+++ b/js/src/jsobjinlines.h
@@ -48,16 +48,17 @@
 #include "jsiter.h"
 #include "jslock.h"
 #include "jsobj.h"
 #include "jsprobes.h"
 #include "jspropertytree.h"
 #include "jsproxy.h"
 #include "jsscope.h"
 #include "jsstaticcheck.h"
+#include "jstypedarray.h"
 #include "jsxml.h"
 
 /* Headers included for inline implementations used by this header. */
 #include "jsbool.h"
 #include "jscntxt.h"
 #include "jsnum.h"
 #include "jsinferinlines.h"
 #include "jsscopeinlines.h"
@@ -227,17 +228,17 @@ JSObject::setBlockOwnShape(JSContext *cx
  */
 inline const js::Shape *
 JSObject::methodReadBarrier(JSContext *cx, const js::Shape &shape, js::Value *vp)
 {
     JS_ASSERT(canHaveMethodBarrier());
     JS_ASSERT(hasMethodBarrier());
     JS_ASSERT(nativeContains(shape));
     JS_ASSERT(shape.isMethod());
-    JS_ASSERT(&shape.methodObject() == &vp->toObject());
+    JS_ASSERT(shape.methodObject() == vp->toObject());
     JS_ASSERT(shape.writable());
     JS_ASSERT(shape.slot != SHAPE_INVALID_SLOT);
     JS_ASSERT(shape.hasDefaultSetter() || shape.setterOp() == js_watch_set);
     JS_ASSERT(!isGlobal());  /* i.e. we are not changing the global shape */
 
     JSObject *funobj = &vp->toObject();
     JSFunction *fun = funobj->getFunctionPrivate();
     JS_ASSERT(fun == funobj);
@@ -738,17 +739,17 @@ JSObject::setFlatClosureUpvars(js::Value
     getFixedSlotRef(JSSLOT_FLAT_CLOSURE_UPVARS).setPrivate(upvars);
 }
 
 inline bool
 JSObject::hasMethodObj(const JSObject& obj) const
 {
     return JSSLOT_FUN_METHOD_OBJ < numSlots() &&
            getFixedSlot(JSSLOT_FUN_METHOD_OBJ).isObject() &&
-           &getFixedSlot(JSSLOT_FUN_METHOD_OBJ).toObject() == &obj;
+           getFixedSlot(JSSLOT_FUN_METHOD_OBJ).toObject() == obj;
 }
 
 inline void
 JSObject::setMethodObj(JSObject& obj)
 {
     getFixedSlotRef(JSSLOT_FUN_METHOD_OBJ).setObject(obj);
 }
 
@@ -973,20 +974,16 @@ JSObject::init(JSContext *cx, js::Class 
 
     setType(type);
     setParent(parent);
 }
 
 inline void
 JSObject::finish(JSContext *cx)
 {
-#ifdef DEBUG
-    if (isNative())
-        JS_LOCK_RUNTIME_VOID(cx->runtime, cx->runtime->liveObjectProps -= propertyCount());
-#endif
     if (hasSlotsArray())
         cx->free_(slots);
 }
 
 inline bool
 JSObject::initSharingEmptyShape(JSContext *cx,
                                 js::Class *aclasp,
                                 js::types::TypeObject *type,
@@ -1019,16 +1016,45 @@ JSObject::hasProperty(JSContext *cx, jsi
 }
 
 inline bool
 JSObject::isCallable()
 {
     return isFunction() || getClass()->call;
 }
 
+inline JSPrincipals *
+JSObject::principals(JSContext *cx)
+{
+    JSPrincipals *compPrincipals = compartment()->principals;
+#ifdef DEBUG
+    if (!compPrincipals)
+        return NULL;
+
+    /*
+     * Assert that the compartment's principals are either the same or
+     * equivalent to those we would find through security hooks.
+     */
+    JSSecurityCallbacks *cb = JS_GetSecurityCallbacks(cx);
+    if (JSObjectPrincipalsFinder finder = cb ? cb->findObjectPrincipals : NULL) {
+        JSPrincipals *hookPrincipals = finder(cx, this);
+        JS_ASSERT(hookPrincipals == compPrincipals ||
+                  (hookPrincipals->subsume(hookPrincipals, compPrincipals) &&
+                   compPrincipals->subsume(compPrincipals, hookPrincipals)));
+    }
+#endif
+    return compPrincipals;
+}
+
+inline JSPrincipals *
+JSStackFrame::principals(JSContext *cx) const
+{
+    return scopeChain().principals(cx);
+}
+
 static inline bool
 js_IsCallable(const js::Value &v)
 {
     return v.isObject() && v.toObject().isCallable();
 }
 
 inline bool
 JSObject::isStaticBlock() const
@@ -1124,37 +1150,58 @@ InitScopeForObject(JSContext* cx, JSObje
     return true;
 
   bad:
     /* The GC nulls map initially. It should still be null on error. */
     JS_ASSERT(!obj->map);
     return false;
 }
 
+static inline bool
+CanBeFinalizedInBackground(gc::FinalizeKind kind, Class *clasp)
+{
+    JS_ASSERT(kind <= gc::FINALIZE_OBJECT_LAST);
+    /* If the class has no finalizer or a finalizer that is safe to call on
+     * a different thread, we change the finalize kind. For example,
+     * FINALIZE_OBJECT0 calls the finalizer on the main thread,
+     * FINALIZE_OBJECT0_BACKGROUND calls the finalizer on the gcHelperThread.
+     * kind % 2 prevents from recursivly incrementing the finalize kind because
+     * we can call NewObject with a background finalize kind.
+     */
+    if (kind % 2 == 0 && (!clasp->finalize || clasp->flags & JSCLASS_CONCURRENT_FINALIZ