Bug 810373 - Add shared stubcode support to baseline compiler. r=jandem
author: Kannan Vijayan <kvijayan@mozilla.com>
date: Mon, 12 Nov 2012 14:55:00 -0500
changeset: 127068 ba510446fef6ffbf32bc2bca92e414c7f0b3f603
parent: 127067 973074c7d7ce48a154a2906ebd30b8c8efb43e04
child: 127069 9af743a5b908864e170751dbd54f8ac0e1ccf8fa
push id: unknown
push user: unknown
push date: unknown
reviewers: jandem
bugs: 810373
milestone: 19.0a1
js/src/ion/BaselineIC.h
js/src/ion/Ion.cpp
js/src/ion/IonCompartment.h
js/src/ion/x86/BaselineIC-x86.cpp
js/src/jsweakcache.h
--- a/js/src/ion/BaselineIC.h
+++ b/js/src/ion/BaselineIC.h
@@ -337,19 +337,30 @@ class ICStubCompiler
 
     // By default the stubcode key is just the kind.
     virtual int32_t getKey() const {
         return static_cast<int32_t>(kind);
     }
 
     virtual IonCode *generateStubCode() = 0;
     IonCode *getStubCode() {
-        // TODO: Check stubcode cache with getKey(), and if none present
-        //       then generate a new stub and store it in the cache.
-        return generateStubCode();
+        IonCompartment *ion = cx->compartment->ionCompartment();
+        uint32_t stubKey = getKey();
+        IonCode *stubCode = ion->getStubCode(stubKey);
+        if (stubCode)
+            return stubCode;
+
+        Rooted<IonCode *> newStubCode(cx, generateStubCode());
+        if (!newStubCode)
+            return NULL;
+
+        if (!ion->putStubCode(stubKey, newStubCode))
+            return NULL;
+
+        return newStubCode;
     }
 
     ICStubCompiler(JSContext *cx, ICStub::Kind kind)
       : cx(cx), kind(kind) {}
 
     // Helper to generate a stubcall IonCode from a VMFunction wrapper.
     IonCode *generateVMWrapper(const VMFunction &fun) {
         IonCompartment *ion = cx->compartment->ionCompartment();
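
Note on the caching above: getStubCode() memoizes on getKey(), so any compiler
whose generated code depends on more than the stub kind must override getKey()
to mix that extra state into the key. A minimal sketch of such an override,
with a hypothetical compiler class (the class name, the op_ field, and the bit
layout are illustrative, not part of this patch):

    // Sketch only: a hypothetical compiler that specializes its stub on a
    // JSOp. Everything that affects the emitted code must feed into the key,
    // or two different stubs would collide in the shared cache.
    class HypotheticalCompareCompiler : public ICStubCompiler
    {
        JSOp op_;

      public:
        HypotheticalCompareCompiler(JSContext *cx, ICStub::Kind kind, JSOp op)
          : ICStubCompiler(cx, kind), op_(op)
        {}

        virtual int32_t getKey() const {
            // Pack the op above the kind bits so each (kind, op) pair gets
            // its own cache entry.
            return static_cast<int32_t>(kind) | (static_cast<int32_t>(op_) << 16);
        }

        virtual IonCode *generateStubCode();  // emits op_-specific code
    };
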
--- a/js/src/ion/Ion.cpp
+++ b/js/src/ion/Ion.cpp
@@ -141,16 +141,20 @@ IonCompartment::initialize(JSContext *cx
     execAlloc_ = cx->runtime->getExecAlloc(cx);
     if (!execAlloc_)
         return false;
 
     functionWrappers_ = cx->new_<VMWrapperMap>(cx);
     if (!functionWrappers_ || !functionWrappers_->init())
         return false;
 
+    stubCodes_ = cx->new_<ICStubCodeMap>(cx);
+    if (!stubCodes_ || !stubCodes_->init())
+        return false;
+
     return true;
 }
 
 void
 ion::FinishOffThreadBuilder(IonBuilder *builder)
 {
     if (builder->script()->isIonCompilingOffThread()) {
         types::TypeCompartment &types = builder->script()->compartment()->types;
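
initialize() heap-allocates stubCodes_ with cx->new_, alongside
functionWrappers_. The matching teardown is not part of this changeset; a
sketch under the assumption that the destructor mirrors the existing map's
cleanup (js_delete is the usual counterpart of cx->new_ and tolerates NULL):

    // Sketch only: assumes the destructor frees stubCodes_ the same way it
    // frees functionWrappers_; the actual destructor is not shown here.
    IonCompartment::~IonCompartment()
    {
        js_delete(functionWrappers_);
        js_delete(stubCodes_);
    }
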
--- a/js/src/ion/IonCompartment.h
+++ b/js/src/ion/IonCompartment.h
@@ -25,16 +25,17 @@ typedef void (*EnterIonCode)(void *code,
 class IonActivation;
 class IonBuilder;
 
 typedef Vector<IonBuilder*, 0, SystemAllocPolicy> OffThreadCompilationVector;
 
 class IonCompartment
 {
     typedef WeakCache<const VMFunction *, ReadBarriered<IonCode> > VMWrapperMap;
+    typedef WeakValueCache<uint32_t, ReadBarriered<IonCode> > ICStubCodeMap;
 
     friend class IonActivation;
 
     // Executable allocator (owned by the runtime).
     JSC::ExecutableAllocator *execAlloc_;
 
     // Trampoline for entering JIT code. Contains OSR prologue.
     ReadBarriered<IonCode> enterJIT_;
@@ -53,16 +54,19 @@ class IonCompartment
     ReadBarriered<IonCode> invalidator_;
 
     // Thunk that calls the GC pre barrier.
     ReadBarriered<IonCode> preBarrier_;
 
     // Map VMFunction addresses to the IonCode of the wrapper.
     VMWrapperMap *functionWrappers_;
 
+    // Map ICStub keys (from ICStubCompiler::getKey) to shared stub code.
+    ICStubCodeMap *stubCodes_;
+
     // Any scripts for which off thread compilation has successfully finished,
     // failed, or been cancelled. All off thread compilations which are started
     // will eventually appear in this list asynchronously. Protected by the
     // runtime's analysis lock.
     OffThreadCompilationVector finishedOffThreadCompilations_;
 
     // Keep track of memoryregions that are going to be flushed.
     AutoFlushCache *flusher_;
@@ -73,16 +77,30 @@ class IonCompartment
     IonCode *generateArgumentsRectifier(JSContext *cx);
     IonCode *generateBailoutTable(JSContext *cx, uint32 frameClass);
     IonCode *generateBailoutHandler(JSContext *cx);
     IonCode *generateInvalidator(JSContext *cx);
     IonCode *generatePreBarrier(JSContext *cx);
 
   public:
     IonCode *generateVMWrapper(JSContext *cx, const VMFunction &f);
+    IonCode *getStubCode(uint32_t key) {
+        ICStubCodeMap::AddPtr p = stubCodes_->lookupForAdd(key);
+        if (p)
+            return p->value;
+        return NULL;
+    }
+    bool putStubCode(uint32_t key, Handle<IonCode *> stubCode) {
+        // Look up with lookupForAdd(key) and insert through the returned slot:
+        // if stubCode is moved by a GC between the lookup and the add, reading
+        // it from the Handle at add() time still yields the correct pointer.
+        JS_ASSERT(!stubCodes_->has(key));
+        ICStubCodeMap::AddPtr p = stubCodes_->lookupForAdd(key);
+        return stubCodes_->add(p, key, stubCode.get());
+    }
 
     OffThreadCompilationVector &finishedOffThreadCompilations() {
         return finishedOffThreadCompilations_;
     }
 
   public:
     bool initialize(JSContext *cx);
     IonCompartment();
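
The lookupForAdd()/add() pairing in putStubCode() is the standard js::HashMap
idiom: the AddPtr returned by lookupForAdd() both answers the membership test
and remembers the insertion slot, so add() does not probe the table again. A
combined lookup-or-insert sketch over the same map type (the free function and
its name are illustrative):

    // Sketch only: a hypothetical single-probe variant of getStubCode() +
    // putStubCode(); 'map' is an ICStubCodeMap as declared in this patch.
    static IonCode *
    LookupOrAddStubCode(ICStubCodeMap &map, uint32_t key, Handle<IonCode *> fresh)
    {
        ICStubCodeMap::AddPtr p = map.lookupForAdd(key);
        if (p)
            return p->value;                // Hit: reuse the shared stub.
        // Insert through the AddPtr; reading 'fresh' from its Handle here
        // picks up any relocation that happened since the lookup.
        if (!map.add(p, key, fresh.get()))
            return NULL;                    // OOM.
        return fresh.get();
    }
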
--- a/js/src/ion/x86/BaselineIC-x86.cpp
+++ b/js/src/ion/x86/BaselineIC-x86.cpp
@@ -35,17 +35,16 @@ ICCompare_Int32::Compiler::generateStubC
     MacroAssembler masm;
 
     // Guard that R0 is an integer and R1 is an integer.
     Label failure;
     masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
     masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
 
     // Compare payload regs of R0 and R1.
-    Register scratchReg = BaselineTailCallReg;
     masm.cmpl(R0.payloadReg(), R1.payloadReg());
     masm.setCC(cond, R0.payloadReg());
     masm.movzxbl(R0.payloadReg(), R0.payloadReg());
 
     // Box the result and return
     masm.boxNonDouble(JSVAL_TYPE_BOOLEAN, R0.payloadReg(), R0);
     masm.ret();
 
--- a/js/src/jsweakcache.h
+++ b/js/src/jsweakcache.h
@@ -65,11 +65,44 @@ class WeakCache : public HashMap<Key, Va
 
             // Assert that IsMarked() did not perform relocation.
             JS_ASSERT(k == r.front().key);
         }
 #endif
     }
 };
 
+// A WeakValueCache is similar to a WeakCache, except that its keys are not
+// GC things and are never marked: an entry lives or dies solely on the
+// liveness of its value. This is useful for weak maps keyed on primitive
+// values such as uint32_t.
+template <class Key, class Value,
+          class HashPolicy = DefaultHasher<Key>,
+          class AllocPolicy = RuntimeAllocPolicy>
+class WeakValueCache : public HashMap<Key, Value, HashPolicy, AllocPolicy> {
+  private:
+    typedef HashMap<Key, Value, HashPolicy, AllocPolicy> Base;
+    typedef typename Base::Range Range;
+    typedef typename Base::Enum Enum;
+
+  public:
+    explicit WeakValueCache(JSRuntime *rt) : Base(rt) { }
+    explicit WeakValueCache(JSContext *cx) : Base(cx) { }
+
+  public:
+    // Sweep all entries whose value is unmarked; keys are never marked.
+    void sweep(FreeOp *fop) {
+        // Remove all entries whose values remain unmarked.
+        for (Enum e(*this); !e.empty(); e.popFront()) {
+            if (!gc::IsMarked(e.front().value))
+                e.removeFront();
+        }
+
+#ifdef DEBUG
+        // Once we've swept, all remaining edges should stay within the
+        // known-live part of the graph.
+        for (Range r = Base::all(); !r.empty(); r.popFront())
+            JS_ASSERT(gc::IsMarked(r.front().value));
+#endif
+    }
+};
+
 } // namespace js
 
 #endif // jsweakcache_h___
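
For the weak semantics to take effect, WeakValueCache::sweep() must be called
during the compartment's GC sweep phase; that wiring is not visible in this
changeset. A sketch of what the hook could look like (the method name and its
call site are assumptions):

    // Sketch only: assumes IonCompartment exposes a sweep hook invoked while
    // the compartment is swept; neither the hook nor its caller is part of
    // this changeset.
    void
    IonCompartment::sweep(FreeOp *fop)
    {
        // Drop entries whose IonCode died in this GC; entries whose code is
        // still reachable from live ICs are kept.
        if (stubCodes_)
            stubCodes_->sweep(fop);
    }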