Merge.
author: Robert Sayre <sayrer@gmail.com>
date: Mon, 28 Sep 2009 19:15:23 -0700
changeset 33553 f2dc27eaf94d38d078981b5dee02ab098adc711b
parent 33552 ec1343f5ed9d543b4532d5ea9811fa77eb4de362 (current diff)
parent 33551 baa35356288e82f388043d1fcffe4f9a39ebb66a (diff)
child 33554 87ba7979644b4025320fea28598ddaa7fe59b0b0
push id: unknown
push user: unknown
push date: unknown
milestone: 1.9.3a1pre
Merge.
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -844,85 +844,56 @@ TraceRecorder::tprint(const char *format
 #endif
 
 /*
  * The entire VM shares one oracle. Collisions and concurrent updates are
  * tolerated and worst case cause performance regressions.
  */
 static Oracle oracle;
 
-/*
- * This confusing and mysterious expression is used for the Tracker. The
- * tracker's responsibility is to map opaque, 4-byte aligned addresses to LIns
- * pointers. To do this efficiently, we observe that the addresses of jsvals
- * living in the interpreter tend to be aggregated close to each other -
- * usually on the same page (where a tracker page doesn't have to be the same
- * size as the OS page size, but it's typically similar).
- *
- * For every address, we split it into two values: upper bits which represent
- * the "base", and lower bits which represent an offset against the base. We
- * create a list of:
- *   struct TrackerPage {
- *      void* base;
- *      LIns* map;
- *   };
- * The mapping then becomes:
- *   page = page such that Base(address) == page->base,
- *   page->map[Index(address)]
- *
- * The size of the map is allocated as N * sizeof(LIns*), where N is
- * (TRACKER_PAGE_SIZE >> 2).  Since the lower two bits are 0, they are always
- * discounted.
- *
- * TRACKER_PAGE_MASK is the "reverse" expression, with a |- 1| to get a mask
- * which separates an address into the Base and Index bits. It is necessary to
- * do all this work rather than use TRACKER_PAGE_SIZE - 1, because on 64-bit
- * platforms the pointer width is twice as large, and only half as many
- * indexes can fit into TrackerPage::map. So the "Base" grows by one bit, and
- * the "Index" shrinks by one bit.
- */
-#define TRACKER_PAGE_MASK (((TRACKER_PAGE_SIZE / sizeof(void*)) << 2) - 1)
-
-#define TRACKER_PAGE_SIZE   4096
-
 Tracker::Tracker()
 {
-    pagelist = 0;
+    pagelist = NULL;
 }
 
 Tracker::~Tracker()
 {
     clear();
 }
 
-jsuword
+inline jsuword
 Tracker::getTrackerPageBase(const void* v) const
 {
-    return jsuword(v) & ~jsuword(TRACKER_PAGE_MASK);
+    return jsuword(v) & ~TRACKER_PAGE_MASK;
+}
+
+inline jsuword
+Tracker::getTrackerPageOffset(const void* v) const
+{
+    return (jsuword(v) & TRACKER_PAGE_MASK) >> 2;
 }
 
 struct Tracker::TrackerPage*
 Tracker::findTrackerPage(const void* v) const
 {
     jsuword base = getTrackerPageBase(v);
     struct Tracker::TrackerPage* p = pagelist;
     while (p) {
-        if (p->base == base) {
+        if (p->base == base)
             return p;
-        }
         p = p->next;
     }
-    return 0;
+    return NULL;
 }
 
 struct Tracker::TrackerPage*
-Tracker::addTrackerPage(const void* v) {
+Tracker::addTrackerPage(const void* v)
+{
     jsuword base = getTrackerPageBase(v);
-    struct Tracker::TrackerPage* p = (struct Tracker::TrackerPage*)
-        calloc(1, sizeof(*p) - sizeof(p->map) + (TRACKER_PAGE_SIZE >> 2) * sizeof(LIns*));
+    struct TrackerPage* p = (struct TrackerPage*) calloc(1, sizeof(*p));
     p->base = base;
     p->next = pagelist;
     pagelist = p;
     return p;
 }
 
 void
 Tracker::clear()
@@ -941,26 +912,26 @@ Tracker::has(const void *v) const
 }
 
 LIns*
 Tracker::get(const void* v) const
 {
     struct Tracker::TrackerPage* p = findTrackerPage(v);
     if (!p)
         return NULL;
-    return p->map[(jsuword(v) & TRACKER_PAGE_MASK) >> 2];
+    return p->map[getTrackerPageOffset(v)];
 }
 
 void
 Tracker::set(const void* v, LIns* i)
 {
     struct Tracker::TrackerPage* p = findTrackerPage(v);
     if (!p)
         p = addTrackerPage(v);
-    p->map[(jsuword(v) & TRACKER_PAGE_MASK) >> 2] = i;
+    p->map[getTrackerPageOffset(v)] = i;
 }
 
 static inline jsuint
 argSlots(JSStackFrame* fp)
 {
     return JS_MAX(fp->argc, fp->fun->nargs);
 }
 
--- a/js/src/jstracer.h
+++ b/js/src/jstracer.h
@@ -152,28 +152,47 @@ public:
 
     T* data() const {
         return _data;
     }
 };
 
 /*
  * Tracker is used to keep track of values being manipulated by the interpreter
- * during trace recording.  Note that tracker pages aren't necessarily the
- * same size as OS pages, they just are a moderate-sized chunk of memory.
+ * during trace recording.  It maps opaque, 4-byte aligned addresses to LIns
+ * pointers. To do this efficiently, we observe that the addresses of jsvals
+ * living in the interpreter tend to be aggregated close to each other -
+ * usually on the same page (where a tracker page doesn't have to be the same
+ * size as the OS page size, but it's typically similar).  The Tracker
+ * consists of a linked-list of structures representing a memory page, which
+ * are created on-demand as memory locations are used.
+ *
+ * For every address, first we split it into two parts: upper bits which
+ * represent the "base", and lower bits which represent an offset against the
+ * base.  For the offset, we then right-shift it by two because the bottom two
+ * bits of a 4-byte aligned address are always zero.  The mapping then
+ * becomes:
+ *
+ *   page = page in pagelist such that Base(address) == page->base,
+ *   page->map[Offset(address)]
  */
 class Tracker {
+    #define TRACKER_PAGE_SZB        4096
+    #define TRACKER_PAGE_ENTRIES    (TRACKER_PAGE_SZB >> 2)    // each slot is 4 bytes
+    #define TRACKER_PAGE_MASK       jsuword(TRACKER_PAGE_SZB - 1)
+
     struct TrackerPage {
         struct TrackerPage* next;
         jsuword             base;
-        nanojit::LIns*      map[1];
+        nanojit::LIns*      map[TRACKER_PAGE_ENTRIES];
     };
     struct TrackerPage* pagelist;
 
     jsuword             getTrackerPageBase(const void* v) const;
+    jsuword             getTrackerPageOffset(const void* v) const;
     struct TrackerPage* findTrackerPage(const void* v) const;
     struct TrackerPage* addTrackerPage(const void* v);
 public:
     Tracker();
     ~Tracker();
 
     bool            has(const void* v) const;
     nanojit::LIns*  get(const void* v) const;
--- a/js/src/nanojit/Allocator.cpp
+++ b/js/src/nanojit/Allocator.cpp
@@ -54,17 +54,17 @@ namespace nanojit
         reset();
     }
 
     void Allocator::reset()
     {
         Chunk *c = current_chunk;
         while (c) {
             Chunk *prev = c->prev;
-            this->freeChunk(c);
+            freeChunk(c);
             c = prev;
         }
         current_chunk = NULL;
         current_top = NULL;
         current_limit = NULL;
         postReset();
     }
 
--- a/js/src/nanojit/Allocator.h
+++ b/js/src/nanojit/Allocator.h
@@ -48,17 +48,17 @@ namespace nanojit
      *
      * allocations never return NULL.  The implementation of allocChunk()
      * is expected to perform a longjmp or exception when an allocation can't
      * proceed.
      */
     class Allocator {
     public:
         Allocator();
-        virtual ~Allocator();
+        ~Allocator();
         void reset();
 
         /** alloc memory, never return null. */
         void* alloc(size_t nbytes) {
             nbytes = (nbytes + 7) & ~7; // round up
             if (current_top + nbytes <= current_limit) {
                 void *p = current_top;
                 current_top += nbytes;
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -100,16 +100,19 @@ namespace nanojit
         , alloc(alloc)
         , _codeAlloc(codeAlloc)
         , _thisfrag(NULL)
         , _branchStateMap(alloc)
         , _patches(alloc)
         , _labels(alloc)
         , _epilogue(NULL)
         , _err(None)
+    #if PEDANTIC
+        , pedanticTop(NULL)
+    #endif
         , config(core->config)
     {
         VMPI_memset(&_stats, 0, sizeof(_stats));
         nInit(core);
         (void)logc;
         verbose_only( _logc = logc; )
         verbose_only( _outputCache = 0; )
         verbose_only( outlineEOL[0] = '\0'; )
@@ -332,20 +335,24 @@ namespace nanojit
 
     Register Assembler::findSpecificRegFor(LIns* i, Register w)
     {
         return findRegFor(i, rmask(w));
     }
 
     Register Assembler::getBaseReg(LIns *i, int &d, RegisterMask allow)
     {
+    #if !PEDANTIC
         if (i->isop(LIR_alloc)) {
             d += findMemFor(i);
             return FP;
         }
+    #else
+        (void) d;
+    #endif
         return findRegFor(i, allow);
     }
 
     // Finds a register in 'allow' to hold the result of 'ins'.  Used when we
     // encounter a use of 'ins'.  The actions depend on the prior state of
     // 'ins':
     // - If the result of 'ins' is not in any register, we find an allowed
     //   one, evicting one if necessary.
--- a/js/src/nanojit/Assembler.h
+++ b/js/src/nanojit/Assembler.h
@@ -248,16 +248,19 @@ namespace nanojit
             LabelStateMap       _labels;
 
             NIns        *codeStart, *codeEnd;       // current block we're adding code to
             NIns        *exitStart, *exitEnd;       // current block for exit stubs
             NIns*       _nIns;          // current native instruction
             NIns*       _nExitIns;      // current instruction in exit fragment page
             NIns*       _epilogue;
             AssmError   _err;           // 0 = means assemble() appears ok, otherwise it failed
+        #if PEDANTIC
+            NIns*       pedanticTop;
+        #endif
 
             AR          _activation;
             RegAlloc    _allocator;
 
             bool        _inExit, vpad2[3];
 
             verbose_only( void asm_inc_m32(uint32_t*); )
             void        asm_setcc(Register res, LIns *cond);