Add valgrind request hooks for MMgc and configure.py support (r=edwsmith,sr=lhansen,bug=509020)
☠☠ backed out by 380125ceb164 ☠☠
author Tommy Reilly <treilly@adobe.com>
Mon, 27 Sep 2010 16:43:18 -0400
changeset 5272 0776d8b449c5a297991fb96e8c6ee1fe8c51aa80
parent 5271 05f2ed27e868e2939d12f0b200a019c2b411346b
child 5273 380125ceb1648604579289e1256f8234c57aa71d
push id 2872
push user treilly@adobe.com
push date Mon, 27 Sep 2010 20:52:06 +0000
reviewers edwsmith, lhansen
bugs 509020
MMgc/FixedAlloc-inlines.h
MMgc/FixedAlloc.cpp
MMgc/FixedMalloc.cpp
MMgc/GC.cpp
MMgc/GCAlloc.cpp
MMgc/GCHeap.cpp
MMgc/GCLargeAlloc.cpp
MMgc/GCObject.h
MMgc/MMgc.h
MMgc/Shared-inlines.h
MMgc/ZCT.cpp
build/avmfeatures.py
build/avmshell-common.xcconfig
configure.py
core/avmfeatures.as
core/avmfeatures.cpp
core/avmfeatures.h
platform/android/Makefile
shell/avmshell-features.h
--- a/MMgc/FixedAlloc-inlines.h
+++ b/MMgc/FixedAlloc-inlines.h
@@ -204,19 +204,27 @@ namespace MMgc
             if (m_firstFree)
                 m_firstFree->prevFree = 0;
         }
 
         item = GetUserPointer(item);
 
 #ifdef DEBUG
         // Fresh memory poisoning.
-        if((opts & kZero) == 0)
+        if ((opts & kZero) == 0 && !RUNNING_ON_VALGRIND)
             memset(item, uint8_t(GCHeap::FXFreshPoison), b->size - DebugSize());
 #endif
+
+        // Note that we'd like to use the requested size and not
+        // b->size but clients will use the slop after calling Size().
+        // Using the requested size and expanding to b->size via
+        // VALGRIND_MEMPOOL_CHANGE in Size() doesn't work because the
+        // scanner scans the full size (see bug 594756).
+        VALGRIND_MEMPOOL_ALLOC(b, item, b->size);
+
         if((opts & kZero) != 0)
             memset(item, 0, b->size - DebugSize());
 
 #ifdef MMGC_MEMORY_PROFILER
         if(m_heap->HooksEnabled())
             m_totalAskSize += size;
 #endif
 
@@ -236,16 +244,18 @@ namespace MMgc
         if(heap->GetProfiler())
             b->alloc->m_totalAskSize -= askSize;
 #endif
 
         item = GetRealPointer(item);
 
         FLPush(b->firstFree, item);
 
+        VALGRIND_MEMPOOL_FREE(b, GetUserPointer(item));
+        
         // 'b' was full but now it has a free spot, add it to the free block list.
         if (b->numAlloc == b->alloc->m_itemsPerBlock)
         {
             GCAssert(!b->nextFree && !b->prevFree);
             b->nextFree = b->alloc->m_firstFree;
             if (b->alloc->m_firstFree)
                 b->alloc->m_firstFree->prevFree = b;
             b->alloc->m_firstFree = b;
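
The two hunks above pair every small-object allocation and free with a mempool client request against the owning FixedBlock, and the comment explains why the full slot size (b->size) is reported rather than the requested size. For reference, a minimal standalone sketch of that mempool lifecycle, with a hypothetical ToyBlock standing in for FixedBlock (valgrind headers assumed to be on the include path; everything compiles to no-ops under NVALGRIND):

    #include <stdlib.h>
    #include <valgrind/memcheck.h>

    struct ToyBlock {            // hypothetical stand-in for FixedBlock
        char slots[16 * 64];     // 16 slots of 64 bytes each
    };

    int main()
    {
        ToyBlock* b = (ToyBlock*)malloc(sizeof(ToyBlock));
        VALGRIND_CREATE_MEMPOOL(b, /*redZoneSize*/0, /*zeroed*/0);

        char* item = b->slots;                 // hand out the first slot
        VALGRIND_MEMPOOL_ALLOC(b, item, 64);   // report the full slot size, as above
        item[0] = 1;                           // fine: inside a live pool allocation

        VALGRIND_MEMPOOL_FREE(b, item);        // slot is off limits to clients again
        VALGRIND_DESTROY_MEMPOOL(b);
        free(b);
        return 0;
    }
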
--- a/MMgc/FixedAlloc.cpp
+++ b/MMgc/FixedAlloc.cpp
@@ -185,32 +185,37 @@ namespace MMgc
 
         vmpi_spin_lock_t *lock = NULL;
         if(m_isFixedAllocSafe) {
             lock = &((FixedAllocSafe*)this)->m_spinlock;
             VMPI_lockRelease(lock);
         }
 
         FixedBlock* b = (FixedBlock*) m_heap->Alloc(1, GCHeap::kExpand | (canFail ? GCHeap::kCanFail : 0));
+        VALGRIND_CREATE_MEMPOOL(b, 0/*redZoneSize*/, 0/*zeroed*/);
+
+        // treat the block header as an allocation so reads/writes of it are okay
+        VALGRIND_MEMPOOL_ALLOC(b, b, (char*)b->items - (char*)b);
 
         if(lock != NULL)
             VMPI_lockAcquire(lock);
 
         if(!b)
             return;
 
         b->numAlloc = 0;
         b->size = (uint16_t)m_itemSize;
         b->firstFree = 0;
         b->nextItem = b->items;
         b->alloc = this;
 
 #ifdef DEBUG
         // Deleted and unused memory is poisoned, this is important for leak diagnostics.
-        VMPI_memset(b->items, uint8_t(GCHeap::FXFreedPoison), m_itemSize * m_itemsPerBlock);
+        if (!RUNNING_ON_VALGRIND)
+            VMPI_memset(b->items, uint8_t(GCHeap::FXFreedPoison), m_itemSize * m_itemsPerBlock);
 #endif
 
         // Link the block at the end of the list.
         b->prev = m_lastBlock;
         b->next = 0;
         if (m_lastBlock)
             m_lastBlock->next = b;
         if (!m_firstBlock)
@@ -266,16 +271,19 @@ namespace MMgc
             VMPI_lockRelease(lock);
         }
 
         // Free the memory
         m_heap->FreeNoProfile(b);
 
         if(lock != NULL)
             VMPI_lockAcquire(lock);
+
+        VALGRIND_MEMPOOL_FREE(b, b);
+        VALGRIND_DESTROY_MEMPOOL(b);
     }
 
 #ifdef DEBUG
     bool FixedAlloc::QueryOwnsObject(const void* item)
     {
         const char* ci = (const char*) item;
         for ( FixedBlock* fb=m_firstBlock ; fb != NULL ; fb=fb->next )
             if (ci >= (const char*)fb->items && ci < (const char*)fb->items + m_itemsPerBlock*m_itemSize)
--- a/MMgc/FixedMalloc.cpp
+++ b/MMgc/FixedMalloc.cpp
@@ -225,35 +225,38 @@ namespace MMgc
         if((flags & kCanFail) != 0)
             gcheap_flags |= GCHeap::kCanFail;
         if((flags & kZero) != 0)
             gcheap_flags |= GCHeap::kZero;
 
         void *item = m_heap->Alloc(blocksNeeded, gcheap_flags);
         if(item)
         {
+            VALGRIND_CREATE_MEMPOOL(item, 0,  (flags & kZero) != 0);
 
             item = GetUserPointer(item);
 #ifdef MMGC_HOOKS
             if(m_heap->HooksEnabled())
                 m_heap->AllocHook(item, size - DebugSize(), Size(item));
 #endif // MMGC_HOOKS
 
             UpdateLargeAllocStats(item, blocksNeeded);
 
 #ifdef DEBUG
             // Fresh memory poisoning
-            if((flags & kZero) == 0)
+            if ((flags & kZero) == 0 && !RUNNING_ON_VALGRIND)
                 memset(item, uint8_t(GCHeap::FXFreshPoison), size - DebugSize());
 
 #ifndef AVMPLUS_SAMPLER
             // Enregister the large object
             AddToLargeObjectTracker(item);
 #endif
 #endif // DEBUG
+
+            VALGRIND_MEMPOOL_ALLOC(GetRealPointer(item), item, Size(item));
         }
         return item;
     }
 
     void FixedMalloc::LargeFree(void *item)
     {
 #if defined DEBUG && !defined AVMPLUS_SAMPLER
         RemoveFromLargeObjectTracker(item);
@@ -263,16 +266,18 @@ namespace MMgc
 #ifdef MMGC_HOOKS
         if(m_heap->HooksEnabled())
         {
             m_heap->FinalizeHook(item, Size(item));
             m_heap->FreeHook(item, Size(item), uint8_t(GCHeap::FXFreedPoison));
         }
 #endif
         m_heap->FreeNoProfile(GetRealPointer(item));
+        VALGRIND_MEMPOOL_FREE(GetRealPointer(item), item);
+        VALGRIND_DESTROY_MEMPOOL(GetRealPointer(item));
     }
 
     size_t FixedMalloc::LargeSize(const void *item)
     {
         return m_heap->Size(GetRealPointer(item)) * GCHeap::kBlockSize;
     }
 
     void *FixedMalloc::Calloc(size_t count, size_t elsize, FixedMallocOpts opts)
--- a/MMgc/GC.cpp
+++ b/MMgc/GC.cpp
@@ -997,16 +997,19 @@ namespace MMgc
         while(lb) {
             GCLargeAlloc::LargeBlock *next = GCLargeAlloc::Next(lb);
 #ifdef MMGC_HOOKS
             if(heap->HooksEnabled())
                 heap->FreeHook(GetUserPointer(lb+1), lb->size - DebugSize(), uint8_t(GCHeap::GCSweptPoison));
 #endif
             int numBlocks = lb->GetNumBlocks();
             sweepResults += numBlocks;
+            VALGRIND_MEMPOOL_FREE(lb, lb);
+            VALGRIND_MEMPOOL_FREE(lb, lb + 1);
+            VALGRIND_DESTROY_MEMPOOL(lb);
             FreeBlock(lb, numBlocks);
             lb = next;
         }
         largeEmptyPageList = NULL;
 
         if (heap->Config().eagerSweeping)
             SweepNeedsSweeping();
 
@@ -2055,16 +2058,21 @@ namespace MMgc
         }
 
         uintptr_t _memStart = pageMap.MemStart();
         uintptr_t _memEnd = pageMap.MemEnd();
 
         while(p < end)
         {
             uintptr_t val = *p++;
+#ifdef MMGC_VALGRIND
+            if (wi.HasInteriorPtrs()) {
+                VALGRIND_MAKE_MEM_DEFINED(&val, sizeof(val));
+            }
+#endif // MMGC_VALGRIND
 
             if(val < _memStart || val >= _memEnd)
                 continue;
 
 #ifdef MMGC_POINTINESS_PROFILING
             could_be_pointer++;
 #endif
 
--- a/MMgc/GCAlloc.cpp
+++ b/MMgc/GCAlloc.cpp
@@ -274,16 +274,22 @@ namespace MMgc
 
         GCBlock* b = (GCBlock*) m_gc->AllocBlock(1, PageMap::kGCAllocPage, /*zero*/true,  (flags&GC::kCanFail) != 0);
 
         if (b)
         {
             m_maxAlloc += m_itemsPerBlock;
             m_numBlocks++;
 
+            VALGRIND_CREATE_MEMPOOL(b, 0/*redZoneSize*/, 1/*zeroed*/);
+
+            // treat block header as a separate allocation
+            VALGRIND_MEMPOOL_ALLOC(b, b, sizeof(GCBlock));
+
+
             b->gc = m_gc;
             b->alloc = this;
             b->size = m_itemSize;
             b->slowFlags = 0;
             if(m_gc->collecting && m_finalized)
                 b->finalizeState = m_gc->finalizedValue;
             else
                 b->finalizeState = !m_gc->finalizedValue;
@@ -291,16 +297,21 @@ namespace MMgc
 #ifdef MMGC_FASTBITS
             b->bitsShift = m_bitsShift;
 #endif
             if (m_bitsInPage)
                 b->bits = (gcbits_t*)b + sizeof(GCBlock);
             else
                 b->bits = bits;
 
+            // ditto for the in-page bits
+            if (m_bitsInPage) {
+                VALGRIND_MEMPOOL_ALLOC(b, b->bits, m_numBitmapBytes);
+            }
+
             // Link the block at the end of the list
             b->prev = m_lastBlock;
             b->next = 0;
 
             if (m_lastBlock) {
                 m_lastBlock->next = b;
             }
             if (!m_firstBlock) {
@@ -401,20 +412,27 @@ namespace MMgc
 
     void GCAlloc::FreeChunk(GCBlock* b)
     {
         GCAssert(b->numFree == m_itemsPerBlock);
         if(!m_bitsInPage) {
             VMPI_memset(b->bits, 0, m_numBitmapBytes);
             m_gc->FreeBits((uint32_t*)(void *)b->bits, m_sizeClassIndex);
             b->bits = NULL;
+        } else {
+            // Only free the bits if they were in-page; see CreateChunk.
+            VALGRIND_MEMPOOL_FREE(b, b->bits);
         }
+        
+        VALGRIND_MEMPOOL_FREE(b, b);
 
         // Free the memory
         m_gc->FreeBlock(b, 1);
+
+        VALGRIND_DESTROY_MEMPOOL(b);
     }
 
 #if defined DEBUG || defined MMGC_MEMORY_PROFILER
     void* GCAlloc::Alloc(size_t askSize, int flags)
 #else
     void* GCAlloc::Alloc(int flags)
 #endif
     {
@@ -494,16 +512,18 @@ namespace MMgc
 #else
             heap->AllocHook(GetUserPointer(item), 0, userSize);
 #endif
         }
 #endif // MMGC_HOOKS
 
         m_qBudget++;
 
+        VALGRIND_MEMPOOL_ALLOC(b, item, m_itemSize);
+
         return item;
     }
 
 #if defined DEBUG || defined MMGC_MEMORY_PROFILER
     void *GCAlloc::AllocSlow(size_t askSize, int flags)
 #else
     void *GCAlloc::AllocSlow(int flags)
 #endif
@@ -660,53 +680,59 @@ namespace MMgc
         GCAssert(!(b->bits[bitsindex] & kHasWeakRef));
 
 #ifndef _DEBUG
         ClearNonRCObject((void*)item, b->size);
 #endif
 
         FLPush(m_qList, item);
 
+        VALGRIND_MEMPOOL_FREE(b, item);
+
         m_gc->SignalFreeWork(m_itemSize);
         if (--m_qBudget <= 0)
             QuickListBudgetExhausted();
     }
 
     void GCAlloc::FreeSlow(GCBlock* b, int bitsindex, const void* item)
     {
         if(b->bits[bitsindex] & kHasWeakRef)
             b->gc->ClearWeakRef(GetUserPointer(item));
 
 #ifndef _DEBUG
         ClearNonRCObject((void*)item, b->size);
 #endif
+        bool blockSwept = false;
 
         if (b->needsSweeping()) {
             // See comment in GCAlloc::Free
             //b->bits[bitsindex] = kFreelist;
 
             // We know that the quick list does not have any items from the block b,
             // so we can push the object onto the block's free list and sweep the block.
             // Make the quick list NULL while we do that.
             void* qList = m_qList;
             m_qList = NULL;
 
             FLPush(b->firstFree, item);
             b->numFree++;
-            Sweep(b);
+
+            blockSwept = Sweep(b);
 
             m_qList = qList;
         }
         else {
             *(void**)item = m_qList;
             m_qList = (void**)item;
 
             if (--m_qBudget <= 0)
                 QuickListBudgetExhausted();
         }
+        if (!blockSwept)
+            VALGRIND_MEMPOOL_FREE(b, item);
     }
 
     REALLY_INLINE void GCAlloc::ClearNonRCObject(void* item, size_t size)
     {
         // memset rest of item not including free list pointer, in _DEBUG
         // we poison the memory (and clear in Alloc)
         //
         // BTW, experiments show that clearing on alloc instead of on free
@@ -1106,16 +1132,17 @@ namespace MMgc
         numFree++;
 
         bits[bitsindex] = kFreelist;
 
 #ifndef _DEBUG
         alloc->ClearNonRCObject((void*)item, size);
 #endif
         FLPush(firstFree, item);
+        VALGRIND_MEMPOOL_FREE(this, item);
     }
 
     void GCAlloc::GetUsageInfo(size_t& totalAskSize, size_t& totalAllocated)
     {
         totalAskSize = totalAllocated = 0;
 
         GCBlock *b=m_firstBlock;
         while (b) {
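
CreateChunk above gives each GCBlock its own mempool and registers the block header and, when the mark bits live in the page, the bits themselves as separate pool allocations, so the collector's own reads of them stay legal while individual object slots are tracked one by one. A minimal sketch of that carving, with a hypothetical ToyGCBlock layout (the real GCBlock field offsets and sizes differ):

    #include <stdlib.h>
    #include <valgrind/memcheck.h>

    struct ToyGCBlock {          // hypothetical stand-in for GCBlock
        int  header[8];          // block bookkeeping the collector reads directly
        char bits[32];           // in-page mark bits
        char items[960];         // object slots, handed out one at a time
    };

    int main()
    {
        ToyGCBlock* b = (ToyGCBlock*)malloc(sizeof(ToyGCBlock));
        VALGRIND_CREATE_MEMPOOL(b, /*redZoneSize*/0, /*zeroed*/1);
        VALGRIND_MEMPOOL_ALLOC(b, b->header, sizeof(b->header)); // header stays readable
        VALGRIND_MEMPOOL_ALLOC(b, b->bits, sizeof(b->bits));     // so do the mark bits
        VALGRIND_MEMPOOL_ALLOC(b, b->items, 64);                 // first 64-byte object

        VALGRIND_MEMPOOL_FREE(b, b->items);    // object freed; header/bits still live
        VALGRIND_MEMPOOL_FREE(b, b->bits);
        VALGRIND_MEMPOOL_FREE(b, b->header);
        VALGRIND_DESTROY_MEMPOOL(b);
        free(b);
        return 0;
    }
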
--- a/MMgc/GCHeap.cpp
+++ b/MMgc/GCHeap.cpp
@@ -388,26 +388,44 @@ namespace MMgc
 
             m_oomHandling = saved_oomHandling;
         }
 
         GCAssert(Size(baseAddr) == size);
 
         // Zero out the memory, if requested to do so
         if (zero) {
+            // These pages may have been seen by valgrind before; they
+            // became unaddressable the last time we called
+            // VALGRIND_FREELIKE_BLOCK or VALGRIND_DESTROY_MEMPOOL, so use
+            // VALGRIND_MAKE_MEM_DEFINED to silence write-to-freed-memory errors.
+            VALGRIND_MAKE_MEM_DEFINED(baseAddr, size * kBlockSize);
             VMPI_memset(baseAddr, 0, size * kBlockSize);
+            // Then make the memory undefined again: either the
+            // VALGRIND_MALLOCLIKE_BLOCK call below will define it, or the
+            // sub-allocator will, i.e. this is here to keep the
+            // sub-allocators honest.
+            VALGRIND_MAKE_MEM_UNDEFINED(baseAddr, size * kBlockSize);
         }
 
         // Fail the allocation if we're a "canFail" allocation that has pushed beyond one of our limits.
         if((flags & kCanFail) != 0 && (status == kMemSoftLimit || SoftLimitExceeded() || HardLimitExceeded() ))
         {
             FreeInternal(baseAddr, (flags & kProfile) != 0, m_oomHandling);
             return NULL;
         }
 
+        // We use the "profile" flag to tell the difference between
+        // client requests and sub-allocator requests.  Direct client
+        // requests are reported to valgrind here; sub-allocators need
+        // to tell valgrind about their memory themselves.
+        if ((flags & kProfile) != 0) {
+            VALGRIND_MALLOCLIKE_BLOCK(baseAddr, size * kBlockSize, 0, (flags&kZero) != 0);
+        }
+
         GCAssert(((uintptr_t)baseAddr >> kBlockShift) % alignment == 0);
         return baseAddr;
     }
 
     void *GCHeap::AllocHelper(size_t size, bool expand, bool& zero, size_t alignment)
     {
         // first try to find it in our existing free memory
         HeapBlock *block = AllocBlock(size, zero, alignment);
@@ -511,16 +529,19 @@ namespace MMgc
         }
 #endif
 
         if(block)
             FreeBlock(block);
         else
             LargeFree(item);
 
+        if (profile)
+            VALGRIND_FREELIKE_BLOCK(item, 0);
+
         m_oomHandling = saved_oomHandling;
     }
 
     void GCHeap::Decommit()
     {
         // keep at least initialSize free
         if(!config.returnMemory)
             return;
@@ -1560,17 +1581,18 @@ namespace MMgc
     }
 
     void GCHeap::FreeBlock(HeapBlock *block)
     {
         GCAssert(block->inUse());
 
 #ifdef _DEBUG
         // trash it. fb == free block
-        VMPI_memset(block->baseAddr, uint8_t(MMFreedPoison), block->size * kBlockSize);
+        if (!RUNNING_ON_VALGRIND)
+            VMPI_memset(block->baseAddr, uint8_t(MMFreedPoison), block->size * kBlockSize);
 #endif
 
         AddToFreeList(block, true);
     }
 
     void GCHeap::CheckForNewMaxTotalHeapSize()
     {
         // The guard on instance being non-NULL is a hack, to be fixed later (now=2009-07-20).
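
The kProfile test above is how GCHeap distinguishes direct client allocations from sub-allocator block requests: only the former are described to memcheck with VALGRIND_MALLOCLIKE_BLOCK and later retired with VALGRIND_FREELIKE_BLOCK, while FixedAlloc, GCAlloc and GCLargeAlloc describe their own memory with the mempool requests shown earlier. A minimal sketch of that pairing, assuming a POSIX mmap source for the block (the same address and red-zone size must be passed to both requests):

    #include <sys/mman.h>
    #include <valgrind/memcheck.h>

    int main()
    {
        const size_t size = 4096;               // one block, analogous to kBlockSize
        char* base = (char*)mmap(0, size, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        // Describe the block to memcheck as if it came from malloc.
        VALGRIND_MALLOCLIKE_BLOCK(base, size, /*rzB*/0, /*is_zeroed*/1);
        base[0] = 'x';

        // Retire it; memcheck now flags any later access as use-after-free.
        VALGRIND_FREELIKE_BLOCK(base, /*rzB*/0);
        munmap(base, size);
        return 0;
    }
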
--- a/MMgc/GCLargeAlloc.cpp
+++ b/MMgc/GCLargeAlloc.cpp
@@ -79,16 +79,20 @@ namespace MMgc
 
         if (block)
         {
             gcbits_t flagbits0 = 0;
             gcbits_t flagbits1 = 0;
             flagbits0 |= ((flags&GC::kFinalize) != 0) ? kFinalizable : 0;
             flagbits1 |= ((flags&GC::kContainsPointers) != 0) ? kContainsPointers : 0;
             flagbits1 |= ((flags&GC::kRCObject) != 0) ? kRCObject : 0;
+
+            VALGRIND_CREATE_MEMPOOL(block, /*rdzone*/0, (flags&GC::kZero) != 0);
+            VALGRIND_MEMPOOL_ALLOC(block, block, sizeof(LargeBlock));
+
             block->gc = this->m_gc;
             block->alloc= this;
             block->next = m_blocks;
             block->size = computedSize;
 #ifdef MMGC_FASTBITS
             block->bitsShift = 12;     // Always use bits[0]
 #endif
             block->bits = block->flags;
@@ -98,26 +102,29 @@ namespace MMgc
 
             if(m_gc->collecting && !m_startedFinalize)
                 flagbits0 |= kMark;
 
             block->flags[0] = flagbits0;
             block->flags[1] = flagbits1;
 #ifdef _DEBUG
             (void)originalSize;
-            if (flags & GC::kZero)
+            if (flags & GC::kZero && !RUNNING_ON_VALGRIND)
             {
                 // AllocBlock should take care of this
                 for(int i=0, n=(int)(requestSize/sizeof(int)); i<n; i++) {
                     if(((int*)item)[i] != 0)
                         GCAssert(false);
                 }
             }
 #endif
 
+            // see comments in GCAlloc about using full size instead of ask size
+            VALGRIND_MEMPOOL_ALLOC(block, item, computedSize);
+
 #ifdef MMGC_HOOKS
             GCHeap* heap = GCHeap::GetGCHeap();
             if(heap->HooksEnabled()) {
                 size_t userSize = block->size - DebugSize();
 #ifdef MMGC_MEMORY_PROFILER
                 m_totalAskSize += originalSize;
                 heap->AllocHook(GetUserPointer(item), originalSize, userSize);
 #else
@@ -171,17 +178,21 @@ namespace MMgc
             m_gc->ClearWeakRef(GetUserPointer(item));
 
         LargeBlock **prev = &m_blocks;
         while(*prev)
         {
             if(b == *prev)
             {
                 *prev = Next(b);
-                m_gc->FreeBlock(b, b->GetNumBlocks());
+                size_t numBlocks = b->GetNumBlocks();
+                VALGRIND_MEMPOOL_FREE(b, b);
+                VALGRIND_MEMPOOL_FREE(b, item);
+                VALGRIND_DESTROY_MEMPOOL(b);
+                m_gc->FreeBlock(b, numBlocks);
                 return;
             }
             prev = (LargeBlock**)(&(*prev)->next);
         }
         GCAssertMsg(false, "Bad free!");
     }
 
     void GCLargeAlloc::ClearMarks()
--- a/MMgc/GCObject.h
+++ b/MMgc/GCObject.h
@@ -245,17 +245,17 @@ namespace MMgc
         void Pin()
         {
 #ifdef _DEBUG
             // This is a deleted object so ignore it.
             if(IsGCPoisoned())
                 return;
 #endif
             // This is a deleted/free object so ignore it.
-            if (composite == 0)
+            if (getCompositeSafe() == 0)
                 return;
 
             composite |= STACK_PIN;
         }
 
         /**
          * Explicitly unpin the object, allowing it to be reaped by the ZCT.
          * It is not advised to unpin objects that weren't pinned explicitly
@@ -462,16 +462,28 @@ namespace MMgc
         // otherwise preserve the pinned flag.
         void setZCTIndexAndMaybeUnpin(uint32_t index, uint32_t reaping)
         {
             GCAssert(reaping == 0 || reaping == 1);
             GCAssert(index <= (ZCT_INDEX>>8));
             composite = (composite&~(ZCT_INDEX|((~reaping&1)<<STACK_PIN_SHIFT))) | ((index<<8)|ZCTFLAG);
         }
 
+        // Before we read composite, tell valgrind it's okay if
+        // composite isn't defined.  Deleted RCObject pointers can
+        // live on the stack, so this read is always okay: we have
+        // checked the page header and found this to be a committed
+        // RCObject page.
+        REALLY_INLINE uint32_t getCompositeSafe()
+        {
+            uint32_t *c = &composite;
+            VALGRIND_MAKE_MEM_DEFINED(c, sizeof(*c));
+            return *c;
+        }
+
         // Fields in 'composite'
         static const uint32_t ZCTFLAG            = 0x80000000;          // The object is in the ZCT
         static const uint32_t STICKYFLAG         = 0x40000000;          // The object is sticky (RC overflow)
         static const uint32_t STACK_PIN          = 0x20000000;          // The object has been pinned
         static const uint32_t STACK_PIN_SHIFT    = 29;
         static const uint32_t RCBITS             = 0x000000FF;          // 8 bits for the reference count
         static const uint32_t ZCT_INDEX          = 0x0FFFFF00;          // 20 bits for the ZCT index
         static const uint32_t ZCT_CAPACITY       = (ZCT_INDEX>>8) + 1;
--- a/MMgc/MMgc.h
+++ b/MMgc/MMgc.h
@@ -41,25 +41,42 @@
 #ifndef __MMgc__
 #define __MMgc__
 
 // VMPI.h includes avmfeatures.h, which detects platforms and sets up most MMGC_ names.
 #include "VMPI.h"
 
 #include "vmbase.h"
 
+#ifndef MMGC_VALGRIND
+// We always include valgrind headers so we need to use this define to
+// completely compile out the valgrind macros.
+    #define NVALGRIND
+#endif
+
+// Valgrind information:
+// The GCHeap, GC and FixedMalloc allocators are instrumented for valgrind's purposes.
+// All memory from the virtual memory APIs is unknown to valgrind.  Only when we tell
+// valgrind about the memory using client requests does valgrind track it.
+// See valgrind headers and online manual for client request details.
+#include <valgrind/memcheck.h>
+
 #if defined MMGC_MEMORY_INFO && defined MMGC_64BIT
     #error "MMGC_MEMORY_INFO not supported on 64-bit (see bugzilla 468501)"
 #endif
 
 #ifdef DEBUG
     #define MMGC_DELETE_DEBUGGING
     #ifndef MMGC_64BIT              // see bugzilla 468501
+    // Valgrind integration is trickier with fresh-memory scribbling and free-memory
+    // poisoning, and it's pointless since valgrind will uncover the same problems.
+    #ifndef MMGC_VALGRIND
         #define MMGC_MEMORY_INFO
     #endif
+    #endif
 #endif
 
 #if defined MMGC_MEMORY_INFO && defined MMGC_MEMORY_PROFILER
     #define MMGC_RC_HISTORY
 #endif
 
 #if defined DEBUGGER || defined MMGC_MEMORY_PROFILER || defined MMGC_MEMORY_INFO
     #ifndef MMGC_HOOKS
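
The block above always includes the valgrind headers and defines NVALGRIND when the feature is off, relying on the headers' own compile-out support. A minimal sketch of that contract (nothing MMgc-specific): with NVALGRIND defined before the include, the client-request macros expand to no-ops and RUNNING_ON_VALGRIND is a compile-time 0, so non-valgrind builds pay nothing for the instrumentation.

    #define NVALGRIND                // what MMgc.h does when MMGC_VALGRIND is off
    #include <valgrind/memcheck.h>

    int main()
    {
        char buf[16];
        VALGRIND_MAKE_MEM_DEFINED(buf, sizeof(buf));   // expands to a no-op here
        return RUNNING_ON_VALGRIND;                    // 0 in this configuration
    }
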
--- a/MMgc/Shared-inlines.h
+++ b/MMgc/Shared-inlines.h
@@ -43,41 +43,49 @@
 // Inline methods shared across classes
 
 
 namespace MMgc
 {
     // FL* are freelist helpers, put in one place to ease Valgrind support
     REALLY_INLINE void **FLSeed(void **item, void *next)
     {
+        VALGRIND_MAKE_MEM_DEFINED(item, sizeof(void*));
         item[0] = next;
+        VALGRIND_MAKE_MEM_UNDEFINED(item, sizeof(void*));
         return (void**)next;
     }
 
     REALLY_INLINE void FLPush(void* &head, const void *next)
     {
         *(void**)next = head;
         head = (void*)next;
     }
 
     REALLY_INLINE void *FLPop(void* &head)
     {
         void *p = head;
+        VALGRIND_MAKE_MEM_DEFINED(p, sizeof(void*));
         head = *(void**)p;
+        VALGRIND_MAKE_MEM_UNDEFINED(p, sizeof(void*));
         return p;
     }
 
     REALLY_INLINE void *FLPopAndZero(void* &head)
     { 
         void *p = FLPop(head);
+        VALGRIND_MAKE_MEM_DEFINED(p, sizeof(void*));
         *(void**)p = NULL;
+        VALGRIND_MAKE_MEM_UNDEFINED(p, sizeof(void*));
         return p;
     }
 
     REALLY_INLINE void *FLNext(void *item)
     {
         void **p = (void**)item;
+        VALGRIND_MAKE_MEM_DEFINED(p, sizeof(void*));
         void *next = p[0];
+        VALGRIND_MAKE_MEM_UNDEFINED(p, sizeof(void*));
         return next;
     }
 }
 
 #endif /* __Shared_inlines__ */
--- a/MMgc/ZCT.cpp
+++ b/MMgc/ZCT.cpp
@@ -604,16 +604,21 @@ namespace MMgc
         RCObject **end = p + len/sizeof(RCObject*);
 
         const void *_memStart = (const void*)gc->pageMap.MemStart();
         const void *_memEnd = (const void*)gc->pageMap.MemEnd();
 
         while(p < end) {
             const void *val = GC::Pointer(*p++);
 
+#ifndef NVALGRIND
+            if (end == (void*)gc->GetStackTop())
+                VALGRIND_MAKE_MEM_DEFINED(&val, sizeof(val));
+#endif // !NVALGRIND
+
             if(val < _memStart || val >= _memEnd)
                 continue;
 
             // Any pointer into the object pins the object.
 
             switch (gc->GetPageMapValue((uintptr_t)val)) {
                 case PageMap::kGCAllocPage:
                     val = GCAlloc::IsRCObject(val) ? GetUserPointer(GCAlloc::FindBeginning(val)) : NULL;
--- a/build/avmfeatures.py
+++ b/build/avmfeatures.py
@@ -141,16 +141,21 @@ def featureSettings(o):
         args += "-DAVMFEATURE_OVERRIDE_GLOBAL_NEW=1 "
     if (arg == False):
         args += "-DAVMFEATURE_OVERRIDE_GLOBAL_NEW=0 "
     arg = o.getBoolArg("memory-profiler")
     if (arg == True):
         args += "-DAVMFEATURE_MEMORY_PROFILER=1 "
     if (arg == False):
         args += "-DAVMFEATURE_MEMORY_PROFILER=0 "
+    arg = o.getBoolArg("valgrind")
+    if (arg == True):
+        args += "-DAVMFEATURE_VALGRIND=1 "
+    if (arg == False):
+        args += "-DAVMFEATURE_VALGRIND=0 "
     arg = o.getBoolArg("cache-gqcn")
     if (arg == True):
         args += "-DAVMFEATURE_CACHE_GQCN=1 "
     if (arg == False):
         args += "-DAVMFEATURE_CACHE_GQCN=0 "
     arg = o.getBoolArg("sin-cos-nonfinite")
     if (arg == True):
         args += "-DAVMTWEAK_SIN_COS_NONFINITE=1 "
--- a/build/avmshell-common.xcconfig
+++ b/build/avmshell-common.xcconfig
@@ -37,17 +37,17 @@
 
 GCC_STRICT_ALIASING = YES
 GCC_ENABLE_CPP_EXCEPTIONS=NO
 GCC_ENABLE_CPP_RTTI=NO
 GCC_PREFIX_HEADER = ../../../core/avmplus.h
 GCC_PRECOMPILE_PREFIX_HEADER = YES
 
 // shell is included in this search path only for avmshell-features.h, included from platform/VMPI.h
-HEADER_SEARCH_PATHS=../../../core ../../../MMgc ../../../pcre ../../../extensions .. ../.. ../../../shell
+HEADER_SEARCH_PATHS=../../../core ../../../MMgc ../../../pcre ../../../extensions .. ../.. ../../../shell ../../../other-licenses
 
 // The following line should define all controlling cpp macros except DEBUG, _DEBUG, and DEBUGGER
 //
 // The following macros are defined here for specific effect:
 //
 //   AVMSHELL_BUILD : we use this for conditional inclusion of headers in shell builds
 //   _MAC : this is how we recognize the MacOS platform
 //
--- a/configure.py
+++ b/configure.py
@@ -109,16 +109,24 @@ OS_LIBS = []
 OS_LDFLAGS = ""
 MMGC_CPPFLAGS = "-DAVMSHELL_BUILD "
 AVMSHELL_CPPFLAGS = ""
 AVMSHELL_LDFLAGS = ""
 MMGC_DEFINES = {'SOFT_ASSERTS': None}
 NSPR_INCLUDES = ""
 NSPR_LDOPTS = ""
 
+if o.getBoolArg('valgrind', False, False):
+    OPT_CXXFLAGS = "-O1 -g "
+
+valinc = '$(topsrcdir)/other-licenses'
+if 'VALGRIND_HOME' in os.environ:
+    valinc = os.environ['VALGRIND_HOME'] + '/include'
+APP_CPPFLAGS += '-I' + valinc + ' '
+
 # See build/avmfeatures.py for the code that processes switches for
 # standard feature names.
 APP_CPPFLAGS += build.avmfeatures.featureSettings(o)
 
 if not o.getBoolArg("methodenv-impl32", True):
     APP_CPPFLAGS += "-DVMCFG_METHODENV_IMPL32=0 "
 
 memoryProfiler = o.getBoolArg("memory-profiler", False)
--- a/core/avmfeatures.as
+++ b/core/avmfeatures.as
@@ -615,16 +615,24 @@ var FEATURES =
   <feature>
     <desc> Enabling this will cache the result of getQualifiedClassName, making it run
         much more quickly, at the expense of more memory usage.
       </desc>
     <name> AVMFEATURE_CACHE_GQCN </name>
     <defines> VMCFG_CACHE_GQCN </defines>
   </feature>
 
+  <feature>
+    <desc> Enabling this will compile in code to tell valgrind about how MMgc allocates memory.
+      </desc>
+    <name> AVMFEATURE_VALGRIND </name>
+    <defines> MMGC_VALGRIND </defines>
+    <default> false </default>
+  </feature>
+
   <!-- VM adjustments for various oddities: AVMTWEAK_* -->
 
   <tweak>
     <desc> Various iphone SDK versions - at least - botch sin() and cos() around NaN
            and infinity.  See https://bugzilla.mozilla.org/show_bug.cgi?id=556149. </desc>
     <name> AVMTWEAK_SIN_COS_NONFINITE </name>
     <defines> VMCFG_TWEAK_SIN_COS_NONFINITE </defines>
     <default> false </default>
--- a/core/avmfeatures.cpp
+++ b/core/avmfeatures.cpp
@@ -165,16 +165,19 @@ const char * const avmfeatures = ""
     "AVMFEATURE_INDIRECT_NATIVE_THUNKS;"
   #endif
   #if AVMFEATURE_OVERRIDE_GLOBAL_NEW
     "AVMFEATURE_OVERRIDE_GLOBAL_NEW;"
   #endif
   #if AVMFEATURE_MEMORY_PROFILER
     "AVMFEATURE_MEMORY_PROFILER;"
   #endif
+  #if AVMFEATURE_VALGRIND
+    "AVMFEATURE_VALGRIND;"
+  #endif
   #if AVMFEATURE_CACHE_GQCN
     "AVMFEATURE_CACHE_GQCN;"
   #endif
   #if AVMTWEAK_SIN_COS_NONFINITE
     "AVMTWEAK_SIN_COS_NONFINITE;"
   #endif
   #if AVMTWEAK_EPOC_EMULATOR
     "AVMTWEAK_EPOC_EMULATOR;"
--- a/core/avmfeatures.h
+++ b/core/avmfeatures.h
@@ -109,16 +109,17 @@
 #undef MMGC_ENABLE_CPP_EXCEPTIONS
 #undef MMGC_INTERIOR_PTRS
 #undef AVMPLUS_WITH_JNI
 #undef AVMPLUS_HEAP_ALLOCA
 #undef AVMPLUS_STATIC_POINTERS
 #undef VMCFG_INDIRECT_NATIVE_THUNKS
 #undef MMGC_OVERRIDE_GLOBAL_NEW
 #undef MMGC_MEMORY_PROFILER
+#undef MMGC_VALGRIND
 #undef VMCFG_CACHE_GQCN
 
 #undef VMCFG_TWEAK_SIN_COS_NONFINITE
 #undef VMCFG_EPOC_EMULATOR
 
 
 
 /* AVMSYSTEM_32BIT
@@ -564,16 +565,25 @@
  * Enabling this will compile in code to enable memory profiling. (Must still be
  * enabled at runtime.)
  */
 #if !defined AVMFEATURE_MEMORY_PROFILER || AVMFEATURE_MEMORY_PROFILER != 0 && AVMFEATURE_MEMORY_PROFILER != 1
 #  error "AVMFEATURE_MEMORY_PROFILER must be defined and 0 or 1 (only)."
 #endif
 
 
+/* AVMFEATURE_VALGRIND
+ *
+ * Enabling this will compile in code to tell valgrind about how MMgc allocates memory.
+ */
+#if !defined AVMFEATURE_VALGRIND || AVMFEATURE_VALGRIND != 0 && AVMFEATURE_VALGRIND != 1
+#  error "AVMFEATURE_VALGRIND must be defined and 0 or 1 (only)."
+#endif
+
+
 /* AVMFEATURE_CACHE_GQCN
  *
  * Enabling this will cache the result of getQualifiedClassName, making it run
  * much more quickly, at the expense of more memory usage.
  */
 #if !defined AVMFEATURE_CACHE_GQCN || AVMFEATURE_CACHE_GQCN != 0 && AVMFEATURE_CACHE_GQCN != 1
 #  error "AVMFEATURE_CACHE_GQCN must be defined and 0 or 1 (only)."
 #endif
@@ -711,16 +721,17 @@
 
 
 
 
 
 
 
 
+
 #if AVMSYSTEM_IA32+AVMSYSTEM_AMD64+AVMSYSTEM_ARM+AVMSYSTEM_PPC+AVMSYSTEM_SPARC+AVMSYSTEM_MIPS+AVMSYSTEM_SH4 > 1
 #  error "At most one of AVMSYSTEM_IA32,AVMSYSTEM_AMD64,AVMSYSTEM_ARM,AVMSYSTEM_PPC,AVMSYSTEM_SPARC,AVMSYSTEM_MIPS,AVMSYSTEM_SH4 must be defined."
 #endif
 #if AVMFEATURE_WORDCODE_INTERP+AVMFEATURE_ABC_INTERP > 1
 #  error "At most one of AVMFEATURE_WORDCODE_INTERP,AVMFEATURE_ABC_INTERP must be defined."
 #endif
 #if AVMFEATURE_WORDCODE_INTERP+AVMFEATURE_JIT > 1
 #  error "At most one of AVMFEATURE_WORDCODE_INTERP,AVMFEATURE_JIT must be defined."
@@ -941,16 +952,19 @@
 #  define VMCFG_INDIRECT_NATIVE_THUNKS
 #endif
 #if AVMFEATURE_OVERRIDE_GLOBAL_NEW
 #  define MMGC_OVERRIDE_GLOBAL_NEW
 #endif
 #if AVMFEATURE_MEMORY_PROFILER
 #  define MMGC_MEMORY_PROFILER
 #endif
+#if AVMFEATURE_VALGRIND
+#  define MMGC_VALGRIND
+#endif
 #if AVMFEATURE_CACHE_GQCN
 #  define VMCFG_CACHE_GQCN
 #endif
 
 #if AVMTWEAK_SIN_COS_NONFINITE
 #  define VMCFG_TWEAK_SIN_COS_NONFINITE
 #endif
 #if AVMTWEAK_EPOC_EMULATOR
--- a/platform/android/Makefile
+++ b/platform/android/Makefile
@@ -307,17 +307,17 @@ ifdef SPEAK_STATUS
 	@echo Speak status ENabled
 else
 	@echo Speak status DISabled
 endif
 
 # end make.common
 
 # Modify variables set by make.common before including other makefiles:
-INC := -I. -I.. -I../.. -I../../shell -I../../core -I../../eval -I../../MMgc -I../../pcre -I../../extensions -I../../VMPI -I../../vmbase -I../../other-licenses/zlib $(INC)
+INC := -I. -I.. -I../.. -I../../shell -I../../core -I../../eval -I../../MMgc -I../../pcre -I../../extensions -I../../VMPI -I../../vmbase -I../../other-licenses/zlib  -I../../other-licenses $(INC)
 
 AVMSHELL_DEFS := -DUNIX -Dlinux -DUSE_PTHREAD_MUTEX -DNO_SYS_SIGNAL -DHAVE_STDARG -DNO_CONSOLE_FWRITE -DAVMPLUS_ARM -DAVMPLUS_UNIX -DAVMSHELL_BUILD 
 
 ifneq ($(ENABLE_NANOJIT),true)
 	AVMSHELL_DEFS := ${AVMSHELL_DEFS} -DAVMPLUS_DISABLE_NJ
 endif
 
 include $(ANDROID_MAKEFILE_DIR)/make.avm
--- a/shell/avmshell-features.h
+++ b/shell/avmshell-features.h
@@ -216,9 +216,13 @@
 	#undef		AVMFEATURE_EVAL // Don't compile in this feature yet.
 	#define		AVMFEATURE_EVAL 0
 	#if AVMSYSTEM_SYMBIAN && AVMSYSTEM_IA32
 		#undef		AVMFEATURE_CPP_EXCEPTIONS
 		#define		AVMFEATURE_CPP_EXCEPTIONS 1 // winscw compiler wants
 	#endif // AVMSYSTEM_SYMBIAN && AVMSYSTEM_IA32
 #endif // AVMSYSTEM_SYMBIAN
 
+#ifndef AVMFEATURE_VALGRIND
+  #define AVMFEATURE_VALGRIND 0
+#endif
+
 #endif // __avmshell_features__