replace AvmAssert with NanoAssert everywhere (bug 554549 r=nnethercote+)
Also, remove unused AvmAssert, AvmAssertMsg, and AvmDebugLog from nanojit/avmplus.h
--- a/js/src/nanojit/Assembler.cpp
+++ b/js/src/nanojit/Assembler.cpp
@@ -1605,17 +1605,17 @@ namespace nanojit
#if NJ_JTBL_SUPPORTED
case LIR_jtbl:
{
countlir_jtbl();
// Multiway jump can contain both forward and backward jumps.
// Out of range indices aren't allowed or checked.
// Code after this jtbl instruction is unreachable.
releaseRegisters();
- AvmAssert(_allocator.countActive() == 0);
+ NanoAssert(_allocator.countActive() == 0);
uint32_t count = ins->getTableSize();
bool has_back_edges = false;
// Merge the regstates of labels we have already seen.
for (uint32_t i = count; i-- > 0;) {
LIns* to = ins->getTarget(i);
LabelState *lstate = _labels.get(to);
@@ -1630,17 +1630,17 @@ namespace nanojit
asm_output("forward edges");
// In a multi-way jump, the register allocator has no ability to deal
// with two existing edges that have conflicting register assignments, unlike
// a conditional branch where code can be inserted on the fall-through path
// to reconcile registers. So, frontends *must* insert LIR_regfence at labels of
// forward jtbl jumps. Check here to make sure no registers were picked up from
// any forward edges.
- AvmAssert(_allocator.countActive() == 0);
+ NanoAssert(_allocator.countActive() == 0);
if (has_back_edges) {
handleLoopCarriedExprs(pending_lives);
// save merged (empty) register state at target labels we haven't seen yet
for (uint32_t i = count; i-- > 0;) {
LIns* to = ins->getTarget(i);
LabelState *lstate = _labels.get(to);
if (!lstate) {
--- a/js/src/nanojit/CodeAlloc.cpp
+++ b/js/src/nanojit/CodeAlloc.cpp
@@ -148,17 +148,17 @@ namespace nanojit
}
void CodeAlloc::free(NIns* start, NIns *end) {
NanoAssert(heapblocks);
CodeList *blk = getBlock(start, end);
if (verbose)
avmplus::AvmLog("free %p-%p %d\n", start, end, (int)blk->size());
- AvmAssert(!blk->isFree);
+ NanoAssert(!blk->isFree);
// coalesce adjacent blocks.
bool already_on_avail_list;
if (blk->lower && blk->lower->isFree) {
// combine blk into blk->lower (destroy blk)
CodeList* lower = blk->lower;
CodeList* higher = blk->higher;
--- a/js/src/nanojit/VMPI.cpp
+++ b/js/src/nanojit/VMPI.cpp
@@ -96,27 +96,27 @@ VMPI_setPageProtection(void *address,
address = (void*)((size_t)address & ~(0xfff));
size = (size + 0xfff) & ~(0xfff);
ULONG attribFlags = PAG_FREE;
while (size) {
ULONG attrib;
ULONG range = size;
ULONG retval = DosQueryMem(address, &range, &attrib);
- AvmAssert(retval == 0);
+ NanoAssert(retval == 0);
// exit if this is the start of the next memory object
if (attrib & attribFlags) {
break;
}
attribFlags |= PAG_BASE;
range = size > range ? range : size;
retval = DosSetMem(address, range, flags);
- AvmAssert(retval == 0);
+ NanoAssert(retval == 0);
address = (char*)address + range;
size -= range;
}
}
#else // !WIN32 && !AVMPLUS_OS2
@@ -135,13 +135,13 @@ void VMPI_setPageProtection(void *addres
int flags = PROT_READ;
if (executableFlag) {
flags |= PROT_EXEC;
}
if (writeableFlag) {
flags |= PROT_WRITE;
}
int retval = mprotect((maddr_ptr)beginPage, (unsigned int)sizePaged, flags);
- AvmAssert(retval == 0);
+ NanoAssert(retval == 0);
(void)retval;
}
#endif // WIN32
--- a/js/src/nanojit/avmplus.h
+++ b/js/src/nanojit/avmplus.h
@@ -85,20 +85,16 @@
#define NJ_PROFILE 1
#include <stdarg.h>
#endif
#ifdef _DEBUG
void NanoAssertFail();
#endif
-#define AvmAssert(x) assert(x)
-#define AvmAssertMsg(x, y)
-#define AvmDebugLog(x) printf x
-
#if defined(AVMPLUS_IA32)
#if defined(_MSC_VER)
__declspec(naked) static inline __int64 rdtsc()
{
__asm
{
rdtsc;
ret;