Bug 407216 - DOM quick stubs - faster paths for top N DOM methods (r+sr=jst, security r=mrbkap, build r=bsmedberg)
authorJason Orendorff <jorendorff@mozilla.com>
Tue, 19 Aug 2008 21:38:24 -0500
changeset 17092 cf6c811e127249ad399fe332627e24ed78aec1b4
parent 17091 1e3d4775197af65a4f9bcebf2280ad0d710b722a
child 17093 00bd2fd48d602f6f61424a5f6d587fce5b2d1709
push id unknown
push user unknown
push date unknown
reviewers mrbkap, build, bsmedberg
bugs 407216
milestone 1.9.1a2pre
Bug 407216 - DOM quick stubs - faster paths for top N DOM methods (r+sr=jst, security r=mrbkap, build r=bsmedberg) * * * * * *
accessible/src/msaa/nsAccessNodeWrap.h
dom/public/idl/events/nsIDOMNSEvent.idl
dom/src/base/nsDOMClassInfo.cpp
js/src/jscntxt.h
js/src/xpconnect/idl/nsIXPCScriptable.idl
js/src/xpconnect/idl/nsIXPConnect.idl
js/src/xpconnect/public/xpc_map_end.h
js/src/xpconnect/src/Makefile.in
js/src/xpconnect/src/dom_quickstubs.qsconf
js/src/xpconnect/src/nsXPConnect.cpp
js/src/xpconnect/src/qsgen.py
js/src/xpconnect/src/xpcconvert.cpp
js/src/xpconnect/src/xpcprivate.h
js/src/xpconnect/src/xpcquickstubs.cpp
js/src/xpconnect/src/xpcquickstubs.h
js/src/xpconnect/src/xpcthrower.cpp
js/src/xpconnect/src/xpcwrappednativeproto.cpp
js/src/xpconnect/tests/mochitest/test_bug390488.html
other-licenses/ply/COPYING
other-licenses/ply/README
other-licenses/ply/ply/__init__.py
other-licenses/ply/ply/lex.py
other-licenses/ply/ply/yacc.py
storage/src/mozStorageStatementWrapper.cpp
widget/public/nsGUIEvent.h
xpcom/idl-parser/header.py
xpcom/idl-parser/xpidl.py
--- a/accessible/src/msaa/nsAccessNodeWrap.h
+++ b/accessible/src/msaa/nsAccessNodeWrap.h
@@ -59,17 +59,16 @@
 #include "nsIContent.h"
 #include "nsAccessNode.h"
 #include "OLEIDL.H"
 #include "OLEACC.H"
 #include <winuser.h>
 #ifndef WINABLEAPI
 #include <winable.h>
 #endif
-#undef ERROR /// Otherwise we can't include nsIDOMNSEvent.h if we include this
 #ifdef MOZ_CRASHREPORTER
 #include "nsICrashReporter.h"
 #endif
 
 typedef LRESULT (STDAPICALLTYPE *LPFNNOTIFYWINEVENT)(DWORD event,HWND hwnd,LONG idObjectType,LONG idObject);
 typedef LRESULT (STDAPICALLTYPE *LPFNGETGUITHREADINFO)(DWORD idThread, GUITHREADINFO* pgui);
 
 class nsAccessNodeWrap :  public nsAccessNode,
--- a/dom/public/idl/events/nsIDOMNSEvent.idl
+++ b/dom/public/idl/events/nsIDOMNSEvent.idl
@@ -34,17 +34,17 @@
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "domstubs.idl"
 
 %{C++
-#ifdef WINCE
+#ifdef ERROR
 #undef ERROR
 #endif
 %}
 
 [scriptable, uuid(e565d518-4510-407f-a3d9-3b4107549c6d)]
 interface nsIDOMNSEvent : nsISupports
 {
   const long MOUSEDOWN    = 0x00000001;
--- a/dom/src/base/nsDOMClassInfo.cpp
+++ b/dom/src/base/nsDOMClassInfo.cpp
@@ -3818,17 +3818,18 @@ nsDOMClassInfo::PostCreate(nsIXPConnectW
   // Consider if a site sets HTMLElement.prototype.foopy = function () { ... }
   // Now, calling document.body.foopy() needs to ensure that looking up foopy
  // on document.body's prototype will find the right function. This
   // LookupProperty accomplishes that.
   // XXX This shouldn't need to go through the JS engine. Instead, we should
   // be calling nsWindowSH::GlobalResolve directly.
   JSObject *global = ::JS_GetGlobalForObject(cx, obj);
   jsval val;
-  if (!::JS_LookupProperty(cx, global, mData->mName, &val)) {
+  if (!::JS_LookupPropertyWithFlags(cx, global, mData->mName,
+                                    JSRESOLVE_CLASSNAME, &val)) {
     return NS_ERROR_UNEXPECTED;
   }
 
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsDOMClassInfo::AddProperty(nsIXPConnectWrappedNative *wrapper, JSContext *cx,
@@ -4054,16 +4055,35 @@ NS_IMETHODIMP
 nsDOMClassInfo::InnerObject(nsIXPConnectWrappedNative *wrapper, JSContext * cx,
                             JSObject * obj, JSObject * *_retval)
 {
   NS_WARNING("nsDOMClassInfo::InnerObject Don't call me!");
 
   return NS_ERROR_UNEXPECTED;
 }
 
+NS_IMETHODIMP
+nsDOMClassInfo::PostCreatePrototype(JSContext * cx, JSObject * proto)
+{
+  PRUint32 flags = (mData->mScriptableFlags & DONT_ENUM_STATIC_PROPS)
+                   ? 0
+                   : JSPROP_ENUMERATE;
+
+  PRUint32 count = 0;
+  while (mData->mInterfaces[count]) {
+    count++;
+  }
+
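+  // Defining the stubs is purely an optimization; if it fails for any
+  // reason, clear the pending exception and let the affected members be
+  // resolved through the ordinary (slower) XPConnect paths instead.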
+  if (!sXPConnect->DefineDOMQuickStubs(cx, proto, flags,
+                                       count, mData->mInterfaces)) {
+    JS_ClearPendingException(cx);
+  }
+  return NS_OK;
+}
+
 // static
 nsIClassInfo *
 NS_GetDOMClassInfoInstance(nsDOMClassInfoID aID)
 {
   if (aID >= eDOMClassInfoIDCount) {
     NS_ERROR("Bad ID!");
 
     return nsnull;
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -977,17 +977,17 @@ js_TraceLocalRoots(JSTracer *trc, JSLoca
 typedef enum JSErrNum {
 #define MSG_DEF(name, number, count, exception, format) \
     name = number,
 #include "js.msg"
 #undef MSG_DEF
     JSErr_Limit
 } JSErrNum;
 
-extern const JSErrorFormatString *
+extern JS_FRIEND_API(const JSErrorFormatString *)
 js_GetErrorMessage(void *userRef, const char *locale, const uintN errorNumber);
 
 #ifdef va_start
 extern JSBool
 js_ReportErrorVA(JSContext *cx, uintN flags, const char *format, va_list ap);
 
 extern JSBool
 js_ReportErrorNumberVA(JSContext *cx, uintN flags, JSErrorCallback callback,
--- a/js/src/xpconnect/idl/nsIXPCScriptable.idl
+++ b/js/src/xpconnect/idl/nsIXPCScriptable.idl
@@ -50,17 +50,17 @@
 
 /**
  * Note: This is not really an XPCOM interface.  For example, callers must
  * guarantee that they set the *_retval of the various methods that return a
  * boolean to PR_TRUE before making the call.  Implementations may skip writing
  * to *_retval unless they want to return PR_FALSE.
  */
 
-[uuid(1455f6fe-6de9-4b62-a2b3-d1aee82dd829)]
+[uuid(5d309b93-e9b4-4374-bcd5-44245c83408f)]
 interface nsIXPCScriptable : nsISupports
 {
     /* bitflags used for 'flags' (only 32 bits available!) */
 
     const PRUint32 WANT_PRECREATE                   = 1 <<  0;
     const PRUint32 WANT_CREATE                      = 1 <<  1;
     const PRUint32 WANT_POSTCREATE                  = 1 <<  2;
     const PRUint32 WANT_ADDPROPERTY                 = 1 <<  3;
@@ -169,9 +169,13 @@ interface nsIXPCScriptable : nsISupports
     PRBool equality(in nsIXPConnectWrappedNative wrapper,
                     in JSContextPtr cx, in JSObjectPtr obj, in JSVal val);
 
     JSObjectPtr outerObject(in nsIXPConnectWrappedNative wrapper,
                             in JSContextPtr cx, in JSObjectPtr obj);
 
     JSObjectPtr innerObject(in nsIXPConnectWrappedNative wrapper,
                             in JSContextPtr cx, in JSObjectPtr obj);
+
+    // This method is called if the WANT_POSTCREATE bit is set in
+    // scriptableFlags.
+    void postCreatePrototype(in JSContextPtr cx, in JSObjectPtr proto);
 };
--- a/js/src/xpconnect/idl/nsIXPConnect.idl
+++ b/js/src/xpconnect/idl/nsIXPConnect.idl
@@ -400,17 +400,17 @@ interface nsIXPCFunctionThisTranslator :
 %{ C++
 // For use with the service manager
 // {CB6593E0-F9B2-11d2-BDD6-000064657374}
 #define NS_XPCONNECT_CID \
 { 0xcb6593e0, 0xf9b2, 0x11d2, \
     { 0xbd, 0xd6, 0x0, 0x0, 0x64, 0x65, 0x73, 0x74 } }
 %}
 
-[uuid(c1d4a482-1beb-4c82-9c0b-d2ab93acc7ef)]
+[uuid(d4c6bc06-2a4f-4315-90ec-d12904aca046)]
 interface nsIXPConnect : nsISupports
 {
 %{ C++
   NS_DEFINE_STATIC_CID_ACCESSOR(NS_XPCONNECT_CID)
 %}
 
     void
     initClasses(in JSContextPtr aJSContext,
@@ -742,9 +742,33 @@ interface nsIXPConnect : nsISupports
 
     /**
      * Whether or not XPConnect should report all JS exceptions when returning
      * from JS into C++. False by default, although any value set in the
      * MOZ_REPORT_ALL_JS_EXCEPTIONS environment variable will override the value
      * passed here.
      */
     void setReportAllJSExceptions(in boolean reportAllJSExceptions);
+
+    /**
+     * Define quick stubs on the given object, @a proto.
+     *
+     * @param cx
+     *     A context.  Requires request.
+     * @param proto
+     *     The (newly created) prototype object for a DOM class.  The JS half
+     *     of an XPCWrappedNativeProto.
+     * @param flags
+     *     Property flags for the quick stub properties--should be either
+     *     JSPROP_ENUMERATE or 0.
+     * @param interfaceCount
+     *     The number of interfaces the class implements.
+     * @param interfaceArray
+     *     The interfaces the class implements; interfaceArray and
+     *     interfaceCount are like what nsIClassInfo.getInterfaces returns.
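+     *
+     * As a usage sketch, nsDOMClassInfo::PostCreatePrototype passes the
+     * class's interface list along with JSPROP_ENUMERATE (or 0), roughly:
+     *
+     *     sXPConnect->DefineDOMQuickStubs(cx, proto, flags,
+     *                                     count, mData->mInterfaces);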
+     */
+    [noscript,notxpcom] PRBool defineDOMQuickStubs(
+        in JSContextPtr cx,
+        in JSObjectPtr proto,
+        in PRUint32 flags,
+        in PRUint32 interfaceCount,
+        [array, size_is(interfaceCount)] in nsIIDPtr interfaceArray);
 };
--- a/js/src/xpconnect/public/xpc_map_end.h
+++ b/js/src/xpconnect/public/xpc_map_end.h
@@ -227,16 +227,21 @@ NS_IMETHODIMP XPC_MAP_CLASSNAME::OuterOb
     {NS_ERROR("never called"); return NS_ERROR_NOT_IMPLEMENTED;}
 #endif
 
 #ifndef XPC_MAP_WANT_INNER_OBJECT
 NS_IMETHODIMP XPC_MAP_CLASSNAME::InnerObject(nsIXPConnectWrappedNative *wrapper, JSContext * cx, JSObject * obj, JSObject * *_retval)
     {NS_ERROR("never called"); return NS_ERROR_NOT_IMPLEMENTED;}
 #endif
 
+#ifndef XPC_MAP_WANT_POST_CREATE_PROTOTYPE
+NS_IMETHODIMP XPC_MAP_CLASSNAME::PostCreatePrototype(JSContext *cx, JSObject *proto)
+    {return NS_OK;}
+#endif
+
 /**************************************************************/
 
 #undef XPC_MAP_CLASSNAME
 #undef XPC_MAP_QUOTED_CLASSNAME
 
 #ifdef XPC_MAP_WANT_PRECREATE
 #undef XPC_MAP_WANT_PRECREATE
 #endif
@@ -308,11 +313,15 @@ NS_IMETHODIMP XPC_MAP_CLASSNAME::InnerOb
 #ifdef XPC_MAP_WANT_EQUALITY
 #undef XPC_MAP_WANT_EQUALITY
 #endif
 
 #ifdef XPC_MAP_WANT_OUTER_OBJECT
 #undef XPC_MAP_WANT_OUTER_OBJECT
 #endif
 
+#ifdef XPC_MAP_WANT_POST_CREATE_PROTOTYPE
+#undef XPC_MAP_WANT_POST_CREATE_PROTOTYPE
+#endif
+
 #ifdef XPC_MAP_FLAGS
 #undef XPC_MAP_FLAGS
 #endif
--- a/js/src/xpconnect/src/Makefile.in
+++ b/js/src/xpconnect/src/Makefile.in
@@ -62,16 +62,28 @@ PACKAGE_FILE = xpconnect.pkg
 REQUIRES	= xpcom \
 		  string \
 		  js \
 		  caps \
 		  necko \
 		  dom \
 		  $(NULL)
 
+# These modules are required because the auto-generated file
+# dom_quickstubs.cpp #includes header files from many places.
+REQUIRES	+= content \
+		   editor \
+		   layout \
+		   rdf \
+		   svg \
+		   xuldoc \
+		   xultmpl \
+		   $(NULL)
+
+
 CPPSRCS		= \
 		nsScriptError.cpp \
 		nsXPConnect.cpp \
 		xpccallcontext.cpp \
 		xpccomponents.cpp \
 		xpccontext.cpp \
 		xpcconvert.cpp \
 		xpcdebug.cpp \
@@ -94,16 +106,18 @@ CPPSRCS		= \
 		xpcwrappednativejsops.cpp \
 		xpcwrappednativeproto.cpp \
 		xpcwrappednativescope.cpp \
 		XPCNativeWrapper.cpp \
 		xpcJSWeakReference.cpp \
 		XPCSafeJSObjectWrapper.cpp \
 		XPCCrossOriginWrapper.cpp \
 		XPCWrapper.cpp \
+		xpcquickstubs.cpp \
+		dom_quickstubs.cpp \
 		$(NULL)
 ifdef XPC_IDISPATCH_SUPPORT
 CPPSRCS +=	XPCDispObject.cpp	\
 		XPCDispInterface.cpp	\
 		XPCDispConvert.cpp \
 		XPCDispTypeInfo.cpp	\
 		XPCDispTearOff.cpp \
 		XPCIDispatchExtension.cpp \
@@ -163,8 +177,30 @@ ifeq (,$(findstring GL,$(CXXFLAGS)))
 CXXFLAGS	+= -YX -Fp$(LIBRARY_NAME).pch
 # precompiled headers require write access to .pch which breaks -j builds
 .NOTPARALLEL:
 endif
 endif
 endif
 endif
 
+nsXPConnect.$(OBJ_SUFFIX): dom_quickstubs.h
+
+dom_quickstubs.h dom_quickstubs.cpp: $(srcdir)/dom_quickstubs.qsconf \
+                                     $(srcdir)/qsgen.py \
+                                     $(topsrcdir)/xpcom/idl-parser/header.py \
+                                     $(topsrcdir)/xpcom/idl-parser/xpidl.py
+	PYTHONPATH=$(topsrcdir)/xpcom/idl-parser \
+	  $(PYTHON) $(srcdir)/qsgen.py \
+	  --idlpath=$(DEPTH)/dist/idl \
+	  --cachedir=$(DEPTH)/xpcom/idl-parser \
+	  --header-output dom_quickstubs.h \
+	  --stub-output dom_quickstubs.cpp \
+	  --makedepend-output dom_quickstubs.depends \
+	  $(srcdir)/dom_quickstubs.qsconf
+
+GARBAGE += \
+		dom_quickstubs.h \
+		dom_quickstubs.cpp \
+		dom_quickstubs.depends \
+		$(NULL)
+
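+# dom_quickstubs.depends, written by qsgen.py via --makedepend-output, lists
+# the IDL files the generator read, so the stubs get regenerated whenever
+# one of those interfaces changes.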
+-include dom_quickstubs.depends
new file mode 100644
--- /dev/null
+++ b/js/src/xpconnect/src/dom_quickstubs.qsconf
@@ -0,0 +1,545 @@
+# -*- Mode: Python -*-
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+#   Mozilla Foundation.
+# Portions created by the Initial Developer are Copyright (C) 2008
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Jason Orendorff <jorendorff@mozilla.com>
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+name = 'DOM'
+
+# A quick warning:
+#
+# Attributes or methods that call GetCurrentNativeCallContext must not be
+# quick-stubbed, because quick stubs don't generate a native call context.
+# qsgen.py has no way of knowing which attributes and methods do this, as it
+# looks at interfaces, not implementations.  The symptoms, if you quick-stub
+# one of those, can be really weird, because GetCurrentNativeCallContext
+# doesn't crash--it may in fact return a plausible wrong answer.
+
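+# Each entry below is 'interfaceName.memberName'.  As a sketch of the two
+# forms qsgen.py accepts ('*' stubs every scriptable member of an
+# interface):
+#
+#     'nsIDOMNode.nodeName',    # stub a single attribute or method
+#     'nsIDOMNodeList.*',       # stub everything scriptable on the interface
+#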
+members = [ 
+    # dom/public/idl/base
+    #
+    # Note that many implementations of interfaces in this directory
+    # use GetCurrentNativeCallContext, notably:
+    #   - nsIDOMCrypto.{generateCRMFRequest,signText}
+    #   - nsIDOMLocation.reload
+    #   - nsIDOMNSHistory.go
+    #   - nsIDOMJSNavigator.preference
+    #   - nsIDOMJSPluginArray.refresh
+    #   - nsIDOMWindowInternal.postMessage
+    #   - nsIDOMJSWindow.{prompt,setTimeout,setInterval,open,openDialog}
+    #
+    # (And nsIDOMModalContentWindow.returnValue is an attribute of type
+    # nsIVariant, which qsgen.py can't handle.)
+    #
+    'nsIDOMWindow.name',
+    'nsIDOMWindow.parent',
+    'nsIDOMWindow.top',
+    'nsIDOMWindow.document',
+    'nsIDOMWindow.getSelection',
+    'nsIDOMWindowCollection.item',
+    'nsIDOMWindowCollection.length',
+    'nsIDOMLocation.hostname',
+    'nsIDOMLocation.href',
+    'nsIDOMScreen.top',
+    'nsIDOMScreen.height',
+    'nsIDOMScreen.width',
+    'nsIDOMScreen.left',
+    'nsIDOMClientRect.top',
+    'nsIDOMClientRect.right',
+    'nsIDOMClientRect.bottom',
+    'nsIDOMClientRect.left',
+    'nsIDOMClientRectList.item',
+    'nsIDOMClientRectList.length',
+
+    # dom/public/idl/canvas
+    #
+    # nsIDOMCanvasRenderingContext2D
+    # NOTE: attributes strokeStyle and fillStyle are nsIVariant
+    # NOTE: drawImage(), getImageData(), and putImageData() use
+    #       GetCurrentNativeCallContext
+    'nsIDOMCanvasRenderingContext2D.canvas',
+    'nsIDOMCanvasRenderingContext2D.save',
+    'nsIDOMCanvasRenderingContext2D.restore',
+    'nsIDOMCanvasRenderingContext2D.scale',
+    'nsIDOMCanvasRenderingContext2D.rotate',
+    'nsIDOMCanvasRenderingContext2D.translate',
+    'nsIDOMCanvasRenderingContext2D.transform',
+    'nsIDOMCanvasRenderingContext2D.setTransform',
+    'nsIDOMCanvasRenderingContext2D.globalAlpha',
+    'nsIDOMCanvasRenderingContext2D.globalCompositeOperation',
+    'nsIDOMCanvasRenderingContext2D.lineWidth',
+    'nsIDOMCanvasRenderingContext2D.lineCap',
+    'nsIDOMCanvasRenderingContext2D.lineJoin',
+    'nsIDOMCanvasRenderingContext2D.miterLimit',
+    'nsIDOMCanvasRenderingContext2D.clearRect',
+    'nsIDOMCanvasRenderingContext2D.fillRect',
+    'nsIDOMCanvasRenderingContext2D.strokeRect',
+    'nsIDOMCanvasRenderingContext2D.beginPath',
+    'nsIDOMCanvasRenderingContext2D.closePath',
+    'nsIDOMCanvasRenderingContext2D.moveTo',
+    'nsIDOMCanvasRenderingContext2D.lineTo',
+    'nsIDOMCanvasRenderingContext2D.quadraticCurveTo',
+    'nsIDOMCanvasRenderingContext2D.bezierCurveTo',
+    'nsIDOMCanvasRenderingContext2D.arcTo',
+    'nsIDOMCanvasRenderingContext2D.arc',
+    'nsIDOMCanvasRenderingContext2D.rect',
+    'nsIDOMCanvasRenderingContext2D.fill',
+    'nsIDOMCanvasRenderingContext2D.stroke',
+    'nsIDOMCanvasRenderingContext2D.clip',
+    'nsIDOMCanvasRenderingContext2D.font',
+    'nsIDOMCanvasRenderingContext2D.textAlign',
+    'nsIDOMCanvasRenderingContext2D.textBaseline',
+    'nsIDOMCanvasRenderingContext2D.fillText',
+    'nsIDOMCanvasRenderingContext2D.strokeText',
+    'nsIDOMCanvasRenderingContext2D.measureText',
+    'nsIDOMCanvasRenderingContext2D.isPointInPath',
+    'nsIDOMTextMetrics.width',
+
+    # dom/public/idl/core
+    'nsIDOMCharacterData.data',
+    'nsIDOMCharacterData.length',
+    'nsIDOMDocument.documentElement',
+    'nsIDOMDocument.implementation',
+    'nsIDOMDocument.getElementsByTagName',
+    'nsIDOMDocument.doctype',
+    'nsIDOMDocument.getElementsByTagNameNS',
+    'nsIDOMDocument.getElementById',
+    'nsIDOMDocument.createDocumentFragment',
+    'nsIDOMDocument.createElement',
+    'nsIDOMDocument.importNode',
+    'nsIDOMDocument.createTextNode',
+    'nsIDOMElement.removeAttributeNS',
+    'nsIDOMElement.removeAttribute',
+    'nsIDOMElement.getAttribute',
+    'nsIDOMElement.getElementsByTagName',
+    'nsIDOMElement.setAttribute',
+    'nsIDOMElement.getElementsByTagNameNS',
+    'nsIDOMElement.hasAttributeNS',
+    'nsIDOMElement.tagName',
+    'nsIDOMElement.setAttributeNS',
+    'nsIDOMElement.hasAttribute',
+    'nsIDOMElement.getAttributeNS',
+    'nsIDOMNamedNodeMap.item',
+    'nsIDOMNamedNodeMap.length',
+    'nsIDOMNode.appendChild',
+    'nsIDOMNode.nextSibling',
+    'nsIDOMNode.cloneNode',
+    'nsIDOMNode.firstChild',
+    'nsIDOMNode.prefix',
+    'nsIDOMNode.nodeValue',
+    'nsIDOMNode.childNodes',
+    'nsIDOMNode.normalize',
+    'nsIDOMNode.nodeName',
+    'nsIDOMNode.namespaceURI',
+    'nsIDOMNode.hasChildNodes',
+    'nsIDOMNode.previousSibling',
+    'nsIDOMNode.nodeType',
+    'nsIDOMNode.insertBefore',
+    'nsIDOMNode.replaceChild',
+    'nsIDOMNode.localName',
+    'nsIDOMNode.lastChild',
+    'nsIDOMNode.ownerDocument',
+    'nsIDOMNode.parentNode',
+    'nsIDOMNode.removeChild',
+    'nsIDOMNode.hasAttributes',
+    'nsIDOMNode.attributes',
+    'nsIDOMNodeList.item',
+    'nsIDOMNodeList.length',
+    'nsIDOMText.splitText',
+    'nsIDOM3Document.documentURI',
+    'nsIDOM3Document.adoptNode',
+    'nsIDOM3Document.renameNode',
+    'nsIDOM3Node.compareDocumentPosition',
+    'nsIDOM3Node.getUserData',
+    'nsIDOM3Node.baseURI',
+    'nsIDOM3Node.textContent',
+    'nsIDOM3Node.isSameNode',
+    'nsIDOM3Node.lookupNamespaceURI',
+    'nsIDOM3Node.setUserData',
+    'nsIDOM3Node.lookupPrefix',
+    'nsIDOM3Node.isDefaultNamespace',
+    'nsIDOM3Node.isEqualNode',
+    'nsIDOM3Text.isElementContentWhitespace',
+    'nsIDOM3Text.replaceWholeText',
+    'nsIDOM3Text.wholeText',
+    'nsIDOMDOMStringList.item',
+    'nsIDOMDOMStringList.length',
+    'nsIDOMDOMStringList.contains',
+    'nsIDOMNameList.getName',
+    'nsIDOMNameList.contains',
+    'nsIDOMNameList.containsNS',
+    'nsIDOMNameList.length',
+    'nsIDOMNameList.getNamespaceURI',
+    'nsIDOMNSDocument.getElementsByClassName',
+    'nsIDOMNSDocument.title',
+    'nsIDOMNSDocument.hasFocus',
+    'nsIDOMNSDocument.location',
+    'nsIDOMNSDocument.elementFromPoint',
+    'nsIDOMNSDocument.activeElement',
+    'nsIDOMNSDocument.getBoxObjectFor',
+    'nsIDOMXMLDocument.evaluateXPointer',
+    'nsIDOMXMLDocument.evaluateFIXptr',
+    'nsIDOMNSEditableElement.editor',
+    'nsIDOMNSEditableElement.setUserInput',
+    'nsIDOMNSElement.getClientRects',
+    'nsIDOMNSElement.getBoundingClientRect',
+    'nsIDOMNSElement.getElementsByClassName',
+    'nsIDOMNSElement.scrollWidth',
+    'nsIDOMNSElement.clientLeft',
+    'nsIDOMNSElement.clientHeight',
+    'nsIDOMNSElement.clientWidth',
+    'nsIDOMNSElement.clientTop',
+
+    # dom/public/idl/css
+    'nsIDOMElementCSSInlineStyle.style',
+    'nsIDOMCSS2Properties.background',
+    'nsIDOMCSS2Properties.height',
+    'nsIDOMCSS2Properties.textAlign',
+    'nsIDOMCSS2Properties.right',
+    'nsIDOMCSS2Properties.bottom',
+    'nsIDOMCSS2Properties.fontSize',
+    'nsIDOMCSS2Properties.backgroundColor',
+    'nsIDOMCSS2Properties.letterSpacing',
+    'nsIDOMCSS2Properties.verticalAlign',
+    'nsIDOMCSS2Properties.color',
+    'nsIDOMCSS2Properties.top',
+    'nsIDOMCSS2Properties.width',
+    'nsIDOMCSS2Properties.display',
+    'nsIDOMCSS2Properties.zIndex',
+    'nsIDOMCSS2Properties.position',
+    'nsIDOMCSS2Properties.left',
+    'nsIDOMCSS2Properties.visibility',
+    'nsIDOMNSCSS2Properties.opacity',
+    'nsIDOMRect.top',
+    'nsIDOMRect.right',
+    'nsIDOMRect.left',
+    'nsIDOMRect.bottom',
+    'nsIDOMViewCSS.getComputedStyle',
+
+    # dom/public/idl/events
+    'nsIDOMEvent.target',
+    'nsIDOMEvent.preventDefault',
+    'nsIDOMEvent.cancelable',
+    'nsIDOMEvent.currentTarget',
+    'nsIDOMEvent.timeStamp',
+    'nsIDOMEvent.bubbles',
+    'nsIDOMEvent.type',
+    'nsIDOMEvent.initEvent',
+    'nsIDOMEvent.stopPropagation',
+    'nsIDOMEvent.eventPhase',
+    'nsIDOMEventTarget.dispatchEvent',
+    'nsIDOMEventTarget.removeEventListener',
+    'nsIDOMEventTarget.addEventListener',
+    'nsIDOMEventListener.handleEvent',
+    'nsIDOMCustomEvent.setCurrentTarget',
+    'nsIDOMCustomEvent.setEventPhase',
+    'nsIDOMDocumentEvent.createEvent',
+    'nsIDOMMouseEvent.clientX',
+    'nsIDOMMouseEvent.clientY',
+    'nsIDOMMouseEvent.relatedTarget',
+    'nsIDOMMouseEvent.shiftKey',
+    'nsIDOMMouseEvent.button',
+    'nsIDOMMouseEvent.altKey',
+    'nsIDOMMouseEvent.metaKey',
+    'nsIDOMMouseEvent.ctrlKey',
+    'nsIDOMMouseEvent.screenY',
+    'nsIDOMMouseEvent.screenX',
+    'nsIDOMNSEvent.originalTarget',
+    'nsIDOMNSEvent.preventCapture',
+    'nsIDOMKeyEvent.ctrlKey',
+    'nsIDOMKeyEvent.shiftKey',
+    'nsIDOMKeyEvent.keyCode',
+    'nsIDOMKeyEvent.metaKey',
+    'nsIDOMKeyEvent.charCode',
+    'nsIDOMKeyEvent.altKey',
+    'nsIDOMMutationEvent.attrName',
+    'nsIDOMMutationEvent.relatedNode',
+    'nsIDOMMutationEvent.attrChange',
+    'nsIDOMMutationEvent.newValue',
+    'nsIDOMMutationEvent.prevValue',
+    'nsIDOMNSUIEvent.getPreventDefault',
+    'nsIDOMNSUIEvent.which',
+    'nsIDOMNSUIEvent.rangeParent',
+    'nsIDOMNSUIEvent.rangeOffset',
+    'nsIDOMNSUIEvent.pageX',
+    'nsIDOMNSUIEvent.pageY',
+    'nsIDOMNSUIEvent.isChar',
+
+    # dom/public/idl/geolocation - None.
+
+    # dom/public/idl/html
+    'nsIDOMHTMLAnchorElement.href',
+    'nsIDOMHTMLAnchorElement.rel',
+    'nsIDOMHTMLAnchorElement.target',
+    'nsIDOMHTMLBaseElement.href',
+    'nsIDOMHTMLBaseElement.target',
+    'nsIDOMHTMLButtonElement.name',
+    'nsIDOMHTMLButtonElement.form',
+    'nsIDOMHTMLButtonElement.value',
+    'nsIDOMHTMLButtonElement.disabled',
+    'nsIDOMHTMLCollection.item',
+    'nsIDOMHTMLCollection.length',
+    'nsIDOMHTMLDocument.body',
+    'nsIDOMHTMLDocument.getElementsByName',
+    'nsIDOMHTMLDocument.anchors',
+    'nsIDOMHTMLDocument.links',
+    'nsIDOMHTMLDocument.title',
+    'nsIDOMHTMLDocument.URL',
+    'nsIDOMHTMLDocument.referrer',
+    'nsIDOMHTMLDocument.forms',
+    'nsIDOMHTMLDocument.cookie',
+    'nsIDOMHTMLDocument.images',
+    'nsIDOMHTMLDocument.close',
+    'nsIDOMHTMLElement.className',
+    'nsIDOMHTMLElement.id',
+    'nsIDOMHTMLElement.title',
+    'nsIDOMHTMLFormElement.elements',
+    'nsIDOMHTMLFormElement.name',
+    'nsIDOMHTMLFormElement.submit',
+    'nsIDOMHTMLFormElement.length',
+    'nsIDOMHTMLFormElement.target',
+    'nsIDOMHTMLFormElement.action',
+    'nsIDOMHTMLFrameElement.src',
+    'nsIDOMHTMLFrameElement.contentDocument',
+    'nsIDOMHTMLFrameElement.name',
+    'nsIDOMHTMLFrameSetElement.rows',
+    'nsIDOMHTMLFrameSetElement.cols',
+    'nsIDOMHTMLIFrameElement.src',
+    'nsIDOMHTMLIFrameElement.contentDocument',
+    'nsIDOMHTMLImageElement.src',
+    'nsIDOMHTMLImageElement.name',
+    'nsIDOMHTMLImageElement.height',
+    'nsIDOMHTMLImageElement.width',
+    'nsIDOMHTMLInputElement.defaultChecked',
+    'nsIDOMHTMLInputElement.disabled',
+    'nsIDOMHTMLInputElement.select',
+    'nsIDOMHTMLInputElement.checked',
+    'nsIDOMHTMLInputElement.type',
+    'nsIDOMHTMLInputElement.form',
+    'nsIDOMHTMLInputElement.src',
+    'nsIDOMHTMLInputElement.name',
+    'nsIDOMHTMLInputElement.value',
+    'nsIDOMHTMLLinkElement.disabled',
+    'nsIDOMHTMLOptionElement.index',
+    'nsIDOMHTMLOptionElement.selected',
+    'nsIDOMHTMLOptionElement.form',
+    'nsIDOMHTMLOptionElement.text',
+    'nsIDOMHTMLOptionElement.defaultSelected',
+    'nsIDOMHTMLOptionElement.value',
+    'nsIDOMHTMLOptionElement.label',
+    'nsIDOMHTMLOptionElement.disabled',
+    'nsIDOMHTMLOptionsCollection.item',
+    'nsIDOMHTMLOptionsCollection.length',
+    'nsIDOMHTMLSelectElement.name',
+    'nsIDOMHTMLSelectElement.form',
+    'nsIDOMHTMLSelectElement.add',
+    'nsIDOMHTMLSelectElement.value',
+    'nsIDOMHTMLSelectElement.disabled',
+    'nsIDOMHTMLSelectElement.length',
+    'nsIDOMHTMLSelectElement.remove',
+    'nsIDOMHTMLSelectElement.selectedIndex',
+    'nsIDOMHTMLSelectElement.type',
+    'nsIDOMHTMLSelectElement.options',
+    'nsIDOMHTMLSelectElement.size',
+    'nsIDOMHTMLStyleElement.disabled',
+    'nsIDOMHTMLTableCellElement.colSpan',
+    'nsIDOMHTMLTableCellElement.headers',
+    'nsIDOMHTMLTableCellElement.cellIndex',
+    'nsIDOMHTMLTableCellElement.rowSpan',
+    'nsIDOMHTMLTableCellElement.abbr',
+    'nsIDOMHTMLTableCellElement.scope',
+    'nsIDOMHTMLTableCellElement.noWrap',
+    'nsIDOMHTMLTableCellElement.width',
+    'nsIDOMHTMLTableColElement.span',
+    'nsIDOMHTMLTableColElement.width',
+    'nsIDOMHTMLTableElement.rows',
+    'nsIDOMHTMLTableElement.deleteRow',
+    'nsIDOMHTMLTableElement.summary',
+    'nsIDOMHTMLTableElement.insertRow',
+    'nsIDOMHTMLTableRowElement.sectionRowIndex',
+    'nsIDOMHTMLTableRowElement.rowIndex',
+    'nsIDOMHTMLTableRowElement.cells',
+    'nsIDOMHTMLTableRowElement.deleteCell',
+    'nsIDOMHTMLTableRowElement.insertCell',
+    'nsIDOMHTMLTableSectionElement.rows',
+    'nsIDOMHTMLTableSectionElement.insertRow',
+    'nsIDOMHTMLTableSectionElement.deleteRow',
+    'nsIDOMHTMLTextAreaElement.rows',
+    'nsIDOMHTMLTextAreaElement.name',
+    'nsIDOMHTMLTextAreaElement.form',
+    'nsIDOMHTMLTextAreaElement.defaultValue',
+    'nsIDOMHTMLTextAreaElement.cols',
+    'nsIDOMHTMLTextAreaElement.value',
+    'nsIDOMHTMLTextAreaElement.type',
+    'nsIDOMHTMLTextAreaElement.select',
+    'nsIDOMHTMLTitleElement.text',
+    'nsIDOMHTMLCanvasElement.width',
+    'nsIDOMHTMLCanvasElement.height',
+    'nsIDOMHTMLCanvasElement.getContext',
+    # 'nsIDOMHTMLCanvasElement.toDataURL',  # uses GetCurrentNativeCallContext
+    'nsIDOMNSHTMLAnchorElement.text',
+    'nsIDOMNSHTMLDocument.width',
+    'nsIDOMNSHTMLDocument.height',
+    'nsIDOMNSHTMLDocument.domain',
+    'nsIDOMNSHTMLDocument.getSelection',
+    'nsIDOMNSHTMLDocument.designMode',
+    #'nsIDOMNSHTMLDocument.write',  # uses GetCurrentNativeCallContext
+    #'nsIDOMNSHTMLDocument.writeln',  # uses GetCurrentNativeCallContext
+    'nsIDOMNSHTMLElement.contentEditable',
+    'nsIDOMNSHTMLElement.offsetParent',
+    'nsIDOMNSHTMLElement.innerHTML',
+    'nsIDOMNSHTMLElement.offsetLeft',
+    'nsIDOMNSHTMLElement.offsetTop',
+    'nsIDOMNSHTMLElement.offsetHeight',
+    'nsIDOMNSHTMLElement.offsetWidth',
+    'nsIDOMNSHTMLFrameElement.contentWindow',
+    'nsIDOMNSHTMLImageElement.complete',
+    'nsIDOMNSHTMLInputElement.files',
+    'nsIDOMNSHTMLInputElement.textLength',
+    'nsIDOMNSHTMLInputElement.selectionStart',
+    'nsIDOMNSHTMLInputElement.selectionEnd',
+    'nsIDOMNSHTMLInputElement.setSelectionRange',
+    'nsIDOMNSHTMLOptionCollection.selectedIndex',
+    'nsIDOMNSHTMLOptionElement.text',
+    'nsIDOMNSHTMLSelectElement.item',
+    'nsIDOMNSHTMLTextAreaElement.setSelectionRange',
+    'nsIDOMNSHTMLTextAreaElement.selectionStart',
+    'nsIDOMNSHTMLTextAreaElement.selectionEnd',
+    'nsIDOMNSHTMLTextAreaElement.textLength',
+
+    # dom/public/idl/json - None.
+    # All 4 methods of nsIJSON call GetCurrentNativeCallContext.
+
+    # dom/public/idl/offline - None.
+
+    # dom/public/idl/range
+    'nsIDOMRange.collapsed',
+
+    # dom/public/idl/sidebar - None.
+
+    # dom/public/idl/storage
+    'nsIDOMToString.toString',
+    'nsIDOMStorage.setItem',
+    'nsIDOMStorage.length',
+    'nsIDOMStorage.getItem',
+    'nsIDOMStorage.key',
+    'nsIDOMStorage.removeItem',
+    'nsIDOMStorageItem.value',
+    'nsIDOMStorageWindow.sessionStorage',
+    'nsIDOMStorageWindow.globalStorage',
+
+    # dom/public/idl/stylesheets - None.
+
+    # dom/public/idl/traversal
+    'nsIDOMDocumentTraversal.createNodeIterator',
+    'nsIDOMNodeIterator.nextNode',
+
+    # dom/public/idl/views
+    'nsIDOMDocumentView.defaultView',
+    
+    # dom/public/idl/xbl - None.
+
+    # dom/public/idl/xpath
+    'nsIDOMXPathEvaluator.evaluate',
+    'nsIDOMXPathEvaluator.createExpression',
+    'nsIDOMXPathEvaluator.createNSResolver',
+    'nsIDOMXPathExpression.evaluate',
+    'nsIDOMXPathNSResolver.lookupNamespaceURI',
+    'nsIDOMXPathResult.snapshotItem',
+    'nsIDOMXPathResult.iterateNext',
+    'nsIDOMXPathResult.snapshotLength',
+    'nsIDOMXPathResult.resultType',
+    'nsIDOMXPathResult.numberValue',
+    'nsIDOMXPathResult.stringValue',
+    'nsIDOMXPathResult.booleanValue',
+    'nsIDOMXPathResult.singleNodeValue',
+    'nsIDOMNSXPathExpression.evaluateWithContext',
+
+    # dom/public/idl/xul - None.
+    ]
+
+# Most interfaces can be found by searching the includePath; to find
+# nsIDOMEvent, for example, just look for nsIDOMEvent.idl.  But IDL filenames
+# for very long interface names are slightly abbreviated, and many interfaces
+# don't have their own files, just for extra wackiness.  So qsgen.py needs
+# a little help.
+#
+irregularFilenames = {
+    # abbreviations
+    'nsIDOMNSHTMLOptionCollection': 'nsIDOMNSHTMLOptionCollectn',
+    'nsIDOMHTMLTableSectionElement': 'nsIDOMHTMLTableSectionElem',
+    'nsIDOMHTMLTableCaptionElement': 'nsIDOMHTMLTableCaptionElem',
+    'nsIDOMSVGAnimatedEnumeration': 'nsIDOMSVGAnimatedEnum',
+    'nsIDOMSVGAnimatedPreserveAspectRatio': 'nsIDOMSVGAnimPresAspRatio',
+    'nsIDOMSVGAnimatedTransformList': 'nsIDOMSVGAnimTransformList',
+    'nsIDOMSVGForeignObjectElement': 'nsIDOMSVGForeignObjectElem',
+    'nsIDOMSVGPreserveAspectRatio': 'nsIDOMSVGPresAspectRatio',
+    'nsIDOMSVGTextPositioningElement': 'nsIDOMSVGTextPositionElem',
+    'nsIDOMXULLabeledControlElement': 'nsIDOMXULLabeledControlEl',
+    'nsIDOMXULSelectControlElement': 'nsIDOMXULSelectCntrlEl',
+    'nsIDOMXULSelectControlItemElement': 'nsIDOMXULSelectCntrlItemEl',
+    'nsIDOMXULMultiSelectControlElement': 'nsIDOMXULMultSelectCntrlEl',
+
+    # stowaways
+    'nsIXPointerResult': 'nsIXPointer',
+    'nsIDOMCanvasGradient': 'nsIDOMCanvasRenderingContext2D',
+    'nsIDOMCanvasPattern': 'nsIDOMCanvasRenderingContext2D',
+    'nsIDOMTextMetrics': 'nsIDOMCanvasRenderingContext2D',
+    'nsIGeolocationUpdate': 'nsIGeolocationProvider',
+    'nsIDOMNSCSS2Properties': 'nsIDOMCSS2Properties',
+    'nsIDOMSVGPathSegArcRel': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegMovetoAbs': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegLinetoAbs': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegArcAbs': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegMovetoRel': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegCurvetoCubicRel': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegLinetoRel': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegLinetoVerticalAbs': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegCurvetoQuadraticRel': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegLinetoVerticalRel': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegLinetoHorizontalAbs': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegCurvetoQuadraticAbs': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegCurvetoQuadraticSmoothAbs': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegCurvetoCubicSmoothRel': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegClosePath': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegLinetoHorizontalRel': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegCurvetoCubicSmoothAbs': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegCurvetoQuadraticSmoothRel': 'nsIDOMSVGPathSeg',
+    'nsIDOMSVGPathSegCurvetoCubicAbs': 'nsIDOMSVGPathSeg',
+
+    # mistakes
+    'nsIDOMXULTextBoxElement': 'nsIDOMXULTextboxElement',
+    'nsIDOMDOMConstructor': 'nsIDOMConstructor'
+    }
--- a/js/src/xpconnect/src/nsXPConnect.cpp
+++ b/js/src/xpconnect/src/nsXPConnect.cpp
@@ -46,16 +46,17 @@
 #include "XPCNativeWrapper.h"
 #include "nsBaseHashtable.h"
 #include "nsHashKeys.h"
 #include "jsatom.h"
 #include "jsfun.h"
 #include "jsobj.h"
 #include "jsscript.h"
 #include "nsThreadUtilsInternal.h"
+#include "dom_quickstubs.h"
 
 NS_IMPL_THREADSAFE_ISUPPORTS3(nsXPConnect,
                               nsIXPConnect,
                               nsISupportsWeakReference,
                               nsIThreadObserver)
 
 nsXPConnect* nsXPConnect::gSelf = nsnull;
 JSBool       nsXPConnect::gOnceAliveNowDead = JS_FALSE;
@@ -2057,18 +2058,18 @@ nsXPConnect::ReleaseJSContext(JSContext 
                 ccx = cur;
                 // Keep looping to find the deepest matching call context.
             }
         }
     
         if(ccx)
         {
 #ifdef DEBUG_xpc_hacker
-            printf("!xpc - deferring destruction of JSContext @ %0x\n", 
-                   aJSContext);
+            printf("!xpc - deferring destruction of JSContext @ %p\n", 
+                   (void *)aJSContext);
 #endif
             ccx->SetDestroyJSContextInDestructor(JS_TRUE);
             JS_ClearNewbornRoots(aJSContext);
             return NS_OK;
         }
         // else continue on and synchronously destroy the JSContext ...
 
         NS_ASSERTION(!tls->GetJSContextStack() || 
@@ -2321,16 +2322,29 @@ nsXPConnect::SetReportAllJSExceptions(PR
 {
     // Ignore if the environment variable was set.
     if (gReportAllJSExceptions != 1)
         gReportAllJSExceptions = newval ? 2 : 0;
 
     return NS_OK;
 }
 
+/* [noscript, notxpcom] PRBool defineDOMQuickStubs (in JSContextPtr cx, in JSObjectPtr proto, in PRUint32 flags, in PRUint32 interfaceCount, [array, size_is (interfaceCount)] in nsIIDPtr interfaceArray); */
+NS_IMETHODIMP_(PRBool)
+nsXPConnect::DefineDOMQuickStubs(JSContext * cx,
+                                 JSObject * proto,
+                                 PRUint32 flags,
+                                 PRUint32 interfaceCount,
+                                 const nsIID * *interfaceArray)
+{
+    return DOM_DefineQuickStubs(cx, proto, flags,
+                                interfaceCount, interfaceArray);
+}
+
+
 #ifdef DEBUG
 /* These are here to be callable from a debugger */
 JS_BEGIN_EXTERN_C
 void DumpJSStack()
 {
     nsresult rv;
     nsCOMPtr<nsIXPConnect> xpc(do_GetService(nsIXPConnect::GetCID(), &rv));
     if(NS_SUCCEEDED(rv) && xpc)
new file mode 100644
--- /dev/null
+++ b/js/src/xpconnect/src/qsgen.py
@@ -0,0 +1,1026 @@
+#!/usr/bin/env python
+# qsgen.py - Generate XPConnect quick stubs.
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+#   Mozilla Foundation.
+# Portions created by the Initial Developer are Copyright (C) 2008
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Jason Orendorff <jorendorff@mozilla.com>
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+# =About quick stubs=
+# qsgen.py generates "quick stubs", custom SpiderMonkey getters, setters, and
+# methods for specified XPCOM interface members.  These quick stubs serve at
+# runtime as replacements for the XPConnect functions XPC_WN_GetterSetter and
+# XPC_WN_CallMethod, which are the extremely generic (and slow) SpiderMonkey
+# getter/setter/methods otherwise used for all XPCOM member accesses from JS.
+#
+# There are two ways quick stubs win:
+#   1. Pure, transparent optimization by partial evaluation.
+#   2. Cutting corners.
+#
+# == Partial evaluation ==
+# Partial evaluation is when you execute part of a program early (before or at
+# compile time) so that you don't have to execute it at run time.  In this
+# case, everything that involves interpreting xptcall data (for example, the
+# big methodInfo loops in XPCWrappedNative::CallMethod and the switch statement
+# in XPCConvert::JSData2Native) might as well happen at build time, since all
+# the type information for any given member is already known.  That's what this
+# script does.  It gets the information from IDL instead of XPT files.  Apart
+# from that, the code in this script is very similar to what you'll find in
+# XPConnect itself.  The advantage is that it runs once, at build time, not in
+# tight loops at run time.
+#
+# == Cutting corners ==
+# The XPConnect versions have to be slow because they do tons of work that's
+# only necessary in a few cases.  The quick stubs skip a lot of that work.  So
+# quick stubs necessarily differ from XPConnect in potentially observable ways.
+# For many specific interface members, the differences are not observable from
+# scripts or don't matter enough to worry about; but you do have to be careful
+# which members you decide to generate quick stubs for.
+#
+# The complete list of known differences, as of this writing, after an
+# assiduous search:
+#
+# - Quick stubs are currently always enumerable; XPConnect properties are
+#   non-enumerable if the object being wrapped has a scriptable helper with the
+#   DONT_ENUM_STATIC_PROPS flag (in nsIXPCScriptable.scriptableFlags).  (I
+#   think this is a bug that I'll have to fix.)
+#
+# - Quick stub getters and setters are JSPropertyOps-- that is, they do not use
+#   JSPROP_GETTER or JSPROP_SETTER.  This means __lookupGetter__ does not work
+#   on them.  This change is visible to scripts.
+#
+# - Quick stub methods are JSFastNative, which means that when a quick stub
+#   method is called, no JS stack frame is created.  This doesn't affect
+#   Mozilla security checks because they look for scripted JSStackFrames, not
+#   native ones.
+#
+#   It does affect the 'stack' property of JavaScript exceptions, though: the
+#   stubbed member will not appear.  (Note that if the stubbed member itself
+#   fails, the member name will appear in the 'message' property.)
+#
+# - Many quick stubs don't create an XPCCallContext.  In those cases, no entry
+#   is added to the XPCCallContext stack.  So native implementations of
+#   quick-stubbed methods must avoid nsXPConnect::GetCurrentNativeCallContext.
+#
+#   (Even when a quick stub does have an XPCCallContext, it never pushes it all
+#   the way to READY_TO_CALL state, so a lot of its members are garbage.  But
+#   this doesn't endanger native implementations of non-quick-stubbed methods
+#   that use GetCurrentNativeCallContext and are called indirectly from
+#   quick-stubbed methods, because only the current top XPCCallContext is
+#   exposed--nsAXPCNativeCallContext does not expose
+#   XPCCallContext::GetPrevCallContext.)
+#
+# - There are a few differences in how the "this" JSObject is unwrapped.
+#   Ordinarily, XPConnect searches the prototype chain of the "this" JSObject
+#   for an XPCOM object of the desired "proto".  For details, see the parts of
+#   XPCWrappedNative::GetWrappedNativeOfJSObject that use "proto".  Some quick
+#   stubs (methods, not getters or setters, that have XPCCallContexts) do this,
+#   but most instead look for an XPCOM object that supports the desired
+#   *interface*.  This is more lenient.  The difference is observable in some
+#   cases where a getter/setter/method is taken from one object and applied to
+#   another object.
+#
+#   Another notable difference in this area: Quick stubs don't support split
+#   objects.
+#
+# - Quick stubs never suspend the JS request.  So they are only suitable for
+#   main-thread-only interfaces.
+#
+# - Quick stubs don't call XPCContext::SetLastResult.  This is visible on the
+#   Components object.
+#
+# - Quick stubs skip a security check that XPConnect does in
+#   XPCWrappedNative::CallMethod.  This means the security manager doesn't have
+#   an opportunity to veto accesses to members for which quick stubs exist.
+#
+# - There are many features of IDL that XPConnect supports but qsgen does not,
+#   including dependent types, arrays, and out parameters.
+
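+# == A sketch of the output ==
+# For orientation only--the member and helper details here are illustrative,
+# not the exact generated text--an attribute getter comes out as an ordinary
+# JSPropertyOp along these lines:
+#
+#     static JS_DLL_CALLBACK JSBool
+#     nsIDOMNode_GetNodeName(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+#     {
+#         nsIDOMNode *self;
+#         /* ... unwrap |obj| to |self|, returning JS_FALSE on failure ... */
+#         nsString result;
+#         nsresult rv = self->GetNodeName(result);  // direct virtual call
+#         NS_RELEASE(self);
+#         if (NS_FAILED(rv))
+#             return xpc_qsThrow(cx, rv);           // report the failure code
+#         return xpc_qsStringToJsval(cx, result, vp);
+#     }
+#
+# The type-specific pieces (argument unboxing, result declaration, result
+# conversion) are all chosen at build time from the tables further down in
+# this file.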
+
+import xpidl
+import header
+import os, re
+import sys
+import sets
+
+# === Preliminaries
+
+# --makedepend-output support.
+make_dependencies = []
+make_targets = []
+
+def warn(msg):
+    sys.stderr.write(msg + '\n')
+
+def unaliasType(t):
+    while t.kind == 'typedef':
+        t = t.realtype
+    assert t is not None
+    return t
+
+def isVoidType(type):
+    """ Return True if the given xpidl type is void. """
+    return type.kind == 'builtin' and type.name == 'void'
+
+def isInterfaceType(t):
+    t = unaliasType(t)
+    assert t.kind in ('builtin', 'native', 'interface', 'forward')
+    return t.kind in ('interface', 'forward')
+
+def isSpecificInterfaceType(t, name):
+    """ True if `t` is an interface type with the given name, or a forward
+    declaration or typedef aliasing it.
+
+    `name` must not be the name of a typedef but the actual name of the
+    interface.
+    """
+    t = unaliasType(t)
+    return t.kind in ('interface', 'forward') and t.name == name
+
+def getBuiltinOrNativeTypeName(t):
+    t = unaliasType(t)
+    if t.kind == 'builtin':
+        return t.name
+    elif t.kind == 'native':
+        assert t.specialtype is not None
+        return '[%s]' % t.specialtype
+    else:
+        return None
+
+
+# === Reading the file
+
+class UserError(Exception):
+    pass
+
+def findIDL(includePath, irregularFilenames, interfaceName):
+    filename = irregularFilenames.get(interfaceName, interfaceName) + '.idl'
+    for d in includePath:
+        path = os.path.join(d, filename)
+        if os.path.exists(path):
+            return path
+    raise UserError("No IDL file found for interface %s "
+                    "in include path %r"
+                    % (interfaceName, includePath))
+
+def loadIDL(parser, includePath, filename):
+    make_dependencies.append(filename)
+    text = open(filename, 'r').read()
+    idl = parser.parse(text, filename=filename)
+    idl.resolve(includePath, parser)
+    return idl
+
+def addStubMember(memberId, member):
+    # Check that the member is ok.
+    if member.kind not in ('method', 'attribute'):
+        raise UserError("Member %s is %r, not a method or attribute."
+                        % (memberId, member.kind))
+    if member.noscript:
+        raise UserError("%s %s is noscript."
+                        % (member.kind.capitalize(), memberId))
+    if member.notxpcom:
+        raise UserError(
+            "%s %s: notxpcom methods are not supported."
+            % (member.kind.capitalize(), memberId))
+
+    if (member.kind == 'attribute'
+          and not member.readonly
+          and isSpecificInterfaceType(member.realtype, 'nsIVariant')):
+        raise UserError(
+            "Attribute %s: Non-readonly attributes of type nsIVariant "
+            "are not supported."
+            % memberId)
+
+    # Check for unknown properties.
+    for attrname, value in vars(member).items():
+        if value is True and attrname not in ('readonly',):
+            raise UserError("%s %s: unrecognized property %r"
+                            % (member.kind.capitalize(), memberId,
+                               attrname))
+    if member.kind == 'method':
+        for param in member.params:
+            for attrname, value in vars(param).items():
+                if value is True and attrname not in ('optional',):
+                    raise UserError("Method %s, parameter %s: "
+                                    "unrecognized property %r"
+                                    % (memberId, param.name, attrname))
+
+    # Add this member to the list.
+    member.iface.stubMembers.append(member)
+
+def parseMemberId(memberId):
+    """ Split the geven member id into its parts. """
+    pieces = memberId.split('.')
+    if len(pieces) < 2:
+        raise UserError("Member %r: Missing dot." % memberId)
+    if len(pieces) > 2:
+        raise UserError("Member %r: Dots out of control." % memberId)
+    return tuple(pieces)
+
+class Configuration:
+    def __init__(self, filename, includePath):
+        self.includePath = includePath
+        config = {}
+        execfile(filename, config)
+        # required settings
+        for name in ('name', 'members'):
+            if name not in config:
+                raise UserError(filename + ": `%s` was not defined." % name)
+            setattr(self, name, config[name])
+        # optional settings
+        self.irregularFilenames = config.get('irregularFilenames', {})
+
+def readConfigFile(filename, includePath, cachedir):
+    # Read the config file.
+    conf = Configuration(filename, includePath)
+
+    # Now read IDL files to connect the information in the config file to
+    # actual XPCOM interfaces, methods, and attributes.
+    interfaces = []
+    interfacesByName = {}
+    parser = xpidl.IDLParser(cachedir)
+
+    def getInterface(interfaceName, errorLoc):
+        iface = interfacesByName.get(interfaceName)
+        if iface is None:
+            idlFile = findIDL(conf.includePath, conf.irregularFilenames,
+                              interfaceName)
+            idl = loadIDL(parser, conf.includePath, idlFile)
+            if not idl.hasName(interfaceName):
+                raise UserError("The interface %s was not found "
+                                "in the idl file %r."
+                                % (interfaceName, idlFile))
+            iface = idl.getName(interfaceName, errorLoc)
+            iface.stubMembers = []
+            interfaces.append(iface)
+            interfacesByName[interfaceName] = iface
+        return iface
+
+    for memberId in conf.members:
+        interfaceName, memberName = parseMemberId(memberId)
+        iface = getInterface(interfaceName, errorLoc='looking for %r' % memberId)
+
+        if not iface.attributes.scriptable:
+            raise UserError("Interface %s is not scriptable. "
+                            "IDL file: %r."
+                            % (interfaceName, iface.idl.parser.lexer.filename))
+
+        if memberName == '*':
+            # Stub all scriptable members of this interface.
+            for member in iface.members:
+                if member.kind in ('method', 'attribute') and not member.noscript:
+                    addStubMember(iface.name + '.' + member.name, member)
+        else:
+            # Look up a member by name.
+            if memberName not in iface.namemap:
+                idlFile = iface.idl.parser.lexer.filename
+                raise UserError("Interface %s has no member %r. "
+                                "(See IDL file %r.)"
+                                % (interfaceName, memberName, idlFile))
+            member = iface.namemap.get(memberName, None)
+            if member in iface.stubMembers:
+                raise UserError("Member %s is specified more than once."
+                                % memberId)
+            addStubMember(memberId, member)
+
+    return conf, interfaces
+
+
+# === Generating the header file
+
+def writeHeaderFile(filename, name):
+    print "Creating header file", filename
+    make_targets.append(filename)
+
+    headerMacro = '__gen_%s__' % filename.replace('.', '_')
+    f = open(filename, 'w')
+    try:
+        f.write("/* THIS FILE IS AUTOGENERATED - DO NOT EDIT */\n"
+                "#ifndef " + headerMacro + "\n"
+                "#define " + headerMacro + "\n"
+                "JSBool " + name + "_DefineQuickStubs("
+                "JSContext *cx, JSObject *proto, uintN flags, "
+                "PRUint32 count, const nsID **iids);\n"
+                "#endif\n")
+    finally:
+        f.close()
+
+# === Generating the source file
+
+def substitute(template, vals):
+    """ Simple replacement for string.Template, which isn't in Python 2.3. """
+    def replacement(match):
+        return vals[match.group(1)]
+    return re.sub(r'\${(\w+)}', replacement, template)
+
+# From JSData2Native.
+argumentUnboxingTemplates = {
+    'short':
+        "    int32 ${name}_i32;\n"
+        "    if (!JS_ValueToECMAInt32(cx, ${argVal}, &${name}_i32)) ${failBlock}\n"
+        "    int16 ${name} = (int16) ${name}_i32;\n",
+
+    'unsigned short':
+        "    uint32 ${name}_u32;\n"
+        "    if (!JS_ValueToECMAUint32(cx, ${argVal}, &${name}_u32)) ${failBlock}\n"
+        "    uint16 ${name} = (uint16) ${name}_u32;\n",
+
+    'long':
+        "    int32 ${name};\n"
+        "    if (!JS_ValueToECMAInt32(cx, ${argVal}, &${name})) ${failBlock}\n",
+
+    'unsigned long':
+        "    uint32 ${name};\n"
+        "    if (!JS_ValueToECMAUint32(cx, ${argVal}, &${name})) ${failBlock}\n",
+
+    'float':
+        "    jsdouble ${name}_dbl;\n"
+        "    if (!JS_ValueToNumber(cx, ${argVal}, &${name}_dbl)) ${failBlock}\n"
+        "    float ${name} = (float) ${name}_dbl;\n",
+
+    'double':
+        "    jsdouble ${name};\n"
+        "    if (!JS_ValueToNumber(cx, ${argVal}, &${name})) ${failBlock}\n",
+
+    'boolean':
+        "    PRBool ${name};\n"
+        "    if (!JS_ValueToBoolean(cx, ${argVal}, &${name})) ${failBlock}\n",
+
+    '[astring]':
+        "    xpc_qsAString ${name}(cx, ${argPtr});\n"
+        "    if (!${name}.IsValid()) ${failBlock}\n",
+
+    '[domstring]':
+        "    xpc_qsDOMString ${name}(cx, ${argPtr});\n"
+        "    if (!${name}.IsValid()) ${failBlock}\n",
+
+    'string':
+        "    char *${name};\n"
+        "    if (!xpc_qsJsvalToCharStr(cx, ${argPtr}, &${name})) ${failBlock}\n",
+
+    'wstring':
+        "    PRUnichar *${name};\n"
+        "    if (!xpc_qsJsvalToWcharStr(cx, ${argPtr}, &${name})) ${failBlock}\n",
+
+    '[cstring]':
+        "    xpc_qsACString ${name}(cx, ${argPtr});\n"
+        "    if (!${name}.IsValid()) ${failBlock}\n"
+    }
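+
+# For example, with name='arg0' and argVal='argv[0]', the 'long' template
+# above expands (via substitute(), defined earlier) to:
+#
+#     int32 arg0;
+#     if (!JS_ValueToECMAInt32(cx, argv[0], &arg0)) {
+#         NS_RELEASE(self);
+#         return JS_FALSE;
+#     }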
+
+# From JSData2Native.
+#
+# Omitted optional arguments are treated as though the caller had passed JS
+# `null`; this behavior is from XPCWrappedNative::CallMethod.
+#
+def writeArgumentUnboxing(f, i, name, type, haveCcx, optional):
+    # f - file to write to
+    # i - int or None - Indicates the source jsval.  If i is an int, the source
+    #     jsval is argv[i]; otherwise it is *vp.  But if Python i >= C++ argc,
+    #     which can only happen if optional is True, the argument is missing;
+    #     use JSVAL_NULL as the source jsval instead.
+    # name - str - name of the native C++ variable to create.
+    # type - xpidl.{Interface,Native,Builtin} - IDL type of argument
+    # optional - bool - True if the parameter is optional.
+
+    isSetter = (i is None)
+
+    # Spell this out each time rather than use a goto.  The most common methods
+    # only have one parameter, and in that case the goto looks silly.
+    fail = ("        NS_RELEASE(self);\n"
+            "        return JS_FALSE;\n")
+
+    if isSetter:
+        argPtr = "vp"
+        argVal = "*vp"
+    elif optional:
+        argPtr = '!  /* TODO - optional parameter of this type not supported */'
+        argVal = "(%d < argc ? argv[%d] : JSVAL_NULL)" % (i, i)
+    else:
+        argVal = "argv[%d]" % i
+        argPtr = "&" + argVal
+
+    params = {
+        'name': name,
+        'argVal': argVal,
+        'argPtr': argPtr,
+        'failBlock': '{\n' + fail + '    }'
+        }
+
+    typeName = getBuiltinOrNativeTypeName(type)
+    if typeName is not None:
+        template = argumentUnboxingTemplates.get(typeName)
+        if template is not None:
+            if optional and ("${argPtr}" in template):
+                warn("Optional parameters of type %s are not supported."
+                     % type.name)
+            f.write(substitute(template, params))
+            return
+        # else fall through; the type isn't supported yet.
+    elif isInterfaceType(type):
+        if type.name == 'nsIVariant':
+            # Totally custom.
+            assert haveCcx
+            template = (
+                "    nsCOMPtr<nsIVariant> ${name}(already_AddRefed<nsIVariant>("
+                "XPCVariant::newVariant(ccx, ${argVal})));\n"
+                "    if (!${name}) ${failBlock}\n")
+            f.write(substitute(template, params))
+            return
+        elif type.name == 'nsIAtom':
+            # Should have special atomizing behavior.  Fall through.
+            pass
+        else:
+            f.write("    nsCOMPtr<%s> %s;\n" % (type.name, name))
+            f.write("    rv = xpc_qsUnwrapArg<%s>("
+                    "cx, %s, getter_AddRefs(%s));\n"
+                    % (type.name, argVal, name))
+            f.write("    if (NS_FAILED(rv)) {\n")
+            if isSetter:
+                f.write("        xpc_qsThrowBadSetterValue("
+                        "cx, rv, wrapper, id);\n")
+            elif haveCcx:
+                f.write("        xpc_qsThrowBadArgWithCcx(ccx, rv, %d);\n" % i)
+            else:
+                f.write("        xpc_qsThrowBadArg(cx, rv, wrapper, vp, %d);\n"
+                        % i)
+            f.write(fail);
+            f.write("    }\n")
+            return
+
+    warn("Unable to unbox argument of type %s" % type.name)
+    if i is None:
+        src = '*vp'
+    else:
+        src = 'argv[%d]' % i
+    f.write("    !; // TODO - Unbox argument %s = %s\n" % (name, src))
+
+def writeResultDecl(f, type):
+    if isVoidType(type):
+        return  # nothing to declare
+    
+    t = unaliasType(type)
+    if t.kind == 'builtin':
+        if not t.nativename.endswith('*'):
+            if type.kind == 'typedef':
+                typeName = type.name  # use it
+            else:
+                typeName = t.nativename
+            f.write("    %s result;\n" % typeName)
+            return
+    elif t.kind == 'native':
+        name = getBuiltinOrNativeTypeName(t)
+        if name in ('[domstring]', '[astring]'):
+            f.write("    nsString result;\n")
+            return
+    elif t.kind in ('interface', 'forward'):
+        f.write("    nsCOMPtr<%s> result;\n" % type.name)
+        return
+
+    warn("Unable to declare result of type %s" % type.name)
+    f.write("    !; // TODO - Declare out parameter `result`.\n")
+
+def outParamForm(name, type):
+    type = unaliasType(type)
+    if type.kind == 'builtin':
+        return '&' + name
+    elif type.kind == 'native':
+        if type.modifier == 'ref':
+            return name
+        else:
+            return '&' + name
+    else:
+        return 'getter_AddRefs(%s)' % name
+
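+# For illustration, outParamForm maps a builtin result (e.g. PRInt32) to
+# '&result', a native type with the 'ref' modifier (e.g. a string out param)
+# to 'result', and an interface type to 'getter_AddRefs(result)'.
+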
+# From NativeData2JS.
+resultConvTemplates = {
+    'void':
+            "    ${jsvalRef} = JSVAL_VOID;\n"
+            "    return JS_TRUE;\n",
+
+    'short':
+        "    ${jsvalRef} = INT_TO_JSVAL((int32) result);\n"
+        "    return JS_TRUE;\n",
+
+    'long':
+        "    return xpc_qsInt32ToJsval(cx, result, ${jsvalPtr});\n",
+
+    'long long':
+        "    return xpc_qsInt64ToJsval(cx, result, ${jsvalPtr};\n",
+
+    'unsigned short':
+        "    ${jsvalRef} = INT_TO_JSVAL((int32) result);\n"
+        "    return JS_TRUE;\n",
+
+    'unsigned long':
+        "    return xpc_qsUint32ToJsval(cx, result, ${jsvalPtr});\n",
+
+    'unsigned long long':
+        "    return xpc_qsUint64ToJsval(cx, result, ${jsvalPtr});\n",
+
+    'float':
+        "    return JS_NewNumberValue(cx, result, ${jsvalPtr});\n",
+
+    'double':
+        "    return JS_NewNumberValue(cx, result, ${jsvalPtr});\n",
+
+    'boolean':
+        "    ${jsvalRef} = (result ? JSVAL_TRUE : JSVAL_FALSE);\n"
+        "    return JS_TRUE;\n",
+
+    '[astring]':
+        "    return xpc_qsStringToJsval(cx, result, ${jsvalPtr});\n",
+
+    '[domstring]':
+        "    return xpc_qsStringToJsval(cx, result, ${jsvalPtr});\n"
+    }
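+# For illustration: writeQuickStub passes jsvalPtr='vp' and jsvalRef='*vp', so
+# the 'unsigned long' template above expands to
+#
+#     return xpc_qsUint32ToJsval(cx, result, vp);
+#
+# and the 'boolean' template expands to
+#
+#     *vp = (result ? JSVAL_TRUE : JSVAL_FALSE);
+#     return JS_TRUE;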
+
+def isVariantType(t):
+    return isSpecificInterfaceType(t, 'nsIVariant')
+
+def writeResultConv(f, type, paramNum, jsvalPtr, jsvalRef):
+    """ Emit code to convert the C++ variable `result` to a jsval.
+
+    The emitted code contains a return statement; it returns JS_TRUE on
+    success, JS_FALSE on error.
+    """
+    # From NativeData2JS.
+    typeName = getBuiltinOrNativeTypeName(type)
+    if typeName is not None:
+        template = resultConvTemplates.get(typeName)
+        if template is not None:
+            values = {'jsvalRef': jsvalRef,
+                      'jsvalPtr': jsvalPtr}
+            f.write(substitute(template, values))
+            return
+        # else fall through; this type isn't supported yet
+    elif isInterfaceType(type):
+        if isVariantType(type):
+            f.write("    return xpc_qsVariantToJsval(ccx, result, %d, %s);\n"
+                    % (paramNum, jsvalPtr))
+            return
+        else:
+            f.write("    return xpc_qsXPCOMObjectToJsval(ccx, result, "
+                    "NS_GET_IID(%s), %s);\n" % (type.name, jsvalPtr))
+            return
+
+    warn("Unable to convert result of type %s" % type.name)
+    f.write("    !; // TODO - Convert `result` to jsval, store in `%s`.\n"
+            % jsvalRef)
+    f.write("    return xpc_qsThrow(cx, NS_ERROR_UNEXPECTED); // FIXME\n")
+
+def anyParamRequiresCcx(member):
+    for p in member.params:
+        if isVariantType(p.realtype):
+            return True
+    return False
+
+def writeQuickStub(f, member, stubName, isSetter=False):
+    """ Write a single quick stub (a custom SpiderMonkey getter/setter/method)
+    for the specified XPCOM interface member.
+    """
+    isAttr = (member.kind == 'attribute')
+    isMethod = (member.kind == 'method')
+    assert isAttr or isMethod
+    isGetter = isAttr and not isSetter
+
+    # Function prolog.
+    f.write("static JS_DLL_CALLBACK JSBool\n")
+    if isAttr:
+        # JSPropertyOp signature.
+        f.write(stubName + "(JSContext *cx, JSObject *obj, jsval id, "
+                "jsval *vp)\n")
+    else:
+        # JSFastNative.
+        f.write(stubName + "(JSContext *cx, uintN argc, jsval *vp)\n")
+    f.write("{\n")
+    f.write("    XPC_QS_ASSERT_CONTEXT_OK(cx);\n")
+
+    # For methods, compute "this".
+    if isMethod:
+        f.write("    JSObject *obj = JS_THIS_OBJECT(cx, vp);\n"
+                "    if (!obj)\n"
+                "        return JS_FALSE;\n")
+
+    # Create ccx if needed.
+    haveCcx = isMethod and (isInterfaceType(member.realtype)
+                            or anyParamRequiresCcx(member))
+    if haveCcx:
+            f.write("    XPCCallContext ccx(JS_CALLER, cx, obj, "
+                    "JSVAL_TO_OBJECT(JS_CALLEE(cx, vp)));\n")
+    else:
+        # In some cases we emit a ccx, but it does not count as
+        # "haveCcx" because it's not complete.
+        if isAttr and isInterfaceType(member.realtype):
+            f.write("    XPCCallContext ccx(JS_CALLER, cx, obj);\n")
+
+    # Get the 'self' pointer.
+    thisType = member.iface.name
+    f.write("    %s *self;\n" % thisType)
+    # Don't use FromCcx for getters or setters; the way we construct the ccx in
+    # a getter/setter causes it to find the wrong wrapper in some cases.
+    if isMethod and haveCcx:
+        f.write("    if (!xpc_qsUnwrapThisFromCcx(ccx, &self))\n"
+                "        return JS_FALSE;\n")
+    else:
+        # 'wrapper' is needed only for error messages.
+        f.write("    XPCWrappedNative *wrapper;\n"
+                "    if (!xpc_qsUnwrapThis(cx, obj, &self, &wrapper))\n"
+                "        return JS_FALSE;\n")
+
+    if isMethod:
+        # If there are any required arguments, check argc.
+        requiredArgs = len(member.params)
+        while requiredArgs and member.params[requiredArgs-1].optional:
+            requiredArgs -= 1
+        if requiredArgs:
+            f.write("    if (argc < %d) {\n" % requiredArgs)
+            f.write("        NS_RELEASE(self);\n"
+                    "        return xpc_qsThrow(cx, "
+                    "NS_ERROR_XPC_NOT_ENOUGH_ARGS);\n"
+                    "    }\n")
+
+    def pfail(msg):
+        raise UserError(
+            member.iface.name + '.' + member.name + ": "
+            "parameter " + param.name + ": " + msg)
+
+    # Convert in-parameters.
+    f.write("    nsresult rv;\n")
+    if isMethod:
+        if len(member.params) > 0:
+            f.write("    jsval *argv = JS_ARGV(cx, vp);\n")
+        for i, param in enumerate(member.params):
+            if param.iid_is is not None:
+                pfail("iid_is parameters are not supported.")
+            if param.size_is is not None:
+                pfail("size_is parameters are not supported.")
+            if param.retval:
+                pfail("Unexpected retval parameter!")
+            if param.paramtype in ('out', 'inout'):
+                pfail("Out parameters are not supported.")
+            if param.const or param.array or param.shared:
+                pfail("I am a simple caveman.")
+            # Emit code to convert this argument from jsval.
+            writeArgumentUnboxing(
+                f, i, 'arg%d' % i, param.realtype,
+                haveCcx=haveCcx,
+                optional=param.optional)
+    elif isSetter:
+        writeArgumentUnboxing(f, None, 'arg0', member.realtype,
+                              haveCcx=False, optional=False)
+
+    # Prepare out-parameter.
+    if isMethod or isGetter:
+        writeResultDecl(f, member.realtype)
+
+    # Call the method.
+    if isMethod:
+        comName = header.methodNativeName(member)
+        argv = ['arg' + str(i) for i, p in enumerate(member.params)]
+        if not isVoidType(member.realtype):
+            argv.append(outParamForm('result', member.realtype))
+        args = ', '.join(argv)
+    else:
+        comName = header.attributeNativeName(member, isGetter)
+        if isGetter:
+            args = outParamForm("result", member.realtype)
+        else:
+            args = "arg0"
+    f.write("    rv = self->%s(%s);\n" % (comName, args))
+    f.write("    NS_RELEASE(self);\n")
+
+    # Check for errors.
+    f.write("    if (NS_FAILED(rv))\n")
+    if isMethod:
+        if haveCcx:
+            f.write("        return xpc_qsThrowMethodFailedWithCcx(ccx, rv);\n")
+        else:
+            f.write("        return xpc_qsThrowMethodFailed("
+                    "cx, rv, wrapper, vp);\n")
+    else:
+        f.write("        return xpc_qsThrowGetterSetterFailed("
+                "cx, rv, wrapper, id);\n")
+
+    # Convert the return value.
+    if isMethod:
+        writeResultConv(f, member.realtype, len(member.params) + 1, 'vp', '*vp')
+    elif isGetter:
+        writeResultConv(f, member.realtype, None, 'vp', '*vp')
+    else:
+        f.write("    return JS_TRUE;\n");
+
+    # Epilog.
+    f.write("}\n\n")
+
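+# Putting the pieces together: for a hypothetical [domstring] attribute `bar`
+# on an interface nsIDOMFoo (names made up), the generated getter looks
+# roughly like this:
+#
+#     static JS_DLL_CALLBACK JSBool
+#     nsIDOMFoo_GetBar(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+#     {
+#         XPC_QS_ASSERT_CONTEXT_OK(cx);
+#         nsIDOMFoo *self;
+#         XPCWrappedNative *wrapper;
+#         if (!xpc_qsUnwrapThis(cx, obj, &self, &wrapper))
+#             return JS_FALSE;
+#         nsresult rv;
+#         nsString result;
+#         rv = self->GetBar(result);
+#         NS_RELEASE(self);
+#         if (NS_FAILED(rv))
+#             return xpc_qsThrowGetterSetterFailed(cx, rv, wrapper, id);
+#         return xpc_qsStringToJsval(cx, result, vp);
+#     }
+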
+def writeAttrStubs(f, attr):
+    getterName = (attr.iface.name + '_'
+                  + header.attributeNativeName(attr, True))
+    writeQuickStub(f, attr, getterName)
+    if attr.readonly:
+        setterName = 'xpc_qsReadOnlySetter'
+    else:
+        setterName = (attr.iface.name + '_'
+                      + header.attributeNativeName(attr, False))
+        writeQuickStub(f, attr, setterName, isSetter=True)
+
+    ps = ('{"%s", %s, %s}'
+          % (attr.name, getterName, setterName))
+    return ps
+
+def writeMethodStub(f, method):
+    """ Write a method stub to `f`. Return an xpc_qsFunctionSpec initializer. """
+    stubName = method.iface.name + '_' + header.methodNativeName(method)
+    writeQuickStub(f, method, stubName)
+    fs = '{"%s", %s, %d}' % (method.name, stubName, len(method.params))
+    return fs
+
+def writeStubsForInterface(f, iface):
+    f.write("// === interface %s\n\n" % iface.name)
+    propspecs = []
+    funcspecs = []
+    for member in iface.stubMembers:
+        if member.kind == 'attribute':
+            ps = writeAttrStubs(f, member)
+            propspecs.append(ps)
+        elif member.kind == 'method':
+            fs = writeMethodStub(f, member)
+            funcspecs.append(fs)
+        else:
+            raise TypeError('expected attribute or method, not %r'
+                            % member.__class__.__name__)
+
+    if propspecs:
+        f.write("static const xpc_qsPropertySpec %s_properties[] = {\n"
+                % iface.name)
+        for ps in propspecs:
+            f.write("    %s,\n" % ps)
+        f.write("    {nsnull}};\n")
+    if funcspecs:
+        f.write("static const xpc_qsFunctionSpec %s_functions[] = {\n" % iface.name)
+        for fs in funcspecs:
+            f.write("    %s,\n" % fs)
+        f.write("    {nsnull}};\n")
+    f.write('\n\n')
+
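+# For a hypothetical interface nsIDOMFoo with one attribute "bar" and one
+# one-argument method "doStuff" (names made up), the arrays emitted above
+# come out roughly as:
+#
+#     static const xpc_qsPropertySpec nsIDOMFoo_properties[] = {
+#         {"bar", nsIDOMFoo_GetBar, nsIDOMFoo_SetBar},
+#         {nsnull}};
+#     static const xpc_qsFunctionSpec nsIDOMFoo_functions[] = {
+#         {"doStuff", nsIDOMFoo_DoStuff, 1},
+#         {nsnull}};
+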
+def hashIID(iid):
+    # See nsIDKey::HashCode in nsHashtable.h.
+    return int(iid[:8], 16)
+
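+# For example, an IID string beginning with "deadbeef" hashes to 0xdeadbeef
+# (the IID's m0 field); the runtime lookup, LookupEntry in xpcquickstubs.cpp,
+# computes iid.m0 % tableSize to land in the same bucket.
+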
+uuid_re = re.compile(r'^([0-9a-f]{8})-([0-9a-f]{4})-([0-9a-f]{4})-([0-9a-f]{4})-([0-9a-f]{12})$')
+
+def writeDefiner(f, conf, interfaces):
+    f.write("// === Definer\n\n")
+
+    # generate the static hash table
+    loadFactor = 0.6
+    size = int(len(interfaces) / loadFactor)
+    buckets = [[] for i in range(size)]
+    for iface in interfaces:
+        # This if-statement discards interfaces specified with
+        # "nsInterfaceName.*" that don't have any stub-able members.
+        if iface.stubMembers:
+            h = hashIID(iface.attributes.uuid)
+            buckets[h % size].append(iface)
+
+    # Calculate where each interface's entry will show up in tableData.  Where
+    # there are hash collisions, the extra entries are added at the end of the
+    # table.
+    entryIndexes = {}
+    arraySize = size
+    for i, bucket in enumerate(buckets):
+        if bucket:
+            entryIndexes[bucket[0].attributes.uuid] = i
+            for iface in bucket[1:]:
+                entryIndexes[iface.attributes.uuid] = arraySize
+                arraySize += 1
+
+    entries = ["    {{0, 0, 0, {0, 0, 0, 0, 0, 0, 0, 0}}, "
+               "nsnull, nsnull, XPC_QS_NULL_INDEX, XPC_QS_NULL_INDEX}"
+               for i in range(arraySize)]
+    for i, bucket in enumerate(buckets):
+        for j, iface in enumerate(bucket):
+            # iid field
+            uuid = iface.attributes.uuid.lower()
+            m = uuid_re.match(uuid)
+            assert m is not None
+            m0, m1, m2, m3, m4 = m.groups()
+            m3arr = ('{0x%s, 0x%s, 0x%s, 0x%s, 0x%s, 0x%s, 0x%s, 0x%s}'
+                     % (m3[0:2], m3[2:4], m4[0:2], m4[2:4],
+                        m4[4:6], m4[6:8], m4[8:10], m4[10:12]))
+            iid = ('{0x%s, 0x%s, 0x%s, %s}' % (m0, m1, m2, m3arr))
+
+            # properties field
+            properties = "nsnull"
+            for member in iface.stubMembers:
+                if member.kind == 'attribute':
+                    properties = iface.name + "_properties"
+                    break
+            functions = "nsnull"
+
+            # member field
+            for member in iface.stubMembers:
+                if member.kind == 'method':
+                    functions = iface.name + "_functions"
+                    break
+
+            # parentInterface field
+            baseName = iface.base
+            while baseName is not None:
+                piface = iface.idl.getName(baseName, None)
+                k = entryIndexes.get(piface.attributes.uuid)
+                if k is not None:
+                    parentInterface = str(k)
+                    break
+                baseName = piface.base
+            else:
+                parentInterface = "XPC_QS_NULL_INDEX"
+
+            # chain field
+            if j == len(bucket) - 1:
+                chain = "XPC_QS_NULL_INDEX"
+            else:
+                k = entryIndexes[bucket[j+1].attributes.uuid]
+                chain = str(k)
+
+            # add entry
+            entry = "    {%s, %s, %s, %s, %s}" % (
+                iid, properties, functions, parentInterface, chain)
+            entries[entryIndexes[iface.attributes.uuid]] = entry
+
+    f.write("static const xpc_qsHashEntry tableData[] = {\n")
+    f.write(",\n".join(entries))
+    f.write("\n    };\n\n")
+
+    # the definer function (entry point to this quick stubs file)
+    f.write("JSBool %s_DefineQuickStubs(" % conf.name)
+    f.write("JSContext *cx, JSObject *proto, uintN flags, PRUint32 count, "
+            "const nsID **iids)\n"
+            "{\n")
+    f.write("    return xpc_qsDefineQuickStubs("
+            "cx, proto, flags, count, iids, %d, tableData);\n" % size)
+    f.write("}\n\n\n")
+
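+# For a config named "DOM" (hypothetical), the generated entry point would be:
+#
+#     JSBool DOM_DefineQuickStubs(JSContext *cx, JSObject *proto, uintN flags,
+#                                 PRUint32 count, const nsID **iids)
+#     {
+#         return xpc_qsDefineQuickStubs(cx, proto, flags, count, iids,
+#                                       <table size>, tableData);
+#     }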
+
+stubTopTemplate = '''\
+/* THIS FILE IS AUTOGENERATED - DO NOT EDIT */
+#include "jsapi.h"
+#include "prtypes.h"
+#include "nsID.h"
+#include "%s"
+#include "nscore.h"
+#include "nsCOMPtr.h"
+#include "nsDependentString.h"
+#include "xpcprivate.h"  // for XPCCallContext
+#include "xpcquickstubs.h"
+
+'''
+
+def writeStubFile(filename, headerFilename, conf, interfaces):
+    print "Creating stub file", filename
+    make_targets.append(filename)
+
+    f = open(filename, 'w')
+    filesIncluded = sets.Set()
+
+    def includeType(type):
+        type = unaliasType(type)
+        if type.kind in ('builtin', 'native'):
+            return
+        file = conf.irregularFilenames.get(type.name, type.name) + '.h'
+        if file not in filesIncluded:
+            f.write('#include "%s"\n' % file)
+            filesIncluded.add(file)
+
+    def writeIncludesForMember(member):
+        assert member.kind in ('attribute', 'method')
+        includeType(member.realtype)
+        if member.kind == 'method':
+            for p in member.params:
+                includeType(p.realtype)
+
+    def writeIncludesForInterface(iface):
+        assert iface.kind == 'interface'
+        for member in iface.stubMembers:
+            writeIncludesForMember(member)
+        includeType(iface)
+
+    try:
+        f.write(stubTopTemplate % os.path.basename(headerFilename))
+        for iface in interfaces:
+            writeIncludesForInterface(iface)
+        f.write("\n\n")
+        for iface in interfaces:
+            writeStubsForInterface(f, iface)
+        writeDefiner(f, conf, interfaces)
+    finally:
+        f.close()
+
+def makeQuote(filename):
+    return filename.replace(' ', '\\ ')  # enjoy!
+
+def writeMakeDependOutput(filename):
+    print "Creating makedepend file", filename
+    f = open(filename, 'w')
+    try:
+        if len(make_targets) > 0:
+            f.write("%s: \\\n" % makeQuote(make_targets[0]))
+            for filename in make_dependencies:
+                f.write('\t\t%s \\\n' % makeQuote(filename))
+            f.write('\t\t$(NULL)\n\n')
+            for filename in make_targets[1:]:
+                f.write('%s: %s\n' % (makeQuote(filename), makeQuote(make_targets[0])))
+    finally:
+        f.close()
+
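+# The emitted dependency file has this shape (placeholders, not real paths):
+#
+#     <first target>: \
+#             <dependency> \
+#             <dependency> \
+#             $(NULL)
+#
+#     <other target>: <first target>
+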
+def main():
+    from optparse import OptionParser
+    o = OptionParser(usage="usage: %prog [options] configfile")
+    o.add_option('-o', "--stub-output",
+                 type='string', dest='stub_output', default=None,
+                 help="Quick stub C++ source output file", metavar="FILE")
+    o.add_option('--header-output', type='string', default=None,
+                 help="Quick stub header output file", metavar="FILE")
+    o.add_option('--makedepend-output', type='string', default=None,
+                 help="gnumake dependencies output file", metavar="FILE")
+    o.add_option('--idlpath', type='string', default='.',
+                 help="colon-separated directories to search for idl files",
+                 metavar="PATH")
+    o.add_option('--cachedir', dest='cachedir', default='',
+                 help="Directory in which to cache lex/parse tables.")
+    o.add_option("--verbose-errors", action='store_true', default=False,
+                 help="When an error happens, display the Python traceback.")
+    (options, filenames) = o.parse_args()
+
+    if len(filenames) != 1:
+        o.error("Exactly one config filename is needed.")
+    filename = filenames[0]
+
+    if options.stub_output is None:
+        if filename.endswith('.qsconf') or filename.endswith('.py'):
+            options.stub_output = filename.rsplit('.', 1)[0] + '.cpp'
+        else:
+            options.stub_output = filename + '.cpp'
+    if options.header_output is None:
+        options.header_output = re.sub(r'(\.c|\.cpp)?$', '.h',
+                                       options.stub_output)
+
+    if options.cachedir != '':
+        sys.path.append(options.cachedir)
+        if not os.path.isdir(options.cachedir):
+            os.mkdir(options.cachedir)
+
+    try:
+        includePath = options.idlpath.split(':')
+        conf, interfaces = readConfigFile(filename,
+                                          includePath=includePath,
+                                          cachedir=options.cachedir)
+        writeHeaderFile(options.header_output, conf.name)
+        writeStubFile(options.stub_output, options.header_output,
+                      conf, interfaces)
+        if options.makedepend_output is not None:
+            writeMakeDependOutput(options.makedepend_output)
+    except Exception, exc:
+        if options.verbose_errors:
+            raise
+        elif isinstance(exc, (UserError, xpidl.IDLError)):
+            warn(str(exc))
+        elif isinstance(exc, OSError):
+            warn("%s: %s" % (exc.__class__.__name__, exc))
+        else:
+            raise
+        sys.exit(1)
+
+if __name__ == '__main__':
+    main()
--- a/js/src/xpconnect/src/xpcconvert.cpp
+++ b/js/src/xpconnect/src/xpcconvert.cpp
@@ -142,18 +142,19 @@ XPCConvert::IsMethodReflectable(const XP
                                type.IsPointer(), param.IsOut()))
             return JS_FALSE;
     }
     return JS_TRUE;
 }
 
 /***************************************************************************/
 
-static JSBool
-GetISupportsFromJSObject(JSObject* obj, nsISupports** iface)
+// static
+JSBool
+XPCConvert::GetISupportsFromJSObject(JSObject* obj, nsISupports** iface)
 {
     JSClass* jsclass = STOBJ_GET_CLASS(obj);
     NS_ASSERTION(jsclass, "obj has no class");
     if(jsclass &&
        (jsclass->flags & JSCLASS_HAS_PRIVATE) &&
        (jsclass->flags & JSCLASS_PRIVATE_IS_NSISUPPORTS))
     {
         *iface = (nsISupports*) xpc_GetJSPrivate(obj);
--- a/js/src/xpconnect/src/xpcprivate.h
+++ b/js/src/xpconnect/src/xpcprivate.h
@@ -2716,16 +2716,17 @@ public:
                                                  void** dest, JSObject* src,
                                                  const nsID* iid, 
                                                  nsresult* pErr);
     static JSBool JSObject2NativeInterface(XPCCallContext& ccx,
                                            void** dest, JSObject* src,
                                            const nsID* iid,
                                            nsISupports* aOuter,
                                            nsresult* pErr);
+    static JSBool GetISupportsFromJSObject(JSObject* obj, nsISupports** iface);
 
     /**
      * Convert a native array into a jsval.
      *
      * @param ccx the context for the whole procedure
      * @param d [out] the resulting jsval
      * @param s the native array we're working with
      * @param type the type of objects in the array
@@ -2822,23 +2823,24 @@ public:
 #ifdef XPC_IDISPATCH_SUPPORT
     static void ThrowCOMError(JSContext* cx, unsigned long COMErrorCode, 
                               nsresult rv = NS_ERROR_XPC_COM_ERROR,
                               const EXCEPINFO * exception = nsnull);
 #endif
     static JSBool SetVerbosity(JSBool state)
         {JSBool old = sVerbose; sVerbose = state; return old;}
 
+    static void BuildAndThrowException(JSContext* cx, nsresult rv, const char* sz);
+    static JSBool CheckForPendingException(nsresult result, JSContext *cx);
+
 private:
     static void Verbosify(XPCCallContext& ccx,
                           char** psz, PRBool own);
 
-    static void BuildAndThrowException(JSContext* cx, nsresult rv, const char* sz);
     static JSBool ThrowExceptionObject(JSContext* cx, nsIException* e);
-    static JSBool CheckForPendingException(nsresult result, XPCCallContext &ccx);
 
 private:
     static JSBool sVerbose;
 };
 
 
 /***************************************************************************/
 
@@ -3198,16 +3200,19 @@ public:
         {return ++mWrappedNativeThreadsafetyReportDepth;}
     void      ClearWrappedNativeThreadsafetyReportDepth()
         {mWrappedNativeThreadsafetyReportDepth = 0;}
 #endif
 
     static void ShutDown()
         {sMainJSThread = nsnull; sMainThreadData = nsnull;}
 
+    static PRBool IsMainThread(JSContext *cx)
+        { return cx->thread == sMainJSThread; }
+
 private:
     XPCPerThreadData();
     static XPCPerThreadData* GetDataImpl(JSContext *cx);
 
 private:
     XPCJSContextStack*   mJSContextStack;
     XPCPerThreadData*    mNextThread;
     XPCCallContext*      mCallContext;
@@ -3228,18 +3233,16 @@ private:
     JSUint32             mWrappedNativeThreadsafetyReportDepth;
 #endif
     PRThread*            mThread;
 
     static PRLock*           gLock;
     static XPCPerThreadData* gThreads;
     static PRUintn           gTLSIndex;
 
-    friend class AutoJSSuspendNonMainThreadRequest;
-
     // Cached value of cx->thread on the main thread. 
     static void *sMainJSThread;
 
     // Cached per thread data for the main thread. Only safe to access
     // if cx->thread == sMainJSThread.
     static XPCPerThreadData *sMainThreadData;
 };
 
@@ -3568,17 +3571,17 @@ public:
         if (mCX) {
             JS_ResumeRequest(mCX, mDepth);
             mCX = nsnull;
         }
     }
 
 private:
     void SuspendRequest() {
-        if (mCX && mCX->thread != XPCPerThreadData::sMainJSThread)
+        if (mCX && !XPCPerThreadData::IsMainThread(mCX))
             mDepth = JS_SuspendRequest(mCX);
         else
             mCX = nsnull;
     }
 
     JSContext *mCX;
     jsrefcount mDepth;
 };
new file mode 100644
--- /dev/null
+++ b/js/src/xpconnect/src/xpcquickstubs.cpp
@@ -0,0 +1,799 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is mozilla.org code.
+ *
+ * The Initial Developer of the Original Code is
+ *   Mozilla Foundation
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   Jason Orendorff <jorendorff@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "jsapi.h"
+#include "jsobj.h"
+#include "jsstr.h"
+#include "jscntxt.h"  /* for error messages */
+#include "nsCOMPtr.h"
+#include "xpcprivate.h"
+#include "xpcinlines.h"
+#include "xpcquickstubs.h"
+#include "XPCWrapper.h"
+#include "XPCNativeWrapper.h"
+
+static const xpc_qsHashEntry *
+LookupEntry(PRUint32 tableSize, const xpc_qsHashEntry *table, const nsID &iid)
+{
+    size_t i;
+    const xpc_qsHashEntry *p;
+
+    i = iid.m0 % tableSize;
+    do
+    {
+        p = table + i;
+        if(p->iid.Equals(iid))
+            return p;
+        i = p->chain;
+    } while(i != XPC_QS_NULL_INDEX);
+    return nsnull;
+}
+
+static const xpc_qsHashEntry *
+LookupInterfaceOrAncestor(PRUint32 tableSize, const xpc_qsHashEntry *table,
+                          const nsID &iid)
+{
+    const xpc_qsHashEntry *p = LookupEntry(tableSize, table, iid);
+    if(!p)
+    {
+        /*
+         * On a miss, we have to search for every interface the object
+         * supports, including ancestors.
+         */
+        nsCOMPtr<nsIInterfaceInfo> info;
+        if(NS_FAILED(nsXPConnect::GetXPConnect()->GetInfoForIID(
+                          &iid, getter_AddRefs(info))))
+            return nsnull;
+
+        nsIID *piid;
+        for(;;)
+        {
+            nsCOMPtr<nsIInterfaceInfo> parent;
+            if(NS_FAILED(info->GetParent(getter_AddRefs(parent))) ||
+               !parent ||
+               NS_FAILED(parent->GetInterfaceIID(&piid)))
+            {
+                break;
+            }
+            p = LookupEntry(tableSize, table, *piid);
+            if(p)
+                break;
+            info.swap(parent);
+        }
+    }
+    return p;
+}
+
+JSBool
+xpc_qsDefineQuickStubs(JSContext *cx, JSObject *proto, uintN flags,
+                       PRUint32 ifacec, const nsIID **interfaces,
+                       PRUint32 tableSize, const xpc_qsHashEntry *table)
+{
+    for(uint32 i = 0; i < ifacec; i++)
+    {
+        const nsID &iid = *interfaces[i];
+        const xpc_qsHashEntry *entry =
+            LookupInterfaceOrAncestor(tableSize, table, iid);
+
+        if(entry)
+        {
+            for(;;)
+            {
+                // Define quick stubs for attributes.
+                const xpc_qsPropertySpec *ps = entry->properties;
+                if(ps)
+                {
+                    for(; ps->name; ps++)
+                    {
+                        if(!JS_DefineProperty(cx, proto, ps->name, JSVAL_VOID,
+                                              ps->getter, ps->setter,
+                                              flags | JSPROP_SHARED))
+                            return JS_FALSE;
+                    }
+                }
+
+                // Define quick stubs for methods.
+                const xpc_qsFunctionSpec *fs = entry->functions;
+                if(fs)
+                {
+                    for(; fs->name; fs++)
+                    {
+                        if(!JS_DefineFunction(
+                               cx, proto, fs->name,
+                               reinterpret_cast<JSNative>(fs->native),
+                               fs->arity, flags | JSFUN_FAST_NATIVE))
+                            return JS_FALSE;
+                    }
+                }
+
+                // Next.
+                size_t j = entry->parentInterface;
+                if(j == XPC_QS_NULL_INDEX)
+                    break;
+                entry = table + j;
+            }
+        }
+    }
+    return JS_TRUE;
+}
+
+JSBool
+xpc_qsThrow(JSContext *cx, nsresult rv)
+{
+    XPCThrower::Throw(rv, cx);
+    return JS_FALSE;
+}
+
+/**
+ * Get the interface name and member name (for error messages).
+ *
+ * We could instead have each quick stub pass its name to the error-handling
+ * functions, as that name is statically known.  But that would be redundant;
+ * the information is handy at runtime anyway.  Also, this code often produces
+ * a more specific error message, e.g. "[nsIDOMHTMLDocument.appendChild]"
+ * rather than "[nsIDOMNode.appendChild]".
+ */
+static void
+GetMemberInfo(XPCWrappedNative *wrapper,
+              jsval memberId,
+              const char **ifaceName,
+              const char **memberName)
+{
+    // Get the interface name.  From DefinePropertyIfFound (in
+    // xpcwrappednativejsops.cpp) and XPCThrower::Verbosify.
+    //
+    // We could instead have the quick stub pass in its interface name, but
+    // this code often produces a more specific error message, e.g.
+    // "[nsIDOMHTMLDocument.appendChild]" rather than "[nsIDOMNode.appendChild]".
+    *ifaceName = "Unknown";
+    XPCWrappedNativeProto *proto = wrapper->GetProto();
+    if(proto)
+    {
+        XPCNativeSet *set = proto->GetSet();
+        if(set)
+        {
+            XPCNativeMember *member;
+            XPCNativeInterface *iface;
+
+            if(set->FindMember(memberId, &member, &iface))
+                *ifaceName = iface->GetNameString();
+        }
+    }
+
+    *memberName = (JSVAL_IS_STRING(memberId)
+                   ? JS_GetStringBytes(JSVAL_TO_STRING(memberId))
+                   : "unknown");
+}
+
+static void
+GetMethodInfo(JSContext *cx,
+              XPCWrappedNative *wrapper,
+              jsval *vp,
+              const char **ifaceName,
+              const char **memberName)
+{
+    JSObject *funobj = JSVAL_TO_OBJECT(JS_CALLEE(cx, vp));
+    NS_ASSERTION(JS_ObjectIsFunction(cx, funobj),
+                 "JSFastNative callee should be Function object");
+    JSString *str = JS_GetFunctionId((JSFunction *) JS_GetPrivate(cx, funobj));
+    jsval methodId = str ? STRING_TO_JSVAL(str) : JSVAL_NULL;
+
+    GetMemberInfo(wrapper, methodId, ifaceName, memberName);
+}
+
+static JSBool
+ThrowCallFailed(JSContext *cx, nsresult rv,
+                const char *ifaceName, const char *memberName)
+{
+    // From XPCThrower::ThrowBadResult.
+    char* sz;
+    const char* format;
+    const char* name;
+
+    /*
+     *  If there is a pending exception when the native call returns and
+     *  it has the same error result as returned by the native call, then
+     *  the native call may be passing through an error from a previous JS
+     *  call. So we'll just throw that exception into our JS.
+     */
+    if(XPCThrower::CheckForPendingException(rv, cx))
+        return JS_FALSE;
+
+    // else...
+
+    if(!nsXPCException::NameAndFormatForNSResult(
+            NS_ERROR_XPC_NATIVE_RETURNED_FAILURE, nsnull, &format) ||
+        !format)
+    {
+        format = "";
+    }
+
+    if(nsXPCException::NameAndFormatForNSResult(rv, &name, nsnull)
+        && name)
+    {
+        sz = JS_smprintf("%s 0x%x (%s) [%s.%s]",
+                         format, rv, name, ifaceName, memberName);
+    }
+    else
+    {
+        sz = JS_smprintf("%s 0x%x [%s.%s]",
+                         format, rv, ifaceName, memberName);
+    }
+
+    XPCThrower::BuildAndThrowException(cx, rv, sz);
+
+    if(sz)
+        JS_smprintf_free(sz);
+
+    return JS_FALSE;
+}
+
+JSBool
+xpc_qsThrowGetterSetterFailed(JSContext *cx, nsresult rv,
+                              XPCWrappedNative *wrapper, jsval memberId)
+{
+    const char *ifaceName, *memberName;
+    GetMemberInfo(wrapper, memberId, &ifaceName, &memberName);
+    return ThrowCallFailed(cx, rv, ifaceName, memberName);
+}
+
+JSBool
+xpc_qsThrowMethodFailed(JSContext *cx, nsresult rv,
+                        XPCWrappedNative *wrapper, jsval *vp)
+{
+    const char *ifaceName, *memberName;
+    GetMethodInfo(cx, wrapper, vp, &ifaceName, &memberName);
+    return ThrowCallFailed(cx, rv, ifaceName, memberName);
+}
+
+JSBool
+xpc_qsThrowMethodFailedWithCcx(XPCCallContext &ccx, nsresult rv)
+{
+    ThrowBadResult(rv, ccx);
+    return JS_FALSE;
+}
+
+static void
+ThrowBadArg(JSContext *cx, nsresult rv,
+            const char *ifaceName, const char *memberName, uintN paramnum)
+{
+    // From XPCThrower::ThrowBadParam.
+    char* sz;
+    const char* format;
+
+    if(!nsXPCException::NameAndFormatForNSResult(rv, nsnull, &format))
+        format = "";
+
+    sz = JS_smprintf("%s arg %u [%s.%s]",
+                     format, (unsigned int) paramnum, ifaceName, memberName);
+
+    XPCThrower::BuildAndThrowException(cx, rv, sz);
+
+    if(sz)
+        JS_smprintf_free(sz);
+}
+
+void
+xpc_qsThrowBadArg(JSContext *cx, nsresult rv,
+                  XPCWrappedNative *wrapper, jsval *vp, uintN paramnum)
+{
+    const char *ifaceName, *memberName;
+    GetMethodInfo(cx, wrapper, vp, &ifaceName, &memberName);
+    ThrowBadArg(cx, rv, ifaceName, memberName, paramnum);
+}
+
+void
+xpc_qsThrowBadArgWithCcx(XPCCallContext &ccx, nsresult rv, uintN paramnum)
+{
+    XPCThrower::ThrowBadParam(rv, paramnum, ccx);
+}
+
+void
+xpc_qsThrowBadSetterValue(JSContext *cx, nsresult rv,
+                          XPCWrappedNative *wrapper, jsval propId)
+{
+    const char *ifaceName, *memberName;
+    GetMemberInfo(wrapper, propId, &ifaceName, &memberName);
+    ThrowBadArg(cx, rv, ifaceName, memberName, 0);
+}
+
+xpc_qsDOMString::xpc_qsDOMString(JSContext *cx, jsval *pval)
+{
+    // From the T_DOMSTRING case in XPCConvert::JSData2Native.
+    typedef implementation_type::char_traits traits;
+    jsval v;
+    JSString *s;
+    const jschar *chars;
+    size_t len;
+
+    v = *pval;
+    if(JSVAL_IS_STRING(v))
+    {
+        s = JSVAL_TO_STRING(v);
+    }
+    else
+    {
+        if(JSVAL_IS_NULL(v))
+        {
+            (new(mBuf) implementation_type(
+                traits::sEmptyBuffer, PRUint32(0)))->SetIsVoid(PR_TRUE);
+            mValid = JS_TRUE;
+            return;
+        }
+
+        s = JS_ValueToString(cx, v);
+        if(!s)
+        {
+            mValid = JS_FALSE;
+            return;
+        }
+        *pval = STRING_TO_JSVAL(s);  // Root the new string.
+    }
+
+    len = JS_GetStringLength(s);
+    chars = (len == 0 ? traits::sEmptyBuffer : JS_GetStringChars(s));
+    new(mBuf) implementation_type(chars, len);
+    mValid = JS_TRUE;
+}
+
+xpc_qsAString::xpc_qsAString(JSContext *cx, jsval *pval)
+{
+    // From the T_ASTRING case in XPCConvert::JSData2Native.
+    typedef implementation_type::char_traits traits;
+    jsval v;
+    JSString *s;
+    const jschar *chars;
+    size_t len;
+
+    v = *pval;
+    if(JSVAL_IS_STRING(v))
+    {
+        s = JSVAL_TO_STRING(v);
+    }
+    else
+    {
+        if(JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v))
+        {
+            (new(mBuf) implementation_type(
+                traits::sEmptyBuffer, PRUint32(0)))->SetIsVoid(PR_TRUE);
+            mValid = JS_TRUE;
+            return;
+        }
+
+        s = JS_ValueToString(cx, v);
+        if(!s)
+        {
+            mValid = JS_FALSE;
+            return;
+        }
+        *pval = STRING_TO_JSVAL(s);  // Root the new string.
+    }
+
+    len = JS_GetStringLength(s);
+    chars = (len == 0 ? traits::sEmptyBuffer : JS_GetStringChars(s));
+    new(mBuf) implementation_type(chars, len);
+    mValid = JS_TRUE;
+}
+
+xpc_qsACString::xpc_qsACString(JSContext *cx, jsval *pval)
+{
+    // From the T_CSTRING case in XPCConvert::JSData2Native.
+    jsval v;
+    JSString *s;
+
+    v = *pval;
+    if(JSVAL_IS_STRING(v))
+    {
+        s = JSVAL_TO_STRING(v);
+    }
+    else
+    {
+        if(JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v))
+        {
+            (new(mBuf) implementation_type())->SetIsVoid(PR_TRUE);
+            mValid = JS_TRUE;
+            return;
+        }
+
+        s = JS_ValueToString(cx, v);
+        if(!s)
+        {
+            mValid = JS_FALSE;
+            return;
+        }
+        *pval = STRING_TO_JSVAL(s);  // Root the new string.
+    }
+
+    const char *bytes = JS_GetStringBytes(s);
+    size_t len = JS_GetStringLength(s);
+    new(mBuf) implementation_type(bytes, len);
+    mValid = JS_TRUE;
+}
+
+JSBool
+xpc_qsUnwrapThisImpl(JSContext *cx,
+                     JSObject *obj,
+                     const nsIID &iid,
+                     void **ppThis,
+                     XPCWrappedNative **ppWrapper)
+{
+    // From XPCWrappedNative::GetWrappedNativeOfJSObject.
+    //
+    // Usually IS_WRAPPER_CLASS is true the first time through the while loop,
+    // and the QueryInterface then succeeds.
+
+    NS_ASSERTION(obj, "this == null");
+
+    JSObject *cur = obj;
+    while(cur)
+    {
+        JSClass *clazz;
+        XPCWrappedNative *wrapper;
+        nsISupports *idobj;
+        nsresult rv;
+
+        clazz = STOBJ_GET_CLASS(cur);
+        if(IS_WRAPPER_CLASS(clazz))
+        {
+            wrapper = (XPCWrappedNative*) xpc_GetJSPrivate(cur);
+            NS_ASSERTION(wrapper, "XPCWN wrapping nothing");
+        }
+        else if(clazz == &XPC_WN_Tearoff_JSClass)
+        {
+            wrapper = (XPCWrappedNative*) xpc_GetJSPrivate(STOBJ_GET_PARENT(cur));
+            NS_ASSERTION(wrapper, "XPCWN wrapping nothing");
+        }
+        else if(clazz == &sXPC_XOW_JSClass.base)
+        {
+            JSObject *unsafeObj = XPCWrapper::Unwrap(cx, cur);
+            if(unsafeObj)
+            {
+                cur = unsafeObj;
+                continue;
+            }
+
+            // This goto is a bug, dutifully copied from
+            // XPCWrappedNative::GetWrappedNativeOfJSObject.
+            goto next;
+        }
+        else if(XPCNativeWrapper::IsNativeWrapperClass(clazz))
+        {
+            wrapper = XPCNativeWrapper::GetWrappedNative(cur);
+            NS_ASSERTION(wrapper, "XPCNativeWrapper wrapping nothing");
+        }
+        else if(IsXPCSafeJSObjectWrapperClass(clazz))
+        {
+            cur = STOBJ_GET_PARENT(cur);
+            NS_ASSERTION(cur, "SJOW wrapping nothing");
+            continue;
+        }
+        else {
+            goto next;
+        }
+
+        idobj = wrapper->GetIdentityObject();
+        rv = idobj->QueryInterface(iid, ppThis);
+        if(NS_SUCCEEDED(rv))
+        {
+            *ppWrapper = wrapper;
+            return JS_TRUE;
+        }
+        if(rv != NS_ERROR_NO_INTERFACE)
+            return xpc_qsThrow(cx, rv);
+
+    next:
+        cur = STOBJ_GET_PROTO(cur);
+    }
+
+    // If we didn't find a wrapper using the given obj, try again with obj's
+    // outer object, if it's got one.
+
+    JSClass *clazz = STOBJ_GET_CLASS(obj);
+
+    if((clazz->flags & JSCLASS_IS_EXTENDED) &&
+        ((JSExtendedClass*)clazz)->outerObject)
+    {
+        JSObject *outer = ((JSExtendedClass*)clazz)->outerObject(cx, obj);
+
+        // Protect against infinite recursion through XOWs.
+        JSObject *unsafeObj;
+        clazz = STOBJ_GET_CLASS(outer);
+        if(clazz == &sXPC_XOW_JSClass.base &&
+           (unsafeObj = XPCWrapper::Unwrap(cx, outer)))
+        {
+            outer = unsafeObj;
+        }
+
+        if(outer && outer != obj)
+            return xpc_qsUnwrapThisImpl(cx, outer, iid, ppThis, ppWrapper);
+    }
+
+    return xpc_qsThrow(cx, NS_ERROR_XPC_BAD_OP_ON_WN_PROTO);
+}
+
+JSBool
+xpc_qsUnwrapThisFromCcxImpl(XPCCallContext &ccx,
+                            const nsIID &iid,
+                            void **ppThis)
+{
+    XPCWrappedNative *wrapper = ccx.GetWrapper();
+    if(!wrapper)
+        return xpc_qsThrow(ccx.GetJSContext(), NS_ERROR_XPC_BAD_OP_ON_WN_PROTO);
+    if(!wrapper->IsValid())
+        return xpc_qsThrow(ccx.GetJSContext(), NS_ERROR_XPC_HAS_BEEN_SHUTDOWN);
+
+    nsISupports *idobj = wrapper->GetIdentityObject();
+    nsresult rv = idobj->QueryInterface(iid, ppThis);
+    if(NS_FAILED(rv))
+        return xpc_qsThrow(ccx.GetJSContext(), rv);
+    return JS_TRUE;
+}
+
+nsresult
+xpc_qsUnwrapArgImpl(JSContext *cx,
+                    jsval v,
+                    const nsIID &iid,
+                    void **ppArg)
+{
+    // From XPCConvert::JSData2Native
+    if(JSVAL_IS_VOID(v) || JSVAL_IS_NULL(v))
+        return NS_OK;
+
+    if(!JSVAL_IS_OBJECT(v))
+    {
+        return ((JSVAL_IS_INT(v) && JSVAL_TO_INT(v) == 0)
+                ? NS_ERROR_XPC_BAD_CONVERT_JS_ZERO_ISNOT_NULL
+                : NS_ERROR_XPC_BAD_CONVERT_JS);
+    }
+    JSObject *src = JSVAL_TO_OBJECT(v);
+
+    // From XPCConvert::JSObject2NativeInterface
+    XPCWrappedNative* wrappedNative =
+        XPCWrappedNative::GetWrappedNativeOfJSObject(cx, src);
+    nsISupports *iface;
+    if(wrappedNative)
+    {
+        iface = wrappedNative->GetIdentityObject();
+        if(NS_FAILED(iface->QueryInterface(iid, ppArg)))
+            return NS_ERROR_XPC_BAD_CONVERT_JS;
+        return NS_OK;
+    }
+    // else...
+    // Slow path.
+
+    // XXX E4X breaks the world. Don't try wrapping E4X objects!
+    // This hack can be removed (or changed accordingly) when the
+    // DOM <-> E4X bindings are complete, see bug 270553
+    if(JS_TypeOfValue(cx, OBJECT_TO_JSVAL(src)) == JSTYPE_XML)
+        return NS_ERROR_XPC_BAD_CONVERT_JS;
+
+    // Does the JSObject have 'nsISupportness'?
+    // XXX hmm, I wonder if this matters anymore with no
+    // oldstyle DOM objects around.
+    if(XPCConvert::GetISupportsFromJSObject(src, &iface))
+    {
+        if(!iface || NS_FAILED(iface->QueryInterface(iid, ppArg)))
+            return NS_ERROR_XPC_BAD_CONVERT_JS;
+        return NS_OK;
+    }
+
+    // Create the ccx needed for quick stubs.
+    XPCCallContext ccx(JS_CALLER, cx);
+    if(!ccx.IsValid())
+        return NS_ERROR_XPC_BAD_CONVERT_JS;
+
+    nsXPCWrappedJS *wrapper;
+    nsresult rv =
+        nsXPCWrappedJS::GetNewOrUsed(ccx, src, iid, nsnull, &wrapper);
+    if(NS_FAILED(rv) || !wrapper)
+        return rv;
+
+    // We need to go through the QueryInterface logic to make this return
+    // the right thing for the various 'special' interfaces; e.g.
+    // nsIPropertyBag. We must use AggregatedQueryInterface in cases where
+    // there is an outer to avoid nasty recursion.
+    rv = wrapper->QueryInterface(iid, ppArg);
+    NS_RELEASE(wrapper);
+    return rv;
+}
+
+JSBool
+xpc_qsJsvalToCharStr(JSContext *cx, jsval *pval, char **pstr)
+{
+    jsval v = *pval;
+    JSString *str;
+
+    if(JSVAL_IS_STRING(v))
+    {
+        str = JSVAL_TO_STRING(v);
+    }
+    else if(JSVAL_IS_VOID(v) || JSVAL_IS_NULL(v))
+    {
+        *pstr = NULL;
+        return JS_TRUE;
+    }
+    else
+    {
+        if(!(str = JS_ValueToString(cx, v)))
+            return JS_FALSE;
+        *pval = STRING_TO_JSVAL(str);  // Root the new string.
+    }
+
+    *pstr = JS_GetStringBytes(str);
+    return JS_TRUE;
+}
+
+JSBool
+xpc_qsJsvalToWcharStr(JSContext *cx, jsval *pval, PRUnichar **pstr)
+{
+    jsval v = *pval;
+    JSString *str;
+
+    if(JSVAL_IS_STRING(v))
+    {
+        str = JSVAL_TO_STRING(v);
+    }
+    else if(JSVAL_IS_VOID(v) || JSVAL_IS_NULL(v))
+    {
+        *pstr = NULL;
+        return JS_TRUE;
+    }
+    else
+    {
+        if(!(str = JS_ValueToString(cx, v)))
+            return JS_FALSE;
+        *pval = STRING_TO_JSVAL(str);  // Root the new string.
+    }
+
+    *pstr = JS_GetStringChars(str);
+    return JS_TRUE;
+}
+
+JSBool
+xpc_qsStringToJsval(JSContext *cx, const nsAString &str, jsval *rval)
+{
+    // From the T_DOMSTRING case in XPCConvert::NativeData2JS.
+    if(str.IsVoid())
+    {
+        *rval = JSVAL_NULL;
+        return JS_TRUE;
+    }
+
+    JSString *jsstr = XPCStringConvert::ReadableToJSString(cx, str);
+    if(!jsstr)
+        return JS_FALSE;
+    *rval = STRING_TO_JSVAL(jsstr);
+    return JS_TRUE;
+}
+
+JSBool
+xpc_qsXPCOMObjectToJsval(XPCCallContext &ccx, nsISupports *p,
+                         const nsIID &iid, jsval *rval)
+{
+    // From the T_INTERFACE case in XPCConvert::NativeData2JS.
+    // This is one of the slowest things quick stubs do.
+
+    JSObject *scope = ccx.GetCurrentJSObject();
+    NS_ASSERTION(scope, "bad ccx");
+
+    // XXX The OBJ_IS_NOT_GLOBAL here is not really right. In
+    // fact, this code is depending on the fact that the
+    // global object will not have been collected, and
+    // therefore this NativeInterface2JSObject will not end up
+    // creating a new XPCNativeScriptableShared.
+    nsCOMPtr<nsIXPConnectJSObjectHolder> holder;
+    nsresult rv;
+    if(!XPCConvert::NativeInterface2JSObject(ccx, getter_AddRefs(holder),
+                                              p, &iid, scope, PR_TRUE,
+                                              OBJ_IS_NOT_GLOBAL, &rv))
+    {
+        // I can't tell if NativeInterface2JSObject throws JS exceptions
+        // or not.  This is a sloppy stab at the right semantics; the
+        // method really ought to be fixed to behave consistently.
+        if(!JS_IsExceptionPending(ccx))
+            xpc_qsThrow(ccx, NS_FAILED(rv) ? rv : NS_ERROR_UNEXPECTED);
+        return JS_FALSE;
+    }
+
+    if(holder)
+    {
+        JSObject* jsobj;
+        if(NS_FAILED(holder->GetJSObject(&jsobj)))
+            return JS_FALSE;
+#ifdef DEBUG
+        if(!STOBJ_GET_PARENT(jsobj))
+            NS_ASSERTION(STOBJ_GET_CLASS(jsobj)->flags & JSCLASS_IS_GLOBAL,
+                         "Why did we recreate this wrapper?");
+#endif
+        *rval = OBJECT_TO_JSVAL(jsobj);
+    }
+    else
+    {
+        *rval = JSVAL_NULL;
+    }
+    return JS_TRUE;
+}
+
+JSBool
+xpc_qsVariantToJsval(XPCCallContext &ccx,
+                     nsIVariant *p,
+                     uintN paramNum,
+                     jsval *rval)
+{
+    // From the T_INTERFACE case in XPCConvert::NativeData2JS.
+    // Error handling is in XPCWrappedNative::CallMethod.
+    if(p)
+    {
+        nsresult rv;
+        JSBool ok = XPCVariant::VariantDataToJS(ccx, p,
+                                                ccx.GetCurrentJSObject(),
+                                                &rv, rval);
+        if (!ok)
+            XPCThrower::ThrowBadParam(rv, 0, ccx);
+        return ok;
+    }
+    *rval = JSVAL_NULL;
+    return JS_TRUE;
+}
+
+JSBool
+xpc_qsReadOnlySetter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+    JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+                         JSMSG_GETTER_ONLY, NULL);
+    return JS_FALSE;
+}
+
+#ifdef DEBUG
+void
+xpc_qsAssertContextOK(JSContext *cx)
+{
+    XPCPerThreadData *thread = XPCPerThreadData::GetData(cx);
+    XPCJSContextStack* stack = thread->GetJSContextStack();
+
+    JSContext* topJSContext = nsnull;
+    nsresult rv = stack->Peek(&topJSContext);
+    NS_ASSERTION(NS_SUCCEEDED(rv), "XPCJSContextStack::Peek failed");
+
+    // This is what we're actually trying to assert here.
+    NS_ASSERTION(cx == topJSContext, "wrong context on XPCJSContextStack!");
+
+    NS_ASSERTION(XPCPerThreadData::IsMainThread(cx),
+                 "XPConnect quick stub called on non-main thread");
+}
+#endif
new file mode 100644
--- /dev/null
+++ b/js/src/xpconnect/src/xpcquickstubs.h
@@ -0,0 +1,368 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is mozilla.org code.
+ *
+ * The Initial Developer of the Original Code is
+ *   Mozilla Foundation.
+ * Portions created by the Initial Developer are Copyright (C) 2008
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   Jason Orendorff <jorendorff@mozilla.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef xpcquickstubs_h___
+#define xpcquickstubs_h___
+
+/* xpcquickstubs.h - Support functions used only by quick stubs. */
+
+class XPCCallContext;
+
+#define XPC_QS_NULL_INDEX  ((size_t) -1)
+
+struct xpc_qsPropertySpec {
+    const char *name;
+    JSPropertyOp getter;
+    JSPropertyOp setter;
+};
+
+struct xpc_qsFunctionSpec {
+    const char *name;
+    JSFastNative native;
+    uintN arity;
+};
+
+/** An entry in a table mapping interfaces to quick stubs. */
+struct xpc_qsHashEntry {
+    nsID iid;
+    const xpc_qsPropertySpec *properties;
+    const xpc_qsFunctionSpec *functions;
+    // These last two fields index to other entries in the same table.
+    // XPC_QS_NULL_INDEX indicates there are no more entries in the chain.
+    size_t parentInterface;
+    size_t chain;
+};
+
+JSBool
+xpc_qsDefineQuickStubs(JSContext *cx, JSObject *proto, uintN extraFlags,
+                       PRUint32 ifacec, const nsIID **interfaces,
+                       PRUint32 tableSize, const xpc_qsHashEntry *table);
+
+/** Raise an exception on @a cx and return JS_FALSE. */
+JSBool
+xpc_qsThrow(JSContext *cx, nsresult rv);
+
+/** Elaborately fail after an XPCOM method returned rv. */
+JSBool
+xpc_qsThrowGetterSetterFailed(JSContext *cx, nsresult rv,
+                              XPCWrappedNative *wrapper, jsval memberId);
+
+JSBool
+xpc_qsThrowMethodFailed(JSContext *cx, nsresult rv,
+                        XPCWrappedNative *wrapper, jsval *vp);
+
+JSBool
+xpc_qsThrowMethodFailedWithCcx(XPCCallContext &ccx, nsresult rv);
+
+/** Elaborately fail after converting an argument fails. */
+void
+xpc_qsThrowBadArg(JSContext *cx, nsresult rv,
+                  XPCWrappedNative *wrapper, jsval *vp, uintN paramnum);
+
+void
+xpc_qsThrowBadArgWithCcx(XPCCallContext &ccx, nsresult rv, uintN paramnum);
+
+void
+xpc_qsThrowBadSetterValue(JSContext *cx, nsresult rv,
+                          XPCWrappedNative *wrapper, jsval propId);
+
+
+/* Functions for converting values between COM and JS. */
+
+inline JSBool
+xpc_qsInt32ToJsval(JSContext *cx, PRInt32 i, jsval *rv)
+{
+    if(INT_FITS_IN_JSVAL(i))
+    {
+        *rv = INT_TO_JSVAL(i);
+        return JS_TRUE;
+    }
+    return JS_NewDoubleValue(cx, i, rv);
+}
+
+inline JSBool
+xpc_qsUint32ToJsval(JSContext *cx, PRUint32 u, jsval *rv)
+{
+    if(u <= JSVAL_INT_MAX)
+    {
+        *rv = INT_TO_JSVAL(u);
+        return JS_TRUE;
+    }
+    return JS_NewDoubleValue(cx, u, rv);
+}
+
+#ifdef HAVE_LONG_LONG
+
+#define INT64_TO_DOUBLE(i)      ((jsdouble) (i))
+// Win32 can't handle uint64 to double conversion
+#define UINT64_TO_DOUBLE(u)     ((jsdouble) (int64) (u))
+
+#else
+
+inline jsdouble
+INT64_TO_DOUBLE(const int64 &v)
+{
+    jsdouble d;
+    LL_L2D(d, v);
+    return d;
+}
+
+// if !HAVE_LONG_LONG, then uint64 is a typedef of int64
+#define UINT64_TO_DOUBLE INT64_TO_DOUBLE
+
+#endif
+
+inline JSBool
+xpc_qsInt64ToJsval(JSContext *cx, PRInt64 i, jsval *rv)
+{
+    double d = INT64_TO_DOUBLE(i);
+    return JS_NewNumberValue(cx, d, rv);
+}
+
+inline JSBool
+xpc_qsUint64ToJsval(JSContext *cx, PRUint64 u, jsval *rv)
+{
+    double d = UINT64_TO_DOUBLE(u);
+    return JS_NewNumberValue(cx, d, rv);
+}
+
+
+/* Classes for converting jsvals to string types. */
+
+template <class S, class T>
+class xpc_qsBasicString
+{
+public:
+    typedef S interface_type;
+    typedef T implementation_type;
+
+    ~xpc_qsBasicString()
+    {
+        if (mValid)
+            Ptr()->~implementation_type();
+    }
+
+    JSBool IsValid() { return mValid; }
+
+    implementation_type *Ptr()
+    {
+        return reinterpret_cast<implementation_type *>(mBuf);
+    }
+
+    operator interface_type &()
+    {
+        return *Ptr();
+    }
+
+protected:
+    /*
+     * Neither field is initialized; that is left to the derived class
+     * constructor. However, the destructor destroys the string object
+     * stored in mBuf, if mValid is true.
+     */
+    void *mBuf[JS_HOWMANY(sizeof(implementation_type), sizeof(void *))];
+    JSBool mValid;
+};
+
+/**
+ * Class for converting a jsval to DOMString.
+ *
+ *     xpc_qsDOMString arg0(cx, &argv[0]);
+ *     if (!arg0.IsValid())
+ *         return JS_FALSE;
+ *
+ * The second argument to the constructor is an in-out parameter. It must
+ * point to a rooted jsval, such as a JSNative argument or return value slot.
+ * The value in the jsval on entry is converted to a string. The constructor
+ * may overwrite that jsval with a string value, to protect the characters of
+ * the string from garbage collection. The caller must leave the jsval alone
+ * for the lifetime of the xpc_qsDOMString.
+ */
+class xpc_qsDOMString : public xpc_qsBasicString<nsAString, nsDependentString>
+{
+public:
+    xpc_qsDOMString(JSContext *cx, jsval *pval);
+};
+
+/**
+ * The same as xpc_qsDOMString, but with slightly different conversion behavior,
+ * corresponding to the [astring] magic XPIDL annotation rather than [domstring].
+ */
+class xpc_qsAString : public xpc_qsBasicString<nsAString, nsDependentString>
+{
+public:
+    xpc_qsAString(JSContext *cx, jsval *pval);
+};
+
+/**
+ * Like xpc_qsDOMString and xpc_qsAString, but for XPIDL native types annotated
+ * with [cstring] rather than [domstring] or [astring].
+ */
+class xpc_qsACString : public xpc_qsBasicString<nsACString, nsCString>
+{
+public:
+    xpc_qsACString(JSContext *cx, jsval *pval);
+};
+
+/**
+ * Convert a jsval to char*, returning JS_TRUE on success.
+ *
+ * @param cx
+ *      A context.
+ * @param pval
+ *     In/out. *pval is the jsval to convert; the function may write to *pval,
+ *     using it as a GC root (like xpc_qsDOMString's constructor).
+ * @param pstr
+ *     Out. On success *pstr receives the converted string or NULL if *pval is
+ *     null or undefined. Unicode data is garbled as with JS_GetStringBytes.
+ */
+JSBool
+xpc_qsJsvalToCharStr(JSContext *cx, jsval *pval, char **pstr);
+
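+/*
+ * Illustrative use (a hypothetical argument 0 of a quick stub):
+ *
+ *     char *arg0;
+ *     if (!xpc_qsJsvalToCharStr(cx, &argv[0], &arg0))
+ *         return JS_FALSE;
+ */
+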
+JSBool
+xpc_qsJsvalToWcharStr(JSContext *cx, jsval *pval, PRUnichar **pstr);
+
+
+/** Convert an nsAString to jsval, returning JS_TRUE on success. */
+JSBool
+xpc_qsStringToJsval(JSContext *cx, const nsAString &str, jsval *rval);
+
+JSBool
+xpc_qsUnwrapThisImpl(JSContext *cx,
+                     JSObject *obj,
+                     const nsIID &iid,
+                     void **ppThis,
+                     XPCWrappedNative **ppWrapper);
+
+/**
+ * Search @a obj and its prototype chain for an XPCOM object that implements
+ * the interface T.
+ *
+ * If an object implementing T is found, AddRef it, store the pointer in
+ * @a *ppThis, store a pointer to the wrapper in @a *ppWrapper, and return
+ * JS_TRUE. Otherwise, raise an exception on @a cx and return JS_FALSE.
+ *
+ * This does not consult inner objects. It does support XPConnect tear-offs
+ * and it sees through XOWs, XPCNativeWrappers, and SafeJSObjectWrappers.
+ *
+ * Requires a request on @a cx.
+ */
+template <class T>
+inline JSBool
+xpc_qsUnwrapThis(JSContext *cx,
+                 JSObject *obj,
+                 T **ppThis,
+                 XPCWrappedNative **ppWrapper)
+{
+    return xpc_qsUnwrapThisImpl(cx,
+                                obj,
+                                NS_GET_TEMPLATE_IID(T),
+                                reinterpret_cast<void **>(ppThis),
+                                ppWrapper);
+}
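+
+// Editor's illustrative sketch (not part of the original patch): recovering
+// the native 'this' at the top of a method stub. nsIDOMNode is just an
+// example interface; 'obj' is the assumed JS 'this' object.
+//
+//     nsIDOMNode *self;
+//     XPCWrappedNative *wrapper;
+//     if (!xpc_qsUnwrapThis(cx, obj, &self, &wrapper))
+//         return JS_FALSE;   // an exception has already been raised on cx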
+
+JSBool
+xpc_qsUnwrapThisFromCcxImpl(XPCCallContext &ccx,
+                            const nsIID &iid,
+                            void **ppThis);
+
+/**
+ * Alternate implementation of xpc_qsUnwrapThis using information already
+ * present in the given XPCCallContext.
+ */
+template <class T>
+inline JSBool
+xpc_qsUnwrapThisFromCcx(XPCCallContext &ccx,
+                        T **ppThis)
+{
+    return xpc_qsUnwrapThisFromCcxImpl(ccx, NS_GET_TEMPLATE_IID(T),
+                                       reinterpret_cast<void **>(ppThis));
+}
+
+nsresult
+xpc_qsUnwrapArgImpl(JSContext *cx, jsval v, const nsIID &iid, void **ppArg);
+
+/** Convert a jsval to an XPCOM pointer. */
+template <class T>
+inline nsresult
+xpc_qsUnwrapArg(JSContext *cx, jsval v, T **ppArg)
+{
+    return xpc_qsUnwrapArgImpl(cx, v, NS_GET_TEMPLATE_IID(T),
+                               reinterpret_cast<void **>(ppArg));
+}
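+
+// Editor's illustrative sketch (not part of the original patch): unwrapping an
+// object argument. nsIDOMNode is an example interface; 'argv' is an assumed
+// JSNative argument array.
+//
+//     nsIDOMNode *arg0;
+//     nsresult rv = xpc_qsUnwrapArg(cx, argv[0], &arg0);
+//     if (NS_FAILED(rv))
+//         return JS_FALSE;                  // error reporting omitted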
+
+/** Convert an XPCOM pointer to jsval. Return JS_TRUE on success. */
+JSBool
+xpc_qsXPCOMObjectToJsval(XPCCallContext &ccx,
+                         nsISupports *p,
+                         const nsIID &iid,
+                         jsval *rval);
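+
+// Editor's illustrative sketch (not part of the original patch): wrapping an
+// XPCOM result for return to script. GetFirstChild is a hypothetical call;
+// 'self', 'ccx', and 'vp' are assumed to be in scope in the stub.
+//
+//     nsCOMPtr<nsIDOMNode> child;
+//     if (NS_FAILED(self->GetFirstChild(getter_AddRefs(child))))
+//         return JS_FALSE;                  // error reporting omitted
+//     return xpc_qsXPCOMObjectToJsval(ccx, child, NS_GET_IID(nsIDOMNode), vp);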
+
+/**
+ * Convert a variant to jsval. Return JS_TRUE on success.
+ *
+ * @a paramNum is used in error messages. XPConnect treats the return
+ * value as a parameter in this regard.
+ */
+JSBool
+xpc_qsVariantToJsval(XPCCallContext &ccx,
+                     nsIVariant *p,
+                     uintN paramNum,
+                     jsval *rval);
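+
+// Editor's illustrative sketch (not part of the original patch): converting a
+// variant-typed result. GetData is a hypothetical call; 0 is used only as a
+// placeholder paramNum for the error message.
+//
+//     nsCOMPtr<nsIVariant> variant;
+//     if (NS_FAILED(self->GetData(getter_AddRefs(variant))))
+//         return JS_FALSE;                  // error reporting omitted
+//     return xpc_qsVariantToJsval(ccx, variant, 0, vp);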
+
+/**
+ * Use this as the setter for readonly attributes. (The IDL readonly
+ * keyword does not map to JSPROP_READONLY; the semantics differ.)
+ *
+ * Always fails, with the same error as setting a property that has
+ * JSPROP_GETTER but not JSPROP_SETTER.
+ */
+JSBool
+xpc_qsReadOnlySetter(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
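+
+// Editor's illustrative sketch (not part of the original patch): a
+// JSPropertySpec entry for a readonly attribute. The property name and
+// NodeName_getter are hypothetical.
+//
+//     { "nodeName", 0, JSPROP_ENUMERATE | JSPROP_SHARED,
+//       NodeName_getter, xpc_qsReadOnlySetter },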
+
+#ifdef DEBUG
+void
+xpc_qsAssertContextOK(JSContext *cx);
+
+#define XPC_QS_ASSERT_CONTEXT_OK(cx) xpc_qsAssertContextOK(cx)
+#else
+#define XPC_QS_ASSERT_CONTEXT_OK(cx) ((void) 0)
+#endif
+
+#endif /* xpcquickstubs_h___ */
--- a/js/src/xpconnect/src/xpcthrower.cpp
+++ b/js/src/xpconnect/src/xpcthrower.cpp
@@ -58,34 +58,34 @@ XPCThrower::Throw(nsresult rv, JSContext
 
 /*
  * If there has already been an exception thrown, see if we're throwing the
  * same sort of exception, and if we are, don't clobber the old one. ccx
  * should be the current call context.
  */
 // static
 JSBool
-XPCThrower::CheckForPendingException(nsresult result, XPCCallContext &ccx)
+XPCThrower::CheckForPendingException(nsresult result, JSContext *cx)
 {
     nsXPConnect* xpc = nsXPConnect::GetXPConnect();
     if(!xpc)
         return JS_FALSE;
 
     nsCOMPtr<nsIException> e;
     xpc->GetPendingException(getter_AddRefs(e));
     if(!e)
         return JS_FALSE;
     xpc->SetPendingException(nsnull);
 
     nsresult e_result;
     if(NS_FAILED(e->GetResult(&e_result)) || e_result != result)
         return JS_FALSE;
 
-    if(!ThrowExceptionObject(ccx, e))
-        JS_ReportOutOfMemory(ccx);
+    if(!ThrowExceptionObject(cx, e))
+        JS_ReportOutOfMemory(cx);
     return JS_TRUE;
 }
 
 // static
 void
 XPCThrower::Throw(nsresult rv, XPCCallContext& ccx)
 {
     char* sz;
--- a/js/src/xpconnect/src/xpcwrappednativeproto.cpp
+++ b/js/src/xpconnect/src/xpcwrappednativeproto.cpp
@@ -128,16 +128,30 @@ XPCWrappedNativeProto::Init(
 
     mJSProtoObject =
         xpc_NewSystemInheritingJSObject(ccx, jsclazz,
                                         mScope->GetPrototypeJSObject(),
                                         parent);
 
     JSBool ok = mJSProtoObject && JS_SetPrivate(ccx, mJSProtoObject, this);
 
+    if(ok && scriptableCreateInfo)
+    {
+        nsIXPCScriptable *callback = scriptableCreateInfo->GetCallback();
+        if(callback)
+        {
+            nsresult rv = callback->PostCreatePrototype(ccx, mJSProtoObject);
+            if(NS_FAILED(rv))
+            {
+                XPCThrower::Throw(rv, ccx);
+                return JS_FALSE;
+            }
+        }
+    }
+
     DEBUG_ReportShadowedMembers(mSet, nsnull, this);
 
     return ok;
 }
 
 void
 XPCWrappedNativeProto::JSProtoObjectFinalized(JSContext *cx, JSObject *obj)
 {
--- a/js/src/xpconnect/tests/mochitest/test_bug390488.html
+++ b/js/src/xpconnect/tests/mochitest/test_bug390488.html
@@ -27,34 +27,37 @@ https://bugzilla.mozilla.org/show_bug.cg
     var stack = "";
     for (var i = 1; func && i < 8; i++) {
       stack += " " + i + ". " + func.name;
       func = func.caller;
     }
     return stack;
   }
   
-  var correctStack1 = " 1. checkForStacks 2. onclick 3. dispatchEvent 4. simulateClick";
-  
   function getStack2() {
     var stack = new Error().stack;
     // Remove the two lines due to calling this
     return stack.substring(stack.indexOf("\n", stack.indexOf("\n")+1)+1);
   }
   
   function simulateClick() {
     var evt = document.createEvent("MouseEvents");
     evt.initMouseEvent("click", true, true, window,
       0, 0, 0, 0, 0, false, false, false, false, 0, null);
     $("testdiv").dispatchEvent(evt);
   }
-  
+
+  function matches(s, p, name) {
+    ok(s.match(p) != null, name,
+       "got " + uneval(s) + ", expected a string matching " + uneval(p));
+  }
+
   function checkForStacks() {
-    is(getStack1(), correctStack1,
-       "Stack from walking caller chain should be correct");
+    matches(getStack1(), /checkForStacks .* onclick .* simulateClick/,
+            "Stack from walking caller chain should be correct");
     isnot(getStack2().indexOf("simulateClick()@"),  -1,
           "Stack from |new Error().stack| should include simulateClick");
   }
 
   simulateClick();
 </script>
 </pre>
 </body>
new file mode 100644
--- /dev/null
+++ b/other-licenses/ply/COPYING
@@ -0,0 +1,504 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+           How to Apply These Terms to Your New Libraries
+
+  If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change.  You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+  To apply these terms, attach the following notices to the library.  It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
+
+
new file mode 100644
--- /dev/null
+++ b/other-licenses/ply/README
@@ -0,0 +1,9 @@
+David Beazley's PLY (Python Lex-Yacc)
+http://www.dabeaz.com/ply/
+
+Licensed under the LGPL (v2.1 or later).
+
+This directory contains just the code and license from PLY version 2.5;
+the full distribution (see the URL) also contains examples, tests,
+documentation, and a longer README.
+
new file mode 100644
--- /dev/null
+++ b/other-licenses/ply/ply/__init__.py
@@ -0,0 +1,4 @@
+# PLY package
+# Author: David Beazley (dave@dabeaz.com)
+
+__all__ = ['lex','yacc']
new file mode 100644
--- /dev/null
+++ b/other-licenses/ply/ply/lex.py
@@ -0,0 +1,896 @@
+# -----------------------------------------------------------------------------
+# ply: lex.py
+#
+# Author: David M. Beazley (dave@dabeaz.com)
+#
+# Copyright (C) 2001-2008, David M. Beazley
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#
+# See the file COPYING for a complete copy of the LGPL.
+# -----------------------------------------------------------------------------
+
+__version__    = "2.5"
+__tabversion__ = "2.4"       # Version of table file used
+
+import re, sys, types, copy, os
+
+# This regular expression is used to match valid token names
+_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
+
+# _INSTANCETYPE sets the valid set of instance types recognized
+# by PLY when lexers are defined by a class. In order to maintain
+# backwards compatibility with Python-2.0, we have to check for
+# the existence of ObjectType.
+
+try:
+    _INSTANCETYPE = (types.InstanceType, types.ObjectType)
+except AttributeError:
+    _INSTANCETYPE = types.InstanceType
+    class object: pass       # Note: needed if no new-style classes present
+
+# Exception thrown when invalid token encountered and no default error
+# handler is defined.
+
+class LexError(Exception):
+    def __init__(self,message,s):
+         self.args = (message,)
+         self.text = s
+
+# An object used to issue one-time warning messages for various features
+
+class LexWarning(object):
+   def __init__(self):
+      self.warned = 0
+   def __call__(self,msg):
+      if not self.warned:
+         sys.stderr.write("ply.lex: Warning: " + msg+"\n")
+         self.warned = 1
+
+_SkipWarning = LexWarning()         # Warning for use of t.skip() on tokens
+
+# Token class.  This class is used to represent the tokens produced.
+class LexToken(object):
+    def __str__(self):
+        return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
+    def __repr__(self):
+        return str(self)
+    def skip(self,n):
+        self.lexer.skip(n)
+        _SkipWarning("Calling t.skip() on a token is deprecated.  Please use t.lexer.skip()")
+
+# -----------------------------------------------------------------------------
+# Lexer class
+#
+# This class encapsulates all of the methods and data associated with a lexer.
+#
+#    input()          -  Store a new string in the lexer
+#    token()          -  Get the next token
+# -----------------------------------------------------------------------------
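+#
+# Editor's sketch (not part of PLY): typical use of these two methods once a
+# lexer object has been built by lex() below.
+#
+#     lexer.input("some input text")
+#     while 1:
+#         tok = lexer.token()
+#         if not tok: break            # no more tokens
+#         print tok.type, tok.value, tok.lineno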
+
+class Lexer:
+    def __init__(self):
+        self.lexre = None             # Master regular expression. This is a list of
+                                      # tuples (re,findex) where re is a compiled
+                                      # regular expression and findex is a list
+                                      # mapping regex group numbers to rules
+        self.lexretext = None         # Current regular expression strings
+        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
+        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
+        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
+        self.lexstate = "INITIAL"     # Current lexer state
+        self.lexstatestack = []       # Stack of lexer states
+        self.lexstateinfo = None      # State information
+        self.lexstateignore = {}      # Dictionary of ignored characters for each state
+        self.lexstateerrorf = {}      # Dictionary of error functions for each state
+        self.lexreflags = 0           # Optional re compile flags
+        self.lexdata = None           # Actual input data (as a string)
+        self.lexpos = 0               # Current position in input text
+        self.lexlen = 0               # Length of the input text
+        self.lexerrorf = None         # Error rule (if any)
+        self.lextokens = None         # List of valid tokens
+        self.lexignore = ""           # Ignored characters
+        self.lexliterals = ""         # Literal characters that can be passed through
+        self.lexmodule = None         # Module
+        self.lineno = 1               # Current line number
+        self.lexdebug = 0             # Debugging mode
+        self.lexoptimize = 0          # Optimized mode
+
+    def clone(self,object=None):
+        c = copy.copy(self)
+
+        # If the object parameter has been supplied, it means we are attaching the
+        # lexer to a new object.  In this case, we have to rebind all methods in
+        # the lexstatere and lexstateerrorf tables.
+
+        if object:
+            newtab = { }
+            for key, ritem in self.lexstatere.items():
+                newre = []
+                for cre, findex in ritem:
+                     newfindex = []
+                     for f in findex:
+                         if not f or not f[0]:
+                             newfindex.append(f)
+                             continue
+                         newfindex.append((getattr(object,f[0].__name__),f[1]))
+                newre.append((cre,newfindex))
+                newtab[key] = newre
+            c.lexstatere = newtab
+            c.lexstateerrorf = { }
+            for key, ef in self.lexstateerrorf.items():
+                c.lexstateerrorf[key] = getattr(object,ef.__name__)
+            c.lexmodule = object
+        return c
+
+    # ------------------------------------------------------------
+    # writetab() - Write lexer information to a table file
+    # ------------------------------------------------------------
+    def writetab(self,tabfile,outputdir=""):
+        if isinstance(tabfile,types.ModuleType):
+            return
+        basetabfilename = tabfile.split(".")[-1]
+        filename = os.path.join(outputdir,basetabfilename)+".py"
+        tf = open(filename,"w")
+        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
+        tf.write("_lextokens    = %s\n" % repr(self.lextokens))
+        tf.write("_lexreflags   = %s\n" % repr(self.lexreflags))
+        tf.write("_lexliterals  = %s\n" % repr(self.lexliterals))
+        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
+
+        tabre = { }
+        # Collect all functions in the initial state
+        initial = self.lexstatere["INITIAL"]
+        initialfuncs = []
+        for part in initial:
+            for f in part[1]:
+                if f and f[0]:
+                    initialfuncs.append(f)
+
+        for key, lre in self.lexstatere.items():
+             titem = []
+             for i in range(len(lre)):
+                  titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
+             tabre[key] = titem
+
+        tf.write("_lexstatere   = %s\n" % repr(tabre))
+        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
+
+        taberr = { }
+        for key, ef in self.lexstateerrorf.items():
+             if ef:
+                  taberr[key] = ef.__name__
+             else:
+                  taberr[key] = None
+        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
+        tf.close()
+
+    # ------------------------------------------------------------
+    # readtab() - Read lexer information from a tab file
+    # ------------------------------------------------------------
+    def readtab(self,tabfile,fdict):
+        if isinstance(tabfile,types.ModuleType):
+            lextab = tabfile
+        else:
+            exec "import %s as lextab" % tabfile
+        self.lextokens      = lextab._lextokens
+        self.lexreflags     = lextab._lexreflags
+        self.lexliterals    = lextab._lexliterals
+        self.lexstateinfo   = lextab._lexstateinfo
+        self.lexstateignore = lextab._lexstateignore
+        self.lexstatere     = { }
+        self.lexstateretext = { }
+        for key,lre in lextab._lexstatere.items():
+             titem = []
+             txtitem = []
+             for i in range(len(lre)):
+                  titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
+                  txtitem.append(lre[i][0])
+             self.lexstatere[key] = titem
+             self.lexstateretext[key] = txtitem
+        self.lexstateerrorf = { }
+        for key,ef in lextab._lexstateerrorf.items():
+             self.lexstateerrorf[key] = fdict[ef]
+        self.begin('INITIAL')
+
+    # ------------------------------------------------------------
+    # input() - Push a new string into the lexer
+    # ------------------------------------------------------------
+    def input(self,s):
+        # Pull off the first character to see if s looks like a string
+        c = s[:1]
+        if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)):
+            raise ValueError, "Expected a string"
+        self.lexdata = s
+        self.lexpos = 0
+        self.lexlen = len(s)
+
+    # ------------------------------------------------------------
+    # begin() - Changes the lexing state
+    # ------------------------------------------------------------
+    def begin(self,state):
+        if not self.lexstatere.has_key(state):
+            raise ValueError, "Undefined state"
+        self.lexre = self.lexstatere[state]
+        self.lexretext = self.lexstateretext[state]
+        self.lexignore = self.lexstateignore.get(state,"")
+        self.lexerrorf = self.lexstateerrorf.get(state,None)
+        self.lexstate = state
+
+    # ------------------------------------------------------------
+    # push_state() - Changes the lexing state and saves old on stack
+    # ------------------------------------------------------------
+    def push_state(self,state):
+        self.lexstatestack.append(self.lexstate)
+        self.begin(state)
+
+    # ------------------------------------------------------------
+    # pop_state() - Restores the previous state
+    # ------------------------------------------------------------
+    def pop_state(self):
+        self.begin(self.lexstatestack.pop())
+
+    # ------------------------------------------------------------
+    # current_state() - Returns the current lexing state
+    # ------------------------------------------------------------
+    def current_state(self):
+        return self.lexstate
+
+    # ------------------------------------------------------------
+    # skip() - Skip ahead n characters
+    # ------------------------------------------------------------
+    def skip(self,n):
+        self.lexpos += n
+
+    # ------------------------------------------------------------
+    # token() - Return the next token from the Lexer
+    #
+    # Note: This function has been carefully implemented to be as fast
+    # as possible.  Don't make changes unless you really know what
+    # you are doing
+    # ------------------------------------------------------------
+    def token(self):
+        # Make local copies of frequently referenced attributes
+        lexpos    = self.lexpos
+        lexlen    = self.lexlen
+        lexignore = self.lexignore
+        lexdata   = self.lexdata
+
+        while lexpos < lexlen:
+            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
+            if lexdata[lexpos] in lexignore:
+                lexpos += 1
+                continue
+
+            # Look for a regular expression match
+            for lexre,lexindexfunc in self.lexre:
+                m = lexre.match(lexdata,lexpos)
+                if not m: continue
+
+                # Create a token for return
+                tok = LexToken()
+                tok.value = m.group()
+                tok.lineno = self.lineno
+                tok.lexpos = lexpos
+
+                i = m.lastindex
+                func,tok.type = lexindexfunc[i]
+
+                if not func:
+                   # If no token type was set, it's an ignored token
+                   if tok.type:
+                      self.lexpos = m.end()
+                      return tok
+                   else:
+                      lexpos = m.end()
+                      break
+
+                lexpos = m.end()
+
+                # if func not callable, it means it's an ignored token
+                if not callable(func):
+                   break
+
+                # If token is processed by a function, call it
+
+                tok.lexer = self      # Set additional attributes useful in token rules
+                self.lexmatch = m
+                self.lexpos = lexpos
+
+                newtok = func(tok)
+
+                # Every function must return a token, if nothing, we just move to next token
+                if not newtok:
+                    lexpos    = self.lexpos         # This is here in case user has updated lexpos.
+                    lexignore = self.lexignore      # This is here in case there was a state change
+                    break
+
+                # Verify type of the token.  If not in the token map, raise an error
+                if not self.lexoptimize:
+                    if not self.lextokens.has_key(newtok.type):
+                        raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
+                            func.func_code.co_filename, func.func_code.co_firstlineno,
+                            func.__name__, newtok.type),lexdata[lexpos:])
+
+                return newtok
+            else:
+                # No match, see if in literals
+                if lexdata[lexpos] in self.lexliterals:
+                    tok = LexToken()
+                    tok.value = lexdata[lexpos]
+                    tok.lineno = self.lineno
+                    tok.type = tok.value
+                    tok.lexpos = lexpos
+                    self.lexpos = lexpos + 1
+                    return tok
+
+                # No match. Call t_error() if defined.
+                if self.lexerrorf:
+                    tok = LexToken()
+                    tok.value = self.lexdata[lexpos:]
+                    tok.lineno = self.lineno
+                    tok.type = "error"
+                    tok.lexer = self
+                    tok.lexpos = lexpos
+                    self.lexpos = lexpos
+                    newtok = self.lexerrorf(tok)
+                    if lexpos == self.lexpos:
+                        # Error method didn't change text position at all. This is an error.
+                        raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
+                    lexpos = self.lexpos
+                    if not newtok: continue
+                    return newtok
+
+                self.lexpos = lexpos
+                raise LexError, ("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
+
+        self.lexpos = lexpos + 1
+        if self.lexdata is None:
+             raise RuntimeError, "No input string given with input()"
+        return None
+
+# -----------------------------------------------------------------------------
+# _validate_file()
+#
+# This checks to see if there are duplicated t_rulename() functions or strings
+# in the parser input file.  This is done using a simple regular expression
+# match on each line in the given file.  If the file can't be located or opened,
+# a true result is returned by default.
+# -----------------------------------------------------------------------------
+
+def _validate_file(filename):
+    import os.path
+    base,ext = os.path.splitext(filename)
+    if ext != '.py': return 1        # No idea what the file is. Return OK
+
+    try:
+        f = open(filename)
+        lines = f.readlines()
+        f.close()
+    except IOError:
+        return 1                     # Couldn't find the file.  Don't worry about it
+
+    fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
+    sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
+
+    counthash = { }
+    linen = 1
+    noerror = 1
+    for l in lines:
+        m = fre.match(l)
+        if not m:
+            m = sre.match(l)
+        if m:
+            name = m.group(1)
+            prev = counthash.get(name)
+            if not prev:
+                counthash[name] = linen
+            else:
+                print >>sys.stderr, "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
+                noerror = 0
+        linen += 1
+    return noerror
+
+# -----------------------------------------------------------------------------
+# _funcs_to_names()
+#
+# Given a list of regular expression functions, this converts it to a list
+# suitable for output to a table file
+# -----------------------------------------------------------------------------
+
+def _funcs_to_names(funclist,namelist):
+    result = []
+    for f,name in zip(funclist,namelist):
+         if f and f[0]:
+             result.append((name, f[1]))
+         else:
+             result.append(f)
+    return result
+
+# -----------------------------------------------------------------------------
+# _names_to_funcs()
+#
+# Given a list of regular expression function names, this converts it back to
+# functions.
+# -----------------------------------------------------------------------------
+
+def _names_to_funcs(namelist,fdict):
+     result = []
+     for n in namelist:
+          if n and n[0]:
+              result.append((fdict[n[0]],n[1]))
+          else:
+              result.append(n)
+     return result
+
+# -----------------------------------------------------------------------------
+# _form_master_re()
+#
+# This function takes a list of all of the regex components and attempts to
+# form the master regular expression.  Given limitations in the Python re
+# module, it may be necessary to break the master regex into separate expressions.
+# -----------------------------------------------------------------------------
+
+def _form_master_re(relist,reflags,ldict,toknames):
+    if not relist: return []
+    regex = "|".join(relist)
+    try:
+        lexre = re.compile(regex,re.VERBOSE | reflags)
+
+        # Build the index to function map for the matching engine
+        lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
+        lexindexnames = lexindexfunc[:]
+
+        for f,i in lexre.groupindex.items():
+            handle = ldict.get(f,None)
+            if type(handle) in (types.FunctionType, types.MethodType):
+                lexindexfunc[i] = (handle,toknames[f])
+                lexindexnames[i] = f
+            elif handle is not None:
+                lexindexnames[i] = f
+                if f.find("ignore_") > 0:
+                    lexindexfunc[i] = (None,None)
+                else:
+                    lexindexfunc[i] = (None, toknames[f])
+        
+        return [(lexre,lexindexfunc)],[regex],[lexindexnames]
+    except Exception,e:
+        m = int(len(relist)/2)
+        if m == 0: m = 1
+        llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
+        rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
+        return llist+rlist, lre+rre, lnames+rnames
+
+# -----------------------------------------------------------------------------
+# def _statetoken(s,names)
+#
+# Given a declaration name s of the form "t_" and a dictionary whose keys are
+# state names, this function returns a tuple (states,tokenname) where states
+# is a tuple of state names and tokenname is the name of the token.  For example,
+# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
+# -----------------------------------------------------------------------------
+
+def _statetoken(s,names):
+    nonstate = 1
+    parts = s.split("_")
+    for i in range(1,len(parts)):
+         if not names.has_key(parts[i]) and parts[i] != 'ANY': break
+    if i > 1:
+       states = tuple(parts[1:i])
+    else:
+       states = ('INITIAL',)
+
+    if 'ANY' in states:
+       states = tuple(names.keys())
+
+    tokenname = "_".join(parts[i:])
+    return (states,tokenname)
+
+# -----------------------------------------------------------------------------
+# lex(module)
+#
+# Build all of the regular expression rules from definitions in the supplied module
+# -----------------------------------------------------------------------------
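+# Editor's sketch (not part of PLY): the kind of module lex() expects -- a
+# 'tokens' list plus t_ rules. The token names here are made up for
+# illustration.
+#
+#     tokens = ('NUMBER', 'PLUS')
+#     t_PLUS   = r'\+'
+#     t_ignore = ' \t'
+#     def t_NUMBER(t):
+#         r'\d+'
+#         t.value = int(t.value)
+#         return t
+#     def t_error(t):
+#         t.lexer.skip(1)
+#     lexer = lex()        # picks up the rules from the calling namespace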
+def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir=""):
+    global lexer
+    ldict = None
+    stateinfo  = { 'INITIAL' : 'inclusive'}
+    error = 0
+    files = { }
+    lexobj = Lexer()
+    lexobj.lexdebug = debug
+    lexobj.lexoptimize = optimize
+    global token,input
+
+    if nowarn: warn = 0
+    else: warn = 1
+
+    if object: module = object
+
+    if module:
+        # User supplied a module object.
+        if isinstance(module, types.ModuleType):
+            ldict = module.__dict__
+        elif isinstance(module, _INSTANCETYPE):
+            _items = [(k,getattr(module,k)) for k in dir(module)]
+            ldict = { }
+            for (i,v) in _items:
+                ldict[i] = v
+        else:
+            raise ValueError,"Expected a module or instance"
+        lexobj.lexmodule = module
+
+    else:
+        # No module given.  We might be able to get information from the caller.
+        try:
+            raise RuntimeError
+        except RuntimeError:
+            e,b,t = sys.exc_info()
+            f = t.tb_frame
+            f = f.f_back                    # Walk out to our calling function
+            if f.f_globals is f.f_locals:   # Collect global and local variations from caller
+               ldict = f.f_globals
+            else:
+               ldict = f.f_globals.copy()
+               ldict.update(f.f_locals)
+
+    if optimize and lextab:
+        try:
+            lexobj.readtab(lextab,ldict)
+            token = lexobj.token
+            input = lexobj.input
+            lexer = lexobj
+            return lexobj
+
+        except ImportError:
+            pass
+
+    # Get the tokens, states, and literals variables (if any)
+
+    tokens = ldict.get("tokens",None)
+    states = ldict.get("states",None)
+    literals = ldict.get("literals","")
+
+    if not tokens:
+        raise SyntaxError,"lex: module does not define 'tokens'"
+
+    if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
+        raise SyntaxError,"lex: tokens must be a list or tuple."
+
+    # Build a dictionary of valid token names
+    lexobj.lextokens = { }
+    if not optimize:
+        for n in tokens:
+            if not _is_identifier.match(n):
+                print >>sys.stderr, "lex: Bad token name '%s'" % n
+                error = 1
+            if warn and lexobj.lextokens.has_key(n):
+                print >>sys.stderr, "lex: Warning. Token '%s' multiply defined." % n
+            lexobj.lextokens[n] = None
+    else:
+        for n in tokens: lexobj.lextokens[n] = None
+
+    if debug:
+        print "lex: tokens = '%s'" % lexobj.lextokens.keys()
+
+    try:
+         for c in literals:
+               if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)) or len(c) > 1:
+                    print >>sys.stderr, "lex: Invalid literal %s. Must be a single character" % repr(c)
+                    error = 1
+                    continue
+
+    except TypeError:
+         print >>sys.stderr, "lex: Invalid literals specification. literals must be a sequence of characters."
+         error = 1
+
+    lexobj.lexliterals = literals
+
+    # Build statemap
+    if states:
+         if not (isinstance(states,types.TupleType) or isinstance(states,types.ListType)):
+              print >>sys.stderr, "lex: states must be defined as a tuple or list."
+              error = 1
+         else:
+              for s in states:
+                    if not isinstance(s,types.TupleType) or len(s) != 2:
+                           print >>sys.stderr, "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
+                           error = 1
+                           continue
+                    name, statetype = s
+                    if not isinstance(name,types.StringType):
+                           print >>sys.stderr, "lex: state name %s must be a string" % repr(name)
+                           error = 1
+                           continue
+                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
+                           print >>sys.stderr, "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
+                           error = 1
+                           continue
+                    if stateinfo.has_key(name):
+                           print >>sys.stderr, "lex: state '%s' already defined." % name
+                           error = 1
+                           continue
+                    stateinfo[name] = statetype
+
+    # Get a list of symbols with the t_ prefix
+    tsymbols = [f for f in ldict.keys() if f[:2] == 't_' ]
+
+    # Now build up a list of functions and a list of strings
+
+    funcsym =  { }        # Symbols defined as functions
+    strsym =   { }        # Symbols defined as strings
+    toknames = { }        # Mapping of symbols to token names
+
+    for s in stateinfo.keys():
+         funcsym[s] = []
+         strsym[s] = []
+
+    ignore   = { }        # Ignore strings by state
+    errorf   = { }        # Error functions by state
+
+    if len(tsymbols) == 0:
+        raise SyntaxError,"lex: no rules of the form t_rulename are defined."
+
+    for f in tsymbols:
+        t = ldict[f]
+        states, tokname = _statetoken(f,stateinfo)
+        toknames[f] = tokname
+
+        if callable(t):
+            for s in states: funcsym[s].append((f,t))
+        elif (isinstance(t, types.StringType) or isinstance(t,types.UnicodeType)):
+            for s in states: strsym[s].append((f,t))
+        else:
+            print >>sys.stderr, "lex: %s not defined as a function or string" % f
+            error = 1
+
+    # Sort the functions by line number
+    for f in funcsym.values():
+        f.sort(lambda x,y: cmp(x[1].func_code.co_firstlineno,y[1].func_code.co_firstlineno))
+
+    # Sort the strings by regular expression length (longest first)
+    for s in strsym.values():
+        s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
+
+    regexs = { }
+
+    # Build the master regular expressions
+    for state in stateinfo.keys():
+        regex_list = []
+
+        # Add rules defined by functions first
+        for fname, f in funcsym[state]:
+            line = f.func_code.co_firstlineno
+            file = f.func_code.co_filename
+            files[file] = None
+            tokname = toknames[fname]
+
+            ismethod = isinstance(f, types.MethodType)
+
+            if not optimize:
+                nargs = f.func_code.co_argcount
+                if ismethod:
+                    reqargs = 2
+                else:
+                    reqargs = 1
+                if nargs > reqargs:
+                    print >>sys.stderr, "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
+                    error = 1
+                    continue
+
+                if nargs < reqargs:
+                    print >>sys.stderr, "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
+                    error = 1
+                    continue
+
+                if tokname == 'ignore':
+                    print >>sys.stderr, "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
+                    error = 1
+                    continue
+
+            if tokname == 'error':
+                errorf[state] = f
+                continue
+
+            if f.__doc__:
+                if not optimize:
+                    try:
+                        c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | reflags)
+                        if c.match(""):
+                             print >>sys.stderr, "%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__)
+                             error = 1
+                             continue
+                    except re.error,e:
+                        print >>sys.stderr, "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
+                        if '#' in f.__doc__:
+                             print >>sys.stderr, "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__)
+                        error = 1
+                        continue
+
+                    if debug:
+                        print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state)
+
+                # Okay. The regular expression seemed okay.  Let's append it to the master regular
+                # expression we're building
+
+                regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
+            else:
+                print >>sys.stderr, "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)
+
+        # Now add all of the simple rules
+        for name,r in strsym[state]:
+            tokname = toknames[name]
+
+            if tokname == 'ignore':
+                 if "\\" in r:
+                      print >>sys.stderr, "lex: Warning. %s contains a literal backslash '\\'" % name
+                 ignore[state] = r
+                 continue
+
+            if not optimize:
+                if tokname == 'error':
+                    raise SyntaxError,"lex: Rule '%s' must be defined as a function" % name
+                    error = 1
+                    continue
+
+                if not lexobj.lextokens.has_key(tokname) and tokname.find("ignore_") < 0:
+                    print >>sys.stderr, "lex: Rule '%s' defined for an unspecified token %s." % (name,tokname)
+                    error = 1
+                    continue
+                try:
+                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | reflags)
+                    if (c.match("")):
+                         print >>sys.stderr, "lex: Regular expression for rule '%s' matches empty string." % name
+                         error = 1
+                         continue
+                except re.error,e:
+                    print >>sys.stderr, "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
+                    if '#' in r:
+                         print >>sys.stderr, "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
+
+                    error = 1
+                    continue
+                if debug:
+                    print "lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state)
+
+            regex_list.append("(?P<%s>%s)" % (name,r))
+
+        if not regex_list:
+             print >>sys.stderr, "lex: No rules defined for state '%s'" % state
+             error = 1
+
+        regexs[state] = regex_list
+
+
+    if not optimize:
+        for f in files.keys():
+           if not _validate_file(f):
+                error = 1
+
+    if error:
+        raise SyntaxError,"lex: Unable to build lexer."
+
+    # From this point forward, we're reasonably confident that we can build the lexer.
+    # No more errors will be generated, but there might be some warning messages.
+
+    # Build the master regular expressions
+
+    for state in regexs.keys():
+        lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,toknames)
+        lexobj.lexstatere[state] = lexre
+        lexobj.lexstateretext[state] = re_text
+        lexobj.lexstaterenames[state] = re_names
+        if debug:
+            for i in range(len(re_text)):
+                 print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])
+
+    # For inclusive states, we need to add the rules defined for the INITIAL state
+    for state,type in stateinfo.items():
+        if state != "INITIAL" and type == 'inclusive':
+             lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
+             lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
+             lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
+
+    lexobj.lexstateinfo = stateinfo
+    lexobj.lexre = lexobj.lexstatere["INITIAL"]
+    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
+
+    # Set up ignore variables
+    lexobj.lexstateignore = ignore
+    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
+
+    # Set up error functions
+    lexobj.lexstateerrorf = errorf
+    lexobj.lexerrorf = errorf.get("INITIAL",None)
+    if warn and not lexobj.lexerrorf:
+        print >>sys.stderr, "lex: Warning. no t_error rule is defined."
+
+    # Check state information for ignore and error rules
+    for s,stype in stateinfo.items():
+        if stype == 'exclusive':
+              if warn and not errorf.has_key(s):
+                   print >>sys.stderr, "lex: Warning. no error rule is defined for exclusive state '%s'" % s
+              if warn and not ignore.has_key(s) and lexobj.lexignore:
+                   print >>sys.stderr, "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
+        elif stype == 'inclusive':
+              if not errorf.has_key(s):
+                   errorf[s] = errorf.get("INITIAL",None)
+              if not ignore.has_key(s):
+                   ignore[s] = ignore.get("INITIAL","")
+
+
+    # Create global versions of the token() and input() functions
+    token = lexobj.token
+    input = lexobj.input
+    lexer = lexobj
+
+    # If in optimize mode, we write the lextab
+    if lextab and optimize:
+        lexobj.writetab(lextab,outputdir)
+
+    return lexobj
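+
+# A minimal usage sketch (the token names and rules below are illustrative, not
+# part of this module): a caller defines 'tokens' plus t_ rules in its own module
+# and then calls lex() to build a Lexer from that namespace.
+#
+#   tokens = ('NUMBER', 'PLUS')
+#   t_PLUS   = r'\+'
+#   t_ignore = ' \t'
+#   def t_NUMBER(t):
+#       r'\d+'
+#       t.value = int(t.value)
+#       return t
+#   def t_error(t):
+#       print "Illegal character %r" % t.value[0]
+#       t.lexer.skip(1)
+#
+#   lexer = lex()            # the function defined above
+#   lexer.input("1 + 2")
+#   while 1:
+#       tok = lexer.token()
+#       if not tok: break
+#       print tok.type, tok.value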
+
+# -----------------------------------------------------------------------------
+# runmain()
+#
+# This runs the lexer as a main program
+# -----------------------------------------------------------------------------
+
+def runmain(lexer=None,data=None):
+    if not data:
+        try:
+            filename = sys.argv[1]
+            f = open(filename)
+            data = f.read()
+            f.close()
+        except IndexError:
+            print "Reading from standard input (type EOF to end):"
+            data = sys.stdin.read()
+
+    if lexer:
+        _input = lexer.input
+    else:
+        _input = input
+    _input(data)
+    if lexer:
+        _token = lexer.token
+    else:
+        _token = token
+
+    while 1:
+        tok = _token()
+        if not tok: break
+        print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos)
+
+
+# -----------------------------------------------------------------------------
+# @TOKEN(regex)
+#
+# This decorator attaches a regular expression to a token function by setting
+# the function's docstring.  It is useful when the pattern is built dynamically
+# or kept in a separate variable rather than written as a literal docstring
+# -----------------------------------------------------------------------------
+
+def TOKEN(r):
+    def set_doc(f):
+        if callable(r):
+            f.__doc__ = r.__doc__
+        else:
+            f.__doc__ = r
+        return f
+    return set_doc
+
+# Alternative spelling of the TOKEN decorator
+Token = TOKEN
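+
+# Usage sketch (illustrative): build the pattern separately and attach it with
+# @TOKEN instead of writing it as the function's docstring.
+#
+#   identifier = r'[a-zA-Z_][a-zA-Z_0-9]*'
+#
+#   @TOKEN(identifier)
+#   def t_ID(t):
+#       return t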
+
new file mode 100644
--- /dev/null
+++ b/other-licenses/ply/ply/yacc.py
@@ -0,0 +1,2895 @@
+#-----------------------------------------------------------------------------
+# ply: yacc.py
+#
+# Author(s): David M. Beazley (dave@dabeaz.com)
+#
+# Copyright (C) 2001-2008, David M. Beazley
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#
+# See the file COPYING for a complete copy of the LGPL.
+#
+#
+# This implements an LR parser that is constructed from grammar rules defined
+# as Python functions. The grammar is specified by supplying the BNF inside
+# Python documentation strings.  The inspiration for this technique was borrowed
+# from John Aycock's Spark parsing system.  PLY might be viewed as a cross between
+# Spark and the GNU bison utility.
+#
+# The current implementation is only somewhat object-oriented. The
+# LR parser itself is defined in terms of an object (which allows multiple
+# parsers to co-exist).  However, most of the variables used during table
+# construction are defined in terms of global variables.  Users shouldn't
+# notice unless they are trying to define multiple parsers at the same
+# time using threads (in which case they should have their head examined).
+#
+# This implementation supports both SLR and LALR(1) parsing.  LALR(1)
+# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
+# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
+# Techniques, and Tools" (The Dragon Book).  LALR(1) has since been replaced
+# by the more efficient DeRemer and Pennello algorithm.
+#
+# :::::::: WARNING :::::::
+#
+# Construction of LR parsing tables is fairly complicated and expensive.
+# To make this module run fast, a *LOT* of work has been put into
+# optimization---often at the expense of readability and what might be
+# considered good Python "coding style."   Modify the code at your
+# own risk!
+# ----------------------------------------------------------------------------
+
+__version__    = "2.5"
+__tabversion__ = "2.4"       # Table version
+
+#-----------------------------------------------------------------------------
+#                     === User configurable parameters ===
+#
+# Change these to modify the default behavior of yacc (if you wish)
+#-----------------------------------------------------------------------------
+
+yaccdebug   = 1                # Debugging mode.  If set, yacc generates a
+                               # 'parser.out' file in the current directory
+
+debug_file  = 'parser.out'     # Default name of the debugging file
+tab_module  = 'parsetab'       # Default name of the table module
+default_lr  = 'LALR'           # Default LR table generation method
+
+error_count = 3                # Number of symbols that must be shifted to leave recovery mode
+
+yaccdevel   = 0                # Set to True if developing yacc.  This turns off optimized
+                               # implementations of certain functions.
+
+import re, types, sys, cStringIO, md5, os.path
+
+# Exception raised for yacc-related errors
+class YaccError(Exception):   pass
+
+# Exception raised for errors raised in production rules
+class SyntaxError(Exception): pass
+
+
+# Available instance types.  This is used when parsers are defined by a class.
+# it's a little funky because I want to preserve backwards compatibility
+# with Python 2.0 where types.ObjectType is undefined.
+
+try:
+    _INSTANCETYPE = (types.InstanceType, types.ObjectType)
+except AttributeError:
+    _INSTANCETYPE = types.InstanceType
+    class object: pass     # Note: needed if no new-style classes present
+
+#-----------------------------------------------------------------------------
+#                        ===  LR Parsing Engine ===
+#
+# The following classes are used for the LR parser itself.  These are not
+# used during table construction and are independent of the actual LR
+# table generation algorithm
+#-----------------------------------------------------------------------------
+
+# This class is used to hold non-terminal grammar symbols during parsing.
+# It normally has the following attributes set:
+#        .type       = Grammar symbol type
+#        .value      = Symbol value
+#        .lineno     = Starting line number
+#        .endlineno  = Ending line number (optional, set automatically)
+#        .lexpos     = Starting lex position
+#        .endlexpos  = Ending lex position (optional, set automatically)
+
+class YaccSymbol:
+    def __str__(self):    return self.type
+    def __repr__(self):   return str(self)
+
+# This class is a wrapper around the objects actually passed to each
+# grammar rule.   Index lookup and assignment actually assign the
+# .value attribute of the underlying YaccSymbol object.
+# The lineno() method returns the line number of a given
+# item (or 0 if not defined).   The linespan() method returns
+# a tuple of (startline,endline) representing the range of lines
+# for a symbol.  The lexspan() method returns a tuple (lexpos,endlexpos)
+# representing the range of positional information for a symbol.
+
+class YaccProduction:
+    def __init__(self,s,stack=None):
+        self.slice = s
+        self.stack = stack
+        self.lexer = None
+        self.parser= None
+    def __getitem__(self,n):
+        if n >= 0: return self.slice[n].value
+        else: return self.stack[n].value
+
+    def __setitem__(self,n,v):
+        self.slice[n].value = v
+
+    def __getslice__(self,i,j):
+        return [s.value for s in self.slice[i:j]]
+
+    def __len__(self):
+        return len(self.slice)
+
+    def lineno(self,n):
+        return getattr(self.slice[n],"lineno",0)
+
+    def linespan(self,n):
+        startline = getattr(self.slice[n],"lineno",0)
+        endline = getattr(self.slice[n],"endlineno",startline)
+        return startline,endline
+
+    def lexpos(self,n):
+        return getattr(self.slice[n],"lexpos",0)
+
+    def lexspan(self,n):
+        startpos = getattr(self.slice[n],"lexpos",0)
+        endpos = getattr(self.slice[n],"endlexpos",startpos)
+        return startpos,endpos
+
+    def error(self):
+       raise SyntaxError
+
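+# In a grammar rule, the YaccProduction instance is the single argument 'p'
+# (the rule below is an illustrative sketch, not part of this module):
+#
+#   def p_expression_plus(p):
+#       'expression : expression PLUS term'
+#       p[0] = p[1] + p[3]        # reads/writes the .value of each symbol
+#       line = p.lineno(2)        # line number of the PLUS token
+#       span = p.lexspan(1)       # (startpos,endpos) of the first 'expression'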
+
+# The LR Parsing engine.   This is defined as a class so that multiple parsers
+# can exist in the same process.  A user never instantiates this directly.
+# Instead, the global yacc() function should be used to create a suitable Parser
+# object.
+
+class Parser:
+    def __init__(self,magic=None):
+
+        # This is a hack to keep users from trying to instantiate a Parser
+        # object directly.
+
+        if magic != "xyzzy":
+            raise YaccError, "Can't directly instantiate Parser. Use yacc() instead."
+
+        # Reset internal state
+        self.productions = None          # List of productions
+        self.errorfunc   = None          # Error handling function
+        self.action      = { }           # LR Action table
+        self.goto        = { }           # LR goto table
+        self.require     = { }           # Attribute require table
+        self.method      = "Unknown LR"  # Table construction method used
+
+    def errok(self):
+        self.errorok     = 1
+
+    def restart(self):
+        del self.statestack[:]
+        del self.symstack[:]
+        sym = YaccSymbol()
+        sym.type = '$end'
+        self.symstack.append(sym)
+        self.statestack.append(0)
+
+    def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
+        if debug or yaccdevel:
+            return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
+        elif tracking:
+            return self.parseopt(input,lexer,debug,tracking,tokenfunc)
+        else:
+            return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
+        
+
+    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    # parsedebug().
+    #
+    # This is the debugging enabled version of parse().  All changes made to the
+    # parsing engine should be made here.   For the non-debugging version,
+    # copy this code to a method parseopt() and delete all of the sections
+    # enclosed in:
+    #
+    #      #--! DEBUG
+    #      statements
+    #      #--! DEBUG
+    #
+    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+    def parsedebug(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
+        lookahead = None                 # Current lookahead symbol
+        lookaheadstack = [ ]             # Stack of lookahead symbols
+        actions = self.action            # Local reference to action table (to avoid lookup on self.)
+        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
+        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
+        pslice  = YaccProduction(None)   # Production object passed to grammar rules
+        errorcount = 0                   # Used during error recovery 
+        endsym  = "$end"                 # End symbol
+        # If no lexer was given, we will try to use the lex module
+        if not lexer:
+            import lex
+            lexer = lex.lexer
+        
+        # Set up the lexer and parser objects on pslice
+        pslice.lexer = lexer
+        pslice.parser = self
+
+        # If input was supplied, pass to lexer
+        if input is not None:
+            lexer.input(input)
+
+        if tokenfunc is None:
+           # Tokenize function
+           get_token = lexer.token
+        else:
+           get_token = tokenfunc
+
+        # Set up the state and symbol stacks
+
+        statestack = [ ]                # Stack of parsing states
+        self.statestack = statestack
+        symstack   = [ ]                # Stack of grammar symbols
+        self.symstack = symstack
+
+        pslice.stack = symstack         # Put in the production
+        errtoken   = None               # Err token
+
+        # The start state is assumed to be (0,$end)
+
+        statestack.append(0)
+        sym = YaccSymbol()
+        sym.type = endsym
+        symstack.append(sym)
+        state = 0
+        while 1:
+            # Get the next symbol on the input.  If a lookahead symbol
+            # is already set, we just use that. Otherwise, we'll pull
+            # the next token off of the lookaheadstack or from the lexer
+
+            # --! DEBUG
+            if debug > 1:
+                print 'state', state
+            # --! DEBUG
+
+            if not lookahead:
+                if not lookaheadstack:
+                    lookahead = get_token()     # Get the next token
+                else:
+                    lookahead = lookaheadstack.pop()
+                if not lookahead:
+                    lookahead = YaccSymbol()
+                    lookahead.type = endsym
+
+            # --! DEBUG
+            if debug:
+                errorlead = ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()
+            # --! DEBUG
+
+            # Check the action table
+            ltype = lookahead.type
+            t = actions[state].get(ltype)
+
+            # --! DEBUG
+            if debug > 1:
+                print 'action', t
+            # --! DEBUG
+
+            if t is not None:
+                if t > 0:
+                    # shift a symbol on the stack
+                    if ltype is endsym:
+                        # Error, end of input
+                        sys.stderr.write("yacc: Parse error. EOF\n")
+                        return
+                    statestack.append(t)
+                    state = t
+                    
+                    # --! DEBUG
+                    if debug > 1:
+                        sys.stderr.write("%-60s shift state %s\n" % (errorlead, t))
+                    # --! DEBUG
+
+                    symstack.append(lookahead)
+                    lookahead = None
+
+                    # Decrease error count on successful shift
+                    if errorcount: errorcount -=1
+                    continue
+
+                if t < 0:
+                    # reduce a symbol on the stack, emit a production
+                    p = prod[-t]
+                    pname = p.name
+                    plen  = p.len
+
+                    # Get production function
+                    sym = YaccSymbol()
+                    sym.type = pname       # Production name
+                    sym.value = None
+
+                    # --! DEBUG
+                    if debug > 1:
+                        sys.stderr.write("%-60s reduce %d\n" % (errorlead, -t))
+                    # --! DEBUG
+
+                    if plen:
+                        targ = symstack[-plen-1:]
+                        targ[0] = sym
+
+                        # --! TRACKING
+                        if tracking:
+                           t1 = targ[1]
+                           sym.lineno = t1.lineno
+                           sym.lexpos = t1.lexpos
+                           t1 = targ[-1]
+                           sym.endlineno = getattr(t1,"endlineno",t1.lineno)
+                           sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
+
+                        # --! TRACKING
+
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+                        # The code enclosed in this section is duplicated 
+                        # below as a performance optimization.  Make sure
+                        # changes get made in both locations.
+
+                        pslice.slice = targ
+                        
+                        try:
+                            # Call the grammar rule with our special slice object
+                            p.func(pslice)
+                            del symstack[-plen:]
+                            del statestack[-plen:]
+                            symstack.append(sym)
+                            state = goto[statestack[-1]][pname]
+                            statestack.append(state)
+                        except SyntaxError:
+                            # If an error was set. Enter error recovery state
+                            lookaheadstack.append(lookahead)
+                            symstack.pop()
+                            statestack.pop()
+                            state = statestack[-1]
+                            sym.type = 'error'
+                            lookahead = sym
+                            errorcount = error_count
+                            self.errorok = 0
+                        continue
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    
+                    else:
+
+                        # --! TRACKING
+                        if tracking:
+                           sym.lineno = lexer.lineno
+                           sym.lexpos = lexer.lexpos
+                        # --! TRACKING
+
+                        targ = [ sym ]
+
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+                        # The code enclosed in this section is duplicated 
+                        # above as a performance optimization.  Make sure
+                        # changes get made in both locations.
+
+                        pslice.slice = targ
+
+                        try:
+                            # Call the grammar rule with our special slice object
+                            p.func(pslice)
+                            symstack.append(sym)
+                            state = goto[statestack[-1]][pname]
+                            statestack.append(state)
+                        except SyntaxError:
+                            # If an error was set. Enter error recovery state
+                            lookaheadstack.append(lookahead)
+                            symstack.pop()
+                            statestack.pop()
+                            state = statestack[-1]
+                            sym.type = 'error'
+                            lookahead = sym
+                            errorcount = error_count
+                            self.errorok = 0
+                        continue
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+                if t == 0:
+                    n = symstack[-1]
+                    return getattr(n,"value",None)
+
+            if t == None:
+
+                # --! DEBUG
+                if debug:
+                    sys.stderr.write(errorlead + "\n")
+                # --! DEBUG
+
+                # We have some kind of parsing error here.  To handle
+                # this, we are going to push the current token onto
+                # the tokenstack and replace it with an 'error' token.
+                # If there are any synchronization rules, they may
+                # catch it.
+                #
+                # In addition to pushing the error token, we call
+                # the user defined p_error() function if this is the
+                # first syntax error.  This function is only called if
+                # errorcount == 0.
+                if errorcount == 0 or self.errorok:
+                    errorcount = error_count
+                    self.errorok = 0
+                    errtoken = lookahead
+                    if errtoken.type is endsym:
+                        errtoken = None               # End of file!
+                    if self.errorfunc:
+                        global errok,token,restart
+                        errok = self.errok        # Set some special functions available in error recovery
+                        token = get_token
+                        restart = self.restart
+                        tok = self.errorfunc(errtoken)
+                        del errok, token, restart   # Delete special functions
+
+                        if self.errorok:
+                            # User must have done some kind of panic
+                            # mode recovery on their own.  The
+                            # returned token is the next lookahead
+                            lookahead = tok
+                            errtoken = None
+                            continue
+                    else:
+                        if errtoken:
+                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
+                            else: lineno = 0
+                            if lineno:
+                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
+                            else:
+                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
+                        else:
+                            sys.stderr.write("yacc: Parse error in input. EOF\n")
+                            return
+
+                else:
+                    errorcount = error_count
+
+                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
+                # entire parse has been rolled back and we're completely hosed.   The token is
+                # discarded and we just keep going.
+
+                if len(statestack) <= 1 and lookahead.type is not endsym:
+                    lookahead = None
+                    errtoken = None
+                    state = 0
+                    # Nuke the pushback stack
+                    del lookaheadstack[:]
+                    continue
+
+                # case 2: the statestack has a couple of entries on it, but we're
+                # at the end of the file. nuke the top entry and generate an error token
+
+                # Start nuking entries on the stack
+                if lookahead.type is endsym:
+                    # Whoa. We're really hosed here. Bail out
+                    return
+
+                if lookahead.type != 'error':
+                    sym = symstack[-1]
+                    if sym.type == 'error':
+                        # Hmmm. Error is on top of stack, we'll just nuke input
+                        # symbol and continue
+                        lookahead = None
+                        continue
+                    t = YaccSymbol()
+                    t.type = 'error'
+                    if hasattr(lookahead,"lineno"):
+                        t.lineno = lookahead.lineno
+                    t.value = lookahead
+                    lookaheadstack.append(lookahead)
+                    lookahead = t
+                else:
+                    symstack.pop()
+                    statestack.pop()
+                    state = statestack[-1]       # Potential bug fix
+
+                continue
+
+            # Call an error function here
+            raise RuntimeError, "yacc: internal parser error!!!\n"
+
+    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    # parseopt().
+    #
+    # Optimized version of parse() method.  DO NOT EDIT THIS CODE DIRECTLY.
+    # Edit the debug version above, then copy any modifications to the method
+    # below while removing #--! DEBUG sections.
+    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+
+    def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
+        lookahead = None                 # Current lookahead symbol
+        lookaheadstack = [ ]             # Stack of lookahead symbols
+        actions = self.action            # Local reference to action table (to avoid lookup on self.)
+        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
+        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
+        pslice  = YaccProduction(None)   # Production object passed to grammar rules
+        errorcount = 0                   # Used during error recovery 
+
+        # If no lexer was given, we will try to use the lex module
+        if not lexer:
+            import lex
+            lexer = lex.lexer
+        
+        # Set up the lexer and parser objects on pslice
+        pslice.lexer = lexer
+        pslice.parser = self
+
+        # If input was supplied, pass to lexer
+        if input is not None:
+            lexer.input(input)
+
+        if tokenfunc is None:
+           # Tokenize function
+           get_token = lexer.token
+        else:
+           get_token = tokenfunc
+
+        # Set up the state and symbol stacks
+
+        statestack = [ ]                # Stack of parsing states
+        self.statestack = statestack
+        symstack   = [ ]                # Stack of grammar symbols
+        self.symstack = symstack
+
+        pslice.stack = symstack         # Put in the production
+        errtoken   = None               # Err token
+
+        # The start state is assumed to be (0,$end)
+
+        statestack.append(0)
+        sym = YaccSymbol()
+        sym.type = '$end'
+        symstack.append(sym)
+        state = 0
+        while 1:
+            # Get the next symbol on the input.  If a lookahead symbol
+            # is already set, we just use that. Otherwise, we'll pull
+            # the next token off of the lookaheadstack or from the lexer
+
+            if not lookahead:
+                if not lookaheadstack:
+                    lookahead = get_token()     # Get the next token
+                else:
+                    lookahead = lookaheadstack.pop()
+                if not lookahead:
+                    lookahead = YaccSymbol()
+                    lookahead.type = '$end'
+
+            # Check the action table
+            ltype = lookahead.type
+            t = actions[state].get(ltype)
+
+            if t is not None:
+                if t > 0:
+                    # shift a symbol on the stack
+                    if ltype == '$end':
+                        # Error, end of input
+                        sys.stderr.write("yacc: Parse error. EOF\n")
+                        return
+                    statestack.append(t)
+                    state = t
+
+                    symstack.append(lookahead)
+                    lookahead = None
+
+                    # Decrease error count on successful shift
+                    if errorcount: errorcount -=1
+                    continue
+
+                if t < 0:
+                    # reduce a symbol on the stack, emit a production
+                    p = prod[-t]
+                    pname = p.name
+                    plen  = p.len
+
+                    # Get production function
+                    sym = YaccSymbol()
+                    sym.type = pname       # Production name
+                    sym.value = None
+
+                    if plen:
+                        targ = symstack[-plen-1:]
+                        targ[0] = sym
+
+                        # --! TRACKING
+                        if tracking:
+                           t1 = targ[1]
+                           sym.lineno = t1.lineno
+                           sym.lexpos = t1.lexpos
+                           t1 = targ[-1]
+                           sym.endlineno = getattr(t1,"endlineno",t1.lineno)
+                           sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
+
+                        # --! TRACKING
+
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+                        # The code enclosed in this section is duplicated 
+                        # below as a performance optimization.  Make sure
+                        # changes get made in both locations.
+
+                        pslice.slice = targ
+                        
+                        try:
+                            # Call the grammar rule with our special slice object
+                            p.func(pslice)
+                            del symstack[-plen:]
+                            del statestack[-plen:]
+                            symstack.append(sym)
+                            state = goto[statestack[-1]][pname]
+                            statestack.append(state)
+                        except SyntaxError:
+                            # If an error was set. Enter error recovery state
+                            lookaheadstack.append(lookahead)
+                            symstack.pop()
+                            statestack.pop()
+                            state = statestack[-1]
+                            sym.type = 'error'
+                            lookahead = sym
+                            errorcount = error_count
+                            self.errorok = 0
+                        continue
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    
+                    else:
+
+                        # --! TRACKING
+                        if tracking:
+                           sym.lineno = lexer.lineno
+                           sym.lexpos = lexer.lexpos
+                        # --! TRACKING
+
+                        targ = [ sym ]
+
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+                        # The code enclosed in this section is duplicated 
+                        # above as a performance optimization.  Make sure
+                        # changes get made in both locations.
+
+                        pslice.slice = targ
+
+                        try:
+                            # Call the grammar rule with our special slice object
+                            p.func(pslice)
+                            symstack.append(sym)
+                            state = goto[statestack[-1]][pname]
+                            statestack.append(state)
+                        except SyntaxError:
+                            # If an error was set. Enter error recovery state
+                            lookaheadstack.append(lookahead)
+                            symstack.pop()
+                            statestack.pop()
+                            state = statestack[-1]
+                            sym.type = 'error'
+                            lookahead = sym
+                            errorcount = error_count
+                            self.errorok = 0
+                        continue
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+                if t == 0:
+                    n = symstack[-1]
+                    return getattr(n,"value",None)
+
+            if t == None:
+
+                # We have some kind of parsing error here.  To handle
+                # this, we are going to push the current token onto
+                # the tokenstack and replace it with an 'error' token.
+                # If there are any synchronization rules, they may
+                # catch it.
+                #
+                # In addition to pushing the error token, we call
+                # the user defined p_error() function if this is the
+                # first syntax error.  This function is only called if
+                # errorcount == 0.
+                if errorcount == 0 or self.errorok:
+                    errorcount = error_count
+                    self.errorok = 0
+                    errtoken = lookahead
+                    if errtoken.type == '$end':
+                        errtoken = None               # End of file!
+                    if self.errorfunc:
+                        global errok,token,restart
+                        errok = self.errok        # Set some special functions available in error recovery
+                        token = get_token
+                        restart = self.restart
+                        tok = self.errorfunc(errtoken)
+                        del errok, token, restart   # Delete special functions
+
+                        if self.errorok:
+                            # User must have done some kind of panic
+                            # mode recovery on their own.  The
+                            # returned token is the next lookahead
+                            lookahead = tok
+                            errtoken = None
+                            continue
+                    else:
+                        if errtoken:
+                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
+                            else: lineno = 0
+                            if lineno:
+                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
+                            else:
+                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
+                        else:
+                            sys.stderr.write("yacc: Parse error in input. EOF\n")
+                            return
+
+                else:
+                    errorcount = error_count
+
+                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
+                # entire parse has been rolled back and we're completely hosed.   The token is
+                # discarded and we just keep going.
+
+                if len(statestack) <= 1 and lookahead.type != '$end':
+                    lookahead = None
+                    errtoken = None
+                    state = 0
+                    # Nuke the pushback stack
+                    del lookaheadstack[:]
+                    continue
+
+                # case 2: the statestack has a couple of entries on it, but we're
+                # at the end of the file. nuke the top entry and generate an error token
+
+                # Start nuking entries on the stack
+                if lookahead.type == '$end':
+                    # Whoa. We're really hosed here. Bail out
+                    return
+
+                if lookahead.type != 'error':
+                    sym = symstack[-1]
+                    if sym.type == 'error':
+                        # Hmmm. Error is on top of stack, we'll just nuke input
+                        # symbol and continue
+                        lookahead = None
+                        continue
+                    t = YaccSymbol()
+                    t.type = 'error'
+                    if hasattr(lookahead,"lineno"):
+                        t.lineno = lookahead.lineno
+                    t.value = lookahead
+                    lookaheadstack.append(lookahead)
+                    lookahead = t
+                else:
+                    symstack.pop()
+                    statestack.pop()
+                    state = statestack[-1]       # Potential bug fix
+
+                continue
+
+            # Call an error function here
+            raise RuntimeError, "yacc: internal parser error!!!\n"
+
+    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    # parseopt_notrack().
+    #
+    # Optimized version of parseopt() with line number tracking removed. 
+    # DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
+    # code in the #--! TRACKING sections
+    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+    def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
+        lookahead = None                 # Current lookahead symbol
+        lookaheadstack = [ ]             # Stack of lookahead symbols
+        actions = self.action            # Local reference to action table (to avoid lookup on self.)
+        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
+        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
+        pslice  = YaccProduction(None)   # Production object passed to grammar rules
+        errorcount = 0                   # Used during error recovery 
+
+        # If no lexer was given, we will try to use the lex module
+        if not lexer:
+            import lex
+            lexer = lex.lexer
+        
+        # Set up the lexer and parser objects on pslice
+        pslice.lexer = lexer
+        pslice.parser = self
+
+        # If input was supplied, pass to lexer
+        if input is not None:
+            lexer.input(input)
+
+        if tokenfunc is None:
+           # Tokenize function
+           get_token = lexer.token
+        else:
+           get_token = tokenfunc
+
+        # Set up the state and symbol stacks
+
+        statestack = [ ]                # Stack of parsing states
+        self.statestack = statestack
+        symstack   = [ ]                # Stack of grammar symbols
+        self.symstack = symstack
+
+        pslice.stack = symstack         # Put in the production
+        errtoken   = None               # Err token
+
+        # The start state is assumed to be (0,$end)
+
+        statestack.append(0)
+        sym = YaccSymbol()
+        sym.type = '$end'
+        symstack.append(sym)
+        state = 0
+        while 1:
+            # Get the next symbol on the input.  If a lookahead symbol
+            # is already set, we just use that. Otherwise, we'll pull
+            # the next token off of the lookaheadstack or from the lexer
+
+            if not lookahead:
+                if not lookaheadstack:
+                    lookahead = get_token()     # Get the next token
+                else:
+                    lookahead = lookaheadstack.pop()
+                if not lookahead:
+                    lookahead = YaccSymbol()
+                    lookahead.type = '$end'
+
+            # Check the action table
+            ltype = lookahead.type
+            t = actions[state].get(ltype)
+
+            if t is not None:
+                if t > 0:
+                    # shift a symbol on the stack
+                    if ltype == '$end':
+                        # Error, end of input
+                        sys.stderr.write("yacc: Parse error. EOF\n")
+                        return
+                    statestack.append(t)
+                    state = t
+
+                    symstack.append(lookahead)
+                    lookahead = None
+
+                    # Decrease error count on successful shift
+                    if errorcount: errorcount -=1
+                    continue
+
+                if t < 0:
+                    # reduce a symbol on the stack, emit a production
+                    p = prod[-t]
+                    pname = p.name
+                    plen  = p.len
+
+                    # Get production function
+                    sym = YaccSymbol()
+                    sym.type = pname       # Production name
+                    sym.value = None
+
+                    if plen:
+                        targ = symstack[-plen-1:]
+                        targ[0] = sym
+
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+                        # The code enclosed in this section is duplicated 
+                        # below as a performance optimization.  Make sure
+                        # changes get made in both locations.
+
+                        pslice.slice = targ
+                        
+                        try:
+                            # Call the grammar rule with our special slice object
+                            p.func(pslice)
+                            del symstack[-plen:]
+                            del statestack[-plen:]
+                            symstack.append(sym)
+                            state = goto[statestack[-1]][pname]
+                            statestack.append(state)
+                        except SyntaxError:
+                            # If an error was set. Enter error recovery state
+                            lookaheadstack.append(lookahead)
+                            symstack.pop()
+                            statestack.pop()
+                            state = statestack[-1]
+                            sym.type = 'error'
+                            lookahead = sym
+                            errorcount = error_count
+                            self.errorok = 0
+                        continue
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    
+                    else:
+
+                        targ = [ sym ]
+
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+                        # The code enclosed in this section is duplicated 
+                        # above as a performance optimization.  Make sure
+                        # changes get made in both locations.
+
+                        pslice.slice = targ
+
+                        try:
+                            # Call the grammar rule with our special slice object
+                            p.func(pslice)
+                            symstack.append(sym)
+                            state = goto[statestack[-1]][pname]
+                            statestack.append(state)
+                        except SyntaxError:
+                            # If an error was set. Enter error recovery state
+                            lookaheadstack.append(lookahead)
+                            symstack.pop()
+                            statestack.pop()
+                            state = statestack[-1]
+                            sym.type = 'error'
+                            lookahead = sym
+                            errorcount = error_count
+                            self.errorok = 0
+                        continue
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+                if t == 0:
+                    n = symstack[-1]
+                    return getattr(n,"value",None)
+
+            if t == None:
+
+                # We have some kind of parsing error here.  To handle
+                # this, we are going to push the current token onto
+                # the tokenstack and replace it with an 'error' token.
+                # If there are any synchronization rules, they may
+                # catch it.
+                #
+                # In addition to pushing the error token, we call
+                # the user defined p_error() function if this is the
+                # first syntax error.  This function is only called if
+                # errorcount == 0.
+                if errorcount == 0 or self.errorok:
+                    errorcount = error_count
+                    self.errorok = 0
+                    errtoken = lookahead
+                    if errtoken.type == '$end':
+                        errtoken = None               # End of file!
+                    if self.errorfunc:
+                        global errok,token,restart
+                        errok = self.errok        # Set some special functions available in error recovery
+                        token = get_token
+                        restart = self.restart
+                        tok = self.errorfunc(errtoken)
+                        del errok, token, restart   # Delete special functions
+
+                        if self.errorok:
+                            # User must have done some kind of panic
+                            # mode recovery on their own.  The
+                            # returned token is the next lookahead
+                            lookahead = tok
+                            errtoken = None
+                            continue
+                    else:
+                        if errtoken:
+                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
+                            else: lineno = 0
+                            if lineno:
+                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
+                            else:
+                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
+                        else:
+                            sys.stderr.write("yacc: Parse error in input. EOF\n")
+                            return
+
+                else:
+                    errorcount = error_count
+
+                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
+                # entire parse has been rolled back and we're completely hosed.   The token is
+                # discarded and we just keep going.
+
+                if len(statestack) <= 1 and lookahead.type != '$end':
+                    lookahead = None
+                    errtoken = None
+                    state = 0
+                    # Nuke the pushback stack
+                    del lookaheadstack[:]
+                    continue
+
+                # case 2: the statestack has a couple of entries on it, but we're
+                # at the end of the file. nuke the top entry and generate an error token
+
+                # Start nuking entries on the stack
+                if lookahead.type == '$end':
+                    # Whoa. We're really hosed here. Bail out
+                    return
+
+                if lookahead.type != 'error':
+                    sym = symstack[-1]
+                    if sym.type == 'error':
+                        # Hmmm. Error is on top of stack, we'll just nuke input
+                        # symbol and continue
+                        lookahead = None
+                        continue
+                    t = YaccSymbol()
+                    t.type = 'error'
+                    if hasattr(lookahead,"lineno"):
+                        t.lineno = lookahead.lineno
+                    t.value = lookahead
+                    lookaheadstack.append(lookahead)
+                    lookahead = t
+                else:
+                    symstack.pop()
+                    statestack.pop()
+                    state = statestack[-1]       # Potential bug fix
+
+                continue
+
+            # Call an error function here
+            raise RuntimeError, "yacc: internal parser error!!!\n"
+
+
+# -----------------------------------------------------------------------------
+#                          === Parser Construction ===
+#
+# The following functions and variables are used to implement the yacc() function
+# itself.   This is pretty hairy stuff involving lots of error checking,
+# construction of LR items, kernels, and so forth.   Although a lot of
+# this work is done using global variables, the resulting Parser object
+# is completely self contained--meaning that it is safe to repeatedly
+# call yacc() with different grammars in the same application.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# validate_file()
+#
+# This function checks to see if there are duplicated p_rulename() functions
+# in the parser module file.  Without this function, it is really easy for
+# users to make mistakes by cutting and pasting code fragments (and it's a real
+# bugger to try and figure out why the resulting parser doesn't work).  Therefore,
+# we just do a little regular expression pattern matching of def statements
+# to try and detect duplicates.
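+#
+# For example, if a grammar module pastes in two functions that are both named
+# p_expression, the second definition silently shadows the first; this check
+# reports the duplicate together with both line numbers.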
+# -----------------------------------------------------------------------------
+
+def validate_file(filename):
+    base,ext = os.path.splitext(filename)
+    if ext != '.py': return 1          # No idea. Assume it's okay.
+
+    try:
+        f = open(filename)
+        lines = f.readlines()
+        f.close()
+    except IOError:
+        return 1                       # Oh well
+
+    # Match def p_funcname(
+    fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
+    counthash = { }
+    linen = 1
+    noerror = 1
+    for l in lines:
+        m = fre.match(l)
+        if m:
+            name = m.group(1)
+            prev = counthash.get(name)
+            if not prev:
+                counthash[name] = linen
+            else:
+                sys.stderr.write("%s:%d: Function %s redefined. Previously defined on line %d\n" % (filename,linen,name,prev))
+                noerror = 0
+        linen += 1
+    return noerror
+
+# This function looks for functions that might be grammar rules, but which don't have the proper p_suffix.
+def validate_dict(d):
+    for n,v in d.items():
+        if n[0:2] == 'p_' and type(v) in (types.FunctionType, types.MethodType): continue
+        if n[0:2] == 't_': continue
+
+        if n[0:2] == 'p_':
+            sys.stderr.write("yacc: Warning. '%s' not defined as a function\n" % n)
+        if 1 and isinstance(v,types.FunctionType) and v.func_code.co_argcount == 1:
+            try:
+                doc = v.__doc__.split(" ")
+                if doc[1] == ':':
+                    sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (v.func_code.co_filename, v.func_code.co_firstlineno,n))
+            except StandardError:
+                pass
+
+# -----------------------------------------------------------------------------
+#                           === GRAMMAR FUNCTIONS ===
+#
+# The following global variables and functions are used to store, manipulate,
+# and verify the grammar rules specified by the user.
+# -----------------------------------------------------------------------------
+
+# Initialize all of the global variables used during grammar construction
+def initialize_vars():
+    global Productions, Prodnames, Prodmap, Terminals
+    global Nonterminals, First, Follow, Precedence, UsedPrecedence, LRitems
+    global Errorfunc, Signature, Requires
+
+    Productions  = [None]  # A list of all of the productions.  The first
+                           # entry is always reserved for the purpose of
+                           # building an augmented grammar
+
+    Prodnames    = { }     # A dictionary mapping the names of nonterminals to a list of all
+                           # productions of that nonterminal.
+
+    Prodmap      = { }     # A dictionary that is only used to detect duplicate
+                           # productions.
+
+    Terminals    = { }     # A dictionary mapping the names of terminal symbols to a
+                           # list of the rules where they are used.
+
+    Nonterminals = { }     # A dictionary mapping names of nonterminals to a list
+                           # of rule numbers where they are used.
+
+    First        = { }     # A dictionary of precomputed FIRST(x) symbols
+
+    Follow       = { }     # A dictionary of precomputed FOLLOW(x) symbols
+
+    Precedence   = { }     # Precedence rules for each terminal. Contains tuples of the
+                           # form ('right',level) or ('nonassoc', level) or ('left',level)
+
+    UsedPrecedence = { }   # Precedence rules that were actually used by the grammar.
+                           # This is only used to provide error checking and to generate
+                           # a warning about unused precedence rules.
+
+    LRitems      = [ ]     # A list of all LR items for the grammar.  These are the
+                           # productions with the "dot" like E -> E . PLUS E
+
+    Errorfunc    = None    # User defined error handler
+
+    Signature    = md5.new()   # Digital signature of the grammar rules, precedence
+                               # and other information.  Used to determine when a
+                               # parsing table needs to be regenerated.
+    
+    Signature.update(__tabversion__)
+
+    Requires     = { }     # Requires list
+
+    # File objects used when creating the parser.out debugging file
+    global _vf, _vfc
+    _vf           = cStringIO.StringIO()
+    _vfc          = cStringIO.StringIO()
+
+# -----------------------------------------------------------------------------
+# class Production:
+#
+# This class stores the raw information about a single production or grammar rule.
+# It has a few required attributes:
+#
+#       name     - Name of the production (nonterminal)
+#       prod     - A list of symbols making up its production
+#       number   - Production number.
+#
+# In addition, a few other attributes are used to help with debugging or
+# optimization of table generation.
+#
+#       file     - File where production action is defined.
+#       lineno   - Line number where action is defined
+#       func     - Action function
+#       prec     - Precedence level
+#       lr_next  - Next LR item. Example, if we are ' E -> E . PLUS E'
+#                  then lr_next refers to 'E -> E PLUS . E'
+#       lr_index - LR item index (location of the ".") in the prod list.
+#       lookaheads - LALR lookahead symbols for this item
+#       len      - Length of the production (number of symbols on right hand side)
+# -----------------------------------------------------------------------------
+
+class Production:
+    def __init__(self,**kw):
+        for k,v in kw.items():
+            setattr(self,k,v)
+        self.lr_index = -1
+        self.lr0_added = 0    # Flag indicating whether or not added to LR0 closure
+        self.lr1_added = 0    # Flag indicating whether or not added to LR1
+        self.usyms = [ ]
+        self.lookaheads = { }
+        self.lk_added = { }
+        self.setnumbers = [ ]
+
+    def __str__(self):
+        if self.prod:
+            s = "%s -> %s" % (self.name," ".join(self.prod))
+        else:
+            s = "%s -> <empty>" % self.name
+        return s
+
+    def __repr__(self):
+        return str(self)
+
+    # Compute lr_items from the production
+    def lr_item(self,n):
+        if n > len(self.prod): return None
+        p = Production()
+        p.name = self.name
+        p.prod = list(self.prod)
+        p.number = self.number
+        p.lr_index = n
+        p.lookaheads = { }
+        p.setnumbers = self.setnumbers
+        p.prod.insert(n,".")
+        p.prod = tuple(p.prod)
+        p.len = len(p.prod)
+        p.usyms = self.usyms
+
+        # Precompute list of productions immediately following
+        try:
+            p.lrafter = Prodnames[p.prod[n+1]]
+        except (IndexError,KeyError),e:
+            p.lrafter = []
+        try:
+            p.lrbefore = p.prod[n-1]
+        except IndexError:
+            p.lrbefore = None
+
+        return p
+
+class MiniProduction:
+    pass
+
+# regex matching identifiers
+_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
+
+# -----------------------------------------------------------------------------
+# add_production()
+#
+# Given an action function, this function assembles a production rule.
+# The production rule is assumed to be found in the function's docstring.
+# This rule has the general syntax:
+#
+#              name1 ::= production1
+#                     |  production2
+#                     |  production3
+#                    ...
+#                     |  productionn
+#              name2 ::= production1
+#                     |  production2
+#                    ...
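+#
+# For illustration, a rule function in a user's grammar module might look like
+# this (the token names NUMBER and PLUS and the rule names are hypothetical):
+#
+#     def p_expression(p):
+#         '''expression : expression PLUS term
+#                       | term'''
+#         p[0] = ...
+#
+# add_production() is then called once for each alternative found in the
+# docstring.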
+# -----------------------------------------------------------------------------
+
+def add_production(f,file,line,prodname,syms):
+
+    if Terminals.has_key(prodname):
+        sys.stderr.write("%s:%d: Illegal rule name '%s'. Already defined as a token.\n" % (file,line,prodname))
+        return -1
+    if prodname == 'error':
+        sys.stderr.write("%s:%d: Illegal rule name '%s'. error is a reserved word.\n" % (file,line,prodname))
+        return -1
+
+    if not _is_identifier.match(prodname):
+        sys.stderr.write("%s:%d: Illegal rule name '%s'\n" % (file,line,prodname))
+        return -1
+
+    for x in range(len(syms)):
+        s = syms[x]
+        if s[0] in "'\"":
+             try:
+                 c = eval(s)
+                 if (len(c) > 1):
+                      sys.stderr.write("%s:%d: Literal token %s in rule '%s' may only be a single character\n" % (file,line,s, prodname))
+                      return -1
+                 if not Terminals.has_key(c):
+                      Terminals[c] = []
+                 syms[x] = c
+                 continue
+             except SyntaxError:
+                 pass
+        if not _is_identifier.match(s) and s != '%prec':
+            sys.stderr.write("%s:%d: Illegal name '%s' in rule '%s'\n" % (file,line,s, prodname))
+            return -1
+
+    # See if the rule is already in the rulemap
+    map = "%s -> %s" % (prodname,syms)
+    if Prodmap.has_key(map):
+        m = Prodmap[map]
+        sys.stderr.write("%s:%d: Duplicate rule %s.\n" % (file,line, m))
+        sys.stderr.write("%s:%d: Previous definition at %s:%d\n" % (file,line, m.file, m.line))
+        return -1
+
+    p = Production()
+    p.name = prodname
+    p.prod = syms
+    p.file = file
+    p.line = line
+    p.func = f
+    p.number = len(Productions)
+
+
+    Productions.append(p)
+    Prodmap[map] = p
+    if not Nonterminals.has_key(prodname):
+        Nonterminals[prodname] = [ ]
+
+    # Add all terminals to Terminals
+    i = 0
+    while i < len(p.prod):
+        t = p.prod[i]
+        if t == '%prec':
+            try:
+                precname = p.prod[i+1]
+            except IndexError:
+                sys.stderr.write("%s:%d: Syntax error. Nothing follows %%prec.\n" % (p.file,p.line))
+                return -1
+
+            prec = Precedence.get(precname,None)
+            if not prec:
+                sys.stderr.write("%s:%d: Nothing known about the precedence of '%s'\n" % (p.file,p.line,precname))
+                return -1
+            else:
+                p.prec = prec
+                UsedPrecedence[precname] = 1
+            del p.prod[i]
+            del p.prod[i]
+            continue
+
+        if Terminals.has_key(t):
+            Terminals[t].append(p.number)
+            # Is a terminal.  We'll assign a precedence to p based on this
+            if not hasattr(p,"prec"):
+                p.prec = Precedence.get(t,('right',0))
+        else:
+            if not Nonterminals.has_key(t):
+                Nonterminals[t] = [ ]
+            Nonterminals[t].append(p.number)
+        i += 1
+
+    if not hasattr(p,"prec"):
+        p.prec = ('right',0)
+
+    # Set final length of productions
+    p.len  = len(p.prod)
+    p.prod = tuple(p.prod)
+
+    # Calculate unique syms in the production
+    p.usyms = [ ]
+    for s in p.prod:
+        if s not in p.usyms:
+            p.usyms.append(s)
+
+    # Add to the global productions list
+    try:
+        Prodnames[p.name].append(p)
+    except KeyError:
+        Prodnames[p.name] = [ p ]
+    return 0
+
+# Given a raw rule function, this function rips out its doc string
+# and adds rules to the grammar
+
+def add_function(f):
+    line = f.func_code.co_firstlineno
+    file = f.func_code.co_filename
+    error = 0
+
+    if isinstance(f,types.MethodType):
+        reqdargs = 2
+    else:
+        reqdargs = 1
+
+    if f.func_code.co_argcount > reqdargs:
+        sys.stderr.write("%s:%d: Rule '%s' has too many arguments.\n" % (file,line,f.__name__))
+        return -1
+
+    if f.func_code.co_argcount < reqdargs:
+        sys.stderr.write("%s:%d: Rule '%s' requires an argument.\n" % (file,line,f.__name__))
+        return -1
+
+    if f.__doc__:
+        # Split the doc string into lines
+        pstrings = f.__doc__.splitlines()
+        lastp = None
+        dline = line
+        for ps in pstrings:
+            dline += 1
+            p = ps.split()
+            if not p: continue
+            try:
+                if p[0] == '|':
+                    # This is a continuation of a previous rule
+                    if not lastp:
+                        sys.stderr.write("%s:%d: Misplaced '|'.\n" % (file,dline))
+                        return -1
+                    prodname = lastp
+                    if len(p) > 1:
+                        syms = p[1:]
+                    else:
+                        syms = [ ]
+                else:
+                    prodname = p[0]
+                    lastp = prodname
+                    assign = p[1]
+                    if len(p) > 2:
+                        syms = p[2:]
+                    else:
+                        syms = [ ]
+                    if assign != ':' and assign != '::=':
+                        sys.stderr.write("%s:%d: Syntax error. Expected ':'\n" % (file,dline))
+                        return -1
+
+
+                e = add_production(f,file,dline,prodname,syms)
+                error += e
+
+
+            except StandardError:
+                sys.stderr.write("%s:%d: Syntax error in rule '%s'\n" % (file,dline,ps))
+                error -= 1
+    else:
+        sys.stderr.write("%s:%d: No documentation string specified in function '%s'\n" % (file,line,f.__name__))
+    return error
+
+
+# Cycle checking code (Michael Dyck)
+
+def compute_reachable():
+    '''
+    Find each symbol that can be reached from the start symbol.
+    Print a warning for any nonterminals that can't be reached.
+    (Unused terminals have already had their warning.)
+    '''
+    Reachable = { }
+    for s in Terminals.keys() + Nonterminals.keys():
+        Reachable[s] = 0
+
+    mark_reachable_from( Productions[0].prod[0], Reachable )
+
+    for s in Nonterminals.keys():
+        if not Reachable[s]:
+            sys.stderr.write("yacc: Symbol '%s' is unreachable.\n" % s)
+
+def mark_reachable_from(s, Reachable):
+    '''
+    Mark all symbols that are reachable from symbol s.
+    '''
+    if Reachable[s]:
+        # We've already reached symbol s.
+        return
+    Reachable[s] = 1
+    for p in Prodnames.get(s,[]):
+        for r in p.prod:
+            mark_reachable_from(r, Reachable)
+
+# -----------------------------------------------------------------------------
+# compute_terminates()
+#
+# This function looks at the various parsing rules and tries to detect
+# infinite recursion cycles (grammar rules where there is no possible way
+# to derive a string of only terminals).
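+#
+# For example, a hypothetical rule such as  a : a PLUS a  with no alternative
+# that bottoms out in terminals can never derive a string of terminals and is
+# reported here as infinite recursion.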
+# -----------------------------------------------------------------------------
+def compute_terminates():
+    '''
+    Raise an error for any symbols that don't terminate.
+    '''
+    Terminates = {}
+
+    # Terminals:
+    for t in Terminals.keys():
+        Terminates[t] = 1
+
+    Terminates['$end'] = 1
+
+    # Nonterminals:
+
+    # Initialize to false:
+    for n in Nonterminals.keys():
+        Terminates[n] = 0
+
+    # Then propagate termination until no change:
+    while 1:
+        some_change = 0
+        for (n,pl) in Prodnames.items():
+            # Nonterminal n terminates iff any of its productions terminates.
+            for p in pl:
+                # Production p terminates iff all of its rhs symbols terminate.
+                for s in p.prod:
+                    if not Terminates[s]:
+                        # The symbol s does not terminate,
+                        # so production p does not terminate.
+                        p_terminates = 0
+                        break
+                else:
+                    # didn't break from the loop,
+                    # so every symbol s terminates
+                    # so production p terminates.
+                    p_terminates = 1
+
+                if p_terminates:
+                    # symbol n terminates!
+                    if not Terminates[n]:
+                        Terminates[n] = 1
+                        some_change = 1
+                    # Don't need to consider any more productions for this n.
+                    break
+
+        if not some_change:
+            break
+
+    some_error = 0
+    for (s,terminates) in Terminates.items():
+        if not terminates:
+            if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
+                # s is used-but-not-defined, and we've already warned of that,
+                # so it would be overkill to say that it's also non-terminating.
+                pass
+            else:
+                sys.stderr.write("yacc: Infinite recursion detected for symbol '%s'.\n" % s)
+                some_error = 1
+
+    return some_error
+
+# -----------------------------------------------------------------------------
+# verify_productions()
+#
+# This function examines all of the supplied rules to see if they seem valid.
+# -----------------------------------------------------------------------------
+def verify_productions(cycle_check=1):
+    error = 0
+    for p in Productions:
+        if not p: continue
+
+        for s in p.prod:
+            if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
+                sys.stderr.write("%s:%d: Symbol '%s' used, but not defined as a token or a rule.\n" % (p.file,p.line,s))
+                error = 1
+                continue
+
+    unused_tok = 0
+    # Now verify all of the tokens
+    if yaccdebug:
+        _vf.write("Unused terminals:\n\n")
+    for s,v in Terminals.items():
+        if s != 'error' and not v:
+            sys.stderr.write("yacc: Warning. Token '%s' defined, but not used.\n" % s)
+            if yaccdebug: _vf.write("   %s\n"% s)
+            unused_tok += 1
+
+    # Print out all of the productions
+    if yaccdebug:
+        _vf.write("\nGrammar\n\n")
+        for i in range(1,len(Productions)):
+            _vf.write("Rule %-5d %s\n" % (i, Productions[i]))
+
+    unused_prod = 0
+    # Verify the use of all productions
+    for s,v in Nonterminals.items():
+        if not v:
+            p = Prodnames[s][0]
+            sys.stderr.write("%s:%d: Warning. Rule '%s' defined, but not used.\n" % (p.file,p.line, s))
+            unused_prod += 1
+
+
+    if unused_tok == 1:
+        sys.stderr.write("yacc: Warning. There is 1 unused token.\n")
+    if unused_tok > 1:
+        sys.stderr.write("yacc: Warning. There are %d unused tokens.\n" % unused_tok)
+
+    if unused_prod == 1:
+        sys.stderr.write("yacc: Warning. There is 1 unused rule.\n")
+    if unused_prod > 1:
+        sys.stderr.write("yacc: Warning. There are %d unused rules.\n" % unused_prod)
+
+    if yaccdebug:
+        _vf.write("\nTerminals, with rules where they appear\n\n")
+        ks = Terminals.keys()
+        ks.sort()
+        for k in ks:
+            _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Terminals[k]])))
+        _vf.write("\nNonterminals, with rules where they appear\n\n")
+        ks = Nonterminals.keys()
+        ks.sort()
+        for k in ks:
+            _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Nonterminals[k]])))
+
+    if (cycle_check):
+        compute_reachable()
+        error += compute_terminates()
+#        error += check_cycles()
+    return error
+
+# -----------------------------------------------------------------------------
+# build_lritems()
+#
+# This function walks the list of productions and builds a complete set of the
+# LR items.  The LR items are stored in two ways:  First, they are uniquely
+# numbered and placed in the list LRitems.  Second, a linked list of LR items
+# is built for each production.  For example:
+#
+#   E -> E PLUS E
+#
+# Creates the list
+#
+#  [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
+# -----------------------------------------------------------------------------
+
+def build_lritems():
+    for p in Productions:
+        lastlri = p
+        lri = p.lr_item(0)
+        i = 0
+        while 1:
+            lri = p.lr_item(i)
+            lastlri.lr_next = lri
+            if not lri: break
+            lri.lr_num = len(LRitems)
+            LRitems.append(lri)
+            lastlri = lri
+            i += 1
+
+    # In order for the rest of the parser generator to work, we need to
+    # guarantee that no more lritems are generated.  Therefore, we nuke
+    # the p.lr_item method.  (Only used in debugging)
+    # Production.lr_item = None
+
+# -----------------------------------------------------------------------------
+# add_precedence()
+#
+# Given a list of precedence rules, add to the precedence table.
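+#
+# For illustration, the plist passed in here comes from a user module's
+# 'precedence' specification, e.g. (the token names are hypothetical):
+#
+#     precedence = (
+#         ('left',  'PLUS', 'MINUS'),
+#         ('left',  'TIMES', 'DIVIDE'),
+#         ('right', 'UMINUS'),
+#     )
+#
+# which produces Precedence['PLUS'] == ('left', 1),
+# Precedence['TIMES'] == ('left', 2) and Precedence['UMINUS'] == ('right', 3).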
+# -----------------------------------------------------------------------------
+
+def add_precedence(plist):
+    plevel = 0
+    error = 0
+    for p in plist:
+        plevel += 1
+        try:
+            prec = p[0]
+            terms = p[1:]
+            if prec != 'left' and prec != 'right' and prec != 'nonassoc':
+                sys.stderr.write("yacc: Invalid precedence '%s'\n" % prec)
+                return -1
+            for t in terms:
+                if Precedence.has_key(t):
+                    sys.stderr.write("yacc: Precedence already specified for terminal '%s'\n" % t)
+                    error += 1
+                    continue
+                Precedence[t] = (prec,plevel)
+        except:
+            sys.stderr.write("yacc: Invalid precedence table.\n")
+            error += 1
+
+    return error
+
+# -----------------------------------------------------------------------------
+# check_precedence()
+#
+# Checks the use of the Precedence tables.  This makes sure all of the symbols
+# are terminals or were used with %prec
+# -----------------------------------------------------------------------------
+
+def check_precedence():
+    error = 0
+    for precname in Precedence.keys():
+        if not (Terminals.has_key(precname) or UsedPrecedence.has_key(precname)):
+            sys.stderr.write("yacc: Precedence rule '%s' defined for unknown symbol '%s'\n" % (Precedence[precname][0],precname))
+            error += 1
+    return error
+
+# -----------------------------------------------------------------------------
+# augment_grammar()
+#
+# Compute the augmented grammar.  This is just a rule S' -> start where start
+# is the starting symbol.
+# -----------------------------------------------------------------------------
+
+def augment_grammar(start=None):
+    if not start:
+        start = Productions[1].name
+    Productions[0] = Production(name="S'",prod=[start],number=0,len=1,prec=('right',0),func=None)
+    Productions[0].usyms = [ start ]
+    Nonterminals[start].append(0)
+
+
+# -------------------------------------------------------------------------
+# first()
+#
+# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
+#
+# During execution of compute_first1, the result may be incomplete.
+# Afterward (e.g., when called from compute_follow()), it will be complete.
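+#
+# For illustration, given the hypothetical productions
+#     E -> T Etail        Etail -> PLUS T Etail | <empty>        T -> NUMBER
+# First[T] contains only 'NUMBER' and First[Etail] contains 'PLUS' and
+# '<empty>', so first(('Etail','T')) returns a list containing 'PLUS' and
+# 'NUMBER': Etail can derive empty, so the FIRST set of the following
+# symbol T is included as well.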
+# -------------------------------------------------------------------------
+def first(beta):
+
+    # We are computing First(x1,x2,x3,...,xn)
+    result = [ ]
+    for x in beta:
+        x_produces_empty = 0
+
+        # Add all the non-<empty> symbols of First[x] to the result.
+        for f in First[x]:
+            if f == '<empty>':
+                x_produces_empty = 1
+            else:
+                if f not in result: result.append(f)
+
+        if x_produces_empty:
+            # We have to consider the next x in beta,
+            # i.e. stay in the loop.
+            pass
+        else:
+            # We don't have to consider any further symbols in beta.
+            break
+    else:
+        # There was no 'break' from the loop,
+        # so x_produces_empty was true for all x in beta,
+        # so beta produces empty as well.
+        result.append('<empty>')
+
+    return result
+
+
+# FOLLOW(x)
+# Given a non-terminal, this function computes the set of all symbols
+# that might follow it.  Dragon book, p. 189.
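+#
+# For illustration, with the hypothetical grammar
+#     E -> T Etail        Etail -> PLUS T Etail | <empty>        T -> NUMBER
+# and start symbol E, the computed sets are Follow[E] = {'$end'},
+# Follow[Etail] = {'$end'} and Follow[T] = {'PLUS', '$end'} (stored as lists).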
+
+def compute_follow(start=None):
+    # Add '$end' to the follow list of the start symbol
+    for k in Nonterminals.keys():
+        Follow[k] = [ ]
+
+    if not start:
+        start = Productions[1].name
+
+    Follow[start] = [ '$end' ]
+
+    while 1:
+        didadd = 0
+        for p in Productions[1:]:
+            # Here is the production set
+            for i in range(len(p.prod)):
+                B = p.prod[i]
+                if Nonterminals.has_key(B):
+                    # Okay. We got a non-terminal in a production
+                    fst = first(p.prod[i+1:])
+                    hasempty = 0
+                    for f in fst:
+                        if f != '<empty>' and f not in Follow[B]:
+                            Follow[B].append(f)
+                            didadd = 1
+                        if f == '<empty>':
+                            hasempty = 1
+                    if hasempty or i == (len(p.prod)-1):
+                        # Add elements of follow(a) to follow(b)
+                        for f in Follow[p.name]:
+                            if f not in Follow[B]:
+                                Follow[B].append(f)
+                                didadd = 1
+        if not didadd: break
+
+    if 0 and yaccdebug:
+        _vf.write('\nFollow:\n')
+        for k in Nonterminals.keys():
+            _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Follow[k]])))
+
+# -------------------------------------------------------------------------
+# compute_first1()
+#
+# Compute the value of FIRST1(X) for all symbols
+# -------------------------------------------------------------------------
+def compute_first1():
+
+    # Terminals:
+    for t in Terminals.keys():
+        First[t] = [t]
+
+    First['$end'] = ['$end']
+    First['#'] = ['#'] # what's this for?
+
+    # Nonterminals:
+
+    # Initialize to the empty set:
+    for n in Nonterminals.keys():
+        First[n] = []
+
+    # Then propagate symbols until no change:
+    while 1:
+        some_change = 0
+        for n in Nonterminals.keys():
+            for p in Prodnames[n]:
+                for f in first(p.prod):
+                    if f not in First[n]:
+                        First[n].append( f )
+                        some_change = 1
+        if not some_change:
+            break
+
+    if 0 and yaccdebug:
+        _vf.write('\nFirst:\n')
+        for k in Nonterminals.keys():
+            _vf.write("%-20s : %s\n" %
+                (k, " ".join([str(s) for s in First[k]])))
+
+# -----------------------------------------------------------------------------
+#                           === SLR Generation ===
+#
+# The following functions are used to construct SLR (Simple LR) parsing tables
+# as described on p.221-229 of the dragon book.
+# -----------------------------------------------------------------------------
+
+# Global variables for the LR parsing engine
+def lr_init_vars():
+    global _lr_action, _lr_goto, _lr_method
+    global _lr_goto_cache, _lr0_cidhash
+
+    _lr_action       = { }        # Action table
+    _lr_goto         = { }        # Goto table
+    _lr_method       = "Unknown"  # LR method used
+    _lr_goto_cache   = { }
+    _lr0_cidhash     = { }
+
+
+# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
+# prodlist is a list of productions.
+
+_add_count = 0       # Counter used to detect cycles
+
+def lr0_closure(I):
+    global _add_count
+
+    _add_count += 1
+    prodlist = Productions
+
+    # Add everything in I to J
+    J = I[:]
+    didadd = 1
+    while didadd:
+        didadd = 0
+        for j in J:
+            for x in j.lrafter:
+                if x.lr0_added == _add_count: continue
+                # Add B --> .G to J
+                J.append(x.lr_next)
+                x.lr0_added = _add_count
+                didadd = 1
+
+    return J
+
+# Compute the LR(0) goto function goto(I,X) where I is a set
+# of LR(0) items and X is a grammar symbol.   This function is written
+# in a way that guarantees uniqueness of the generated goto sets
+# (i.e. the same goto set will never be returned as two different Python
+# objects).  With uniqueness, we can later do fast set comparisons using
+# id(obj) instead of element-wise comparison.
+
+def lr0_goto(I,x):
+    # First we look for a previously cached entry
+    g = _lr_goto_cache.get((id(I),x),None)
+    if g: return g
+
+    # Now we generate the goto set in a way that guarantees uniqueness
+    # of the result
+
+    s = _lr_goto_cache.get(x,None)
+    if not s:
+        s = { }
+        _lr_goto_cache[x] = s
+
+    gs = [ ]
+    for p in I:
+        n = p.lr_next
+        if n and n.lrbefore == x:
+            s1 = s.get(id(n),None)
+            if not s1:
+                s1 = { }
+                s[id(n)] = s1
+            gs.append(n)
+            s = s1
+    g = s.get('$end',None)
+    if not g:
+        if gs:
+            g = lr0_closure(gs)
+            s['$end'] = g
+        else:
+            s['$end'] = gs
+    _lr_goto_cache[(id(I),x)] = g
+    return g
+
+_lr0_cidhash = { }
+
+# Compute the LR(0) sets of item function
+def lr0_items():
+
+    C = [ lr0_closure([Productions[0].lr_next]) ]
+    i = 0
+    for I in C:
+        _lr0_cidhash[id(I)] = i
+        i += 1
+
+    # Loop over the items in C and each grammar symbol
+    i = 0
+    while i < len(C):
+        I = C[i]
+        i += 1
+
+        # Collect all of the symbols that could possibly be in the goto(I,X) sets
+        asyms = { }
+        for ii in I:
+            for s in ii.usyms:
+                asyms[s] = None
+
+        for x in asyms.keys():
+            g = lr0_goto(I,x)
+            if not g:  continue
+            if _lr0_cidhash.has_key(id(g)): continue
+            _lr0_cidhash[id(g)] = len(C)
+            C.append(g)
+
+    return C
+
+# -----------------------------------------------------------------------------
+#                       ==== LALR(1) Parsing ====
+#
+# LALR(1) parsing is almost exactly the same as SLR except that instead of
+# relying upon Follow() sets when performing reductions, a more selective
+# lookahead set that incorporates the state of the LR(0) machine is utilized.
+# Thus, we mainly just have to focus on calculating the lookahead sets.
+#
+# The method used here is due to DeRemer and Pennello (1982).
+#
+# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
+#     Lookahead Sets", ACM Transactions on Programming Languages and Systems,
+#     Vol. 4, No. 4, Oct. 1982, pp. 615-649
+#
+# Further details can also be found in:
+#
+#  J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
+#      McGraw-Hill Book Company, (1985).
+#
+# Note:  This implementation is a complete replacement of the LALR(1)
+#        implementation in PLY-1.x releases.   That version was based on
+#        a less efficient algorithm and it had bugs in its implementation.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# compute_nullable_nonterminals()
+#
+# Creates a dictionary containing all of the non-terminals that can derive
+# the empty string (i.e. the nullable non-terminals).
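+#
+# For example, with the hypothetical productions  a : <empty>  and  b : a a,
+# both 'a' and 'b' end up in the returned dictionary, while a rule such as
+# t : NUMBER  does not.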
+# -----------------------------------------------------------------------------
+
+def compute_nullable_nonterminals():
+    nullable = {}
+    num_nullable = 0
+    while 1:
+       for p in Productions[1:]:
+           if p.len == 0:
+                nullable[p.name] = 1
+                continue
+           for t in p.prod:
+                if not nullable.has_key(t): break
+           else:
+                nullable[p.name] = 1
+       if len(nullable) == num_nullable: break
+       num_nullable = len(nullable)
+    return nullable
+
+# -----------------------------------------------------------------------------
+# find_nonterminal_transitions(C)
+#
+# Given a set of LR(0) items, this function finds all of the non-terminal
+# transitions.  These are transitions in which a dot appears immediately before
+# a non-terminal.   Returns a list of tuples of the form (state,N) where state
+# is the state number and N is the nonterminal symbol.
+#
+# The input C is the set of LR(0) items.
+# -----------------------------------------------------------------------------
+
+def find_nonterminal_transitions(C):
+     trans = []
+     for state in range(len(C)):
+         for p in C[state]:
+             if p.lr_index < p.len - 1:
+                  t = (state,p.prod[p.lr_index+1])
+                  if Nonterminals.has_key(t[1]):
+                        if t not in trans: trans.append(t)
+         state = state + 1
+     return trans
+
+# -----------------------------------------------------------------------------
+# dr_relation()
+#
+# Computes the DR(p,A) relationships for non-terminal transitions.  The input
+# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
+#
+# Returns a list of terminals.
+# -----------------------------------------------------------------------------
+
+def dr_relation(C,trans,nullable):
+    dr_set = { }
+    state,N = trans
+    terms = []
+
+    g = lr0_goto(C[state],N)
+    for p in g:
+       if p.lr_index < p.len - 1:
+           a = p.prod[p.lr_index+1]
+           if Terminals.has_key(a):
+               if a not in terms: terms.append(a)
+
+    # This extra bit is to handle the start state
+    if state == 0 and N == Productions[0].prod[0]:
+       terms.append('$end')
+
+    return terms
+
+# -----------------------------------------------------------------------------
+# reads_relation()
+#
+# Computes the READS() relation (p,A) READS (t,C).
+# -----------------------------------------------------------------------------
+
+def reads_relation(C, trans, empty):
+    # Look for empty transitions
+    rel = []
+    state, N = trans
+
+    g = lr0_goto(C[state],N)
+    j = _lr0_cidhash.get(id(g),-1)
+    for p in g:
+        if p.lr_index < p.len - 1:
+             a = p.prod[p.lr_index + 1]
+             if empty.has_key(a):
+                  rel.append((j,a))
+
+    return rel
+
+# -----------------------------------------------------------------------------
+# compute_lookback_includes()
+#
+# Determines the lookback and includes relations
+#
+# LOOKBACK:
+#
+# This relation is determined by running the LR(0) state machine forward.
+# For example, starting with a production "N : . A B C", we run it forward
+# to obtain "N : A B C ."   We then build a relationship between this final
+# state and the starting state.   These relationships are stored in a dictionary
+# lookdict.
+#
+# INCLUDES:
+#
+# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
+#
+# This relation is used to determine non-terminal transitions that occur
+# inside of other non-terminal transition states.   (p,A) INCLUDES (p', B)
+# if the following holds:
+#
+#       B -> L A T,  where T -> epsilon and p' -L-> p
+#
+# L is essentially a prefix (which may be empty), T is a suffix that must be
+# able to derive an empty string.  State p' must lead to state p with the string L.
+#
+# -----------------------------------------------------------------------------
+
+def compute_lookback_includes(C,trans,nullable):
+
+    lookdict = {}          # Dictionary of lookback relations
+    includedict = {}       # Dictionary of include relations
+
+    # Make a dictionary of non-terminal transitions
+    dtrans = {}
+    for t in trans:
+        dtrans[t] = 1
+
+    # Loop over all transitions and compute lookbacks and includes
+    for state,N in trans:
+        lookb = []
+        includes = []
+        for p in C[state]:
+            if p.name != N: continue
+
+            # Okay, we have a name match.  We now follow the production all the way
+            # through the state machine until we get the . on the right hand side
+
+            lr_index = p.lr_index
+            j = state
+            while lr_index < p.len - 1:
+                 lr_index = lr_index + 1
+                 t = p.prod[lr_index]
+
+                 # Check to see if this symbol and state are a non-terminal transition
+                 if dtrans.has_key((j,t)):
+                       # Yes.  Okay, there is some chance that this is an includes
+                       # relation; the only way to know for certain is whether the
+                       # rest of the production derives empty.
+
+                       li = lr_index + 1
+                       while li < p.len:
+                            if Terminals.has_key(p.prod[li]): break      # No forget it
+                            if not nullable.has_key(p.prod[li]): break
+                            li = li + 1
+                       else:
+                            # Appears to be a relation between (j,t) and (state,N)
+                            includes.append((j,t))
+
+                 g = lr0_goto(C[j],t)               # Go to next set
+                 j = _lr0_cidhash.get(id(g),-1)     # Go to next state
+
+            # When we get here, j is the final state, now we have to locate the production
+            for r in C[j]:
+                 if r.name != p.name: continue
+                 if r.len != p.len:   continue
+                 i = 0
+                 # This loop is comparing a production ". A B C" with "A B C ."
+                 while i < r.lr_index:
+                      if r.prod[i] != p.prod[i+1]: break
+                      i = i + 1
+                 else:
+                      lookb.append((j,r))
+        for i in includes:
+             if not includedict.has_key(i): includedict[i] = []
+             includedict[i].append((state,N))
+        lookdict[(state,N)] = lookb
+
+    return lookdict,includedict
+
+# -----------------------------------------------------------------------------
+# digraph()
+# traverse()
+#
+# The following two functions are used to compute set valued functions
+# of the form:
+#
+#     F(x) = F'(x) U U{F(y) | x R y}
+#
+# This is used to compute the values of Read() sets as well as FOLLOW sets
+# in LALR(1) generation.
+#
+# Inputs:  X    - An input set
+#          R    - A relation
+#          FP   - Set-valued function
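+#
+# For illustration, with X = [1, 2], R(1) = [2], R(2) = [], FP(1) = ['a'] and
+# FP(2) = ['b'], digraph() returns {1: ['a', 'b'], 2: ['b']}: the value for 1
+# is its own F'(1) merged with F(2) because 1 R 2 holds.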
+# ------------------------------------------------------------------------------
+
+def digraph(X,R,FP):
+    N = { }
+    for x in X:
+       N[x] = 0
+    stack = []
+    F = { }
+    for x in X:
+        if N[x] == 0: traverse(x,N,stack,F,X,R,FP)
+    return F
+
+def traverse(x,N,stack,F,X,R,FP):
+    stack.append(x)
+    d = len(stack)
+    N[x] = d
+    F[x] = FP(x)             # F(X) <- F'(x)
+
+    rel = R(x)               # Get y's related to x
+    for y in rel:
+        if N[y] == 0:
+             traverse(y,N,stack,F,X,R,FP)
+        N[x] = min(N[x],N[y])
+        for a in F.get(y,[]):
+            if a not in F[x]: F[x].append(a)
+    if N[x] == d:
+       N[stack[-1]] = sys.maxint
+       F[stack[-1]] = F[x]
+       element = stack.pop()
+       while element != x:
+           N[stack[-1]] = sys.maxint
+           F[stack[-1]] = F[x]
+           element = stack.pop()
+
+# -----------------------------------------------------------------------------
+# compute_read_sets()
+#
+# Given a set of LR(0) items, this function computes the read sets.
+#
+# Inputs:  C        =  Set of LR(0) items
+#          ntrans   = Set of nonterminal transitions
+#          nullable = Set of empty transitions
+#
+# Returns a set containing the read sets
+# -----------------------------------------------------------------------------
+
+def compute_read_sets(C, ntrans, nullable):
+    FP = lambda x: dr_relation(C,x,nullable)
+    R =  lambda x: reads_relation(C,x,nullable)
+    F = digraph(ntrans,R,FP)
+    return F
+
+# -----------------------------------------------------------------------------
+# compute_follow_sets()
+#
+# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
+# and an include set, this function computes the follow sets
+#
+# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
+#
+# Inputs:
+#            ntrans     = Set of nonterminal transitions
+#            readsets   = Readset (previously computed)
+#            inclsets   = Include sets (previously computed)
+#
+# Returns a set containing the follow sets
+# -----------------------------------------------------------------------------
+
+def compute_follow_sets(ntrans,readsets,inclsets):
+     FP = lambda x: readsets[x]
+     R  = lambda x: inclsets.get(x,[])
+     F = digraph(ntrans,R,FP)
+     return F
+
+# -----------------------------------------------------------------------------
+# add_lookaheads()
+#
+# Attaches the lookahead symbols to grammar rules.
+#
+# Inputs:    lookbacks         -  Set of lookback relations
+#            followset         -  Computed follow set
+#
+# This function directly attaches the lookaheads to productions contained
+# in the lookbacks set
+# -----------------------------------------------------------------------------
+
+def add_lookaheads(lookbacks,followset):
+    for trans,lb in lookbacks.items():
+        # Loop over productions in lookback
+        for state,p in lb:
+             if not p.lookaheads.has_key(state):
+                  p.lookaheads[state] = []
+             f = followset.get(trans,[])
+             for a in f:
+                  if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
+
+# -----------------------------------------------------------------------------
+# add_lalr_lookaheads()
+#
+# This function does all of the work of adding lookahead information for use
+# with LALR parsing
+# -----------------------------------------------------------------------------
+
+def add_lalr_lookaheads(C):
+    # Determine all of the nullable nonterminals
+    nullable = compute_nullable_nonterminals()
+
+    # Find all non-terminal transitions
+    trans = find_nonterminal_transitions(C)
+
+    # Compute read sets
+    readsets = compute_read_sets(C,trans,nullable)
+
+    # Compute lookback/includes relations
+    lookd, included = compute_lookback_includes(C,trans,nullable)
+
+    # Compute LALR FOLLOW sets
+    followsets = compute_follow_sets(trans,readsets,included)
+
+    # Add all of the lookaheads
+    add_lookaheads(lookd,followsets)
+
+# -----------------------------------------------------------------------------
+# lr_parse_table()
+#
+# This function constructs the parse tables for SLR or LALR
+# -----------------------------------------------------------------------------
+def lr_parse_table(method):
+    global _lr_method
+    goto = _lr_goto           # Goto array
+    action = _lr_action       # Action array
+    actionp = { }             # Action production array (temporary)
+
+    _lr_method = method
+
+    n_srconflict = 0
+    n_rrconflict = 0
+
+    if yaccdebug:
+        sys.stderr.write("yacc: Generating %s parsing table...\n" % method)
+        _vf.write("\n\nParsing method: %s\n\n" % method)
+
+    # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
+    # This determines the number of states
+
+    C = lr0_items()
+
+    if method == 'LALR':
+        add_lalr_lookaheads(C)
+
+
+    # Build the parser table, state by state
+    st = 0
+    for I in C:
+        # Loop over each production in I
+        actlist = [ ]              # List of actions
+        st_action  = { }
+        st_actionp = { }
+        st_goto    = { }
+        if yaccdebug:
+            _vf.write("\nstate %d\n\n" % st)
+            for p in I:
+                _vf.write("    (%d) %s\n" % (p.number, str(p)))
+            _vf.write("\n")
+
+        for p in I:
+            try:
+                if p.len == p.lr_index + 1:
+                    if p.name == "S'":
+                        # Start symbol. Accept!
+                        st_action["$end"] = 0
+                        st_actionp["$end"] = p
+                    else:
+                        # We are at the end of a production.  Reduce!
+                        if method == 'LALR':
+                            laheads = p.lookaheads[st]
+                        else:
+                            laheads = Follow[p.name]
+                        for a in laheads:
+                            actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
+                            r = st_action.get(a,None)
+                            if r is not None:
+                                # Whoa. Have a shift/reduce or reduce/reduce conflict
+                                if r > 0:
+                                    # Need to decide on shift or reduce here
+                                    # By default we favor shifting. Need to add
+                                    # some precedence rules here.
+                                    sprec,slevel = Productions[st_actionp[a].number].prec
+                                    rprec,rlevel = Precedence.get(a,('right',0))
+                                    if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
+                                        # We really need to reduce here.
+                                        st_action[a] = -p.number
+                                        st_actionp[a] = p
+                                        if not slevel and not rlevel:
+                                            _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
+                                            _vf.write("  ! shift/reduce conflict for %s resolved as reduce.\n" % a)
+                                            n_srconflict += 1
+                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
+                                        st_action[a] = None
+                                    else:
+                                        # Hmmm. Guess we'll keep the shift
+                                        if not rlevel:
+                                            _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
+                                            _vf.write("  ! shift/reduce conflict for %s resolved as shift.\n" % a)
+                                            n_srconflict +=1
+                                elif r < 0:
+                                    # Reduce/reduce conflict.   In this case, we favor the rule
+                                    # that was defined first in the grammar file
+                                    oldp = Productions[-r]
+                                    pp = Productions[p.number]
+                                    if oldp.line > pp.line:
+                                        st_action[a] = -p.number
+                                        st_actionp[a] = p
+                                    # sys.stderr.write("Reduce/reduce conflict in state %d\n" % st)
+                                    n_rrconflict += 1
+                                    _vfc.write("reduce/reduce conflict in state %d resolved using rule %d (%s).\n" % (st, st_actionp[a].number, st_actionp[a]))
+                                    _vf.write("  ! reduce/reduce conflict for %s resolved using rule %d (%s).\n" % (a,st_actionp[a].number, st_actionp[a]))
+                                else:
+                                    sys.stderr.write("Unknown conflict in state %d\n" % st)
+                            else:
+                                st_action[a] = -p.number
+                                st_actionp[a] = p
+                else:
+                    i = p.lr_index
+                    a = p.prod[i+1]       # Get symbol right after the "."
+                    if Terminals.has_key(a):
+                        g = lr0_goto(I,a)
+                        j = _lr0_cidhash.get(id(g),-1)
+                        if j >= 0:
+                            # We are in a shift state
+                            actlist.append((a,p,"shift and go to state %d" % j))
+                            r = st_action.get(a,None)
+                            if r is not None:
+                                # Whoa. Have a shift/reduce or shift/shift conflict
+                                if r > 0:
+                                    if r != j:
+                                        sys.stderr.write("Shift/shift conflict in state %d\n" % st)
+                                elif r < 0:
+                                    # Do a precedence check.
+                                    #   -  if precedence of reduce rule is higher, we reduce.
+                                    #   -  if precedence of reduce is same and left assoc, we reduce.
+                                    #   -  otherwise we shift
+                                    rprec,rlevel = Productions[st_actionp[a].number].prec
+                                    sprec,slevel = Precedence.get(a,('right',0))
+                                    if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
+                                        # We decide to shift here... highest precedence to shift
+                                        st_action[a] = j
+                                        st_actionp[a] = p
+                                        if not rlevel:
+                                            n_srconflict += 1
+                                            _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
+                                            _vf.write("  ! shift/reduce conflict for %s resolved as shift.\n" % a)
+                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
+                                        st_action[a] = None
+                                    else:
+                                        # Hmmm. Guess we'll keep the reduce
+                                        if not slevel and not rlevel:
+                                            n_srconflict +=1
+                                            _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
+                                            _vf.write("  ! shift/reduce conflict for %s resolved as reduce.\n" % a)
+
+                                else:
+                                    sys.stderr.write("Unknown conflict in state %d\n" % st)
+                            else:
+                                st_action[a] = j
+                                st_actionp[a] = p
+
+            except StandardError,e:
+               print sys.exc_info()
+               raise YaccError, "Hosed in lr_parse_table"
+
+        # Print the actions associated with each terminal
+        if yaccdebug:
+          _actprint = { }
+          for a,p,m in actlist:
+            if st_action.has_key(a):
+                if p is st_actionp[a]:
+                    _vf.write("    %-15s %s\n" % (a,m))
+                    _actprint[(a,m)] = 1
+          _vf.write("\n")
+          for a,p,m in actlist:
+            if st_action.has_key(a):
+                if p is not st_actionp[a]:
+                    if not _actprint.has_key((a,m)):
+                        _vf.write("  ! %-15s [ %s ]\n" % (a,m))
+                        _actprint[(a,m)] = 1
+
+        # Construct the goto table for this state
+        if yaccdebug:
+            _vf.write("\n")
+        nkeys = { }
+        for ii in I:
+            for s in ii.usyms:
+                if Nonterminals.has_key(s):
+                    nkeys[s] = None
+        for n in nkeys.keys():
+            g = lr0_goto(I,n)
+            j = _lr0_cidhash.get(id(g),-1)
+            if j >= 0:
+                st_goto[n] = j
+                if yaccdebug:
+                    _vf.write("    %-30s shift and go to state %d\n" % (n,j))
+
+        action[st] = st_action
+        actionp[st] = st_actionp
+        goto[st] = st_goto
+
+        st += 1
+
+    if yaccdebug:
+        if n_srconflict == 1:
+            sys.stderr.write("yacc: %d shift/reduce conflict\n" % n_srconflict)
+        if n_srconflict > 1:
+            sys.stderr.write("yacc: %d shift/reduce conflicts\n" % n_srconflict)
+        if n_rrconflict == 1:
+            sys.stderr.write("yacc: %d reduce/reduce conflict\n" % n_rrconflict)
+        if n_rrconflict > 1:
+            sys.stderr.write("yacc: %d reduce/reduce conflicts\n" % n_rrconflict)
+
+# -----------------------------------------------------------------------------
+#                          ==== LR Utility functions ====
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# lr_write_tables()
+#
+# This function writes the LR parsing tables to a file
+# -----------------------------------------------------------------------------
+
+def lr_write_tables(modulename=tab_module,outputdir=''):
+    if isinstance(modulename, types.ModuleType):
+        print >>sys.stderr, "Warning: module %s is inconsistent with the grammar (ignored)" % modulename
+        return
+
+    basemodulename = modulename.split(".")[-1]
+    filename = os.path.join(outputdir,basemodulename) + ".py"
+    try:
+        f = open(filename,"w")
+
+        f.write("""
+# %s
+# This file is automatically generated. Do not edit.
+
+_lr_method = %s
+
+_lr_signature = %s
+""" % (filename, repr(_lr_method), repr(Signature.digest())))
+
+        # Change smaller to 0 to go back to original tables
+        smaller = 1
+
+        # Factor out names to try and make smaller
+        if smaller:
+            items = { }
+
+            for s,nd in _lr_action.items():
+               for name,v in nd.items():
+                  i = items.get(name)
+                  if not i:
+                     i = ([],[])
+                     items[name] = i
+                  i[0].append(s)
+                  i[1].append(v)
+
+            f.write("\n_lr_action_items = {")
+            for k,v in items.items():
+                f.write("%r:([" % k)
+                for i in v[0]:
+                    f.write("%r," % i)
+                f.write("],[")
+                for i in v[1]:
+                    f.write("%r," % i)
+
+                f.write("]),")
+            f.write("}\n")
+
+            f.write("""
+_lr_action = { }
+for _k, _v in _lr_action_items.items():
+   for _x,_y in zip(_v[0],_v[1]):
+      if not _lr_action.has_key(_x):  _lr_action[_x] = { }
+      _lr_action[_x][_k] = _y
+del _lr_action_items
+""")
+
+        else:
+            f.write("\n_lr_action = { ")
+            for k,v in _lr_action.items():
+                f.write("(%r,%r):%r," % (k[0],k[1],v))
+            f.write("}\n")
+
+        if smaller:
+            # Factor out names to try and make smaller
+            items = { }
+
+            for s,nd in _lr_goto.items():
+               for name,v in nd.items():
+                  i = items.get(name)
+                  if not i:
+                     i = ([],[])
+                     items[name] = i
+                  i[0].append(s)
+                  i[1].append(v)
+
+            f.write("\n_lr_goto_items = {")
+            for k,v in items.items():
+                f.write("%r:([" % k)
+                for i in v[0]:
+                    f.write("%r," % i)
+                f.write("],[")
+                for i in v[1]:
+                    f.write("%r," % i)
+
+                f.write("]),")
+            f.write("}\n")
+
+            f.write("""
+_lr_goto = { }
+for _k, _v in _lr_goto_items.items():
+   for _x,_y in zip(_v[0],_v[1]):
+       if not _lr_goto.has_key(_x): _lr_goto[_x] = { }
+       _lr_goto[_x][_k] = _y
+del _lr_goto_items
+""")
+        else:
+            f.write("\n_lr_goto = { ");
+            for k,v in _lr_goto.items():
+                f.write("(%r,%r):%r," % (k[0],k[1],v))
+            f.write("}\n");
+
+        # Write production table
+        f.write("_lr_productions = [\n")
+        for p in Productions:
+            if p:
+                if (p.func):
+                    f.write("  (%r,%d,%r,%r,%d),\n" % (p.name, p.len, p.func.__name__,p.file,p.line))
+                else:
+                    f.write("  (%r,%d,None,None,None),\n" % (p.name, p.len))
+            else:
+                f.write("  None,\n")
+        f.write("]\n")
+
+        f.close()
+
+    except IOError,e:
+        print >>sys.stderr, "Unable to create '%s'" % filename
+        print >>sys.stderr, e
+        return
+
+def lr_read_tables(module=tab_module,optimize=0):
+    global _lr_action, _lr_goto, _lr_productions, _lr_method
+    try:
+        if isinstance(module,types.ModuleType):
+            parsetab = module
+        else:
+            exec "import %s as parsetab" % module
+
+        if (optimize) or (Signature.digest() == parsetab._lr_signature):
+            _lr_action = parsetab._lr_action
+            _lr_goto   = parsetab._lr_goto
+            _lr_productions = parsetab._lr_productions
+            _lr_method = parsetab._lr_method
+            return 1
+        else:
+            return 0
+
+    except (ImportError,AttributeError):
+        return 0
+
+
+# -----------------------------------------------------------------------------
+# yacc(module)
+#
+# Build the parser module
+# -----------------------------------------------------------------------------
+
+def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=1, optimize=0,write_tables=1,debugfile=debug_file,outputdir=''):
+    global yaccdebug
+    yaccdebug = debug
+
+    initialize_vars()
+    files = { }
+    error = 0
+
+
+    # Add parsing method to signature
+    Signature.update(method)
+
+    # If a "module" parameter was supplied, extract its dictionary.
+    # Note: a module may in fact be an instance as well.
+
+    if module:
+        # User supplied a module object.
+        if isinstance(module, types.ModuleType):
+            ldict = module.__dict__
+        elif isinstance(module, _INSTANCETYPE):
+            _items = [(k,getattr(module,k)) for k in dir(module)]
+            ldict = { }
+            for i in _items:
+                ldict[i[0]] = i[1]
+        else:
+            raise ValueError,"Expected a module"
+
+    else:
+        # No module given.  We might be able to get information from the caller.
+        # Throw an exception and unwind the traceback to get the globals
+
+        try:
+            raise RuntimeError
+        except RuntimeError:
+            e,b,t = sys.exc_info()
+            f = t.tb_frame
+            f = f.f_back           # Walk out to our calling function
+            if f.f_globals is f.f_locals:   # Collect global and local variables from caller
+               ldict = f.f_globals
+            else:
+               ldict = f.f_globals.copy()
+               ldict.update(f.f_locals)
+
+    # Add starting symbol to signature
+    if not start:
+        start = ldict.get("start",None)
+    if start:
+        Signature.update(start)
+
+    # Look for error handler
+    ef = ldict.get('p_error',None)
+    if ef:
+        if isinstance(ef,types.FunctionType):
+            ismethod = 0
+        elif isinstance(ef, types.MethodType):
+            ismethod = 1
+        else:
+            raise YaccError,"'p_error' defined, but is not a function or method."
+        eline = ef.func_code.co_firstlineno
+        efile = ef.func_code.co_filename
+        files[efile] = None
+
+        if (ef.func_code.co_argcount != 1+ismethod):
+            raise YaccError,"%s:%d: p_error() requires 1 argument." % (efile,eline)
+        global Errorfunc
+        Errorfunc = ef
+    else:
+        print >>sys.stderr, "yacc: Warning. no p_error() function is defined."
+
+    # If running in optimized mode.  We're going to read tables instead
+
+    if (optimize and lr_read_tables(tabmodule,1)):
+        # Read parse table
+        del Productions[:]
+        for p in _lr_productions:
+            if not p:
+                Productions.append(None)
+            else:
+                m = MiniProduction()
+                m.name = p[0]
+                m.len  = p[1]
+                m.file = p[3]
+                m.line = p[4]
+                if p[2]:
+                    m.func = ldict[p[2]]
+                Productions.append(m)
+
+    else:
+        # Get the tokens map
+        if (module and isinstance(module,_INSTANCETYPE)):
+            tokens = getattr(module,"tokens",None)
+        else:
+            tokens = ldict.get("tokens",None)
+
+        if not tokens:
+            raise YaccError,"module does not define a list 'tokens'"
+        if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
+            raise YaccError,"tokens must be a list or tuple."
+
+        # Check to see if a requires dictionary is defined.
+        requires = ldict.get("require",None)
+        if requires:
+            if not (isinstance(requires,types.DictType)):
+                raise YaccError,"require must be a dictionary."
+
+            for r,v in requires.items():
+                try:
+                    if not (isinstance(v,types.ListType)):
+                        raise TypeError
+                    v1 = [x.split(".") for x in v]
+                    Requires[r] = v1
+                except StandardError:
+                    print >>sys.stderr, "Invalid specification for rule '%s' in require. Expected a list of strings" % r
+
+
+        # Build the dictionary of terminals.  We record a 0 in the
+        # dictionary to track whether or not a terminal is actually
+        # used in the grammar
+
+        if 'error' in tokens:
+            print >>sys.stderr, "yacc: Illegal token 'error'.  Is a reserved word."
+            raise YaccError,"Illegal token name"
+
+        for n in tokens:
+            if Terminals.has_key(n):
+                print >>sys.stderr, "yacc: Warning. Token '%s' multiply defined." % n
+            Terminals[n] = [ ]
+
+        Terminals['error'] = [ ]
+
+        # Get the precedence map (if any)
+        prec = ldict.get("precedence",None)
+        if prec:
+            if not (isinstance(prec,types.ListType) or isinstance(prec,types.TupleType)):
+                raise YaccError,"precedence must be a list or tuple."
+            add_precedence(prec)
+            Signature.update(repr(prec))
+
+        for n in tokens:
+            if not Precedence.has_key(n):
+                Precedence[n] = ('right',0)         # Default, right associative, 0 precedence
+
+        # Get the list of built-in functions with p_ prefix
+        symbols = [ldict[f] for f in ldict.keys()
+               if (type(ldict[f]) in (types.FunctionType, types.MethodType) and ldict[f].__name__[:2] == 'p_'
+                   and ldict[f].__name__ != 'p_error')]
+
+        # Check for non-empty symbols
+        if len(symbols) == 0:
+            raise YaccError,"no rules of the form p_rulename are defined."
+
+        # Sort the symbols by line number
+        symbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno))
+
+        # Add all of the symbols to the grammar
+        for f in symbols:
+            if (add_function(f)) < 0:
+                error += 1
+            else:
+                files[f.func_code.co_filename] = None
+
+        # Make a signature of the docstrings
+        for f in symbols:
+            if f.__doc__:
+                Signature.update(f.__doc__)
+
+        lr_init_vars()
+
+        if error:
+            raise YaccError,"Unable to construct parser."
+
+        if not lr_read_tables(tabmodule):
+
+            # Validate files
+            for filename in files.keys():
+                if not validate_file(filename):
+                    error = 1
+
+            # Validate dictionary
+            validate_dict(ldict)
+
+            if start and not Prodnames.has_key(start):
+                raise YaccError,"Bad starting symbol '%s'" % start
+
+            augment_grammar(start)
+            error = verify_productions(cycle_check=check_recursion)
+            otherfunc = [ldict[f] for f in ldict.keys()
+               if (type(f) in (types.FunctionType,types.MethodType) and ldict[f].__name__[:2] != 'p_')]
+
+            # Check precedence rules
+            if check_precedence():
+                error = 1
+
+            if error:
+                raise YaccError,"Unable to construct parser."
+
+            build_lritems()
+            compute_first1()
+            compute_follow(start)
+
+            if method in ['SLR','LALR']:
+                lr_parse_table(method)
+            else:
+                raise YaccError, "Unknown parsing method '%s'" % method
+
+            if write_tables:
+                lr_write_tables(tabmodule,outputdir)
+
+            if yaccdebug:
+                try:
+                    f = open(os.path.join(outputdir,debugfile),"w")
+                    f.write(_vfc.getvalue())
+                    f.write("\n\n")
+                    f.write(_vf.getvalue())
+                    f.close()
+                except IOError,e:
+                    print >>sys.stderr, "yacc: can't create '%s'" % debugfile,e
+
+    # Made it here.   Create a parser object and set up its internal state.
+    # Set global parse() method to bound method of parser object.
+
+    p = Parser("xyzzy")
+    p.productions = Productions
+    p.errorfunc = Errorfunc
+    p.action = _lr_action
+    p.goto   = _lr_goto
+    p.method = _lr_method
+    p.require = Requires
+
+    global parse
+    parse = p.parse
+
+    global parser
+    parser = p
+
+    # Clean up all of the globals we created
+    if (not optimize):
+        yacc_cleanup()
+    return p
+
+# yacc_cleanup function.  Delete all of the global variables
+# used during table construction
+
+def yacc_cleanup():
+    global _lr_action, _lr_goto, _lr_method, _lr_goto_cache
+    del _lr_action, _lr_goto, _lr_method, _lr_goto_cache
+
+    global Productions, Prodnames, Prodmap, Terminals
+    global Nonterminals, First, Follow, Precedence, UsedPrecedence, LRitems
+    global Errorfunc, Signature, Requires
+
+    del Productions, Prodnames, Prodmap, Terminals
+    del Nonterminals, First, Follow, Precedence, UsedPrecedence, LRitems
+    del Errorfunc, Signature, Requires
+
+    global _vf, _vfc
+    del _vf, _vfc
+
+
+# Stub that raises an error if parsing is attempted without first calling yacc()
+def parse(*args,**kwargs):
+    raise YaccError, "yacc: No parser built with yacc()"
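The yacc() entry point above scrapes the p_* rule functions and p_error out of the
caller's namespace, builds the SLR/LALR tables, caches them through
lr_write_tables()/lr_read_tables(), and installs a Parser object. A minimal sketch of
a consumer, using a hypothetical toy grammar rather than anything in this patch
(xpidl.py below is the real consumer in this changeset):

    import sys
    from ply import lex, yacc

    tokens = ('NUMBER', 'PLUS')

    t_PLUS = r'\+'
    t_ignore = ' \t'

    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_error(t):
        t.lexer.skip(1)

    def p_expr_plus(p):
        """expr : expr PLUS NUMBER"""
        p[0] = p[1] + p[3]

    def p_expr_number(p):
        """expr : NUMBER"""
        p[0] = p[1]

    def p_error(p):
        print >>sys.stderr, "syntax error at %r" % (p,)

    lexer = lex.lex()
    parser = yacc.yacc(write_tables=0, debug=0)   # skip parser.out/parse-table caching
    print parser.parse("1 + 2 + 3", lexer=lexer)  # -> 6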
--- a/storage/src/mozStorageStatementWrapper.cpp
+++ b/storage/src/mozStorageStatementWrapper.cpp
@@ -469,16 +469,24 @@ mozStorageStatementWrapper::OuterObject(
 NS_IMETHODIMP
 mozStorageStatementWrapper::InnerObject(nsIXPConnectWrappedNative *wrapper,
                                         JSContext *cx, JSObject *obj,
                                         JSObject **_retval)
 {
     return NS_ERROR_NOT_IMPLEMENTED;
 }
 
+/* void postCreatePrototype (in JSContextPtr cx, in JSObjectPtr proto); */
+NS_IMETHODIMP
+mozStorageStatementWrapper::PostCreatePrototype(JSContext * cx,
+                                                JSObject * proto)
+{
+    return NS_OK;
+}
+
 /*************************************************************************
  ****
  **** mozStorageStatementRow
  ****
  *************************************************************************/
 
 NS_IMPL_ISUPPORTS2(mozStorageStatementRow, mozIStorageStatementRow, nsIXPCScriptable)
 
@@ -734,16 +742,23 @@ mozStorageStatementRow::OuterObject(nsIX
 NS_IMETHODIMP
 mozStorageStatementRow::InnerObject(nsIXPConnectWrappedNative *wrapper,
                                     JSContext *cx, JSObject *obj,
                                     JSObject **_retval)
 {
     return NS_ERROR_NOT_IMPLEMENTED;
 }
 
+/* void postCreatePrototype (in JSContextPtr cx, in JSObjectPtr proto); */
+NS_IMETHODIMP
+mozStorageStatementRow::PostCreatePrototype(JSContext * cx, JSObject * proto)
+{
+    return NS_OK;
+}
+
 /*************************************************************************
  ****
  **** mozStorageStatementParams
  ****
  *************************************************************************/
 
 NS_IMPL_ISUPPORTS2(mozStorageStatementParams, mozIStorageStatementParams, nsIXPCScriptable)
 
@@ -1014,8 +1029,14 @@ mozStorageStatementParams::OuterObject(n
 NS_IMETHODIMP
 mozStorageStatementParams::InnerObject(nsIXPConnectWrappedNative *wrapper,
                                        JSContext *cx, JSObject *obj,
                                        JSObject **_retval)
 {
     return NS_ERROR_NOT_IMPLEMENTED;
 }
 
+/* void postCreatePrototype (in JSContextPtr cx, in JSObjectPtr proto); */
+NS_IMETHODIMP
+mozStorageStatementParams::PostCreatePrototype(JSContext * cx, JSObject * proto)
+{
+    return NS_OK;
+}
--- a/widget/public/nsGUIEvent.h
+++ b/widget/public/nsGUIEvent.h
@@ -39,23 +39,16 @@
 
 #ifndef nsGUIEvent_h__
 #define nsGUIEvent_h__
 
 #include "nsPoint.h"
 #include "nsRect.h"
 #include "nsEvent.h"
 #include "nsStringGlue.h"
-
-// nsIDOMEvent contains a long enum which includes a member called ERROR,
-// which conflicts with something that Windows defines somewhere.
-// So, undefine it:
-#ifdef WIN32
-#undef ERROR
-#endif
 #include "nsCOMPtr.h"
 #include "nsIAtom.h"
 #include "nsIDOMKeyEvent.h"
 #include "nsWeakPtr.h"
 #include "nsIWidget.h"
 #include "nsTArray.h"
 #include "nsTraceRefcnt.h"
 
new file mode 100644
--- /dev/null
+++ b/xpcom/idl-parser/header.py
@@ -0,0 +1,444 @@
+#!/usr/bin/env python
+# header.py - Generate C++ header files from IDL.
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+#   Mozilla Foundation.
+# Portions created by the Initial Developer are Copyright (C) 2008
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Benjamin Smedberg <benjamin@smedbergs.us>
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+"""Print a C++ header file for the IDL files specified on the command line"""
+
+import sys, os.path, re, xpidl
+
+printdoccomments = False
+
+if printdoccomments:
+    def printComments(fd, clist, indent):
+        for c in clist:
+            fd.write("%s%s\n" % (indent, c))
+else:
+    def printComments(fd, clist, indent):
+        pass
+
+def firstCap(str):
+    return str[0].upper() + str[1:]
+
+def attributeParamName(a):
+    return "a" + firstCap(a.name)
+
+def attributeNativeName(a, getter):
+    binaryname = a.binaryname is not None and a.binaryname or firstCap(a.name)
+    return "%s%s" % (getter and 'Get' or 'Set', binaryname)
+
+def attributeParamlist(a, getter):
+    return "%s%s" % (a.realtype.nativeType(getter and 'out' or 'in'),
+                     attributeParamName(a))
+
+def attributeAsNative(a, getter):
+        scriptable = a.isScriptable() and "NS_SCRIPTABLE " or ""
+        params = {'scriptable': scriptable,
+                  'binaryname': attributeNativeName(a, getter),
+                  'paramlist': attributeParamlist(a, getter)}
+        return "%(scriptable)sNS_IMETHOD %(binaryname)s(%(paramlist)s)" % params
+
+def methodNativeName(m):
+    return m.binaryname is not None and m.binaryname or firstCap(m.name)
+
+def methodReturnType(m, macro):
+    """macro should be NS_IMETHOD or NS_IMETHODIMP"""
+    if m.notxpcom:
+        return "%s_(%s)" % (macro, m.realtype.nativeType('in').strip())
+    else:
+        return macro
+
+def methodAsNative(m):
+    scriptable = m.isScriptable() and "NS_SCRIPTABLE " or ""
+
+    return "%s%s %s(%s)" % (scriptable,
+                            methodReturnType(m, 'NS_IMETHOD'),
+                            methodNativeName(m),
+                            paramlistAsNative(m.params,
+                                              m.realtype,
+                                              notxpcom=m.notxpcom))
+
+def paramlistAsNative(l, rettype, notxpcom, empty='void'):
+    l = list(l)
+    if not notxpcom and rettype.name != 'void':
+        l.append(xpidl.Param(paramtype='out',
+                             type=None,
+                             name='_retval',
+                             attlist=[],
+                             location=None,
+                             realtype=rettype))
+
+    if len(l) == 0:
+        return empty
+
+    return ", ".join([paramAsNative(p) for p in l])
+
+def paramAsNative(p):
+    if p.paramtype == 'in':
+        typeannotate = ''
+    else:
+        typeannotate = ' NS_%sPARAM' % p.paramtype.upper()
+
+    return "%s%s%s" % (p.nativeType(),
+                       p.name,
+                       typeannotate)
+
+def paramlistNames(l, rettype, notxpcom):
+    names = [p.name for p in l]
+    if not notxpcom and rettype.name != 'void':
+        names.append('_retval')
+    if len(names) == 0:
+        return ''
+    return ', '.join(names)
+
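The helpers above are the whole IDL-member-to-C++-signature mapping used by the header
writer below. A rough illustration of what they emit, using hand-rolled stand-ins for
the xpidl attribute and type objects (hypothetical stubs; real inputs come from
xpidl.IDLParser):

    class FakeType(object):
        def nativeType(self, calltype):
            # close enough to xpidl.Builtin('long', 'PRInt32') for a demo
            return calltype == 'in' and 'PRInt32 ' or 'PRInt32 *'

    class FakeAttr(object):
        name = 'foo'
        binaryname = None
        readonly = False
        realtype = FakeType()
        def isScriptable(self):
            return True

    a = FakeAttr()
    print attributeAsNative(a, True)    # NS_SCRIPTABLE NS_IMETHOD GetFoo(PRInt32 *aFoo)
    print attributeAsNative(a, False)   # NS_SCRIPTABLE NS_IMETHOD SetFoo(PRInt32 aFoo)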
+header = """/*
+ * DO NOT EDIT.  THIS FILE IS GENERATED FROM %(filename)s
+ */
+
+#ifndef __gen_%(basename)s_h__
+#define __gen_%(basename)s_h__
+"""
+
+include = """
+#ifndef __gen_%(basename)s_h__
+#include "%(basename)s.h"
+#endif
+"""
+
+header_end = """/* For IDL files that don't want to include root IDL files. */
+#ifndef NS_NO_VTABLE
+#define NS_NO_VTABLE
+#endif
+"""
+
+footer = """
+#endif /* __gen_%(basename)s_h__ */
+"""
+
+forward_decl = """class %(name)s; /* forward declaration */
+
+"""
+
+def idl_basename(f):
+    """returns the base name of a file with the last extension stripped"""
+    return os.path.basename(f).rpartition('.')[0]
+
+def print_header(idl, fd, filename):
+    fd.write(header % {'filename': filename,
+                       'basename': idl_basename(filename)})
+
+    foundinc = False
+    for inc in idl.includes():
+        if not foundinc:
+            foundinc = True
+            fd.write('\n')
+        fd.write(include % {'basename': idl_basename(inc.filename)})
+
+    fd.write('\n')
+    fd.write(header_end)
+
+    for p in idl.productions:
+        if p.kind == 'include': continue
+        if p.kind == 'cdata':
+            fd.write(p.data)
+            continue
+
+        if p.kind == 'forward':
+            fd.write(forward_decl % {'name': p.name})
+            continue
+        if p.kind == 'interface':
+            write_interface(p, fd)
+            continue
+        if p.kind == 'typedef':
+            printComments(fd, p.doccomments, '')
+            fd.write("typedef %s %s;\n\n" % (p.realtype.nativeType('in'),
+                                             p.name))
+
+    fd.write(footer % {'basename': idl_basename(filename)})
+
+iface_header = r"""
+/* starting interface:    %(name)s */
+#define %(defname)s_IID_STR "%(iid)s"
+
+#define %(defname)s_IID \
+  {0x%(m0)s, 0x%(m1)s, 0x%(m2)s, \
+    { %(m3joined)s }}
+
+"""
+
+uuid_decoder = re.compile(r"""(?P<m0>[a-f0-9]{8})-
+                              (?P<m1>[a-f0-9]{4})-
+                              (?P<m2>[a-f0-9]{4})-
+                              (?P<m3>[a-f0-9]{4})-
+                              (?P<m4>[a-f0-9]{12})$""", re.X)
+
+iface_prolog = """ {
+ public: 
+
+  NS_DECLARE_STATIC_IID_ACCESSOR(%(defname)s_IID)
+
+"""
+
+iface_epilog = """};
+
+  NS_DEFINE_STATIC_IID_ACCESSOR(%(name)s, %(defname)s_IID)
+
+/* Use this macro when declaring classes that implement this interface. */
+#define NS_DECL_%(macroname)s """
+
+
+iface_forward = """
+
+/* Use this macro to declare functions that forward the behavior of this interface to another object. */
+#define NS_FORWARD_%(macroname)s(_to) """
+
+iface_forward_safe = """
+
+/* Use this macro to declare functions that forward the behavior of this interface to another object in a safe way. */
+#define NS_FORWARD_SAFE_%(macroname)s(_to) """
+
+iface_template_prolog = """
+
+#if 0
+/* Use the code below as a template for the implementation class for this interface. */
+
+/* Header file */
+class %(implclass)s : public %(name)s
+{
+public:
+  NS_DECL_ISUPPORTS
+  NS_DECL_%(macroname)s
+
+  %(implclass)s();
+
+private:
+  ~%(implclass)s();
+
+protected:
+  /* additional members */
+};
+
+/* Implementation file */
+NS_IMPL_ISUPPORTS1(%(implclass)s, %(name)s)
+
+%(implclass)s::%(implclass)s()
+{
+  /* member initializers and constructor code */
+}
+
+%(implclass)s::~%(implclass)s()
+{
+  /* destructor code */
+}
+
+"""
+
+example_tmpl = """%(returntype)s %(implclass)s::%(nativeName)s(%(paramList)s)
+{
+    return NS_ERROR_NOT_IMPLEMENTED;
+}
+"""
+
+iface_template_epilog = """/* End of implementation class template. */
+#endif
+
+"""
+
+def write_interface(iface, fd):
+    if iface.namemap is None:
+        raise Exception("Interface was not resolved.")
+
+    def write_const_decl(c):
+        printComments(fd, c.doccomments, '  ')
+
+        basetype = c.basetype
+        value = c.getValue()
+
+        fd.write("  enum { %(name)s = %(value)s%(signed)s };\n\n" % {
+                     'name': c.name,
+                     'value': value,
+                     'signed': (not basetype.signed) and 'U' or ''})
+
+    def write_method_decl(m):
+        printComments(fd, m.doccomments, '  ')
+
+        fd.write("  /* %s */\n" % m.toIDL())
+        fd.write("  %s = 0;\n\n" % methodAsNative(m))
+
+    def write_attr_decl(a):
+        printComments(fd, a.doccomments, '  ')
+
+        fd.write("  /* %s */\n" % a.toIDL());
+
+        fd.write("  %s = 0;\n" % attributeAsNative(a, True))
+        if not a.readonly:
+            fd.write("  %s = 0;\n" % attributeAsNative(a, False))
+        fd.write("\n")
+
+    defname = iface.name.upper()
+    if iface.name[0:2] == 'ns':
+        defname = 'NS_' + defname[2:]
+
+    names = uuid_decoder.match(iface.attributes.uuid).groupdict()
+    m3str = names['m3'] + names['m4']
+    names['m3joined'] = ", ".join(["0x%s" % m3str[i:i+2] for i in xrange(0, 16, 2)])
+
+    if iface.name[2] == 'I':
+        implclass = iface.name[:2] + iface.name[3:]
+    else:
+        implclass = '_MYCLASS_'
+
+    names.update({'defname': defname,
+                  'macroname': iface.name.upper(),
+                  'name': iface.name,
+                  'iid': iface.attributes.uuid,
+                  'implclass': implclass})
+
+    fd.write(iface_header % names)
+
+    printComments(fd, iface.doccomments, '')
+
+    fd.write("class ")
+    foundcdata = False
+    for m in iface.members:
+        if isinstance(m, xpidl.CDATA):
+            foundcdata = True
+
+    if not foundcdata:
+        fd.write("NS_NO_VTABLE ")
+
+    if iface.attributes.scriptable:
+        fd.write("NS_SCRIPTABLE ")
+    if iface.attributes.deprecated:
+        fd.write("NS_DEPRECATED ")
+    fd.write(iface.name)
+    if iface.base:
+        fd.write(" : public %s" % iface.base)
+    fd.write(iface_prolog % names)
+    for member in iface.members:
+        if isinstance(member, xpidl.ConstMember):
+            write_const_decl(member)
+        elif isinstance(member, xpidl.Attribute):
+            write_attr_decl(member)
+        elif isinstance(member, xpidl.Method):
+            write_method_decl(member)
+        elif isinstance(member, xpidl.CDATA):
+            fd.write("  %s" % member.data)
+        else:
+            raise Exception("Unexpected interface member: %s" % member)
+
+    fd.write(iface_epilog % names)
+
+    for member in iface.members:
+        if isinstance(member, xpidl.Attribute):
+            fd.write("\\\n  %s; " % attributeAsNative(member, True))
+            if not member.readonly:
+                fd.write("\\\n  %s; " % attributeAsNative(member, False))
+        elif isinstance(member, xpidl.Method):
+            fd.write("\\\n  %s; " % methodAsNative(member))
+    if len(iface.members) == 0:
+        fd.write('\\\n  /* no methods! */')
+    elif member.kind not in ('attribute', 'method'):
+        fd.write('\\')
+
+    fd.write(iface_forward % names)
+
+    def emitTemplate(tmpl):
+        for member in iface.members:
+            if isinstance(member, xpidl.Attribute):
+                fd.write(tmpl % {'asNative': attributeAsNative(member, True),
+                                 'nativeName': attributeNativeName(member, True),
+                                 'paramList': attributeParamName(member)})
+                if not member.readonly:
+                    fd.write(tmpl % {'asNative': attributeAsNative(member, False),
+                                     'nativeName': attributeNativeName(member, False),
+                                     'paramList': attributeParamName(member)})
+            elif isinstance(member, xpidl.Method):
+                fd.write(tmpl % {'asNative': methodAsNative(member),
+                                 'nativeName': methodNativeName(member),
+                                 'paramList': paramlistNames(member.params, member.realtype, member.notxpcom)})
+        if len(iface.members) == 0:
+            fd.write('\\\n  /* no methods! */')
+        elif member.kind not in ('attribute', 'method'):
+            fd.write('\\')
+
+    emitTemplate("\\\n  %(asNative)s { return _to %(nativeName)s(%(paramList)s); } ")
+
+    fd.write(iface_forward_safe % names)
+
+    emitTemplate("\\\n  %(asNative)s { return !_to ? NS_ERROR_NULL_POINTER : _to->%(nativeName)s(%(paramList)s); } ")
+
+    fd.write(iface_template_prolog % names)
+
+    for member in iface.members:
+        if isinstance(member, xpidl.ConstMember) or isinstance(member, xpidl.CDATA): continue
+        fd.write("/* %s */\n" % member.toIDL())
+        if isinstance(member, xpidl.Attribute):
+            fd.write(example_tmpl % {'implclass': implclass,
+                                     'returntype': 'NS_IMETHODIMP',
+                                     'nativeName': attributeNativeName(member, True),
+                                     'paramList': attributeParamlist(member, True)})
+            if not member.readonly:
+                fd.write(example_tmpl % {'implclass': implclass,
+                                         'returntype': 'NS_IMETHODIMP',
+                                         'nativeName': attributeNativeName(member, False),
+                                         'paramList': attributeParamlist(member, False)})
+        elif isinstance(member, xpidl.Method):
+            fd.write(example_tmpl % {'implclass': implclass,
+                                     'returntype': methodReturnType(member, 'NS_IMETHODIMP'),
+                                     'nativeName': methodNativeName(member),
+                                     'paramList': paramlistAsNative(member.params, member.realtype, notxpcom=member.notxpcom, empty='')})
+        fd.write('\n')
+
+    fd.write(iface_template_epilog)
+
+if __name__ == '__main__':
+    from optparse import OptionParser
+    o = OptionParser()
+    o.add_option('-I', action='append', dest='incdirs', help="Directory to search for imported files", default=[])
+    o.add_option('--cachedir', dest='cachedir', help="Directory in which to cache lex/parse tables.", default='')
+    options, args = o.parse_args()
+    file, = args
+
+    if options.cachedir != '':
+        sys.path.append(options.cachedir)
+
+    p = xpidl.IDLParser(outputdir=options.cachedir)
+    idl = p.parse(open(file).read(), filename=file)
+    idl.resolve(options.incdirs, p)
+    print_header(idl, sys.stdout, file)
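Besides the command-line driver above, the same pipeline can be exercised
programmatically. A sketch under stated assumptions: the inline IDL text is a made-up
example, header.py and xpidl.py are importable from the current directory, and
nsISupports is deliberately omitted so resolve() only prints the
"not derived from nsISupports" warning:

    import sys, xpidl
    from header import print_header

    idl_text = '''
    [scriptable, uuid(5f2af7b0-9c4e-4b43-8c70-0d99c3f9a587)]
    interface nsIThing
    {
      readonly attribute long length;
    };
    '''

    p = xpidl.IDLParser(outputdir='')
    idl = p.parse(idl_text, filename='nsIThing.idl')
    idl.resolve([], p)                       # no extra include dirs needed here
    print_header(idl, sys.stdout, 'nsIThing.idl')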
new file mode 100644
--- /dev/null
+++ b/xpcom/idl-parser/xpidl.py
@@ -0,0 +1,1243 @@
+#!/usr/bin/env python
+# xpidl.py - A parser for cross-platform IDL (XPIDL) files.
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+#   Mozilla Foundation.
+# Portions created by the Initial Developer are Copyright (C) 2008
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Benjamin Smedberg <benjamin@smedbergs.us>
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+"""A parser for cross-platform IDL (XPIDL) files."""
+
+import sys, os.path, re
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
+                             'other-licenses', 'ply'))
+from ply import lex, yacc
+
+"""A type conforms to the following pattern:
+
+    def isScriptable(self):
+        'returns True or False'
+
+    def nativeType(self, calltype):
+        'returns a string representation of the native type;
+        calltype must be "in", "out", or "inout"'
+
+Interface members const/method/attribute conform to the following pattern:
+
+    name = 'string'
+
+    def toIDL(self):
+        'returns the member signature as IDL'
+"""
+
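The docstring above is the de-facto protocol that every type object in this file
implements (isScriptable()/nativeType(), plus name/toIDL() on members). A minimal
hypothetical type written against that protocol, purely to make the contract concrete
(not something the parser itself produces):

    class OpaqueHandle(object):
        """Hypothetical non-scriptable type following the pattern above."""
        name = 'opaque-handle'

        def isScriptable(self):
            return False

        def nativeType(self, calltype):
            # one extra level of indirection for 'out'/'inout' parameters
            return calltype == 'in' and 'void * ' or 'void * *'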
+def attlistToIDL(attlist):
+    if len(attlist) == 0:
+        return ''
+
+    attlist = list(attlist)
+    attlist.sort(cmp=lambda a,b: cmp(a[0], b[0]))
+
+    return '[%s] ' % ','.join(["%s%s" % (name, value is not None and '(%s)' % value or '')
+                              for name, value, aloc in attlist])
+
+_paramsHardcode = {
+    2: ('array', 'shared', 'iid_is', 'size_is', 'retval'),
+    3: ('array', 'size_is', 'const'),
+}
+
+def paramAttlistToIDL(attlist):
+    if len(attlist) == 0:
+        return ''
+
+    # Hack alert: g_hash_table_foreach is pretty much unimitatable... hardcode
+    # quirk
+    attlist = list(attlist)
+    sorted = []
+    if len(attlist) in _paramsHardcode:
+        for p in _paramsHardcode[len(attlist)]:
+            i = 0
+            while i < len(attlist):
+                if attlist[i][0] == p:
+                    sorted.append(attlist[i])
+                    del attlist[i]
+                    continue
+
+                i += 1
+
+    sorted.extend(attlist)
+
+    return '[%s] ' % ', '.join(["%s%s" % (name, value is not None and ' (%s)' % value or '')
+                                for name, value, aloc in sorted])
+
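To make the quirk above concrete: for a three-attribute parameter the hardcoded order
is ('array', 'size_is', 'const'), with any remaining attributes appended afterwards.
A small sketch (hypothetical attlist of (name, value, location) tuples):

    attlist = [('size_is', 'count', None), ('retval', None, None), ('array', None, None)]
    print paramAttlistToIDL(attlist)   # -> "[array, size_is (count), retval] "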
+class BuiltinLocation(object):
+    def get(self):
+        return "<builtin type>"
+
+    def __str__(self):
+        return self.get()
+
+class Builtin(object):
+    kind = 'builtin'
+    location = BuiltinLocation
+
+    def __init__(self, name, nativename, signed=False, maybeConst=False):
+        self.name = name
+        self.nativename = nativename
+        self.signed = signed
+        self.maybeConst = maybeConst
+
+    def isScriptable(self):
+        return True
+
+    def nativeType(self, calltype, shared=False, const=False):
+        if const:
+            print >>sys.stderr, IDLError("[const] doesn't make sense on builtin types.", self.location, warning=True)
+            const = 'const '
+        elif calltype == 'in' and self.nativename.endswith('*'):
+            const = 'const '
+        elif shared:
+            if not self.nativename.endswith('*'):
+                raise IDLError("[shared] not applicable to non-pointer types.", self.location)
+            const = 'const '
+        else:
+            const = ''
+        return "%s%s %s" % (const, self.nativename,
+                            calltype != 'in' and '*' or '')
+
+builtinNames = [
+    Builtin('boolean', 'PRBool'),
+    Builtin('void', 'void'),
+    Builtin('octet', 'PRUint8'),
+    Builtin('short', 'PRInt16', True, True),
+    Builtin('long', 'PRInt32', True, True),
+    Builtin('long long', 'PRInt64', True, False),
+    Builtin('unsigned short', 'PRUint16', False, True),
+    Builtin('unsigned long', 'PRUint32', False, True),
+    Builtin('unsigned long long', 'PRUint64', False, False),
+    Builtin('float', 'float', True, False),
+    Builtin('double', 'double', True, False),
+    Builtin('char', 'char', True, False),
+    Builtin('string', 'char *', False, False),
+    Builtin('wchar', 'PRUnichar', False, False),
+    Builtin('wstring', 'PRUnichar *', False, False),
+]
+
+builtinMap = {}
+for b in builtinNames:
+    builtinMap[b.name] = b
+
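The table above determines how XPIDL builtins surface in generated C++ signatures; a
few representative values, per the nativeType() logic above (REPL-style sketch):

    print builtinMap['long'].nativeType('in')       # "PRInt32 "
    print builtinMap['string'].nativeType('in')     # "const char * "
    print builtinMap['wstring'].nativeType('out')   # "PRUnichar * *"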
+class Location(object):
+    _line = None
+
+    def __init__(self, lexer, lineno, lexpos):
+        self._lineno = lineno
+        self._lexpos = lexpos
+        self._lexdata = lexer.lexdata
+        self._file = getattr(lexer, 'filename', "<unknown>")
+
+    def __eq__(self, other):
+        return self._lexpos == other._lexpos and \
+               self._file == other._file
+
+    def resolve(self):
+        if self._line:
+            return
+
+        startofline = self._lexdata.rfind('\n', 0, self._lexpos) + 1
+        endofline = self._lexdata.find('\n', self._lexpos, self._lexpos + 80)
+        self._line = self._lexdata[startofline:endofline]
+        self._colno = self._lexpos - startofline
+
+    def pointerline(self):
+        def i():
+            for i in xrange(0, self._colno):
+                yield " "
+            yield "^"
+
+        return "".join(i())
+
+    def get(self):
+        self.resolve()
+        return "%s line %s:%s" % (self._file, self._lineno, self._colno)
+
+    def __str__(self):
+        self.resolve()
+        return "%s line %s:%s\n%s\n%s" % (self._file, self._lineno, self._colno,
+                                          self._line, self.pointerline())
+
+class NameMap(object):
+    """Map of name -> object. Each object must have a .name and .location property.
+    Setting the same name twice throws an error."""
+    def __init__(self):
+        self._d = {}
+
+    def __getitem__(self, key):
+        if key in builtinMap:
+            return builtinMap[key]
+        return self._d[key]
+
+    def __iter__(self):
+        return self._d.itervalues()
+
+    def __contains__(self, key):
+        return key in builtinMap or key in self._d
+
+    def set(self, object):
+        if object.name in builtinMap:
+            raise IDLError("name '%s' is a builtin and cannot be redeclared" % (object.name), object.location)
+        if object.name in self._d:
+            old = self._d[object.name]
+            if old == object: return
+            if isinstance(old, Forward) and isinstance(object, Interface):
+                self._d[object.name] = object
+            elif isinstance(old, Interface) and isinstance(object, Forward):
+                pass
+            else:
+                raise IDLError("name '%s' specified twice. Previous location: %s" % (object.name, self._d[object.name].location), object.location)
+        else:
+            self._d[object.name] = object
+
+    def get(self, id, location):
+        try:
+            return self[id]
+        except KeyError:
+            raise IDLError("Name '%s' not found", location)
+
+class IDLError(Exception):
+    def __init__(self, message, location, warning=False):
+        self.message = message
+        self.location = location
+        self.warning = warning
+
+    def __str__(self):
+        return "%s: %s, %s" % (self.warning and 'warning' or 'error',
+                               self.message, self.location)
+
+class Include(object):
+    kind = 'include'
+
+    def __init__(self, filename, location):
+        self.filename = filename
+        self.location = location
+
+    def __str__(self):
+        return "".join(["include '%s'\n" % self.filename])
+
+    def resolve(self, parent):
+        def incfiles():
+            yield self.filename
+            for dir in parent.incdirs:
+                yield os.path.join(dir, self.filename)
+
+        for file in incfiles():
+            if not os.path.exists(file): continue
+
+            self.IDL = parent.parser.parse(open(file).read(), filename=file)
+            self.IDL.resolve(parent.incdirs, parent.parser)
+            for type in self.IDL.getNames():
+                parent.setName(type)
+            return
+
+        raise IDLError("File '%s' not found" % self.filename, self.location)
+
+class IDL(object):
+    def __init__(self, productions):
+        self.productions = productions
+
+    def setName(self, object):
+        self.namemap.set(object)
+
+    def getName(self, id, location):
+        try:
+            return self.namemap[id]
+        except KeyError:
+            raise IDLError("type '%s' not found" % id, location)
+
+    def hasName(self, id):
+        return id in self.namemap
+
+    def getNames(self):
+        return iter(self.namemap)
+
+    def __str__(self):
+        return "".join([str(p) for p in self.productions])
+
+    def resolve(self, incdirs, parser):
+        self.namemap = NameMap()
+        self.incdirs = incdirs
+        self.parser = parser
+        for p in self.productions:
+            p.resolve(self)
+
+    def includes(self):
+        for p in self.productions:
+            if p.kind == 'include':
+                yield p
+
+class CDATA(object):
+    kind = 'cdata'
+    _re = re.compile(r'\n+')
+
+    def __init__(self, data, location):
+        self.data = self._re.sub('\n', data)
+        self.location = location
+
+    def resolve(self, parent):
+        pass
+
+    def __str__(self):
+        return "cdata: %s\n\t%r\n" % (self.location.get(), self.data)
+
+class Typedef(object):
+    kind = 'typedef'
+
+    def __init__(self, type, name, location, doccomments):
+        self.type = type
+        self.name = name
+        self.location = location
+        self.doccomments = doccomments
+
+    def __eq__(self, other):
+        return self.name == other.name and self.type == other.type
+
+    def resolve(self, parent):
+        parent.setName(self)
+        self.realtype = parent.getName(self.type, self.location)
+
+    def isScriptable(self):
+        return self.realtype.isScriptable()
+
+    def nativeType(self, calltype):
+        return "%s %s" % (self.name,
+                          calltype != 'in' and '*' or '')
+
+    def __str__(self):
+        return "typedef %s %s\n" % (self.type, self.name)
+
+class Forward(object):
+    kind = 'forward'
+
+    def __init__(self, name, location, doccomments):
+        self.name = name
+        self.location = location
+        self.doccomments = doccomments
+
+    def __eq__(self, other):
+        return other.kind == 'forward' and other.name == self.name
+
+    def resolve(self, parent):
+        # Hack alert: if an identifier is already present, move the doccomments
+        # forward.
+        if parent.hasName(self.name):
+            for i in xrange(0, len(parent.productions)):
+                if parent.productions[i] is self: break
+            for i in xrange(i + 1, len(parent.productions)):
+                if hasattr(parent.productions[i], 'doccomments'):
+                    parent.productions[i].doccomments[0:0] = self.doccomments
+                    break
+
+        parent.setName(self)
+
+    def isScriptable(self):
+        return True
+
+    def nativeType(self, calltype):
+        return "%s %s" % (self.name,
+                          calltype != 'in' and '* *' or '*')
+
+    def __str__(self):
+        return "forward-declared %s\n" % self.name
+
+class Native(object):
+    kind = 'native'
+
+    modifier = None
+    specialtype = None
+
+    specialtypes = {
+        'nsid': None,
+        'domstring': 'nsAString',
+        'utf8string': 'nsACString',
+        'cstring': 'nsACString',
+        'astring': 'nsAString'
+        }
+
+    def __init__(self, name, nativename, attlist, location):
+        self.name = name
+        self.nativename = nativename
+        self.location = location
+
+        for name, value, aloc in attlist:
+            if value is not None:
+                raise IDLError("Unexpected attribute value", aloc)
+            if name in ('ptr', 'ref'):
+                if self.modifier is not None:
+                    raise IDLError("More than one ptr/ref modifier", aloc)
+                self.modifier = name
+            elif name in self.specialtypes.keys():
+                if self.specialtype is not None:
+                    raise IDLError("More than one special type", aloc)
+                self.specialtype = name
+                if self.specialtypes[name] is not None:
+                    self.nativename = self.specialtypes[name]
+            else:
+                raise IDLError("Unexpected attribute", aloc)
+
+    def __eq__(self, other):
+        return self.name == other.name and \
+               self.nativename == other.nativename and \
+               self.modifier == other.modifier and \
+               self.specialtype == other.specialtype
+
+    def resolve(self, parent):
+        parent.setName(self)
+
+    def isScriptable(self):
+        if self.specialtype is None:
+            return False
+
+        if self.specialtype == 'nsid':
+            return self.modifier is not None
+
+        return self.modifier == 'ref'
+
+    def nativeType(self, calltype, const=False, shared=False):
+        if shared:
+            if calltype != 'out':
+                raise IDLError("[shared] only applies to out parameters.")
+            const = True
+
+        if self.specialtype is not None and calltype == 'in':
+            const = True
+
+        if self.modifier == 'ptr':
+            m = '*' + (calltype != 'in' and '*' or '')
+        elif self.modifier == 'ref':
+            m = '& '
+        else:
+            m = calltype != 'in' and '*' or ''
+        return "%s%s %s" % (const and 'const ' or '', self.nativename, m)
+
+    def __str__(self):
+        return "native %s(%s)\n" % (self.name, self.nativename)
+
+class Interface(object):
+    kind = 'interface'
+
+    def __init__(self, name, attlist, base, members, location, doccomments):
+        self.name = name
+        self.attributes = InterfaceAttributes(attlist, location)
+        self.base = base
+        self.members = members
+        self.location = location
+        self.namemap = NameMap()
+        self.doccomments = doccomments
+        self.nativename = name
+
+        for m in members:
+            if not isinstance(m, CDATA):
+                self.namemap.set(m)
+
+    def __eq__(self, other):
+        return self.name == other.name and self.location == other.location
+
+    def resolve(self, parent):
+        self.idl = parent
+
+        # Hack alert: if an identifier is already present, libIDL assigns
+        # doc comments incorrectly. This is quirks-mode extraordinaire!
+        if parent.hasName(self.name):
+            for member in self.members:
+                if hasattr(member, 'doccomments'):
+                    member.doccomments[0:0] = self.doccomments
+                    break
+            self.doccomments = parent.getName(self.name, None).doccomments
+
+        parent.setName(self)
+        if self.base is None:
+            if self.name != 'nsISupports':
+                print >>sys.stderr, IDLError("interface '%s' not derived from nsISupports",
+                                             self.location, warning=True)
+        else:
+            realbase = parent.getName(self.base, self.location)
+            if realbase.kind != 'interface':
+                raise IDLError("interface '%s' inherits from non-interface type '%s'" % (self.name, self.base), self.location)
+
+            if self.attributes.scriptable and not realbase.attributes.scriptable:
+                print >>sys.stderr, IDLError("interface '%s' is scriptable but derives from non-scriptable '%s'" % (self.name, self.base), self.location, warning=True)
+
+        for member in self.members:
+            member.resolve(self)
+
+    def isScriptable(self):
+        # NOTE: this is not whether *this* interface is scriptable... it's
+        # whether, when used as a type, it's scriptable, which is true of all
+        # interfaces.
+        return True
+
+    def nativeType(self, calltype, const=False):
+        return "%s%s %s" % (const and 'const ' or '',
+                            self.name,
+                            calltype != 'in' and '* *' or '*')
+
+    def __str__(self):
+        l = ["interface %s\n" % self.name]
+        if self.base is not None:
+            l.append("\tbase %s\n" % self.base)
+        l.append(str(self.attributes))
+        if self.members is None:
+            l.append("\tincomplete type\n")
+        else:
+            for m in self.members:
+                l.append(str(m))
+        return "".join(l)
+
+    def getConst(self, name, location):
+        c = self.namemap.get(name, location)
+        if c.kind != 'const':
+            raise IDLError("symbol '%s' is not a constant", c.location)
+
+        return c.getValue()
+
+class InterfaceAttributes(object):
+    uuid = None
+    scriptable = False
+    function = False
+    deprecated = False
+
+    def setuuid(self, value):
+        self.uuid = value.lower()
+
+    def setscriptable(self):
+        self.scriptable = True
+
+    def setfunction(self):
+        self.function = True
+
+    def setnoscript(self):
+        self.noscript = True
+
+    def setdeprecated(self):
+        self.deprecated = True
+
+    actions = {
+        'uuid':       (True, setuuid),
+        'scriptable': (False, setscriptable),
+        'function':   (False, setfunction),
+        'noscript':   (False, setnoscript),
+        'deprecated': (False, setdeprecated),
+        'object':     (False, lambda self: True),
+        }
+
+    def __init__(self, attlist, location):
+        def badattribute(self):
+            raise IDLError("Unexpected interface attribute '%s'" % name, location)
+
+        for name, val, aloc in attlist:
+            hasval, action = self.actions.get(name, (False, badattribute))
+            if hasval:
+                if val is None:
+                    raise IDLError("Expected value for attribute '%s'" % name,
+                                   aloc)
+
+                action(self, val)
+            else:
+                if val is not None:
+                    raise IDLError("Unexpected value for attribute '%s'" % name,
+                                   aloc)
+
+                action(self)
+
+        if self.uuid is None:
+            raise IDLError("interface has no uuid", location)
+
+    def __str__(self):
+        l = []
+        if self.uuid:
+            l.append("\tuuid: %s\n" % self.uuid)
+        if self.scriptable:
+            l.append("\tscriptable\n")
+        if self.function:
+            l.append("\tfunction\n")
+        return "".join(l)
+
+class ConstMember(object):
+    kind = 'const'
+    def __init__(self, type, name, value, location, doccomments):
+        self.type = type
+        self.name = name
+        self.value = value
+        self.location = location
+        self.doccomments = doccomments
+
+    def resolve(self, parent):
+        self.realtype = parent.idl.getName(self.type, self.location)
+        self.iface = parent
+        basetype = self.realtype
+        while isinstance(basetype, Typedef):
+            basetype = basetype.realtype
+        if not isinstance(basetype, Builtin) or not basetype.maybeConst:
+            raise IDLError("const may only be a short or long type, not %s" % self.type, self.location)
+
+        self.basetype = basetype
+
+    def getValue(self):
+        return self.value(self.iface)
+
+class Attribute(object):
+    kind = 'attribute'
+    noscript = False
+    notxpcom = False
+
+    def __init__(self, type, name, attlist, readonly, location, doccomments):
+        self.type = type
+        self.name = name
+        self.attlist = attlist
+        self.readonly = readonly
+        self.location = location
+        self.binaryname = None
+        self.doccomments = doccomments
+
+        for name, value, aloc in attlist:
+            if name == 'binaryname':
+                if value is None:
+                    raise IDLError("binaryname attribute requires a value",
+                                   aloc)
+
+                self.binaryname = value
+                continue
+
+            if value is not None:
+                raise IDLError("Unexpected attribute value", aloc)
+
+            if name == 'noscript':
+                self.noscript = True
+            elif name == 'notxpcom':
+                self.notxpcom = True
+            else:
+                raise IDLError("Unexpected attribute '%s'", aloc)
+
+    def resolve(self, iface):
+        self.iface = iface
+        self.realtype = iface.idl.getName(self.type, self.location)
+
+    def toIDL(self):
+        attribs = attlistToIDL(self.attlist)
+        readonly = self.readonly and 'readonly ' or ''
+        return "%s%sattribute %s %s;" % (attribs, readonly, self.type, self.name)
+
+    def isScriptable(self):
+        if not self.iface.attributes.scriptable: return False
+        return not (self.noscript or self.notxpcom)
+
+    def __str__(self):
+        return "\t%sattribute %s %s\n" % (self.readonly and 'readonly ' or '',
+                                          self.type, self.name)
+
+class Method(object):
+    kind = 'method'
+    noscript = False
+    notxpcom = False
+    binaryname = None
+
+    def __init__(self, type, name, attlist, paramlist, location, doccomments, raises):
+        self.type = type
+        self.name = name
+        self.attlist = attlist
+        self.params = paramlist
+        self.location = location
+        self.doccomments = doccomments
+        self.raises = raises
+
+        for name, value, aloc in attlist:
+            if name == 'binaryname':
+                if value is None:
+                    raise IDLError("binaryname attribute requires a value",
+                                   aloc)
+
+                self.binaryname = value
+                continue
+
+            if value is not None:
+                raise IDLError("Unexpected attribute value", aloc)
+
+            if name == 'noscript':
+                self.noscript = True
+            elif name == 'notxpcom':
+                self.notxpcom = True
+            else:
+                raise IDLError("Unexpected attribute '%s'", aloc)
+
+        self.namemap = NameMap()
+        for p in paramlist:
+            self.namemap.set(p)
+
+    def resolve(self, iface):
+        self.iface = iface
+        self.realtype = self.iface.idl.getName(self.type, self.location)
+        for p in self.params:
+            p.resolve(self)
+
+    def isScriptable(self):
+        if not self.iface.attributes.scriptable: return False
+        return not (self.noscript or self.notxpcom)
+
+    def __str__(self):
+        return "\t%s %s(%s)\n" % (self.type, self.name, ", ".join([p.name for p in self.params]))
+
+    def toIDL(self):
+        if len(self.raises):
+            raises = ' raises (%s)' % ','.join(self.raises)
+        else:
+            raises = ''
+
+        return "%s%s %s (%s)%s;" % (attlistToIDL(self.attlist),
+                                    self.type,
+                                    self.name,
+                                    ", ".join([p.toIDL()
+                                               for p in self.params]),
+                                    raises)
+
+class Param(object):
+    size_is = None
+    iid_is = None
+    const = False
+    array = False
+    retval = False
+    shared = False
+    optional = False
+
+    def __init__(self, paramtype, type, name, attlist, location, realtype=None):
+        self.paramtype = paramtype
+        self.type = type
+        self.name = name
+        self.attlist = attlist
+        self.location = location
+        self.realtype = realtype
+
+        for name, value, aloc in attlist:
+            # Put the value-taking attributes first!
+            if name == 'size_is':
+                if value is None:
+                    raise IDLError("'size_is' must specify a parameter", aloc)
+                self.size_is = value
+            elif name == 'iid_is':
+                if value is None:
+                    raise IDLError("'iid_is' must specify a parameter", aloc)
+                self.iid_is = value
+            else:
+                if value is not None:
+                    raise IDLError("Unexpected value for attribute '%s'" % name,
+                                   aloc)
+
+                if name == 'const':
+                    self.const = True
+                elif name == 'array':
+                    self.array = True
+                elif name == 'retval':
+                    self.retval = True
+                elif name == 'shared':
+                    self.shared = True
+                elif name == 'optional':
+                    self.optional = True
+                else:
+                    raise IDLError("Unexpected attribute '%s'" % name, aloc)
+
+    def resolve(self, method):
+        self.realtype = method.iface.idl.getName(self.type, self.location)
+        if self.array:
+            self.realtype = Array(self.realtype)
+
+    def nativeType(self):
+        kwargs = {}
+        if self.shared: kwargs['shared'] = True
+        if self.const: kwargs['const'] = True
+
+        try:
+            return self.realtype.nativeType(self.paramtype, **kwargs)
+        except IDLError, e:
+            raise IDLError(e.message, self.location)
+        except TypeError, e:
+            raise IDLError("Unexpected parameter attribute", self.location)
+
+    def toIDL(self):
+        return "%s%s %s %s" % (paramAttlistToIDL(self.attlist),
+                               self.paramtype,
+                               self.type,
+                               self.name)
+
+class Array(object):
+    def __init__(self, basetype):
+        self.type = basetype
+
+    def isScriptable(self):
+        return self.type.isScriptable()
+
+    def nativeType(self, calltype, const=False):
+        return "%s%s*" % (const and 'const ' or '',
+                          self.type.nativeType(calltype))
+
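+# PLY-based lexer and parser for XPIDL. The t_* members below are lexer
+# rules, the p_* methods are grammar actions, and together they build the
+# object model defined above (Interface, Method, Param, and friends).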
+class IDLParser(object):
+    keywords = {
+        'const': 'CONST',
+        'interface': 'INTERFACE',
+        'in': 'IN',
+        'inout': 'INOUT',
+        'out': 'OUT',
+        'attribute': 'ATTRIBUTE',
+        'raises': 'RAISES',
+        'readonly': 'READONLY',
+        'native': 'NATIVE',
+        'typedef': 'TYPEDEF'
+        }
+
+    tokens = [
+        'IDENTIFIER',
+        'CDATA',
+        'INCLUDE',
+        'IID',
+        'NUMBER',
+        'HEXNUM',
+        'LSHIFT',
+        'RSHIFT',
+        'NATIVEID',
+        ]
+
+    tokens.extend(keywords.values())
+
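+    # The exclusive 'nativeid' state lets the lexer slurp the raw C++ text
+    # inside a native declaration (e.g. the "void" in
+    # "[ptr] native voidPtr(void);") as a single NATIVEID token; see
+    # p_afternativeid and t_nativeid_NATIVEID below.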
+    states = (
+        ('nativeid', 'exclusive'),
+    )
+
+    hexchar = r'[a-fA-F0-9]'
+
+    t_NUMBER = r'-?\d+'
+    t_HEXNUM = r'0x%s+' % hexchar
+    t_LSHIFT = r'<<'
+    t_RSHIFT = r'>>'
+
+    literals = '"(){}[],;:=|+-*'
+
+    t_ignore = ' \t'
+
+    def t_multilinecomment(self, t):
+        r'/\*(?s).*?\*/'
+        t.lexer.lineno += t.value.count('\n')
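+        # Doc comments (/** ... */) are saved so that grammar actions can
+        # attach them to the declaration that follows; all other block
+        # comments are dropped (the rule returns no token).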
+        if t.value.startswith("/**"):
+            self._doccomments.append(t.value)
+
+    def t_singlelinecomment(self, t):
+        r'(?m)//.*?$'
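+        # Single-line comments produce no token and are simply discarded.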
+
+    def t_IID(self, t):
+        return t
+    t_IID.__doc__ = r'%(c)s{8}-%(c)s{4}-%(c)s{4}-%(c)s{4}-%(c)s{12}' % {'c': hexchar}
+
+    def t_IDENTIFIER(self, t):
+        r'unsigned\ long\ long|unsigned\ short|unsigned\ long|long\ long|[A-Za-z][A-Za-z_0-9]*'
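+        # Multi-word primitive types are listed first so that they lex as a
+        # single IDENTIFIER; reserved words are remapped below via 'keywords'.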
+        t.type = self.keywords.get(t.value, 'IDENTIFIER')
+        return t
+
+    def t_LCDATA(self, t):
+        r'(?s)%\{[ ]*C\+\+[ ]*\n(?P<cdata>.*?\n?)%\}[ ]*(C\+\+)?'
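+        # A %{C++ ... %} block: the body is captured verbatim and handed
+        # back as one CDATA token.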
+        t.type = 'CDATA'
+        t.value = t.lexer.lexmatch.group('cdata')
+        t.lexer.lineno += t.value.count('\n')
+        return t
+
+    def t_INCLUDE(self, t):
+        r'\#include[ \t]+"[^"\n]+"'
+        inc, value, end = t.value.split('"')
+        t.value = value
+        return t
+
+    def t_directive(self, t):
+        r'\#(?P<directive>[a-zA-Z]+)[^\n]+'
+        print >>sys.stderr, IDLError("Unrecognized directive %s" % t.lexer.lexmatch.group('directive'),
+                                     Location(lexer=self.lexer,
+                                              lineno=self.lexer.lineno,
+                                              lexpos=self.lexer.lexpos))
+
+    def t_newline(self, t):
+        r'\n+'
+        t.lexer.lineno += len(t.value)
+
+    def t_nativeid_NATIVEID(self, t):
+        r'[^()\n]+(?=\))'
+        t.lexer.begin('INITIAL')
+        return t
+
+    t_nativeid_ignore = ''
+
+    def t_ANY_error(self, t):
+        raise IDLError("unrecognized input",
+                       Location(lexer=self.lexer,
+                                lineno=self.lexer.lineno,
+                                lexpos=self.lexer.lexpos))
+
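+    # Operator precedence for the constant-expression grammar used in
+    # 'const' member declarations (the 'number' productions below).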
+    precedence = (
+        ('left', '|'),
+        ('left', 'LSHIFT', 'RSHIFT'),
+        ('left', '+', '-'),
+        ('left', '*'),
+        ('left', 'UMINUS'),
+    )
+
+    def p_idlfile(self, p):
+        """idlfile : productions"""
+        p[0] = IDL(p[1])
+
+    def p_productions_start(self, p):
+        """productions : """
+        p[0] = []
+
+    def p_productions_cdata(self, p):
+        """productions : CDATA productions"""
+        p[0] = list(p[2])
+        p[0].insert(0, CDATA(p[1], self.getLocation(p, 1)))
+
+    def p_productions_include(self, p):
+        """productions : INCLUDE productions"""
+        p[0] = list(p[2])
+        p[0].insert(0, Include(p[1], self.getLocation(p, 1)))
+
+    def p_productions_interface(self, p):
+        """productions : interface productions
+                       | typedef productions
+                       | native productions"""
+        p[0] = list(p[2])
+        p[0].insert(0, p[1])
+
+    def p_typedef(self, p):
+        """typedef : TYPEDEF IDENTIFIER IDENTIFIER ';'"""
+        p[0] = Typedef(type=p[2],
+                       name=p[3],
+                       location=self.getLocation(p, 1),
+                       doccomments=p.slice[1].doccomments)
+
+    def p_native(self, p):
+        """native : attributes NATIVE IDENTIFIER afternativeid '(' NATIVEID ')' ';'"""
+        p[0] = Native(name=p[3],
+                      nativename=p[6],
+                      attlist=p[1]['attlist'],
+                      location=self.getLocation(p, 2))
+
+    def p_afternativeid(self, p):
+        """afternativeid : """
+        # this is a place marker: we switch the lexer into literal identifier
+        # mode here, to slurp up everything until the closeparen
+        self.lexer.begin('nativeid')
+
+    def p_anyident(self, p):
+        """anyident : IDENTIFIER
+                    | CONST"""
+        p[0] = {'value': p[1],
+                'location': self.getLocation(p, 1)}
+
+    def p_attributes(self, p):
+        """attributes : '[' attlist ']'
+                      | """
+        if len(p) == 1:
+            p[0] = {'attlist': []}
+        else:
+            p[0] = {'attlist': p[2],
+                    'doccomments': p.slice[1].doccomments}
+
+    def p_attlist_start(self, p):
+        """attlist : attribute"""
+        p[0] = [p[1]]
+
+    def p_attlist_continue(self, p):
+        """attlist : attribute ',' attlist"""
+        p[0] = list(p[3])
+        p[0].insert(0, p[1])
+
+    def p_attribute(self, p):
+        """attribute : anyident attributeval"""
+        p[0] = (p[1]['value'], p[2], p[1]['location'])
+
+    def p_attributeval(self, p):
+        """attributeval : '(' IDENTIFIER ')'
+                        | '(' IID ')'
+                        | """
+        if len(p) > 1:
+            p[0] = p[2]
+
+    def p_interface(self, p):
+        """interface : attributes INTERFACE IDENTIFIER ifacebase ifacebody ';'"""
+        atts, INTERFACE, name, base, body, SEMI = p[1:]
+        attlist = atts['attlist']
+        doccomments = []
+        if 'doccomments' in atts:
+            doccomments.extend(atts['doccomments'])
+        doccomments.extend(p.slice[2].doccomments)
+
+        l = lambda: self.getLocation(p, 2)
+
+        if body is None:
+            # forward-declared interface... must not have attributes!
+            if len(attlist) != 0:
+                raise IDLError("Forward-declared interface must not have attributes",
+                               attlist[0][2])
+
+            if base is not None:
+                raise IDLError("Forward-declared interface must not have a base",
+                               l())
+            p[0] = Forward(name=name, location=l(), doccomments=doccomments)
+        else:
+            p[0] = Interface(name=name,
+                             attlist=attlist,
+                             base=base,
+                             members=body,
+                             location=l(),
+                             doccomments=doccomments)
+
+    def p_ifacebody(self, p):
+        """ifacebody : '{' members '}'
+                     | """
+        if len(p) > 1:
+            p[0] = p[2]
+
+    def p_ifacebase(self, p):
+        """ifacebase : ':' IDENTIFIER
+                     | """
+        if len(p) == 3:
+            p[0] = p[2]
+
+    def p_members_start(self, p):
+        """members : """
+        p[0] = []
+
+    def p_members_continue(self, p):
+        """members : member members"""
+        p[0] = list(p[2])
+        p[0].insert(0, p[1])
+
+    def p_member_cdata(self, p):
+        """member : CDATA"""
+        p[0] = CDATA(p[1], self.getLocation(p, 1))
+
+    def p_member_const(self, p):
+        """member : CONST IDENTIFIER IDENTIFIER '=' number ';' """
+        p[0] = ConstMember(type=p[2], name=p[3],
+                           value=p[5], location=self.getLocation(p, 1),
+                           doccomments=p.slice[1].doccomments)
+
+# All "number" products return a function(interface)
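+# Evaluation is deferred because a constant may refer to another constant by
+# name (see p_number_identifier, which calls getConst() on the resolved
+# interface). For example, "const unsigned long B = A + 1;" produces a
+# function that looks up A on the interface and adds 1.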
+
+    def p_number_decimal(self, p):
+        """number : NUMBER"""
+        n = int(p[1])
+        p[0] = lambda i: n
+
+    def p_number_hex(self, p):
+        """number : HEXNUM"""
+        n = int(p[1], 16)
+        p[0] = lambda i: n
+
+    def p_number_identifier(self, p):
+        """number : IDENTIFIER"""
+        id = p[1]
+        loc = self.getLocation(p, 1)
+        p[0] = lambda i: i.getConst(id, loc)
+
+    def p_number_paren(self, p):
+        """number : '(' number ')'"""
+        p[0] = p[2]
+
+    def p_number_neg(self, p):
+        """number : '-' number %prec UMINUS"""
+        n = p[2]
+        p[0] = lambda i: - n(i)
+
+    def p_number_add(self, p):
+        """number : number '+' number
+                  | number '-' number
+                  | number '*' number"""
+        n1 = p[1]
+        n2 = p[3]
+        if p[2] == '+':
+            p[0] = lambda i: n1(i) + n2(i)
+        elif p[2] == '-':
+            p[0] = lambda i: n1(i) - n2(i)
+        else:
+            p[0] = lambda i: n1(i) * n2(i)
+
+    def p_number_shift(self, p):
+        """number : number LSHIFT number
+                  | number RSHIFT number"""
+        n1 = p[1]
+        n2 = p[3]
+        if p[2] == '<<':
+            p[0] = lambda i: n1(i) << n2(i)
+        else:
+            p[0] = lambda i: n1(i) >> n2(i)
+
+    def p_number_bitor(self, p):
+        """number : number '|' number"""
+        n1 = p[1]
+        n2 = p[3]
+        p[0] = lambda i: n1(i) | n2(i)
+
+    def p_member_att(self, p):
+        """member : attributes optreadonly ATTRIBUTE IDENTIFIER IDENTIFIER ';'"""
+        if 'doccomments' in p[1]:
+            doccomments = p[1]['doccomments']
+        elif p[2] is not None:
+            doccomments = p[2]
+        else:
+            doccomments = p.slice[3].doccomments
+
+        p[0] = Attribute(type=p[4],
+                         name=p[5],
+                         attlist=p[1]['attlist'],
+                         readonly=p[2] is not None,
+                         location=self.getLocation(p, 1),
+                         doccomments=doccomments)
+
+    def p_member_method(self, p):
+        """member : attributes IDENTIFIER IDENTIFIER '(' paramlist ')' raises ';'"""
+        if 'doccomments' in p[1]:
+            doccomments = p[1]['doccomments']
+        else:
+            doccomments = p.slice[2].doccomments
+
+        p[0] = Method(type=p[2],
+                      name=p[3],
+                      attlist=p[1]['attlist'],
+                      paramlist=p[5],
+                      location=self.getLocation(p, 1),
+                      doccomments=doccomments,
+                      raises=p[7])
+
+    def p_paramlist(self, p):
+        """paramlist : param moreparams
+                     | """
+        if len(p) == 1:
+            p[0] = []
+        else:
+            p[0] = list(p[2])
+            p[0].insert(0, p[1])
+
+    def p_moreparams_start(self, p):
+        """moreparams :"""
+        p[0] = []
+
+    def p_moreparams_continue(self, p):
+        """moreparams : ',' param moreparams"""
+        p[0] = list(p[3])
+        p[0].insert(0, p[2])
+
+    def p_param(self, p):
+        """param : attributes paramtype IDENTIFIER IDENTIFIER"""
+        p[0] = Param(paramtype=p[2],
+                     type=p[3],
+                     name=p[4],
+                     attlist=p[1]['attlist'],
+                     location=self.getLocation(p, 3))
+
+    def p_paramtype(self, p):
+        """paramtype : IN
+                     | INOUT
+                     | OUT"""
+        p[0] = p[1]
+
+    def p_optreadonly(self, p):
+        """optreadonly : READONLY
+                       | """
+        if len(p) > 1:
+            p[0] = p.slice[1].doccomments
+        else:
+            p[0] = None
+
+    def p_raises(self, p):
+        """raises : RAISES '(' idlist ')'
+                  | """
+        if len(p) == 1:
+            p[0] = []
+        else:
+            p[0] = p[3]
+
+    def p_idlist(self, p):
+        """idlist : IDENTIFIER"""
+        p[0] = [p[1]]
+
+    def p_idlist_continue(self, p):
+        """idlist : IDENTIFIER ',' idlist"""
+        p[0] = list(p[3])
+        p[0].insert(0, p[1])
+
+    def p_error(self, t):
+        location = Location(self.lexer, t.lineno, t.lexpos)
+        raise IDLError("invalid syntax", location)
+
+    def __init__(self, outputdir=''):
+        self._doccomments = []
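+        # With optimize=1 PLY caches the generated lexer tables in the
+        # 'xpidllex' module under outputdir rather than rebuilding them on
+        # every run.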
+        self.lexer = lex.lex(object=self,
+                             outputdir=outputdir,
+                             lextab='xpidllex',
+                             optimize=1)
<