Merge m-c to d-b.
authorMs2ger <ms2ger@gmail.com>
Tue, 21 Feb 2012 22:44:16 +0100
changeset 87463 7e6c70d95373
parent 87462 9bde0d25d76e (current diff)
parent 87319 8d6b7cd7baee (diff)
child 87464 0738da49af22
push id132
push userMs2ger@gmail.com
push dateTue, 21 Feb 2012 22:54:28 +0000
milestone13.0a1
Merge m-c to d-b.
caps/src/nsScriptSecurityManager.cpp
content/base/src/nsGenericElement.cpp
content/html/content/src/nsHTMLTableElement.cpp
dom/Makefile.in
dom/base/nsGlobalWindow.h
dom/base/nsJSEnvironment.cpp
dom/workers/WorkerScope.cpp
js/src/jsapi.cpp
js/src/jsapi.h
js/xpconnect/src/XPCQuickStubs.cpp
js/xpconnect/src/XPCWrappedNativeJSOps.cpp
js/xpconnect/src/XPCWrappedNativeScope.cpp
js/xpconnect/src/dombindingsgen.py
js/xpconnect/src/xpcpublic.h
--- a/.hgignore
+++ b/.hgignore
@@ -40,8 +40,10 @@
 # SVN directories
 \.svn/
 
 # Ignore the files and directory that Eclipse IDE creates
 \.project$
 \.cproject$
 \.settings/
 
+# Ignore WebIDLParser generated files
+^dom/bindings/parser/webidlyacc.py
--- a/.hgtags
+++ b/.hgtags
@@ -71,10 +71,9 @@ 41b84b87c816403e1b74963d8094cff0406c989e
 c0983049bcaa9551e5f276d5a77ce154c151e0b0 AURORA_BASE_20110927
 462c726144bc1fb45b61e774f64ac5d61b4e047c UPDATE_PACKAGING_R15
 54bfd8bf682e295ffd7f22fa921ca343957b6c1c AURORA_BASE_20111108
 a8506ab2c65480cf2f85f54e203ea746522c62bb AURORA_BASE_20111220
 462c726144bc1fb45b61e774f64ac5d61b4e047c UPDATE_PACKAGING_R16
 bbc7014db2de49e2301680d2a86be8a53108a88a AURORA_BASE_20120131
 bbc7014db2de49e2301680d2a86be8a53108a88a AURORA_BASE_20120131
 0000000000000000000000000000000000000000 AURORA_BASE_20120131
-0000000000000000000000000000000000000000 AURORA_BASE_20120131
 bbc7014db2de49e2301680d2a86be8a53108a88a AURORA_BASE_20120131
--- a/caps/src/nsScriptSecurityManager.cpp
+++ b/caps/src/nsScriptSecurityManager.cpp
@@ -89,18 +89,20 @@
 #include "nsAboutProtocolUtils.h"
 #include "nsIClassInfo.h"
 #include "nsIURIFixup.h"
 #include "nsCDefaultURIFixup.h"
 #include "nsIChromeRegistry.h"
 #include "nsIContentSecurityPolicy.h"
 #include "nsIAsyncVerifyRedirectCallback.h"
 #include "mozilla/Preferences.h"
+#include "mozilla/dom/bindings/Utils.h"
 
 using namespace mozilla;
+using namespace mozilla::dom;
 
 static NS_DEFINE_CID(kZipReaderCID, NS_ZIPREADER_CID);
 
 nsIIOService    *nsScriptSecurityManager::sIOService = nsnull;
 nsIXPConnect    *nsScriptSecurityManager::sXPConnect = nsnull;
 nsIThreadJSContextStack *nsScriptSecurityManager::sJSContextStack = nsnull;
 nsIStringBundle *nsScriptSecurityManager::sStrBundle = nsnull;
 JSRuntime       *nsScriptSecurityManager::sRuntime   = 0;
@@ -2438,19 +2440,27 @@ nsScriptSecurityManager::doGetObjectPrin
                                               aAllowShortCircuit
 #else
                                               true
 #endif
                                               );
             if (result) {
                 break;
             }
-        } else if (!(~jsClass->flags & (JSCLASS_HAS_PRIVATE |
-                                        JSCLASS_PRIVATE_IS_NSISUPPORTS))) {
-            nsISupports *priv = (nsISupports *) js::GetObjectPrivate(aObj);
+        } else {
+            nsISupports *priv;
+            if (!(~jsClass->flags & (JSCLASS_HAS_PRIVATE |
+                                     JSCLASS_PRIVATE_IS_NSISUPPORTS))) {
+                priv = (nsISupports *) js::GetObjectPrivate(aObj);
+            } else if ((jsClass->flags & JSCLASS_IS_DOMJSCLASS) &&
+                       bindings::DOMJSClass::FromJSClass(jsClass)->mDOMObjectIsISupports) {
+                priv = bindings::UnwrapDOMObject<nsISupports>(aObj);
+            } else {
+                priv = nsnull;
+            }
 
 #ifdef DEBUG
             if (aAllowShortCircuit) {
                 nsCOMPtr<nsIXPConnectWrappedNative> xpcWrapper =
                     do_QueryInterface(priv);
 
                 NS_ASSERTION(!xpcWrapper ||
                              !strcmp(jsClass->name, "XPCNativeWrapper"),
--- a/content/base/src/nsContentList.cpp
+++ b/content/base/src/nsContentList.cpp
@@ -187,17 +187,17 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_END
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(nsSimpleContentList)
 NS_INTERFACE_MAP_END_INHERITING(nsBaseContentList)
 
 
 NS_IMPL_ADDREF_INHERITED(nsSimpleContentList, nsBaseContentList)
 NS_IMPL_RELEASE_INHERITED(nsSimpleContentList, nsBaseContentList)
 
 JSObject*
-nsSimpleContentList::WrapObject(JSContext *cx, XPCWrappedNativeScope *scope,
+nsSimpleContentList::WrapObject(JSContext *cx, JSObject *scope,
                                 bool *triedToWrap)
 {
   return mozilla::dom::binding::NodeList::create(cx, scope, this, triedToWrap);
 }
 
 // nsFormContentList
 
 nsFormContentList::nsFormContentList(nsIContent *aForm,
@@ -504,17 +504,17 @@ nsContentList::~nsContentList()
 
   if (mDestroyFunc) {
     // Clean up mData
     (*mDestroyFunc)(mData);
   }
 }
 
 JSObject*
-nsContentList::WrapObject(JSContext *cx, XPCWrappedNativeScope *scope,
+nsContentList::WrapObject(JSContext *cx, JSObject *scope,
                           bool *triedToWrap)
 {
   return mozilla::dom::binding::HTMLCollection::create(cx, scope, this,
                                                        triedToWrap);
 }
 
 DOMCI_DATA(ContentList, nsContentList)
 
--- a/content/base/src/nsContentList.h
+++ b/content/base/src/nsContentList.h
@@ -133,17 +133,17 @@ public:
   }
 
   void Reset() {
     mElements.Clear();
   }
 
   virtual PRInt32 IndexOf(nsIContent *aContent, bool aDoFlush);
 
-  virtual JSObject* WrapObject(JSContext *cx, XPCWrappedNativeScope *scope,
+  virtual JSObject* WrapObject(JSContext *cx, JSObject *scope,
                                bool *triedToWrap) = 0;
 
 protected:
   nsTArray< nsCOMPtr<nsIContent> > mElements;
 };
 
 
 class nsSimpleContentList : public nsBaseContentList
@@ -157,17 +157,17 @@ public:
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(nsSimpleContentList,
                                            nsBaseContentList)
 
   virtual nsINode* GetParentObject()
   {
     return mRoot;
   }
-  virtual JSObject* WrapObject(JSContext *cx, XPCWrappedNativeScope *scope,
+  virtual JSObject* WrapObject(JSContext *cx, JSObject *scope,
                                bool *triedToWrap);
 
 private:
   // This has to be a strong reference, the root might go away before the list.
   nsCOMPtr<nsINode> mRoot;
 };
 
 // This class is used only by form element code and this is a static
@@ -290,17 +290,17 @@ public:
                 void* aData,
                 bool aDeep = true,
                 nsIAtom* aMatchAtom = nsnull,
                 PRInt32 aMatchNameSpaceId = kNameSpaceID_None,
                 bool aFuncMayDependOnAttr = true);
   virtual ~nsContentList();
 
   // nsWrapperCache
-  virtual JSObject* WrapObject(JSContext *cx, XPCWrappedNativeScope *scope,
+  virtual JSObject* WrapObject(JSContext *cx, JSObject *scope,
                                bool *triedToWrap);
 
   // nsIDOMHTMLCollection
   NS_DECL_NSIDOMHTMLCOLLECTION
 
   // nsBaseContentList overrides
   virtual PRInt32 IndexOf(nsIContent *aContent, bool aDoFlush);
   virtual PRInt32 IndexOf(nsIContent* aContent);
--- a/content/base/src/nsGenericElement.cpp
+++ b/content/base/src/nsGenericElement.cpp
@@ -1748,17 +1748,17 @@ NS_INTERFACE_TABLE_HEAD(nsChildContentLi
     NS_INTERFACE_TABLE_ENTRY(nsChildContentList, nsIDOMNodeList)
   NS_OFFSET_AND_INTERFACE_TABLE_END
   NS_OFFSET_AND_INTERFACE_TABLE_TO_MAP_SEGUE
   NS_INTERFACE_MAP_ENTRIES_CYCLE_COLLECTION(nsChildContentList)
   NS_DOM_INTERFACE_MAP_ENTRY_CLASSINFO(NodeList)
 NS_INTERFACE_MAP_END
 
 JSObject*
-nsChildContentList::WrapObject(JSContext *cx, XPCWrappedNativeScope *scope,
+nsChildContentList::WrapObject(JSContext *cx, JSObject *scope,
                                bool *triedToWrap)
 {
   return mozilla::dom::binding::NodeList::create(cx, scope, this, triedToWrap);
 }
 
 NS_IMETHODIMP
 nsChildContentList::GetLength(PRUint32* aLength)
 {
--- a/content/base/src/nsGenericElement.h
+++ b/content/base/src/nsGenericElement.h
@@ -100,17 +100,17 @@ public:
   {
     SetIsProxy();
   }
 
   NS_DECL_CYCLE_COLLECTING_ISUPPORTS
   NS_DECL_CYCLE_COLLECTION_SKIPPABLE_SCRIPT_HOLDER_CLASS(nsChildContentList)
 
   // nsWrapperCache
-  virtual JSObject* WrapObject(JSContext *cx, XPCWrappedNativeScope *scope,
+  virtual JSObject* WrapObject(JSContext *cx, JSObject *scope,
                                bool *triedToWrap);
 
   // nsIDOMNodeList interface
   NS_DECL_NSIDOMNODELIST
 
   // nsINodeList interface
   virtual PRInt32 IndexOf(nsIContent* aContent);
 
--- a/content/html/content/src/nsHTMLFormElement.cpp
+++ b/content/html/content/src/nsHTMLFormElement.cpp
@@ -145,17 +145,17 @@ public:
    * elements.
    *
    * @param aControls The list of sorted controls[out].
    * @return NS_OK or NS_ERROR_OUT_OF_MEMORY.
    */
   nsresult GetSortedControls(nsTArray<nsGenericHTMLFormElement*>& aControls) const;
 
   // nsWrapperCache
-  virtual JSObject* WrapObject(JSContext *cx, XPCWrappedNativeScope *scope,
+  virtual JSObject* WrapObject(JSContext *cx, JSObject *scope,
                                bool *triedToWrap)
   {
     return mozilla::dom::binding::HTMLCollection::create(cx, scope, this,
                                                          triedToWrap);
   }
 
   nsHTMLFormElement* mForm;  // WEAK - the form owns me
 
--- a/content/html/content/src/nsHTMLSelectElement.cpp
+++ b/content/html/content/src/nsHTMLSelectElement.cpp
@@ -2054,17 +2054,17 @@ NS_INTERFACE_TABLE_HEAD(nsHTMLOptionColl
 NS_INTERFACE_MAP_END
 
 
 NS_IMPL_CYCLE_COLLECTING_ADDREF(nsHTMLOptionCollection)
 NS_IMPL_CYCLE_COLLECTING_RELEASE(nsHTMLOptionCollection)
 
 
 JSObject*
-nsHTMLOptionCollection::WrapObject(JSContext *cx, XPCWrappedNativeScope *scope,
+nsHTMLOptionCollection::WrapObject(JSContext *cx, JSObject *scope,
                                    bool *triedToWrap)
 {
   return mozilla::dom::binding::HTMLOptionsCollection::create(cx, scope, this,
                                                               triedToWrap);
 }
 
 NS_IMETHODIMP
 nsHTMLOptionCollection::GetLength(PRUint32* aLength)
--- a/content/html/content/src/nsHTMLSelectElement.h
+++ b/content/html/content/src/nsHTMLSelectElement.h
@@ -71,17 +71,17 @@ class nsHTMLOptionCollection: public nsI
                               public nsWrapperCache
 {
 public:
   nsHTMLOptionCollection(nsHTMLSelectElement* aSelect);
   virtual ~nsHTMLOptionCollection();
 
   NS_DECL_CYCLE_COLLECTING_ISUPPORTS
 
-  virtual JSObject* WrapObject(JSContext *cx, XPCWrappedNativeScope *scope,
+  virtual JSObject* WrapObject(JSContext *cx, JSObject *scope,
                                bool *triedToWrap);
 
   // nsIDOMHTMLOptionsCollection interface
   NS_DECL_NSIDOMHTMLOPTIONSCOLLECTION
 
   // nsIDOMHTMLCollection interface, all its methods are defined in
   // nsIDOMHTMLOptionsCollection
 
--- a/content/html/content/src/nsHTMLTableElement.cpp
+++ b/content/html/content/src/nsHTMLTableElement.cpp
@@ -81,17 +81,17 @@ public:
     return mParent;
   }
 
   NS_IMETHOD    ParentDestroyed();
 
   NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_CLASS(TableRowsCollection)
 
   // nsWrapperCache
-  virtual JSObject* WrapObject(JSContext *cx, XPCWrappedNativeScope *scope,
+  virtual JSObject* WrapObject(JSContext *cx, JSObject *scope,
                                bool *triedToWrap)
   {
     return mozilla::dom::binding::HTMLCollection::create(cx, scope, this,
                                                          triedToWrap);
   }
 
 protected:
   // Those rows that are not in table sections
--- a/content/xbl/src/nsBindingManager.cpp
+++ b/content/xbl/src/nsBindingManager.cpp
@@ -109,17 +109,17 @@ public:
     return mContent;
   }
 
   PRInt32 GetInsertionPointCount() { return mElements->Length(); }
 
   nsXBLInsertionPoint* GetInsertionPointAt(PRInt32 i) { return static_cast<nsXBLInsertionPoint*>(mElements->ElementAt(i)); }
   void RemoveInsertionPointAt(PRInt32 i) { mElements->RemoveElementAt(i); }
 
-  virtual JSObject* WrapObject(JSContext *cx, XPCWrappedNativeScope *scope,
+  virtual JSObject* WrapObject(JSContext *cx, JSObject *scope,
                                bool *triedToWrap)
   {
     return mozilla::dom::binding::NodeList::create(cx, scope, this,
                                                    triedToWrap);
   }
 
   NS_DECLARE_STATIC_IID_ACCESSOR(NS_ANONYMOUS_CONTENT_LIST_IID)
 private:
--- a/dom/Makefile.in
+++ b/dom/Makefile.in
@@ -71,16 +71,17 @@ DIRS = \
 ifeq (gonk,$(MOZ_WIDGET_TOOLKIT))
 DIRS += \
   interfaces/apps \
   $(NULL)
 endif
 
 DIRS += \
   base \
+  bindings \
   battery \
   power \
   sms \
   src \
   locales \
   network \
   plugins/base \
   plugins/ipc \
--- a/dom/base/nsGlobalWindow.h
+++ b/dom/base/nsGlobalWindow.h
@@ -294,18 +294,17 @@ public:
   nsPIDOMWindow* GetPrivateParent();
   // callback for close event
   void ReallyCloseWindow();
 
   // nsISupports
   NS_DECL_CYCLE_COLLECTING_ISUPPORTS
 
   // nsWrapperCache
-  JSObject *WrapObject(JSContext *cx, XPCWrappedNativeScope *scope,
-                       bool *triedToWrap)
+  JSObject *WrapObject(JSContext *cx, JSObject *scope, bool *triedToWrap)
   {
     NS_ASSERTION(IsOuterWindow(),
                  "Inner window supports nsWrapperCache, fix WrapObject!");
     *triedToWrap = true;
     return EnsureInnerWindow() ? GetWrapper() : nsnull;
   }
 
   // nsIScriptGlobalObject
--- a/dom/base/nsJSEnvironment.cpp
+++ b/dom/base/nsJSEnvironment.cpp
@@ -105,19 +105,22 @@
 #endif
 #include "prlog.h"
 #include "prthread.h"
 
 #include "mozilla/FunctionTimer.h"
 #include "mozilla/Preferences.h"
 #include "mozilla/Telemetry.h"
 
+#include "mozilla/dom/bindings/Utils.h"
+
 #include "sampler.h"
 
 using namespace mozilla;
+using namespace mozilla::dom;
 
 const size_t gStackSize = 8192;
 
 #ifdef PR_LOGGING
 static PRLogModuleInfo* gJSDiagnostics;
 #endif
 
 // Thank you Microsoft!
@@ -2196,23 +2199,31 @@ nsJSContext::GetGlobalObject()
     // If this assertion hits then it means that we have a window object as
     // our global, but we never called CreateOuterObject.
     NS_ASSERTION(inner == global, "Shouldn't be able to innerize here");
   }
 #endif
 
   JSClass *c = JS_GetClass(global);
 
-  if (!c || ((~c->flags) & (JSCLASS_HAS_PRIVATE |
-                            JSCLASS_PRIVATE_IS_NSISUPPORTS))) {
+  nsISupports *priv;
+  if (c) {
+    if (((~c->flags) & (JSCLASS_HAS_PRIVATE |
+                        JSCLASS_PRIVATE_IS_NSISUPPORTS))) {
+      priv = (nsISupports *)js::GetObjectPrivate(global);
+    } else if ((c->flags & JSCLASS_IS_DOMJSCLASS) &&
+               bindings::DOMJSClass::FromJSClass(c)->mDOMObjectIsISupports) {
+      priv = bindings::UnwrapDOMObject<nsISupports>(global);
+    } else {
+      return nsnull;
+    }
+  } else {
     return nsnull;
   }
 
-  nsISupports *priv = (nsISupports *)js::GetObjectPrivate(global);
-
   nsCOMPtr<nsIXPConnectWrappedNative> wrapped_native =
     do_QueryInterface(priv);
 
   nsCOMPtr<nsIScriptGlobalObject> sgo;
   if (wrapped_native) {
     // The global object is a XPConnect wrapped native, the native in
     // the wrapper might be the nsIScriptGlobalObject
 
--- a/dom/base/nsJSUtils.cpp
+++ b/dom/base/nsJSUtils.cpp
@@ -56,16 +56,20 @@
 #include "nsIXPConnect.h"
 #include "nsCOMPtr.h"
 #include "nsContentUtils.h"
 #include "nsIScriptSecurityManager.h"
 #include "nsPIDOMWindow.h"
 
 #include "nsDOMJSUtils.h" // for GetScriptContextFromJSContext
 
+#include "mozilla/dom/bindings/Utils.h"
+
+using namespace mozilla::dom;
+
 JSBool
 nsJSUtils::GetCallingLocation(JSContext* aContext, const char* *aFilename,
                               PRUint32* aLineno)
 {
   // Get the current filename and line number
   JSStackFrame* frame = nsnull;
   JSScript* script = nsnull;
   do {
@@ -94,32 +98,39 @@ nsJSUtils::GetCallingLocation(JSContext*
   }
 
   return JS_FALSE;
 }
 
 nsIScriptGlobalObject *
 nsJSUtils::GetStaticScriptGlobal(JSContext* aContext, JSObject* aObj)
 {
-  nsISupports* supports;
   JSClass* clazz;
   JSObject* glob = aObj; // starting point for search
 
   if (!glob)
     return nsnull;
 
   glob = JS_GetGlobalForObject(aContext, glob);
   NS_ABORT_IF_FALSE(glob, "Infallible returns null");
 
   clazz = JS_GetClass(glob);
 
-  if (!clazz ||
-      !(clazz->flags & JSCLASS_HAS_PRIVATE) ||
-      !(clazz->flags & JSCLASS_PRIVATE_IS_NSISUPPORTS) ||
-      !(supports = (nsISupports*)::JS_GetPrivate(glob))) {
+  nsISupports* supports = nsnull;
+  if (clazz) {
+    if ((clazz->flags & JSCLASS_HAS_PRIVATE) &&
+        (clazz->flags & JSCLASS_PRIVATE_IS_NSISUPPORTS)) {
+      supports = static_cast<nsISupports*>(JS_GetPrivate(glob));
+    } else if ((clazz->flags & JSCLASS_IS_DOMJSCLASS) &&
+               bindings::DOMJSClass::FromJSClass(clazz)->mDOMObjectIsISupports) {
+      supports = bindings::UnwrapDOMObject<nsISupports>(glob);
+    }
+  }
+
+  if (!supports) {
     return nsnull;
   }
 
   // We might either have a window directly (e.g. if the global is a
   // sandbox whose script object principal pointer is a window), or an
   // XPCWrappedNative for a window.  We could also have other
   // sandbox-related script object principals, but we can't do much
   // about those short of trying to walk the proto chain of |glob|
--- a/dom/base/nsWrapperCache.h
+++ b/dom/base/nsWrapperCache.h
@@ -178,17 +178,17 @@ public:
   /**
    * Wrap the object corresponding to this wrapper cache. If non-null is
    * returned, the object has already been stored in the wrapper cache and the
    * value set in triedToWrap is meaningless. If null is returned then
    * triedToWrap indicates whether an error occurred, if it's false then the
    * object doesn't actually support creating a wrapper through its WrapObject
    * hook.
    */
-  virtual JSObject* WrapObject(JSContext *cx, XPCWrappedNativeScope *scope,
+  virtual JSObject* WrapObject(JSContext *cx, JSObject *scope,
                                bool *triedToWrap)
   {
     *triedToWrap = false;
     return nsnull;
   }
 
   /**
    * Returns true if the object has a non-gray wrapper.
new file mode 100644
--- /dev/null
+++ b/dom/bindings/BindingGen.py
@@ -0,0 +1,65 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import cPickle
+import WebIDL
+from Configuration import *
+from Codegen import CGBindingRoot, replaceFileIfChanged
+
+def generate_binding_header(config, outputprefix, webidlfile):
+    """
+    |config| Is the configuration object.
+    |outputprefix| is a prefix to use for the header guards and filename.
+    """
+
+    filename = outputprefix + ".h"
+    root = CGBindingRoot(config, outputprefix, webidlfile)
+    if replaceFileIfChanged(filename, root.declare()):
+        print "Generating binding header: %s" % (filename)
+
+def generate_binding_cpp(config, outputprefix, webidlfile):
+    """
+    |config| Is the configuration object.
+    |outputprefix| is a prefix to use for the header guards and filename.
+    """
+
+    filename = outputprefix + ".cpp"
+    root = CGBindingRoot(config, outputprefix, webidlfile)
+    if replaceFileIfChanged(filename, root.define()):
+        print "Generating binding implementation: %s" % (filename)
+
+def main():
+
+    # Parse arguments.
+    from optparse import OptionParser
+    usagestring = "usage: %prog [header|cpp] configFile outputPrefix webIDLFile"
+    o = OptionParser(usage=usagestring)
+    o.add_option("--verbose-errors", action='store_true', default=False,
+                 help="When an error happens, display the Python traceback.")
+    (options, args) = o.parse_args()
+    if len(args) != 4 or (args[0] != "header" and args[0] != "cpp"):
+        o.error(usagestring)
+    buildTarget = args[0]
+    configFile = os.path.normpath(args[1])
+    outputPrefix = args[2]
+    webIDLFile = os.path.normpath(args[3])
+
+    # Load the parsing results
+    f = open('ParserResults.pkl', 'rb')
+    parserData = cPickle.load(f)
+    f.close()
+
+    # Create the configuration data.
+    config = Configuration(configFile, parserData)
+
+    # Generate the prototype classes.
+    if buildTarget == "header":
+        generate_binding_header(config, outputPrefix, webIDLFile);
+    else:
+        assert(buildTarget == "cpp")
+        generate_binding_cpp(config, outputPrefix, webIDLFile);
+
+if __name__ == '__main__':
+    main()
new file mode 100644
--- /dev/null
+++ b/dom/bindings/Bindings.conf
@@ -0,0 +1,74 @@
+# DOM Bindings Configuration.
+#
+# The WebIDL interfaces are defined in dom/webidl. For each such interface, there
+# is a corresponding entry in the configuration table below. The configuration
+# table maps each interface name to a |descriptor| or list of |descriptor|s.
+#
+# A |descriptor| has a mandatory boolean member 'concrete' which indicates
+# whether the object unwraps to a known concrete type (arguments of the
+# |implements| variety, for example, are non-concrete).
+#
+# Valid fields for all descriptors:
+#   * concrete - Indicates whether this is a concrete descriptor (required)
+#   * workers - Indicates whether the descriptor is intended to be used for
+#               worker threads (defaults to false)
+#   * headerFile - The file in which the nativeClass or nativeInterface is
+#                  declared (defaults to an educated guess).
+#   * customTrace - The native class will use a custom trace hook (defaults to
+#                   true for workers, false otherwise).
+#   * customFinalize - The native class will use a custom finalize hook
+#                      (defaults to true for workers, false otherwise).
+#   * infallible - An array of attributes and methods specified in the .webidl
+#                  file that cannot fail and therefore do not require the final
+#                  nsresult& argument (defaults to an empty array).
+#
+# Valid fields for concrete descriptors:
+#   * nativeClass - The concrete class that instances of this interface will
+#                   unwrap to (required)
+#
+# Valid fields for non-concrete descriptors:
+#   * nativeInterface - The native type that instances of this interface will
+#                       unwrap to.
+
+DOMInterfaces = {
+
+'XMLHttpRequest': [
+{
+    'concrete': True,
+    'nativeClass': 'nsXMLHttpRequest',
+}],
+
+'XMLHttpRequestUpload': [
+{
+     'concrete': True,
+     'nativeClass': 'nsXMLHttpRequestUpload',
+     'headerFile': 'nsXMLHttpRequest.h'
+}],
+
+'EventTarget': [
+{
+    'concrete': True,
+    'nativeClass': 'nsDOMEventTargetHelper'
+}],
+
+'Event': [
+{
+    'concrete': True,
+    'nativeClass': 'nsDOMEvent'
+}],
+
+'EventListener': [
+{
+    # XXXbz should this really be true?
+    'concrete': True,
+    'nativeClass': 'nsIDOMEventListener'
+}],
+
+'XMLHttpRequestEventTarget': [
+{
+    'concrete': True,
+    'nativeClass': 'nsXHREventTarget',
+    'headerFile': 'nsXMLHttpRequest.h'
+}]
+
+}
new file mode 100644
--- /dev/null
+++ b/dom/bindings/Codegen.py
@@ -0,0 +1,1757 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Common codegen classes.
+
+import os
+import string
+
+from WebIDL import *
+
+AUTOGENERATED_WARNING_COMMENT = \
+    "/* THIS FILE IS AUTOGENERATED - DO NOT EDIT */\n\n"
+FINALIZE_HOOK_NAME = '_Finalize'
+TRACE_HOOK_NAME = '_Trace'
+
+def replaceFileIfChanged(filename, newContents):
+    """
+    Read a copy of the old file, so that we don't touch it if it hasn't changed.
+    Returns True if the file was updated, false otherwise.
+    """
+    oldFileContents = ""
+    try:
+        oldFile = open(filename, 'rb')
+        oldFileContents = ''.join(oldFile.readlines())
+        oldFile.close()
+    except:
+        pass
+
+    if newContents == oldFileContents:
+        return False
+
+    f = open(filename, 'wb')
+    f.write(newContents)
+    f.close()
+
+class CGThing():
+    """
+    Abstract base case for things that spit out code.
+    """
+    def __init__(self):
+        pass # Nothing for now
+    def declare(self):
+        """Produce code for a header file."""
+        assert(False)  # Override me!
+    def define(self):
+        """Produce code for a cpp file."""
+        assert(False) # Override me!
+
+class CGDOMJSClass(CGThing):
+    def __init__(self, descriptor):
+        CGThing.__init__(self)
+        self.descriptor = descriptor
+    def declare(self):
+        return "  extern DOMJSClass Class;\n"
+    def define(self):
+        traceHook = TRACE_HOOK_NAME if self.descriptor.customTrace else 'NULL'
+        prototypeChainString = ', '.join(['id::' + proto \
+                                          for proto in self.descriptor.prototypeChain])
+        return """
+DOMJSClass Class = {
+  { "%s",
+    JSCLASS_IS_DOMJSCLASS | JSCLASS_HAS_RESERVED_SLOTS(1),
+    JS_PropertyStub,       /* addProperty */
+    JS_PropertyStub,       /* delProperty */
+    JS_PropertyStub,       /* getProperty */
+    JS_StrictPropertyStub, /* setProperty */
+    JS_EnumerateStub,
+    JS_ResolveStub,
+    JS_ConvertStub,
+    %s,
+    NULL,                  /* reserved0 */
+    NULL,                  /* checkAccess */
+    NULL,                  /* call */
+    NULL,                  /* construct */
+    NULL,                  /* xdrObject */
+    NULL,                  /* hasInstance */
+    %s,
+    NULL                   /* reserved1 */
+  },
+  { %s }, -1, %s
+};
+""" % (self.descriptor.interface.identifier.name, FINALIZE_HOOK_NAME,
+       traceHook, prototypeChainString,
+       str(self.descriptor.nativeIsISupports).lower())
+
+class CGPrototypeJSClass(CGThing):
+    def __init__(self, descriptor):
+        CGThing.__init__(self)
+        self.descriptor = descriptor
+    def declare(self):
+        # We're purely for internal consumption
+        return ""
+    def define(self):
+        return """
+static JSClass PrototypeClass = {
+  "%s Prototype", 0,
+  JS_PropertyStub,       /* addProperty */
+  JS_PropertyStub,       /* delProperty */
+  JS_PropertyStub,       /* getProperty */
+  JS_StrictPropertyStub, /* setProperty */
+  JS_EnumerateStub,
+  JS_ResolveStub,
+  JS_ConvertStub,
+  NULL,                  /* finalize */
+  NULL,                  /* reserved0 */
+  NULL,                  /* checkAccess */
+  NULL,                  /* call */
+  NULL,                  /* construct */
+  NULL,                  /* xdrObject */
+  NULL,                  /* hasInstance */
+  NULL,                  /* trace */
+  NULL                   /* reserved1 */
+};
+""" % (self.descriptor.interface.identifier.name)
+
+class CGConstructorJSClass(CGThing):
+    def __init__(self, descriptor):
+        CGThing.__init__(self)
+        self.descriptor = descriptor
+    def declare(self):
+        # We're purely for internal consumption
+        return ""
+    def define(self):
+        return """
+static JSClass ConstructorClass = {
+  "Function", 0,
+  JS_PropertyStub,       /* addProperty */
+  JS_PropertyStub,       /* delProperty */
+  JS_PropertyStub,       /* getProperty */
+  JS_StrictPropertyStub, /* setProperty */
+  JS_EnumerateStub,
+  JS_ResolveStub,
+  JS_ConvertStub,
+  NULL,                  /* finalize */
+  NULL,                  /* reserved0 */
+  NULL,                  /* checkAccess */
+  // XXXbz This may need a useful call hook
+  NULL,                  /* call */
+  NULL,                  /* construct */
+  NULL,                  /* xdrObject */
+  // XXXbz This needs a useful hasInstance hook
+  NULL,                  /* hasInstance */
+  NULL,                  /* trace */
+  NULL                   /* reserved1 */
+};
+"""
+
+class CGList(CGThing):
+    def __init__(self, children, joiner=""):
+        CGThing.__init__(self)
+        self.children = children
+        self.joiner = joiner
+    def append(self, child):
+        self.children.append(child)
+    def prepend(self, child):
+        self.children.insert(0, child)
+    def declare(self):
+        return self.joiner.join([child.declare() for child in self.children])
+    def define(self):
+        return self.joiner.join([child.define() for child in self.children])
+
+class CGGeneric(CGThing):
+    def __init__(self, define="", declare=""):
+        self.declareText = declare
+        self.defineText = define
+    def declare(self):
+        return self.declareText
+    def define(self):
+        return self.defineText
+
+class CGIndenter(CGThing):
+    def __init__(self, child, indentLevel=2):
+        CGThing.__init__(self)
+        self.child = child
+        self.indent = " " * indentLevel
+        # We'll want to insert the indent at the beginnings of lines, but we
+        # don't want to indent empty lines.  So only indent lines that have a
+        # non-newline character on them.
+        self.pattern = re.compile("^(?=[^\n#])", re.MULTILINE)
+    def declare(self):
+        decl = self.child.declare()
+        if decl is not "":
+            return re.sub(self.pattern, self.indent, decl)
+        else:
+            return ""
+    def define(self):
+        defn = self.child.define()
+        if defn is not "":
+            return re.sub(self.pattern, self.indent, defn)
+        else:
+            return ""
+
+class CGWrapper(CGThing):
+    """
+    Generic CGThing that wraps other CGThings with pre and post text.
+    """
+    def __init__(self, child, pre="", post="", declarePre=None,
+                 declarePost=None, definePre=None, definePost=None,
+                 declareOnly=False):
+        CGThing.__init__(self)
+        self.child = child
+        self.declarePre = declarePre or pre
+        self.declarePost = declarePost or post
+        self.definePre = definePre or pre
+        self.definePost = definePost or post
+        self.declareOnly = declareOnly
+    def declare(self):
+        return self.declarePre + self.child.declare() + self.declarePost
+    def define(self):
+        if self.declareOnly:
+            return ''
+        return self.definePre + self.child.define() + self.definePost
+
+class CGNamespace(CGWrapper):
+    def __init__(self, namespace, child, declareOnly=False):
+        pre = "namespace %s {\n" % namespace
+        post="} // namespace %s\n" % namespace
+        CGWrapper.__init__(self, child, pre=pre, post=post,
+                           declareOnly=declareOnly)
+    @staticmethod
+    def build(namespaces, child, declareOnly=False):
+        """
+        Static helper method to build multiple wrapped namespaces.
+        """
+        if not namespaces:
+            return child
+        return CGNamespace(namespaces[0], CGNamespace.build(namespaces[1:],
+                                                            child),
+                           declareOnly=declareOnly)
+
+class CGIncludeGuard(CGWrapper):
+    """
+    Generates include guards for a header.
+    """
+    def __init__(self, prefix, child):
+        """|prefix| is the filename without the extension."""
+        define = 'mozilla_dom_bindings_%s_h__' % prefix
+        CGWrapper.__init__(self, child,
+                           declarePre='#ifndef %s\n#define %s\n\n' % (define, define),
+                           declarePost='\n#endif // %s\n' % define)
+
+class CGHeaders(CGWrapper):
+    """
+    Generates the appropriate include statements.
+    """
+    def __init__(self, descriptors, declareIncludes, child):
+        """
+        Builds a set of include to cover |descriptors|.
+
+        Also includes the files in |declareIncludes| in the header file.
+        """
+        def getInterfaceFilename(interface):
+            basename = os.path.basename(interface.filename())
+            return 'mozilla/dom/bindings/' + \
+                   basename.replace('.webidl', 'Binding.h')
+
+        # Determine the filenames for which we need headers.
+        interfaceDeps = [d.interface for d in descriptors]
+        interfaceDeps.extend([d.parent for d in interfaceDeps if d.parent])
+        bindingIncludes = [getInterfaceFilename(d) for d in interfaceDeps]
+
+        # Grab all the implementation declaration files we need.
+        implementationIncludes = [f for f in set([d.headerFile for d in descriptors])]
+
+        # Now find all the things we'll need as arguments because we
+        # need to wrap or unwrap them.
+        bindingHeaders = []
+        typeHeaders = []
+        for d in descriptors:
+            members = [m for m in d.interface.members]
+            signatures = [s for m in members if m.isMethod() for s in m.signatures()]
+            types = [s[0] for s in signatures]
+            types.extend([t.type for s in signatures for t in s[1]])
+
+            attrs = [a for a in members if a.isAttr()]
+            types.extend([a.type for a in attrs])
+
+            typeDescriptors = [d.getDescriptor(t.inner.identifier.name)
+                               for t in types
+                               if t.isInterface() and not t.isArrayBuffer()]
+            for typeDesc in typeDescriptors:
+                typeHeaders.append(typeDesc.headerFile)
+                bindingHeaders.append(getInterfaceFilename(typeDesc.interface))
+
+        implementationIncludes.extend([f for f in set(typeHeaders)])
+
+        # Let the machinery do its thing.
+        def _includeString(includes):
+            return ''.join(['#include "%s"\n' % i for i in includes]) + '\n'
+        CGWrapper.__init__(self, child,
+                           declarePre=_includeString(declareIncludes),
+                           definePre=_includeString(bindingIncludes) +
+                                     _includeString(bindingHeaders) +
+                                     _includeString(implementationIncludes))
+
+class Argument():
+    def __init__(self, argType, name):
+        self.argType = argType
+        self.name = name
+    def __str__(self):
+        return self.argType + ' ' + self.name
+
+class CGAbstractMethod(CGThing):
+    def __init__(self, descriptor, name, returnType, args, inline=False, static=False):
+        CGThing.__init__(self)
+        self.descriptor = descriptor
+        self.name = name
+        self.returnType = returnType
+        self.args = args
+        self.inline = inline
+        self.static = static
+    def _argstring(self):
+        return ', '.join([str(a) for a in self.args])
+    def _decorators(self):
+        decorators = []
+        if self.inline:
+            decorators.append('inline')
+        if self.static:
+            decorators.append('static')
+        decorators.append(self.returnType)
+        return ' '.join(decorators)
+    def declare(self):
+        if self.inline:
+            return self._define()
+        return "\n  %s %s(%s);\n" % (self._decorators(), self.name, self._argstring())
+    def _define(self):
+        return self.definition_prologue() + self.definition_body() + self.definition_epilogue()
+    def define(self):
+        return "" if self.inline else self._define()
+    def definition_prologue(self):
+        maybeNewline = " " if self.inline else "\n"
+        return "\n%s%s%s(%s)\n{" % (self._decorators(), maybeNewline,
+                                    self.name, self._argstring())
+    def definition_epilogue(self):
+        return "\n}\n"
+    def definition_body(self):
+        assert(False) # Override me!
+
+class CGAbstractStaticMethod(CGAbstractMethod):
+    """
+    Abstract base class for codegen of implementation-only (no
+    declaration) static methods.
+    """
+    def __init__(self, descriptor, name, returnType, args):
+        CGAbstractMethod.__init__(self, descriptor, name, returnType, args,
+                                  inline=False, static=True)
+    def declare(self):
+        # We only have implementation
+        return ""
+
+class CGAbstractClassHook(CGAbstractStaticMethod):
+    """
+    Meant for implementing JSClass hooks, like Finalize or Trace. Does very raw
+    'this' unwrapping as it assumes that the unwrapped type is always known.
+    """
+
+    def __init__(self, descriptor, name, returnType, args):
+        CGAbstractStaticMethod.__init__(self, descriptor, name, returnType,
+                                        args)
+
+    def definition_body_prologue(self):
+        return """
+  MOZ_ASSERT(js::GetObjectJSClass(obj) == Class.ToJSClass());
+  %s *self = UnwrapDOMObject<%s>(obj);
+""" % (self.descriptor.nativeClass, self.descriptor.nativeClass)
+
+    def definition_body(self):
+        return self.definition_body_prologue() + self.generate_code()
+
+    def generate_code(self):
+        # Override me
+        assert(False)
+
+class CGClassFinalizeHook(CGAbstractClassHook):
+    def __init__(self, descriptor):
+        args = [Argument('JSContext*', 'cx'), Argument('JSObject*', 'obj')]
+        CGAbstractClassHook.__init__(self, descriptor, FINALIZE_HOOK_NAME,
+                                         'void', args)
+
+    def generate_code(self):
+        if self.descriptor.customFinalize:
+            return """
+  if (self) {
+#if 0
+    self->%s(%s);
+#endif
+  }""" % (self.name, self.args[0].name)
+        return "\n  self->Release();"
+
+class CGClassTraceHook(CGAbstractClassHook):
+    def __init__(self, descriptor):
+        args = [Argument('JSTracer*', 'trc'), Argument('JSObject*', 'obj')]
+        CGAbstractClassHook.__init__(self, descriptor, TRACE_HOOK_NAME, 'void',
+                                     args)
+
+    def generate_code(self):
+        return """
+  if (self) {
+#if 0
+    self->%s(%s);
+#endif
+  }""" % (self.name, self.args[0].name)
+
+class MethodDefiner:
+    def __init__(self, descriptor):
+        self.descriptor = descriptor
+        self.methods = [m for m in self.descriptor.interface.members if
+                        m.isMethod()]
+    def hasMethods(self):
+        return len(self.methods) != 0
+    def __str__(self):
+        if not self.hasMethods():
+            return ""
+
+        # The length of a method is the maximum of the lengths of the
+        # argument lists of all its overloads.
+        def methodLength(method):
+            signatures = method.signatures()
+            return max([len(s[1]) for s in signatures])
+
+        funcdecls = ['    JS_FN("%s", %s, %s, JSPROP_ENUMERATE)' %
+                     (m.identifier.name, m.identifier.name,
+                      methodLength(m)) for m in self.methods]
+        # And add our JS_FS_END
+        funcdecls.append('    JS_FS_END')
+
+        return ("  static JSFunctionSpec methods[] = {\n" +
+                ',\n'.join(funcdecls) + "\n" +
+                "  };\n")
+
+class AttrDefiner:
+    def __init__(self, descriptor):
+        self.descriptor = descriptor
+        self.attrs = [m for m in self.descriptor.interface.members if
+                      m.isAttr()]
+    def hasAttrs(self):
+        return len(self.attrs) != 0
+    def __str__(self):
+        if not self.hasAttrs():
+            return ""
+
+        def flags(attr):
+            flags = "JSPROP_NATIVE_ACCESSORS | JSPROP_SHARED | JSPROP_ENUMERATE"
+            if attr.readonly:
+                return "JSPROP_READONLY | " + flags
+            return flags
+
+        def getter(attr):
+            return "get_" + attr.identifier.name
+
+        def setter(attr):
+            if attr.readonly:
+                return "NULL"
+            return "set_" + attr.identifier.name
+
+        attrdecls = ['    { "%s", 0, %s, %s, %s }' %
+                     (attr.identifier.name, flags(attr), getter(attr),
+                      setter(attr)) for attr in self.attrs]
+        attrdecls.append('    { 0, 0, 0, 0, 0 }')
+
+        return ("  static JSPropertySpec props[] = {\n" +
+                ',\n'.join(attrdecls) + "\n" +
+                "  };\n")
+
+class CGCreateProtoObjectMethod(CGAbstractMethod):
+    def __init__(self, descriptor):
+        args = [Argument('JSContext*', 'aCx'), Argument('JSObject*', 'aGlobal')]
+        CGAbstractMethod.__init__(self, descriptor, 'CreateProtoObject', 'JSObject*', args)
+    def definition_body(self):
+        protoChain = self.descriptor.prototypeChain
+        if len(protoChain) == 1:
+            getParentProto = "JS_GetObjectPrototype(aCx, aGlobal)"
+        else:
+            parentProtoName = self.descriptor.prototypeChain[-2]
+            getParentProto = "%s::GetProtoObject(aCx, aGlobal)" % (parentProtoName)
+
+        defineMethods = MethodDefiner(self.descriptor)
+        defineAttributes = AttrDefiner(self.descriptor);
+
+        needInterfaceObject = self.descriptor.interface.hasInterfaceObject
+
+        return """
+  JSObject* parentProto = %s;
+  if (!parentProto) {
+    return NULL;
+  }
+
+%s
+%s
+
+  return bindings::CreateProtoObject(aCx, parentProto, &PrototypeClass,
+                                     %s, %s, %s,
+                                     aGlobal, "%s");""" % (
+            getParentProto, defineMethods, defineAttributes,
+            "&ConstructorClass" if needInterfaceObject else "NULL",
+            "methods" if defineMethods.hasMethods() else "NULL",
+            "props" if defineAttributes.hasAttrs() else "NULL",
+            self.descriptor.interface.identifier.name if needInterfaceObject else "NULL")
+
+class CGGetProtoObjectMethod(CGAbstractMethod):
+    def __init__(self, descriptor):
+        args = [Argument('JSContext*', 'aCx'), Argument('JSObject*', 'aGlobal')]
+        CGAbstractMethod.__init__(self, descriptor, 'GetProtoObject',
+                                  'JSObject*', args, inline=True)
+    def definition_body(self):
+        return """
+  /* Get the prototype object for this class.  This will create the prototype
+     as needed. */
+
+  /* Make sure our global is sane.  Hopefully we can remove this sometime */
+  if (!(js::GetObjectClass(aGlobal)->flags & JSCLASS_DOM_GLOBAL)) {
+    return NULL;
+  }
+  /* Check to see whether the prototype is already installed */
+  JSObject **protoArray = GetProtoArray(aGlobal);
+  JSObject *ourProto = protoArray[id::%s];
+  if (!ourProto) {
+    ourProto = protoArray[id::%s] = CreateProtoObject(aCx, aGlobal);
+  }
+
+  /* ourProto might _still_ be null, but that's OK */
+  return ourProto;""" % (self.descriptor.name, self.descriptor.name)
+
+builtinNames = {
+    IDLType.Tags.bool: 'bool',
+    IDLType.Tags.int8: 'int8_t',
+    IDLType.Tags.int16: 'int16_t',
+    IDLType.Tags.int32: 'int32_t',
+    IDLType.Tags.int64: 'int64_t',
+    IDLType.Tags.uint8: 'uint8_t',
+    IDLType.Tags.uint16: 'uint16_t',
+    IDLType.Tags.uint32: 'uint32_t',
+    IDLType.Tags.uint64: 'uint64_t',
+    IDLType.Tags.float: 'float',
+    IDLType.Tags.double: 'double'
+}
+
+class ConcreteObjectUnwrapper():
+    """
+    A class for unwrapping an object named by the "source" argument
+    based on the passed-in descriptor and storing it in a variable
+    called by the name in the "target" argument.
+
+    codeOnFailure is the code to run if unwrapping fails.
+    """
+    def __init__(self, descriptor, source, target, codeOnFailure):
+        self.substitution = { "type" : descriptor.nativeClass,
+                              "protoID" : "id::" + descriptor.name,
+                              "source" : source,
+                              "target" : target,
+                              "codeOnFailure" : codeOnFailure }
+
+    def __str__(self):
+        return string.Template("""
+  {
+    nsresult rv = UnwrapObject<${protoID}>(cx, ${source}, &${target});
+    if (NS_FAILED(rv)) {
+      ${codeOnFailure}
+    }
+  }""").substitute(self.substitution)
+
+class FailureFatalConcreteObjectUnwrapper(ConcreteObjectUnwrapper):
+    """
+    As ConcreteObjectUnwrapper, but defaulting to throwing if unwrapping fails
+    """
+    def __init__(self, descriptor, source, target):
+        ConcreteObjectUnwrapper.__init__(self, descriptor, source, target,
+                                         "return Throw(cx, rv);")
+
+def getArgumentConversionTemplate(type, descriptor):
+    if type.isSequence() or type.isArray():
+        raise TypeError("Can't handle sequence or array arguments yet")
+
+    if descriptor is not None:
+        assert(type.isInterface())
+        # This is an interface that we implement as a concrete class
+        # or an XPCOM interface.
+        if type.nullable():
+            nameSuffix = ""
+        else:
+            nameSuffix = "_ptr"
+        template = "  ${typeName} *${name}%s;\n" % nameSuffix
+
+        template += "  if (${argVal}.isObject()) {"
+        if descriptor.concrete:
+            template += str(FailureFatalConcreteObjectUnwrapper(
+                    descriptor,
+                    "&${argVal}.toObject()",
+                    "${name}"+nameSuffix)).replace("\n", "\n  ") + "\n"
+        else:
+            raise TypeError("Can't handle this interface type yet, because we "
+                            "have no support for unwrapping non-concrete types: " + type)
+        if type.nullable():
+            template += (
+                "  } else if (${argVal}.isNullOrUndefined()) {\n"
+                "    ${name}%s = NULL;\n" % nameSuffix)
+
+        template += (
+            "  } else {\n"
+            "    return Throw(cx, NS_ERROR_XPC_BAD_CONVERT_JS);\n"
+            "  }\n")
+
+        if not type.nullable():
+            template += "  ${typeName} &${name} = *${name}_ptr;\n"
+            
+        return template
+
+    if type.isInterface():
+        raise TypeError("Interface type with no descriptor: " + type)
+
+    if type.isString():
+        # XXXbz Need to figure out string behavior?  Also, how to
+        # detect them?  Also, nullability?
+
+        return (
+            "  xpc_qsDOMString ${name}(cx, ${argVal}, ${argPtr},\n"
+            "                       xpc_qsDOMString::eDefaultNullBehavior,\n"
+            "                       xpc_qsDOMString::eDefaultUndefinedBehavior);\n"
+            "  if (!${name}.IsValid()) {\n"
+            "    return false;\n"
+            "  }\n")
+
+    if type.isEnum():
+        if type.nullable():
+            raise TypeError("We don't support nullable enumerated arguments "
+                            "yet")
+        enum = type.inner.identifier.name
+        return (
+            "  %(enumtype)s ${name};\n"
+            "  {\n"
+            "    bool ok;\n"
+            "    $name = (%(enumtype)s) FindEnumStringIndex(cx, ${argVal}, %(values)s, &ok);\n"
+            "    if (!ok) {\n"
+            "      return false;\n"
+            "    }\n"
+            "  }" % { "enumtype" : enum + "::value",
+                      "values" : enum + "::strings" })
+
+    if type.isCallback():
+        # XXXbz we're going to assume that callback types are always
+        # nullable and always have [TreatNonCallableAsNull] for now.
+        return (
+            "  JSObject *${name};\n"
+            "  if (${argVal}.isObject() && JS_ObjectIsCallable(cx, &${argVal}.toObject())) {\n"
+            "    ${name} = &${argVal}.toObject();\n"
+            "  } else {\n"
+            "    ${name} = NULL;\n"
+            "  }\n")
+
+    if type.isAny():
+        return "  JS::Value ${name} = ${argVal};\n"
+
+    if not type.isPrimitive():
+        raise TypeError("Need conversion for argument type '%s'" % type)
+
+    tag = type.tag()
+    replacements = dict()
+    if type.nullable():
+        replacements["declareArg"] = (
+            "  Nullable<${typeName}> ${name};\n"
+            "  if (${argVal}.isNullOrUndefined()) {\n"
+            "    ${name}.SetNull();\n"
+            "  } else"
+            )
+        replacements["finalValueSetter"] = "${name}.SetValue"
+    else:
+        replacements["declareArg"] = "  ${typeName} ${name};\n"
+        replacements["finalValueSetter"] = "${name} = "
+
+    replacements["intermediateCast"] = ""
+        
+    if tag == IDLType.Tags.bool:
+        replacements["jstype"] = "JSBool"
+        replacements["converter"] = "JS_ValueToBoolean"
+    elif tag in [IDLType.Tags.int8, IDLType.Tags.uint8, IDLType.Tags.int16,
+                 IDLType.Tags.int32, IDLType.Tags.uint32]:
+        # XXXbz need to add support for [EnforceRange] and [Clamp]
+        # The output of JS_ValueToECMAInt32 is determined as follows:
+        #   1) The value is converted to a double
+        #   2) Anything that's not a finite double returns 0
+        #   3) The double is rounded towards zero to the nearest integer
+        #   4) The resulting integer is reduced mod 2^32.  The output of this
+        #      operation is an integer in the range [0, 2^32).
+        #   5) If the resulting number is >= 2^31, 2^32 is subtracted from it.
+        #
+        # The result of all this is a number in the range [-2^31, 2^31)
+        #
+        # WebIDL conversions for the 8-bit, 16-bit, and 32-bit integer types
+        # are defined in the same way, except that step 4 uses reduction mod
+        # 2^8 and 2^16 for the 8-bit and 16-bit types respectively, and step 5
+        # is only done for the signed types.
+        #
+        # C/C++ define integer conversion semantics to unsigned types as taking
+        # your input integer mod (1 + largest value representable in the
+        # unsigned type).  Since 2^32 is zero mod 2^8, 2^16, and 2^32,
+        # converting to the unsigned int of the relevant width will correctly
+        # perform step 4; in particular, the 2^32 possibly subtracted in step 5
+        # will become 0.
+        #
+        # Once we have step 4 done, we're just going to assume 2s-complement
+        # representation and cast directly to the type we really want.
+        #
+        # So we can cast directly for all unsigned types and for int32_t; for
+        # the smaller-width signed types we need to cast through the
+        # corresponding unsigned type.
+        replacements["jstype"] = "int32_t"
+        replacements["converter"] = "JS_ValueToECMAInt32"
+        if tag is IDLType.Tags.int8:
+            replacements["intermediateCast"] = "(uint8_t)"
+        elif tag is IDLType.Tags.int16:
+            replacements["intermediateCast"] = "(uint16_t)"
+        else:
+            replacements["intermediateCast"] = ""
+    elif tag is IDLType.Tags.int64:
+        # XXXbz this may not match what WebIDL says to do in terms of reducing
+        # mod 2^64.  Should we check?
+        replacements["jstype"] = "PRInt64"
+        replacements["converter"] = "xpc_qsValueToInt64"
+    elif tag is IDLType.Tags.uint64:
+        # XXXbz this may not match what WebIDL says to do in terms of reducing
+        # mod 2^64.  Should we check?
+        replacements["jstype"] = "PRUint64"
+        replacements["converter"] = "xpc_qsValueToUint64"
+    elif tag in [IDLType.Tags.float, IDLType.Tags.double]:
+        replacements["jstype"] = "jsdouble"
+        replacements["converter"] = "JS_ValueToNumber"
+    else:
+        raise TypeError("Unknown primitive type '%s'" % type);
+
+    # We substitute the %(name)s things here.  Our caller will
+    # substitute the ${name} things.
+    return ("  %(jstype)s ${name}_jstype;\n"
+            "%(declareArg)s" # No leading whitespace or newline here, on purpose
+            "  if (%(converter)s(cx, ${argVal}, &${name}_jstype)) {\n"
+            "    %(finalValueSetter)s((${typeName})%(intermediateCast)s${name}_jstype);\n"
+            "  } else {\n"
+            "    return false;\n"
+            "  }\n" % replacements)
+
+class CGArgumentConverter(CGThing):
+    """
+    A class that takes an IDL argument object, its index in the
+    argument list, and the argv and argc strings and generates code to
+    unwrap the argument to the right native type.
+    """
+    def __init__(self, argument, index, argv, argc, descriptorProvider):
+        CGThing.__init__(self)
+        self.argument = argument
+        # XXXbz should optional jsval args get JSVAL_VOID? What about
+        # others?
+        self.replacementVariables = {
+            "index" : index,
+            "argc" : argc,
+            "argv" : argv,
+            "defaultValue" : "JSVAL_NULL",
+            "name" : "arg%d" % index
+            }
+        if argument.optional:
+            self.replacementVariables["argVal"] = string.Template(
+                "(${index} < ${argc} ? ${argv}[${index}] : ${defaultValue})"
+                ).substitute(self.replacementVariables)
+            self.replacementVariables["argPtr"] = string.Template(
+                "(${index} < ${argc} ? &${argv}[${index}] : NULL)"
+                ).substitute(self.replacementVariables)
+        else:
+            self.replacementVariables["argVal"] = string.Template(
+                "${argv}[${index}]"
+                ).substitute(self.replacementVariables)
+            self.replacementVariables["argPtr"] = (
+                "&" + self.replacementVariables["argVal"])
+        self.descriptor = None
+        if argument.type.isPrimitive():
+            self.replacementVariables["typeName"] = builtinNames[argument.type.tag()]
+        elif argument.type.isInterface() and not argument.type.isArrayBuffer():
+            descriptor = descriptorProvider.getDescriptor(
+                argument.type.inner.identifier.name)
+            self.descriptor = descriptor
+            self.replacementVariables["typeName"] = descriptor.typeName
+
+    def define(self):
+        return string.Template(
+            "\n" + getArgumentConversionTemplate(self.argument.type,
+                                                 self.descriptor)
+            ).substitute(self.replacementVariables)
+
+def getWrapTemplateForTypeImpl(type, result, descriptorProvider,
+                               resultAlreadyAddRefed):
+    if type.isSequence() or type.isArray():
+        raise TypeError("Can't handle sequence or array return values yet")
+
+    if type.isVoid():
+        return """
+  ${jsvalRef} = JSVAL_VOID;
+  return true;"""
+
+    if type.isInterface() and not type.isArrayBuffer():
+        descriptor = descriptorProvider.getDescriptor(type.inner.identifier.name)
+        wrappingCode = ("""
+  if (!%s) {
+    ${jsvalRef} = JSVAL_NULL;
+    return true;
+  }""" % result) if type.nullable() else ""
+        if descriptor.concrete:
+            wrappingCode += """
+  if (WrapNewBindingObject(cx, obj, %s, ${jsvalPtr})) {
+    return true;
+  }""" % result
+            if descriptor.workers:
+                # Worker bindings can only fail to wrap as a new-binding object
+                # if they already threw an exception
+                wrappingCode += """
+  MOZ_ASSERT(JS_IsExceptionPending(cx));
+  return false;"""
+            else:
+                # Try old-style wrapping for non-worker bindings
+                wrappingCode += """
+  return HandleNewBindingWrappingFailure(cx, obj, %s, ${jsvalPtr});""" % result
+        else:
+            raise TypeError("Don't know how to wrap non-concrete types yet")
+        return wrappingCode
+
+    if type.isString():
+        if type.nullable():
+            return """
+  return xpc::StringToJsval(cx, %s, ${jsvalPtr});""" % result
+        else:
+            return """
+  return xpc::NonVoidStringToJsval(cx, %s, ${jsvalPtr});""" % result
+
+    if type.isEnum():
+        if type.nullable():
+            raise TypeError("We don't support nullable enumerated return types "
+                            "yet")
+        return """
+  MOZ_ASSERT(uint32_t(%(result)s) < ArrayLength(%(strings)s));
+  JSString* result_str = JS_NewStringCopyZ(cx, %(strings)s[uint32_t(%(result)s)]);
+  if (!result_str) {
+    return false;
+  }
+  ${jsvalRef} = JS::StringValue(result_str);
+  return true;""" % { "result" : result,
+                      "strings" : type.inner.identifier.name + "::strings" }
+
+    if type.isCallback() and not type.isInterface():
+        # XXXbz we're going to assume that callback types are always
+        # nullable and always have [TreatNonCallableAsNull] for now.
+        return """
+  ${jsvalRef} = JS::ObjectOrNullValue(%s);
+  return true;""" % result
+
+    if type.tag() == IDLType.Tags.any:
+        return """
+  ${jsvalRef} = %s;\n
+  return true;""" % result
+
+    if not type.isPrimitive():
+        raise TypeError("Need to learn to wrap %s" % type)
+
+    if type.nullable():
+        return """
+  if (%s.IsNull()) {
+    ${jsvalRef} = JSVAL_NULL;
+    return true;
+  }
+%s""" % (result, getWrapTemplateForTypeImpl(type.inner, "%s.Value()" % result,
+                                            descriptorProvider,
+                                            resultAlreadyAddRefed))
+    
+    tag = type.tag()
+    
+    if tag in [IDLType.Tags.int8, IDLType.Tags.uint8, IDLType.Tags.int16,
+               IDLType.Tags.uint16, IDLType.Tags.int32]:
+        return """
+  ${jsvalRef} = INT_TO_JSVAL(int32_t(%s));
+  return true;""" % result
+
+    elif tag in [IDLType.Tags.int64, IDLType.Tags.uint64, IDLType.Tags.float,
+                 IDLType.Tags.double]:
+        # XXXbz will cast to double do the "even significand" thing that webidl
+        # calls for for 64-bit ints?  Do we care?
+        return """
+  return JS_NewNumberValue(cx, double(%s), ${jsvalPtr});""" % result
+
+    elif tag == IDLType.Tags.uint32:
+        return """
+  ${jsvalRef} = UINT_TO_JSVAL(%s);
+  return true;""" % result
+
+    elif tag == IDLType.Tags.bool:
+        return """
+  ${jsvalRef} = BOOLEAN_TO_JSVAL(%s);
+  return true;""" % result
+
+    else:
+        raise TypeError("Need to learn to wrap primitive: %s" % type)
+
+def getWrapTemplateForType(type, descriptorProvider, resultAlreadyAddRefed):
+    return getWrapTemplateForTypeImpl(type, "result", descriptorProvider,
+                                      resultAlreadyAddRefed)
+
+class CGCallGenerator(CGThing):
+    """
+    A class to generate an actual call to a C++ object.  Assumes that the C++
+    object is stored in a variable named "self".
+    """
+    def __init__(self, errorReport, argCount, returnType, resultAlreadyAddRefed,
+                 descriptorProvider, nativeMethodName):
+        CGThing.__init__(self)
+
+        isFallible = errorReport is not None
+
+        # XXXbz return values that have to go in outparams go here?
+        args = CGList([CGGeneric("arg" + str(i)) for i in range(argCount)], ", ")
+        resultOutParam = returnType is not None and returnType.isString()
+        # Return values that go in outparams go here
+        if resultOutParam:
+            args.append(CGGeneric("result"))
+        if isFallible:
+            args.append(CGGeneric("rv"))
+
+        if returnType is None or returnType.isVoid():
+            # Nothing to declare
+            result = None
+        elif returnType.isPrimitive() and returnType.tag() in builtinNames:
+            result = CGGeneric(builtinNames[returnType.tag()])
+            if returnType.nullable():
+                result = CGWrapper(result, pre="Nullable<", post=">")
+        elif returnType.isString():
+            result = CGGeneric("nsString")
+        elif returnType.isEnum():
+            if returnType.nullable():
+                raise TypeError("We don't support nullable enum return values")
+            result = CGGeneric(returnType.inner.identifier.name + "::value")
+        elif returnType.isInterface() and not returnType.isArrayBuffer():
+            result = CGGeneric(descriptorProvider.getDescriptor(
+                returnType.inner.identifier.name).typeName)
+            if resultAlreadyAddRefed:
+                result = CGWrapper(result, pre="nsRefPtr<", post=">")
+            else:
+                result = CGWrapper(result, post="*")
+        elif returnType.isCallback():
+            # XXXbz we're going to assume that callback types are always
+            # nullable for now.
+            result = CGGeneric("JSObject*")
+        elif returnType.tag() is IDLType.Tags.any:
+            result = CGGeneric("jsval")
+        else:
+            raise TypeError("Don't know how to declare return value for %s" %
+                            returnType)
+
+        # Build up our actual call
+        self.cgRoot = CGList([], "\n")
+
+        call = CGGeneric(nativeMethodName)
+        call = CGWrapper(call, pre="self->")
+        call = CGList([call, CGWrapper(args, pre="(", post=");")])
+        if result is not None:
+            result = CGWrapper(result, post=" result;")
+            self.cgRoot.prepend(result)
+            if not resultOutParam:
+                call = CGWrapper(call, pre="result = ")
+
+        call = CGWrapper(call, pre="#if 0\n", post="\n#endif")
+        self.cgRoot.append(call)
+
+        if isFallible:
+            self.cgRoot.prepend(CGGeneric("nsresult rv = NS_OK;"))
+            self.cgRoot.append(CGGeneric("if (NS_FAILED(rv)) {"))
+            self.cgRoot.append(CGIndenter(CGGeneric(errorReport)))
+            self.cgRoot.append(CGGeneric("}"))
+
+    def define(self):
+        return self.cgRoot.define()
+
+class PerSignatureCall(CGThing):
+    """
+    This class handles the guts of generating code for a particular
+    call signature.  A call signature consists of three things:
+
+    1) A return type, which can be None to indicate that there is no
+       actual return value (e.g. this is an attribute setter) or an
+       IDLType if there's an IDL type involved (including |void|).
+    2) An argument list, which is allowed to be empty.
+    3) A name of a native method to call.
+
+    We also need to know whether this is a method or a getter/setter
+    to do error reporting correctly.
+
+    The idlNode parameter can be either a method or an attr. We can query
+    |idlNode.identifier| in both cases, so we can be agnostic between the two.
+    """
+    # XXXbz For now each entry in the argument list is either an
+    # IDLArgument or a FakeArgument, but longer-term we may want to
+    # have ways of flagging things like JSContext* or optional_argc in
+    # there.
+    
+    # XXXbz void methods have a signature with a isVoid() type object
+    #       as first element.  Methods with no args have length-0 arg
+    #       lists as second element in signature.
+    # XXXbz if isInterface() true on a type, type.inner is the interface object
+    # XXXbz if isPrimitive() is true on a type, then .tag() will return an
+    #       IDLType.Tags value.  So you can compare
+    #       type.tag() == IDLType.Tags.int8 or whatever.
+    def __init__(self, returnType, arguments, nativeMethodName,
+                 descriptor, idlNode, extendedAttributes):
+        CGThing.__init__(self)
+        self.returnType = returnType
+        self.descriptor = descriptor
+        self.idlNode = idlNode
+        self.extendedAttributes = extendedAttributes
+        # Default to already_AddRefed on the main thread, raw pointer in workers
+        self.resultAlreadyAddRefed = not descriptor.workers
+        
+        self.argCount = len(arguments)
+        cgThings = [CGArgumentConverter(arguments[i], i, self.getArgv(),
+                                        self.getArgc(), self.descriptor) for
+                    i in range(self.argCount)]
+        cgThings.append(CGGeneric("\n"))
+        cgThings.append(CGIndenter(CGCallGenerator(
+                    self.getErrorReport() if self.isFallible() else None,
+                    self.argCount, returnType, self.resultAlreadyAddRefed,
+                    descriptor, nativeMethodName)))
+        self.cgRoot = CGList(cgThings)
+
+    def getArgv(self):
+        assert(False) # Override me
+    def getArgc(self):
+        assert(False) # Override me
+    def getErrorReport(self):
+        assert(False) # Override me
+
+    def isFallible(self):
+        return not 'infallible' in self.extendedAttributes
+
+    def wrap_return_value(self):
+        # Substitute the engine's out-param (vp) into the wrap template for
+        # our return type, yielding code that wraps the native result.
+        resultTemplateValues = {'jsvalRef': '*vp', 'jsvalPtr': 'vp'}
+        return string.Template(
+            getWrapTemplateForType(self.returnType, self.descriptor,
+                                   self.resultAlreadyAddRefed)
+            ).substitute(resultTemplateValues)
+
+    def getErrorReport(self):
+        # C++ that throws a JS exception naming interface.member on failure.
+        return 'return ThrowMethodFailedWithDetails(cx, rv, "%s", "%s");'\
+               % (self.descriptor.interface.identifier.name,
+                  self.idlNode.identifier.name)
+
+    def define(self):
+        # Full body: argument conversion + the call, then result wrapping.
+        return (self.cgRoot.define() + self.wrap_return_value())
+
+class PerSignatureMethodCall(PerSignatureCall):
+    """PerSignatureCall for a regular operation: unwraps argc/argv from the
+    JSNative calling convention and enforces the required-argument count."""
+    def __init__(self, returnType, arguments, nativeMethodName, descriptor,
+                 method, extendedAttributes):
+        PerSignatureCall.__init__(self, returnType, arguments, nativeMethodName,
+                                  descriptor, method, extendedAttributes)
+
+        # Insert our argv-unwrapping at the beginning of our CGThing list
+        if len(arguments) > 0:
+            # Trailing optional arguments don't count toward the minimum.
+            requiredArgs = len(arguments)
+            while requiredArgs and arguments[requiredArgs-1].optional:
+                requiredArgs -= 1
+            if requiredArgs > 0:
+                argv = [CGGeneric(
+                        "// XXXbz is this the right place for this check?  Or should it be more\n"
+                        "// up-front somewhere, not per-signature?\n"
+                        "if (argc < %d) {\n"
+                        "  return Throw(cx, NS_ERROR_XPC_NOT_ENOUGH_ARGS);\n"
+                        "}" % requiredArgs)]
+            else:
+                argv = []
+            argv.append(CGGeneric("JS::Value *argv = JS_ARGV(cx, vp);"))
+            self.cgRoot.prepend(CGWrapper(CGIndenter(CGList(argv, "\n")),
+                                          pre="\n",
+                                          post="\n"))
+
+    def getArgv(self):
+        # No argv variable is emitted when the method takes no arguments.
+        return "argv" if self.argCount > 0 else ""
+    def getArgc(self):
+        return "argc"
+
+class GetterSetterCall(PerSignatureCall):
+    """Shared base for attribute getter/setter calls; both use the JS
+    engine's vp slot as their argv."""
+    def __init__(self, returnType, arguments, nativeMethodName, descriptor,
+                 attr, extendedAttributes):
+        PerSignatureCall.__init__(self, returnType, arguments, nativeMethodName,
+                                  descriptor, attr, extendedAttributes)
+    def getArgv(self):
+        return "vp"
+
+class GetterCall(GetterSetterCall):
+    """Codegen for an attribute getter: no arguments, result lands in vp."""
+    def __init__(self, returnType, nativeMethodName, descriptor, attr,
+                 extendedAttributes):
+        GetterSetterCall.__init__(self, returnType, [], nativeMethodName,
+                                  descriptor, attr, extendedAttributes)
+    def getArgc(self):
+        return "0"
+
+class FakeArgument():
+    """Minimal stand-in for an IDL argument so a setter's value can flow
+    through the normal argument-conversion machinery."""
+    def __init__(self, type):
+        self.type = type
+        self.optional = False
+
+class SetterCall(GetterSetterCall):
+    """Codegen for an attribute setter: one FakeArgument of the attribute's
+    type; no return value (the generated code just returns true)."""
+    def __init__(self, argType, nativeMethodName, descriptor, attr,
+                 extendedAttributes):
+        GetterSetterCall.__init__(self, None, [FakeArgument(argType)],
+                                  nativeMethodName, descriptor, attr,
+                                  extendedAttributes)
+    def wrap_return_value(self):
+        # We have no return value
+        return "\n  return true;"
+    def getArgc(self):
+        return "1"
+
+class CGAbstractBindingMethod(CGAbstractStaticMethod):
+    """Common base for generated JS entry points: unwraps the native 'self'
+    from 'obj', then appends subclass-provided code."""
+    def __init__(self, descriptor, name, returnType, args):
+        CGAbstractStaticMethod.__init__(self, descriptor, name,
+                                        returnType, args)
+    def definition_body(self):
+        return (self.unwrap_this() + self.generate_code())
+
+    def unwrap_this(self):
+        # Declares 'self' and emits an unwrap that reports failure fatally.
+        return (("""
+  %s *self;""" % self.descriptor.nativeClass) +
+                str(FailureFatalConcreteObjectUnwrapper(self.descriptor,
+                                                        "obj", "self")))
+
+    def generate_code(self):
+        assert(False) # Override me
+
+def MakeNativeName(name):
+    # IDL member names are camelCase; native methods are CamelCase.
+    return name[0].upper() + name[1:]
+
+class CGNativeBindingMethod(CGAbstractBindingMethod):
+    """ Class common to all interface methods and attributes. """
+
+    def __init__(self, descriptor, name, returnType, args, baseName):
+        # Collect per-member extended attributes from the descriptor config.
+        self.extendedAttributes = {}
+        if baseName in descriptor.infallible:
+            self.extendedAttributes['infallible'] = True
+        CGAbstractBindingMethod.__init__(self, descriptor, name, returnType,
+                                         args)
+
+class CGNativeMethod(CGNativeBindingMethod):
+    def __init__(self, descriptor, method):
+        self.method = method
+        baseName = method.identifier.name
+        args = [Argument('JSContext*', 'cx'), Argument('uintN', 'argc'),
+                Argument('JS::Value*', 'vp')]
+        CGNativeBindingMethod.__init__(self, descriptor, baseName, 'JSBool',
+                                       args, baseName)
+    def unwrap_this(self):
+         return """
+  JSObject *obj = JS_THIS_OBJECT(cx, vp);
+  if (!obj)
+    return false;""" + CGAbstractBindingMethod.unwrap_this(self)
+
+    def generate_code(self):
+        signatures = self.method.signatures()
+        nativeName = MakeNativeName(self.method.identifier.name)
+        callGenerators = [PerSignatureMethodCall(s[0], s[1], nativeName,
+                                                 self.descriptor, self.method,
+                                                 self.extendedAttributes)
+                          for s in signatures]
+        if len(callGenerators) != 1:
+            raise TypeError("Don't know how to handle overloads yet.  Will need to generate code to pick the right overload based on the arguments, then jump to the right generated code")
+
+        return callGenerators[0].define();
+
+class CGNativeGetter(CGNativeBindingMethod):
+    """Property getter for one IDL attribute, named 'get_<attr>'."""
+    def __init__(self, descriptor, attr):
+        self.attr = attr
+        baseName = attr.identifier.name
+        args = [Argument('JSContext*', 'cx'), Argument('JSObject*', 'obj'),
+                Argument('jsid', 'id'), Argument('JS::Value*', 'vp')]
+        CGNativeBindingMethod.__init__(self, descriptor, 'get_' + baseName,
+                                       'JSBool', args, baseName)
+    def generate_code(self):
+        # Native getters are named GetFoo for IDL attribute 'foo'.
+        nativeMethodName = "Get" + MakeNativeName(self.attr.identifier.name)
+        return GetterCall(self.attr.type, nativeMethodName, self.descriptor,
+                          self.attr, self.extendedAttributes).define()
+
+class CGNativeSetter(CGNativeBindingMethod):
+    """Strict property setter for one IDL attribute, named 'set_<attr>'."""
+    def __init__(self, descriptor, attr):
+        self.attr = attr
+        baseName = attr.identifier.name
+        args = [Argument('JSContext*', 'cx'), Argument('JSObject*', 'obj'),
+                Argument('jsid', 'id'), Argument('JSBool', 'strict'),
+                Argument('JS::Value*', 'vp')]
+        CGNativeBindingMethod.__init__(self, descriptor, 'set_' + baseName,
+                                       'JSBool', args, baseName)
+    def generate_code(self):
+        # Native setters are named SetFoo for IDL attribute 'foo'.
+        nativeMethodName = "Set" + MakeNativeName(self.attr.identifier.name)
+        return SetterCall(self.attr.type, nativeMethodName, self.descriptor,
+                          self.attr, self.extendedAttributes).define()
+
+def getEnumValueName(value):
+    """Map an IDL enum string value to a C++ enumerator name."""
+    # Some enum values can be empty strings.  Others might have weird
+    # characters in them.  Deal with the former by returning "_empty",
+    # deal with possible name collisions from that by throwing if the
+    # enum value is actually "_empty", and throw on any value
+    # containing chars other than [a-z] for now.
+    if value == "_empty":
+        raise SyntaxError('"_empty" is not an IDL enum value we support yet')
+    if value == "":
+        return "_empty"
+    if not re.match("^[a-z]+$", value):
+        raise SyntaxError('Enum value "' + value + '" contains characters '
+                          'outside [a-z]')
+    return value
+
+class CGEnum(CGThing):
+    def __init__(self, enum):
+        CGThing.__init__(self)
+        self.enum = enum
+
+    def declare(self):
+        return """
+  enum value {
+    %s
+  };
+
+  const char* strings [] = {
+    %s,
+    NULL
+  };
+
+""" % (",\n    ".join(map(getEnumValueName, self.enum.values())),
+       ",\n    ".join(['"' + val + '"' for val in self.enum.values()]))
+
+    def define(self):
+        # We only go in the header
+        return "";
+
+class ClassItem:
+    """ Use with CGClass """
+    # Abstract base for everything a CGClass can contain (bases, methods,
+    # members, typedefs, enums); subclasses implement declare()/define().
+    def __init__(self, name, visibility):
+        self.name = name
+        self.visibility = visibility
+    def declare(self, cgClass):
+        assert False
+    def define(self, cgClass):
+        assert False
+
+class ClassBase(ClassItem):
+    """A base class in a CGClass's inheritance list; header-only."""
+    def __init__(self, name, visibility='public'):
+        ClassItem.__init__(self, name, visibility)
+    def declare(self, cgClass):
+        return '%s %s' % (self.visibility, self.name)
+    def define(self, cgClass):
+        # Only in the header
+        return ''
+
+class ClassMethod(ClassItem):
+    def __init__(self, name, returnType, args, inline=False, static=False,
+                 virtual=False, const=False, bodyInHeader=False,
+                 templateArgs=None, visibility='public', body=None):
+        self.returnType = returnType
+        self.args = args
+        self.inline = inline or bodyInHeader
+        self.static = static
+        self.virtual = virtual
+        self.const = const
+        self.bodyInHeader = bodyInHeader
+        self.templateArgs = templateArgs
+        self.body = body
+        ClassItem.__init__(self, name, visibility)
+
+    def getDecorators(self, declaring):
+        decorators = []
+        if self.inline:
+            decorators.append('inline')
+        if declaring:
+            if self.static:
+                decorators.append('static')
+            if self.virtual:
+                decorators.append('virtual')
+        if decorators:
+            return ' '.join(decorators) + ' '
+        return ''
+
+    def getBody(self):
+        # Override me or pass a string to constructor
+        assert self.body is not None
+        return self.body
+
+    def declare(self, cgClass):
+        templateClause = 'template <%s>\n' % ', '.join(self.templateArgs) \
+                         if self.bodyInHeader and self.templateArgs else ''
+        args = ', '.join([str(a) for a in self.args])
+        if self.bodyInHeader:
+            body = '  ' + self.getBody();
+            body = body.replace('\n', '\n  ').rstrip(' ')
+            body = '\n{\n' + body + '\n}'
+        else:
+           body = ';'
+
+        return string.Template("""${templateClause}${decorators}${returnType}
+${name}(${args})${const}${body}
+""").substitute({ 'templateClause': templateClause,
+                  'decorators': self.getDecorators(True),
+                  'returnType': self.returnType,
+                  'name': self.name,
+                  'const': ' const' if self.const else '',
+                  'args': args,
+                  'body': body })
+
+    def define(self, cgClass):
+        if self.bodyInHeader:
+            return ''
+
+        templateArgs = cgClass.templateArgs
+        if templateArgs:
+            if cgClass.templateSpecialization:
+                templateArgs = \
+                    templateArgs[len(cgClass.templateSpecialization):]
+
+        if templateArgs:
+            templateClause = \
+                'template <%s>\n' % ', '.join([str(a) for a in templateArgs])
+        else:
+            templateClause = ''
+
+        args = ', '.join([str(a) for a in self.args])
+
+        body = '  ' + self.getBody()
+        body = body.replace('\n', '\n  ').rstrip(' ')
+
+        return string.Template("""${templateClause}${decorators}${returnType}
+${className}::${name}(${args})${const}
+{
+${body}
+}\n
+""").substitute({ 'templateClause': templateClause,
+                  'decorators': self.getDecorators(False),
+                  'returnType': self.returnType,
+                  'className': cgClass.getNameString(),
+                  'name': self.name,
+                  'args': args,
+                  'const': ' const' if self.const else '',
+                  'body': body })
+
+class ClassMember(ClassItem):
+    def __init__(self, name, type, visibility="private", static=False,
+                 body=None):
+        self.type = type;
+        self.static = static
+        self.body = body
+        ClassItem.__init__(self, name, visibility)
+
+    def getBody(self):
+        assert self.body is not None
+        return self.body
+
+    def declare(self, cgClass):
+        return '%s%s %s;\n' % ('static ' if self.static else '', self.type,
+                               self.name)
+
+    def define(self, cgClass):
+        if not self.static:
+            return ''
+        return '%s %s::%s = %s;\n' % (self.type, cgClass.getNameString(),
+                                      self.name, self.getBody())
+
+class ClassTypedef(ClassItem):
+    """A typedef inside a CGClass; header-only."""
+    def __init__(self, name, type, visibility="public"):
+        self.type = type
+        ClassItem.__init__(self, name, visibility)
+
+    def declare(self, cgClass):
+        return 'typedef %s %s;\n' % (self.type, self.name)
+
+    def define(self, cgClass):
+        # Only goes in the header
+        return ''
+
+class ClassEnum(ClassItem):
+    def __init__(self, name, entries, values=None, visibility="public"):
+        self.entries = entries
+        self.values = values
+        ClassItem.__init__(self, name, visibility)
+
+    def declare(self, cgClass):
+        entries = []
+        for i in range(0, len(self.entries)):
+            if i >= len(self.values):
+                entry = '%s' % self.entries[i]
+            else:
+                entry = '%s = %s' % (self.entries[i], self.values[i])
+            entries.append(entry)
+        name = '' if not self.name else ' ' + self.name
+        return 'enum%s\n{\n  %s\n};\n' % (name, ',\n  '.join(entries))
+
+    def define(self, cgClass):
+        # Only goes in the header
+        return ''
+
+class CGClass(CGThing):
+    """Codegen for a complete C++ class or struct: bases, members, methods,
+    typedefs and enums, with optional template args and specialization.
+
+    NOTE(review): the mutable default arguments ([]) are shared across
+    instances; safe only as long as callers never mutate the stored lists.
+    Consider switching to None defaults — TODO confirm no caller mutates.
+    """
+    def __init__(self, name, bases=[], members=[], methods=[], typedefs = [],
+                 enums=[], templateArgs=[], templateSpecialization=[],
+                 isStruct=False, indent=''):
+        CGThing.__init__(self)
+        self.name = name
+        self.bases = bases
+        self.members = members
+        self.methods = methods
+        self.typedefs = typedefs
+        self.enums = enums
+        self.templateArgs = templateArgs
+        self.templateSpecialization = templateSpecialization
+        self.isStruct = isStruct
+        self.indent = indent
+        self.defaultVisibility ='public' if isStruct else 'private'
+
+    def getNameString(self):
+        # Class name, plus "<...>" when this is a template specialization.
+        className = self.name
+        if self.templateSpecialization:
+            className = className + \
+                '<%s>' % ', '.join([str(a) for a
+                                    in self.templateSpecialization])
+        return className
+
+    def declare(self):
+        result = ''
+        if self.templateArgs:
+            # Emit only the template params not fixed by the specialization.
+            templateArgs = [str(a) for a in self.templateArgs]
+            templateArgs = templateArgs[len(self.templateSpecialization):]
+            result = result + self.indent + 'template <%s>\n' \
+                     % ','.join([str(a) for a in templateArgs])
+
+        type = 'struct' if self.isStruct else 'class'
+
+        if self.templateSpecialization:
+            specialization = \
+                '<%s>' % ', '.join([str(a) for a in self.templateSpecialization])
+        else:
+            specialization = ''
+
+        result = result + '%s%s %s%s' \
+                 % (self.indent, type, self.name, specialization)
+
+        if self.bases:
+            result = result + ' : %s' % ', '.join([d.declare(self) for d in self.bases])
+
+        result = result + '\n%s{\n' % self.indent
+
+        def declareMembers(cgClass, memberList, defaultVisibility, itemCount,
+                           separator=''):
+            # Emit one member list grouped by access specifier.  Returns the
+            # text plus the trailing (visibility, itemCount) state so the
+            # next list can continue without redundant "public:" labels.
+            members = { 'private': [], 'protected': [], 'public': [] }
+
+            for member in memberList:
+                members[member.visibility].append(member)
+
+
+            if defaultVisibility == 'public':
+                order = [ 'public', 'protected', 'private' ]
+            else:
+                order = [ 'private', 'protected', 'public' ]
+
+            result = ''
+
+            lastVisibility = defaultVisibility
+            for visibility in order:
+                list = members[visibility]
+                if list:
+                    if visibility != lastVisibility:
+                        if itemCount:
+                            result = result + '\n'
+                        result = result + visibility + ':\n'
+                        itemCount = 0
+                    for member in list:
+                        if itemCount == 0:
+                            result = result + '  '
+                        else:
+                            result = result + separator + '  '
+                        declaration = member.declare(cgClass)
+                        declaration = declaration.replace('\n', '\n  ')
+                        declaration = declaration.rstrip(' ')
+                        result = result + declaration
+                        itemCount = itemCount + 1
+                    lastVisibility = visibility
+            return (result, lastVisibility, itemCount)
+
+        order = [(self.enums, ''), (self.typedefs, ''), (self.members, ''),
+                 (self.methods, '\n')]
+
+        lastVisibility = self.defaultVisibility
+        itemCount = 0
+        for (memberList, separator) in order:
+            (memberString, lastVisibility, itemCount) = \
+                declareMembers(self, memberList, lastVisibility, itemCount,
+                               separator)
+            if self.indent:
+                memberString = self.indent + memberString
+                memberString = memberString.replace('\n', '\n' + self.indent)
+                memberString = memberString.rstrip(' ')
+            result = result + memberString
+
+        result = result + self.indent + '};\n\n'
+        return result
+
+    def define(self):
+        def defineMembers(cgClass, memberList, itemCount, separator=''):
+            # Concatenate out-of-line definitions for one member list.
+            result = ''
+            for member in memberList:
+                if itemCount != 0:
+                    result = result + separator
+                result = result + member.define(cgClass)
+                itemCount = itemCount + 1
+            return (result, itemCount)
+
+        order = [(self.members, '\n'), (self.methods, '\n')]
+
+        result = ''
+        itemCount = 0
+        for (memberList, separator) in order:
+            (memberString, itemCount) = defineMembers(self, memberList,
+                                                      itemCount, separator)
+            result = result + memberString
+        return result
+
+class CGPrototypeTraitsClass(CGClass):
+    """Specialization of PrototypeTraits<id> for one descriptor, giving its
+    inheritance Depth and NativeClass."""
+    def __init__(self, descriptor, indent=''):
+        templateArgs = [Argument('prototypes::ID', 'PrototypeID')]
+        templateSpecialization = ['prototypes::id::' + descriptor.name]
+        enums = [ClassEnum('', ['Depth'],
+                           [descriptor.interface.inheritanceDepth()])]
+        typedefs = [ClassTypedef('NativeClass', descriptor.nativeClass)]
+        CGClass.__init__(self, 'PrototypeTraits', indent=indent,
+                         templateArgs=templateArgs,
+                         templateSpecialization=templateSpecialization,
+                         enums=enums, typedefs=typedefs, isStruct=True)
+
+class CGPrototypeIDMapClass(CGClass):
+    """Specialization of PrototypeIDMap<ConcreteClass> mapping a native
+    class back to its prototype ID."""
+    def __init__(self, descriptor, indent=''):
+        templateArgs = [Argument('class', 'ConcreteClass')]
+        templateSpecialization = [descriptor.nativeClass]
+        enums = [ClassEnum('', ['PrototypeID'],
+                           ['prototypes::id::' + descriptor.name])]
+        CGClass.__init__(self, 'PrototypeIDMap', indent=indent,
+                         templateArgs=templateArgs,
+                         templateSpecialization=templateSpecialization,
+                         enums=enums, isStruct=True)
+
+class CGClassForwardDeclare(CGThing):
+    """Forward declaration of a class or struct; header-only."""
+    def __init__(self, name, isStruct=False):
+        CGThing.__init__(self)
+        self.name = name
+        self.isStruct = isStruct
+    def declare(self):
+        type = 'struct' if self.isStruct else 'class'
+        return '%s %s;\n' % (type, self.name)
+    def define(self):
+        # Header only
+        return ''
+
+def stripTrailingWhitespace(text):
+    # Strip trailing whitespace from each line of |text|.
+    # NOTE: splitlines()/join also drops any final newline of |text|.
+    lines = text.splitlines()
+    for i in range(len(lines)):
+        lines[i] = lines[i].rstrip()
+    return '\n'.join(lines)
+
+class CGDescriptor(CGThing):
+    """All generated code for one descriptor: per-member natives, class
+    hooks, JSClasses and proto-object helpers, wrapped in a namespace
+    named after the descriptor."""
+    def __init__(self, descriptor):
+        CGThing.__init__(self)
+
+        # XXXbholley - Not everything should actually have a jsclass.
+        cgThings = [CGNativeMethod(descriptor, m) for m in
+                    descriptor.interface.members if m.isMethod()]
+        cgThings.extend([CGNativeGetter(descriptor, a) for a in
+                         descriptor.interface.members if a.isAttr()])
+        # Setters only for non-readonly attributes.
+        cgThings.extend([CGNativeSetter(descriptor, a) for a in
+                         descriptor.interface.members if
+                         a.isAttr() and not a.readonly])
+
+        # Always have a finalize hook, regardless of whether the class wants a
+        # custom hook.
+        cgThings.append(CGClassFinalizeHook(descriptor))
+
+        # Only generate a trace hook if the class wants a custom hook.
+        if (descriptor.customTrace):
+            cgThings.append(CGClassTraceHook(descriptor))
+
+        if descriptor.interface.hasInterfaceObject:
+            cgThings.append(CGConstructorJSClass(descriptor))
+
+        cgThings.extend([CGDOMJSClass(descriptor),
+                         CGPrototypeJSClass(descriptor),
+                         CGCreateProtoObjectMethod(descriptor),
+                         CGIndenter(CGGetProtoObjectMethod(descriptor))])
+
+        cgThings = CGList(cgThings)
+        cgThings = CGWrapper(cgThings, post='\n')
+        self.cgRoot = CGWrapper(CGNamespace(descriptor.name, cgThings),
+                                post='\n')
+
+    def declare(self):
+        return self.cgRoot.declare()
+    def define(self):
+        return self.cgRoot.define()
+
+class CGNamespacedEnum(CGThing):
+    """A C++ enum (plus a trailing _<name>_Count sentinel and a convenience
+    typedef) emitted inside the given namespace.  Header-only."""
+    def __init__(self, namespace, enumName, names, values, comment=""):
+
+        if not values:
+            values = []
+
+        # Account for explicit enum values.
+        entries = []
+        for i in range(0, len(names)):
+            if len(values) > i and values[i] is not None:
+                entry = "%s = %s" % (names[i], values[i])
+            else:
+                entry = names[i]
+            entries.append(entry)
+
+        # Append a Count.
+        entries.append('_' + enumName + '_Count')
+
+        # Indent.
+        entries = ['  ' + e for e in entries]
+
+        # Build the enum body.
+        enumstr = comment + 'enum %s\n{\n%s\n};\n' % (enumName, ',\n'.join(entries))
+        curr = CGGeneric(declare=enumstr)
+
+        # Add some whitespace padding.
+        curr = CGWrapper(curr, pre='\n',post='\n')
+
+        # Add the namespace.
+        curr = CGNamespace(namespace, curr)
+
+        # Add the typedef
+        typedef = '\ntypedef %s::%s %s;\n\n' % (namespace, enumName, enumName)
+        curr = CGList([curr, CGGeneric(declare=typedef)])
+
+        # Save the result.
+        self.node = curr
+
+    def declare(self):
+        return self.node.declare()
+    def define(self):
+        assert False # Only for headers.
+
+class CGBindingRoot(CGThing):
+    """
+    Root codegen class for binding generation. Instantiate the class, and call
+    declare or define to generate header or cpp code (respectively).
+    """
+    def __init__(self, config, prefix, webIDLFile):
+        descriptors = config.getConcreteDescriptors(webIDLFile)
+
+        forwardDeclares = []
+
+        for x in descriptors:
+            nativeClass = x.nativeClass
+            components = x.nativeClass.split('::')
+            declare = CGClassForwardDeclare(components[-1])
+            if len(components) > 1:
+                declare = CGNamespace.build(components[:-1],
+                                            CGWrapper(declare, declarePre='\n',
+                                                      declarePost='\n'),
+                                            declareOnly=True)
+            forwardDeclares.append(CGWrapper(declare, declarePost='\n'))
+
+        forwardDeclares = CGList(forwardDeclares)
+
+        traitsClasses = [CGPrototypeTraitsClass(d) for d in descriptors]
+
+        # We must have a 1:1 mapping here, skip for prototypes that have more
+        # than one concrete class implementation.
+        traitsClasses.extend([CGPrototypeIDMapClass(d) for d in descriptors
+                              if d.uniqueImplementation])
+
+        # Wrap all of that in our namespaces.
+        traitsClasses = CGNamespace.build(['mozilla', 'dom', 'bindings'],
+                                     CGWrapper(CGList(traitsClasses), pre='\n'))
+        traitsClasses = CGWrapper(traitsClasses, post='\n')
+
+        # Do codegen for all the descriptors and enums.
+        cgthings = [CGWrapper(CGNamespace.build([e.identifier.name],
+                                                CGEnum(e)),
+                              post="\n") for e in config.getEnums(webIDLFile)]
+        cgthings.extend([CGDescriptor(x) for x
+                         in config.getConcreteDescriptors(webIDLFile)])
+        curr = CGList(cgthings)
+
+        # Wrap all of that in our namespaces.
+        curr = CGNamespace.build(['mozilla', 'dom', 'bindings', 'prototypes'],
+                                 CGWrapper(curr, pre="\n"))
+
+        curr = CGList([forwardDeclares, traitsClasses, curr])
+
+        # Add header includes.
+        curr = CGHeaders(config.getConcreteDescriptors(webIDLFile),
+                         ['DOMJSClass.h', 'Utils.h'], curr)
+
+        # Add include guards.
+        curr = CGIncludeGuard(prefix, curr)
+
+        # Add the auto-generated comment.
+        curr = CGWrapper(curr, pre=AUTOGENERATED_WARNING_COMMENT)
+
+        # Store the final result.
+        self.root = curr
+
+    def declare(self):
+        return stripTrailingWhitespace(self.root.declare())
+    def define(self):
+        return stripTrailingWhitespace(self.root.define())
+
+
+class CGGlobalRoot():
+    """
+    Root codegen class for global (non-per-WebIDL-file) code generation.
+
+    It's possible that we may at some point wish to generate more than just
+    PrototypeList.h. As such, this class eschews the define/declare
+    architecture (and thus does not inherit from CGThing).
+
+    To generate code, call the method associated with the target.
+    """
+    def __init__(self, config, prefix):
+        self.config = config
+        self.prefix = prefix
+
+    def prototypeList_h(self):
+        """Generate the contents of PrototypeList.h."""
+
+        # Prototype ID enum.
+        protos = [d.name for d in self.config.getConcreteDescriptors()]
+        idEnum = CGNamespacedEnum('id', 'ID', protos, [0])
+
+        # Wrap all of that in our namespaces.
+        idEnum = CGNamespace.build(['mozilla', 'dom', 'bindings', 'prototypes'],
+                                   CGWrapper(idEnum, pre='\n'))
+        idEnum = CGWrapper(idEnum, post='\n')
+
+        traitsDecl = CGGeneric(declare="""
+template <prototypes::ID PrototypeID>
+struct PrototypeTraits;
+
+template <class ConcreteClass>
+struct PrototypeIDMap;
+""")
+
+        traitsDecl = CGNamespace.build(['mozilla', 'dom', 'bindings'],
+                                        CGWrapper(traitsDecl, post='\n'))
+
+        curr = CGList([idEnum, traitsDecl])
+
+        # Add include guards.
+        curr = CGIncludeGuard(self.prefix, curr)
+
+        # Add the auto-generated comment.
+        curr = CGWrapper(curr, pre=AUTOGENERATED_WARNING_COMMENT)
+
+        # Do header generation on the result.
+        return stripTrailingWhitespace(curr.declare())
new file mode 100644
--- /dev/null
+++ b/dom/bindings/Configuration.py
@@ -0,0 +1,119 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+autogenerated_comment = "/* THIS FILE IS AUTOGENERATED - DO NOT EDIT */\n"
+
+class Configuration:
+    """
+    Represents global configuration state based on IDL parse data and
+    the configuration file.
+    """
+    def __init__(self, filename, parseData):
+
+        # Read the configuration file.
+        glbl = {}
+        execfile(filename, glbl)
+        config = glbl['DOMInterfaces']
+
+        # Build descriptors for all the interfaces we have in the parse data.
+        # This allows callers to specify a subset of interfaces by filtering
+        # |parseData|.
+        self.descriptors = []
+        self.interfaces = {}
+        for iface in parseData:
+            if not iface.isInterface(): continue
+            self.interfaces[iface.identifier.name] = iface
+            if iface.identifier.name not in config: continue
+            entry = config[iface.identifier.name]
+            if not isinstance(entry, list):
+                assert isinstance(entry, dict)
+                entry = [entry]
+            self.descriptors.extend([Descriptor(self, iface, x) for x in entry])
+
+        # Mark the descriptors for which only a single nativeClass implements
+        # an interface.
+        for descriptor in self.descriptors:
+            intefaceName = descriptor.interface.identifier.name
+            otherDescriptors = [d for d in self.descriptors
+                                if d.interface.identifier.name == intefaceName]
+            descriptor.uniqueImplementation = len(otherDescriptors) == 1
+
+        self.enums = [e for e in parseData if e.isEnum()]
+
+        # Keep the descriptor list sorted for determinism.
+        self.descriptors.sort(lambda x,y: cmp(x.name, y.name))
+
+    def getInterface(self, ifname):
+        return self.interfaces[ifname]
+    def getAllDescriptors(self, webIDLFile=None):
+        if not webIDLFile:
+            return self.descriptors
+        else:
+            return filter(lambda x: x.interface.filename() == webIDLFile, self.descriptors)
+    def getConcreteDescriptors(self, webIDLFile=None):
+        return filter(lambda x: x.concrete, self.getAllDescriptors(webIDLFile))
+    def getDescriptorsForInterface(self, iface):
+        return filter(lambda x: x.interface is iface, self.descriptors)
+
+    def getEnums(self, webIDLFile):
+        return filter(lambda e: e.filename() == webIDLFile, self.enums)
+
+class Descriptor:
+    """
+    Represents a single descriptor for an interface. See Bindings.conf.
+    """
+    def __init__(self, config, interface, desc):
+        self.config = config
+        self.interface = interface
+
+        # Read the desc, and fill in the relevant defaults.
+        self.concrete = desc['concrete']
+        self.workers = desc.get('workers', False)
+        self.nativeIsISupports = not self.workers
+        if self.concrete:
+            self.nativeClass = desc['nativeClass']
+            self.typeName = self.nativeClass
+        else:
+            self.nativeInterface = desc.get('nativeInterface', 'XXXFillMeInbz!')
+            self.typeName = self.nativeInterface
+
+        headerDefault = self.typeName
+        headerDefault = headerDefault.split("::")[-1] + ".h"
+        self.headerFile = desc.get('headerFile', headerDefault)
+
+        self.customTrace = desc.get('customTrace', self.workers)
+        self.customFinalize = desc.get('customFinalize', self.workers)
+
+        def make_name(name):
+            return name + "_workers" if self.workers else name
+        self.name = make_name(interface.identifier.name)
+
+        infallible = desc.get('infallible', [])
+        if not isinstance(infallible, list):
+            assert isinstance(infallible, string)
+            infallible = [infallible]
+        self.infallible = infallible
+
+        # Build the prototype chain.
+        self.prototypeChain = []
+        parent = interface
+        while parent:
+            self.prototypeChain.insert(0, make_name(parent.identifier.name))
+            parent = parent.parent
+
+    def getDescriptor(self, interfaceName):
+        """
+        Gets the appropriate descriptor for the given interface name given the
+        context of the current descriptor. This selects the appropriate
+        implementation for cases like workers.
+        """
+        iface = self.config.getInterface(interfaceName)
+        descriptors = self.config.getDescriptorsForInterface(iface)
+
+        # The only filter we currently have is workers vs non-workers.
+        matches = filter(lambda x: x.workers is self.workers, descriptors)
+
+        # After filtering, we should have exactly one result.
+        assert len(matches) is 1
+        return matches[0]
new file mode 100644
--- /dev/null
+++ b/dom/bindings/DOMJSClass.h
@@ -0,0 +1,67 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_dom_bindings_DOMJSClass_h
+#define mozilla_dom_bindings_DOMJSClass_h
+
+#include "jsapi.h"
+#include "jsfriendapi.h"
+
+#include "mozilla/dom/bindings/PrototypeList.h" // auto-generated
+
+#define DOM_OBJECT_SLOT 0
+
+namespace mozilla {
+namespace dom {
+namespace bindings {
+
+/*
+ * Special JSClass for reflected DOM objects.
+ */
+struct DOMJSClass
+{
+  // It would be nice to just inherit from JSClass, but that precludes pure
+  // compile-time initialization of the form |DOMJSClass = {...};|, since C++
+  // only allows brace initialization for aggregate/POD types.
+  JSClass mBase;
+
+  // A list of interfaces that this object implements, in order of decreasing
+  // derivedness.
+  const prototypes::ID mInterfaceChain[prototypes::id::_ID_Count];
+
+  // We cache the VTable index of GetWrapperCache for objects that support it.
+  //
+  // -1 indicates that GetWrapperCache is not implemented on the underlying object.
+  const int16_t mGetWrapperCacheVTableOffset;
+
+  // We store the DOM object in reserved slot DOM_OBJECT_SLOT.
+  // Sometimes it's an nsISupports and sometimes it's not; this class tells
+  // us which it is.
+  const bool mDOMObjectIsISupports;
+
+  // Recover the DOMJSClass from its embedded JSClass.  This relies on mBase
+  // being the first member, so the two pointers coincide; the assertion
+  // verifies the class was really created as a DOM class.
+  static DOMJSClass* FromJSClass(JSClass *base) {
+    MOZ_ASSERT(base->flags & JSCLASS_IS_DOMJSCLASS);
+    return reinterpret_cast<DOMJSClass*>(base);
+  }
+  static const DOMJSClass* FromJSClass(const JSClass *base) {
+    MOZ_ASSERT(base->flags & JSCLASS_IS_DOMJSCLASS);
+    return reinterpret_cast<const DOMJSClass*>(base);
+  }
+
+  // Same, starting from the engine-internal js::Class representation.
+  static DOMJSClass* FromJSClass(js::Class *base) {
+    return FromJSClass(Jsvalify(base));
+  }
+  static const DOMJSClass* FromJSClass(const js::Class *base) {
+    return FromJSClass(Jsvalify(base));
+  }
+
+  JSClass* ToJSClass() { return &mBase; }
+};
+
+} // namespace bindings
+} // namespace dom
+} // namespace mozilla
+
+#endif /* mozilla_dom_bindings_DOMJSClass_h */
new file mode 100644
--- /dev/null
+++ b/dom/bindings/GlobalGen.py
@@ -0,0 +1,63 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# We do one global pass over all the WebIDL to generate our prototype enum
+# and generate information for subsequent phases.
+
+import os
+import cStringIO
+import WebIDL
+import cPickle
+from Configuration import *
+from Codegen import CGGlobalRoot, replaceFileIfChanged
+
+def generate_prototype_list(config, outputprefix):
+
+    filename = outputprefix + '.h'
+    root = CGGlobalRoot(config, outputprefix)
+    if replaceFileIfChanged(filename, root.prototypeList_h()):
+        print "Generating prototype list: %s" % (filename)
+    else:
+        print "Prototype list hasn't changed - not touching %s" % (filename)
+
def main():
    """Driver: parse every WebIDL file, pickle the parser results for the
    per-binding generation steps, and emit the prototype list header."""

    # Command-line handling.
    from optparse import OptionParser
    usageString = "usage: %prog [options] webidldir [files]"
    optParser = OptionParser(usage=usageString)
    optParser.add_option("--verbose-errors", action='store_true', default=False,
                         help="When an error happens, display the Python traceback.")
    (options, args) = optParser.parse_args()

    if len(args) < 2:
        optParser.error(usageString)

    configFile = args[0]
    baseDir = args[1]
    fileList = args[2:]

    # Feed every WebIDL file into a single parser instance.
    parser = WebIDL.Parser()
    for idlFilename in fileList:
        fullPath = os.path.normpath(os.path.join(baseDir, idlFilename))
        idlFile = open(fullPath, 'r')
        try:
            idlText = idlFile.read()
        finally:
            idlFile.close()
        parser.parse(idlText, fullPath)
    parserResults = parser.finish()

    # Persist the parse results for later build phases (BindingGen.py).
    resultsFile = open('ParserResults.pkl', 'wb')
    try:
        cPickle.dump(parserResults, resultsFile, -1)
    finally:
        resultsFile.close()

    # Load the configuration.
    config = Configuration(configFile, parserResults)

    # Generate the prototype list header.
    generate_prototype_list(config, "PrototypeList")
new file mode 100644
--- /dev/null
+++ b/dom/bindings/Makefile.in
@@ -0,0 +1,90 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+DEPTH            = ../..
+topsrcdir        = @top_srcdir@
+srcdir           = @srcdir@
+VPATH            = @srcdir@
+
+MODULE           = dom
+LIBRARY_NAME     = dombindings_s
+LIBXUL_LIBRARY   = 1
+FORCE_STATIC_LIB = 1
+
+include $(topsrcdir)/config/config.mk
+
+# Need this to find all our DOM source files.
+include $(topsrcdir)/dom/dom-config.mk
+
+# Expected to provide $(webidl_base) and $(webidl_files), used below.
+include $(topsrcdir)/dom/webidl/WebIDL.mk
+
+# Each Foo.webidl yields a generated FooBinding.h / FooBinding.cpp pair.
+binding_include_path := mozilla/dom/bindings
+binding_header_files := $(subst .webidl,Binding.h,$(webidl_files))
+binding_cpp_files := $(subst .webidl,Binding.cpp,$(webidl_files))
+
+CPPSRCS = \
+  $(binding_cpp_files) \
+  Utils.cpp \
+  $(NULL)
+
+EXPORTS_NAMESPACES = $(binding_include_path)
+
+EXPORTS_$(binding_include_path) = \
+  DOMJSClass.h \
+  PrototypeList.h \
+  Utils.h \
+  $(binding_header_files) \
+  $(NULL)
+
+LOCAL_INCLUDES += -I$(topsrcdir)/js/xpconnect/src
+
+include $(topsrcdir)/config/rules.mk
+
+# Regenerate a binding whenever the generator, its configuration, or the
+# corresponding .webidl file changes.
+bindinggen_dependencies := \
+  BindingGen.py \
+  Bindings.conf \
+  Configuration.py \
+  Codegen.py \
+  ParserResults.pkl \
+  $(NULL)
+
+$(binding_header_files): %Binding.h: $(bindinggen_dependencies) \
+                                     $(webidl_base)/%.webidl \
+                                     $(NULL)
+	$(PYTHON) $(topsrcdir)/config/pythonpath.py \
+    -I$(topsrcdir)/other-licenses/ply -I$(srcdir)/parser \
+    $(srcdir)/BindingGen.py header $(srcdir)/Bindings.conf $*Binding \
+    $(webidl_base)/$*.webidl
+
+$(binding_cpp_files): %Binding.cpp: $(bindinggen_dependencies) \
+                                    $(webidl_base)/%.webidl \
+                                    $(NULL)
+	$(PYTHON) $(topsrcdir)/config/pythonpath.py \
+    -I$(topsrcdir)/other-licenses/ply -I$(srcdir)/parser \
+    $(srcdir)/BindingGen.py cpp $(srcdir)/Bindings.conf $*Binding \
+    $(webidl_base)/$*.webidl
+
+# GlobalGen.py writes PrototypeList.h while producing ParserResults.pkl.
+PrototypeList.h: ParserResults.pkl
+
+globalgen_dependencies := \
+  GlobalGen.py \
+  Bindings.conf \
+  Configuration.py \
+  Codegen.py \
+  $(NULL)
+
+# One global pass over all WebIDL: parse everything and pickle the results
+# for the per-binding rules above.
+ParserResults.pkl: $(globalgen_dependencies) \
+                   $(addprefix $(webidl_base)/, $(webidl_files))
+	$(PYTHON_PATH) -I$(topsrcdir)/other-licenses/ply -I$(srcdir)/parser \
+    $(srcdir)/GlobalGen.py $(srcdir)/Bindings.conf $(webidl_base) \
+    $(webidl_files)
+
+GARBAGE += \
+  $(binding_header_files) \
+  $(binding_cpp_files) \
+  PrototypeList.h \
+  ParserResults.pkl \
+  webidlyacc.py \
+  parser.out \
+  $(NULL)
new file mode 100644
--- /dev/null
+++ b/dom/bindings/Utils.cpp
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* vim: set ts=2 sw=2 et tw=79: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "Utils.h"
+
+namespace mozilla {
+namespace dom {
+namespace bindings {
+
+JSObject*
+CreateProtoObject(JSContext *cx, JSObject *parentProto,
+                  JSClass *protoClass,
+                  JSClass *constructorClass,
+                  JSFunctionSpec *methods,
+                  JSPropertySpec *properties,
+                  JSObject *global,
+                  const char* name)
+{
+  // Create a prototype object inheriting from |parentProto|, define
+  // |methods| and |properties| on it, and -- when |constructorClass| is
+  // non-null -- create a linked interface object exposed on |global| under
+  // |name|.  Returns the prototype object, or NULL on failure.
+  MOZ_ASSERT(bool(name) == bool(constructorClass),
+             "Must have name precisely when we have an interface object");
+
+  JSObject* ourProto = JS_NewObject(cx, protoClass, parentProto, global);
+  if (!ourProto) {
+    return NULL;
+  }
+
+  // The interface object below is created with Function.prototype as its
+  // prototype.
+  JSObject* functionProto = JS_GetFunctionPrototype(cx, global);
+  if (!functionProto) {
+    return NULL;
+  }
+
+  if (methods && !JS_DefineFunctions(cx, ourProto, methods)) {
+    return NULL;
+  }
+
+  if (properties && !JS_DefineProperties(cx, ourProto, properties)) {
+    return NULL;
+  }
+
+  if (constructorClass) {
+    JSObject* constructor =
+      JS_NewObject(cx, constructorClass, functionProto, global);
+    if (!constructor) {
+      return NULL;
+    }
+
+    // Wire up constructor.prototype and prototype.constructor.
+    if (!JS_LinkConstructorAndPrototype(cx, constructor, ourProto)) {
+      return NULL;
+    }
+
+    if (!JS_DefineProperty(cx, global, name, OBJECT_TO_JSVAL(constructor),
+                           NULL, NULL, 0)) {
+      return NULL;
+    }
+  }
+
+  return ourProto;
+}
+
+// Fallback used when a new-binding WrapObject() returned null: try to wrap
+// |value| through the legacy XPConnect path instead, throwing if that also
+// fails.
+// NOTE(review): this function template is defined in a .cpp file; callers in
+// other translation units need an explicit instantiation somewhere or they
+// will fail to link -- confirm the generated bindings provide one.
+template <class T>
+bool
+HandleNewBindingWrappingFailure(JSContext *cx, JSObject *scope,
+                                T *value, jsval *vp)
+{
+  // If WrapObject() already reported an error, just propagate the failure.
+  if (JS_IsExceptionPending(cx)) {
+    return false;
+  }
+
+  XPCLazyCallContext lccx(JS_CALLER, cx, scope);
+
+  nsCOMPtr<nsISupports> canonical;
+  if (NS_SUCCEEDED(CallQueryInterface(value, getter_AddRefs(canonical)))) {
+    xpcObjectHelper helper(canonical);
+    nsresult rv;
+    if (XPCConvert::NativeInterface2JSObject(lccx, vp, NULL, helper, NULL, NULL,
+                                             true, OBJ_IS_NOT_GLOBAL, &rv)) {
+      return true;
+    }
+    
+    if (!JS_IsExceptionPending(cx)) {
+      return Throw(cx, rv);
+    }
+  }
+
+  // NOTE(review): we also reach here when NativeInterface2JSObject failed
+  // with an exception already pending, reporting BAD_CONVERT_JS on top of
+  // it -- confirm that is intended.
+  return Throw(cx, NS_ERROR_XPC_BAD_CONVERT_JS);
+}
+
+} // namespace bindings
+} // namespace dom
+} // namespace mozilla
+
new file mode 100644
--- /dev/null
+++ b/dom/bindings/Utils.h
@@ -0,0 +1,272 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
+/* vim: set ts=2 sw=2 et tw=79: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_dom_bindings_Utils_h__
+#define mozilla_dom_bindings_Utils_h__
+
+#include "mozilla/dom/bindings/DOMJSClass.h"
+
+#include "jsapi.h"
+
+#include "XPCQuickStubs.h"
+#include "XPCWrapper.h"
+
+/* All DOM globals must have a slot at DOM_PROTOTYPE_SLOT */
+#define DOM_PROTOTYPE_SLOT (JSCLASS_GLOBAL_SLOT_COUNT + 1)
+
+namespace mozilla {
+namespace dom {
+namespace bindings {
+
+// Report a JS error describing |rv| and return false, so callers can write
+// |return Throw(cx, rv);|.
+inline bool
+Throw(JSContext* cx, nsresult rv)
+{
+  // XXX Introduce exception machinery.
+  JS_ReportError(cx, "Exception thrown (nsresult = %x).", rv);
+  return false;
+}
+
+// Method-failure hook used by generated bindings; the interface and member
+// names are not yet included in the reported message.
+inline bool
+ThrowMethodFailedWithDetails(JSContext* cx, nsresult rv,
+                             const char* /* ifaceName */,
+                             const char* /* memberName */)
+{
+  return Throw(cx, rv);
+}
+
+inline bool
+IsDOMClass(JSClass *clasp)
+{
+  return clasp->flags & JSCLASS_IS_DOMJSCLASS;
+}
+
+// Extract the native object stored in a DOM JSObject's reserved slot.
+// The caller vouches (and the assertion checks) that |obj| is a DOM object;
+// the caller also vouches that the stored native really is a T.
+template <class T>
+inline T*
+UnwrapDOMObject(JSObject *obj)
+{
+  MOZ_ASSERT(IsDOMClass(js::GetObjectJSClass(obj)));
+  return static_cast<T*>(js::GetReservedSlot(obj,
+                                             DOM_OBJECT_SLOT).toPrivate());
+}
+
+// Unwrap |obj| -- looking through security wrappers if needed -- to the
+// native T it reflects, verifying via the DOMJSClass interface chain that
+// the native safely casts to T.  Returns an XPConnect error code otherwise.
+template <prototypes::ID PrototypeID, class T>
+inline nsresult
+UnwrapObject(JSContext *cx, JSObject *obj, T **value)
+{
+  /* First check to see whether we have a DOM object */
+  JSClass *clasp = js::GetObjectJSClass(obj);
+  if (!IsDOMClass(clasp)) {
+    /* Maybe we have a security wrapper or outer window? */
+    if (!js::IsWrapper(obj)) {
+      /* Not a DOM object, not a wrapper, just bail */
+      return NS_ERROR_XPC_BAD_CONVERT_JS;
+    }
+
+    /* A null result here means the security wrapper denied access. */
+    obj = XPCWrapper::Unwrap(cx, obj, false);
+    if (!obj) {
+      return NS_ERROR_XPC_SECURITY_MANAGER_VETO;
+    }
+    MOZ_ASSERT(!js::IsWrapper(obj));
+    clasp = js::GetObjectJSClass(obj);
+    if (!IsDOMClass(clasp)) {
+      /* We don't have a DOM object */
+      return NS_ERROR_XPC_BAD_CONVERT_JS;
+    }
+  }
+
+  MOZ_ASSERT(IsDOMClass(clasp));
+
+  /* This object is a DOM object.  Double-check that it is safely
+     castable to T by checking whether it claims to inherit from the
+     class identified by protoID. */
+  DOMJSClass *domClass = DOMJSClass::FromJSClass(clasp);
+  if (domClass->mInterfaceChain[PrototypeTraits<PrototypeID>::Depth] ==
+      PrototypeID) {
+    *value = UnwrapDOMObject<T>(obj);
+    return NS_OK;
+  }
+
+  /* It's the wrong sort of DOM object */
+  return NS_ERROR_XPC_BAD_CONVERT_JS;
+}
+
+// Convenience overload: derive T's prototype ID at compile time via
+// PrototypeIDMap instead of requiring the caller to spell it out.
+template <class T>
+inline nsresult
+UnwrapObject(JSContext *cx, JSObject *obj, T **value)
+{
+  return UnwrapObject<static_cast<prototypes::ID>(
+           PrototypeIDMap<T>::PrototypeID)>(cx, obj, value);
+}
+
+// Return the per-global cache of interface prototype objects, indexed by
+// prototypes::ID, stored in the global's DOM_PROTOTYPE_SLOT.
+inline JSObject **
+GetProtoArray(JSObject *global)
+{
+  MOZ_ASSERT(js::GetObjectClass(global)->flags & JSCLASS_DOM_GLOBAL);
+  return static_cast<JSObject**>(
+    js::GetReservedSlot(global, DOM_PROTOTYPE_SLOT).toPrivate());
+}
+
+// Allocate a zeroed prototype cache for |obj| and stash it in
+// DOM_PROTOTYPE_SLOT.
+inline void
+AllocateProtoCache(JSObject *obj)
+{
+  MOZ_ASSERT(js::GetObjectClass(obj)->flags & JSCLASS_DOM_GLOBAL);
+  // Important: The () at the end ensure zero-initialization
+  JSObject** protoArray = new JSObject*[prototypes::id::_ID_Count]();
+  js::SetReservedSlot(obj, DOM_PROTOTYPE_SLOT, JS::PrivateValue(protoArray));
+}
+
+// Free the prototype cache.
+// NOTE(review): the reserved slot still holds the stale pointer afterwards
+// -- confirm this is only called while |obj| is being finalized.
+inline void
+DestroyProtoCache(JSObject *obj)
+{
+  JSObject **protoArray = GetProtoArray(obj);
+  delete [] protoArray;
+}
+
+/*
+ * Create a DOM prototype object.
+ *
+ * parentProto is the prototype our new object should use.
+ * protoClass is the JSClass our new object should use.
+ * constructorClass is the class to use for the corresponding interface object.
+ *                  This is null if we should not create an interface object.
+ * methods and properties are to be defined on the prototype; these arguments
+ *                        are allowed to be null if there are no methods or
+ *                        properties respectively.
+ * If constructorClass is non-null, The resulting constructor object will be
+ * defined on the given global with property name |name|, which must also be
+ * non-null.
+ *
+ * The return value is the newly-created prototype object.
+ */
+JSObject*
+CreateProtoObject(JSContext *cx, JSObject *parentProto,
+                  JSClass *protoClass,
+                  JSClass *constructorClass,
+                  JSFunctionSpec *methods,
+                  JSPropertySpec *properties,
+                  JSObject *global,
+                  const char* name);
+
+// Wrap |value| for use in |scope|: reuse its cached wrapper when it is
+// already in scope's compartment, otherwise ask the object to create a new
+// wrapper via WrapObject().  Returns false on failure (an exception may be
+// pending from WrapObject()).
+template <class T>
+inline bool
+WrapNewBindingObject(JSContext *cx, JSObject *scope, T *value, jsval *vp)
+{
+  JSObject *obj = value->GetWrapper();
+  if (obj && js::GetObjectCompartment(obj) == js::GetObjectCompartment(scope)) {
+    *vp = JS::ObjectValue(*obj);
+    return true;
+  }
+
+  // XXXbz figure out the actual security wrapper story here
+  bool triedToWrap;
+  obj = value->WrapObject(cx, scope, &triedToWrap);
+  if (obj) {
+    *vp = JS::ObjectValue(*obj);
+    return true;
+  }
+
+  // At this point, obj is null, so just return false.  We could try to
+  // communicate triedToWrap to the caller, but in practice callers seem to be
+  // testing JS_IsExceptionPending(cx) to figure out whether WrapObject() threw
+  // instead.
+  return false;
+}
+
+// Helper for smart pointers (nsAutoPtr/nsRefPtr/nsCOMPtr).
+template <template <class> class SmartPtr, class T>
+inline bool
+WrapNewBindingObject(JSContext *cx, JSObject *scope, const SmartPtr<T>& value,
+                     jsval *vp)
+{
+  return WrapNewBindingObject(cx, scope, value.get(), vp);
+}
+
+/**
+ * A method to handle new-binding wrap failure, by possibly falling back to
+ * wrapping as a non-new-binding object.
+ */
+template <class T>
+bool
+HandleNewBindingWrappingFailure(JSContext *cx, JSObject *scope,
+                                T *value, jsval *vp);
+
+// Helper for smart pointers (nsAutoPtr/nsRefPtr/nsCOMPtr): unwraps to the
+// raw pointer and forwards to the T* overload.
+template <template <class> class SmartPtr, class T>
+inline bool
+HandleNewBindingWrappingFailure(JSContext *cx, JSObject *scope,
+                                const SmartPtr<T>& value, jsval *vp)
+{
+  return HandleNewBindingWrappingFailure(cx, scope, value.get(), vp);
+}
+
+// Support for nullable types
+template <typename T>
+struct Nullable
+{
+private:
+  T mValue;
+  bool mIsNull;
+
+public:
+  Nullable() :
+    mIsNull(true)
+  {}
+
+  Nullable(T aValue) :
+    mValue(aValue), mIsNull(false)
+  {}
+
+  void SetValue(T aValue) {
+    mValue = aValue;
+    mIsNull = false;
+  }
+
+  void SetNull() {
+    mIsNull = true;
+  }
+
+  T Value() {
+    MOZ_ASSERT(!mIsNull);
+    return mValue;
+  }
+
+  bool IsNull() {
+    return mIsNull;
+  }
+};
+
+// Map |v| (converted to a JS string) to its index in the NULL-terminated
+// |values| array.  On success *ok is true and the index is returned; on
+// conversion failure or an unknown string, *ok is false and 0 is returned
+// (with an error reported in the unknown-string case).
+inline int
+FindEnumStringIndex(JSContext *cx, jsval v, const char** values, bool *ok)
+{
+  JSString* str = JS_ValueToString(cx, v);
+  if (!str) {
+    *ok = false;
+    return 0;
+  }
+  // Keep |str| alive across the JS_StringEqualsAscii calls below.
+  JS::Anchor<JSString*> anchor(str);
+  
+  for (int i = 0; values[i]; ++i) {
+    JSBool equal;
+    if (!JS_StringEqualsAscii(cx, str, values[i], &equal)) {
+      *ok = false;
+      return 0;
+    }
+
+    if (equal) {
+      *ok = true;
+      return i;
+    }
+  }
+
+  // Throw() returns false, so this also sets *ok = false.
+  *ok = Throw(cx, NS_ERROR_XPC_BAD_CONVERT_JS);
+  return 0;
+}
+
+} // namespace bindings
+} // namespace dom
+} // namespace mozilla
+
+#endif /* mozilla_dom_bindings_Utils_h__ */
new file mode 100644
--- /dev/null
+++ b/dom/bindings/parser/README
@@ -0,0 +1,1 @@
+A WebIDL parser written in Python to be used in Mozilla.
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/dom/bindings/parser/UPSTREAM
@@ -0,0 +1,1 @@
+http://dev.w3.org/cvsweb/~checkout~/2006/webapi/WebIDL/Overview.html?rev=1.409;content-type=text%2Fhtml%3b+charset=utf-8
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/dom/bindings/parser/WebIDL.py
@@ -0,0 +1,2506 @@
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is WebIDL Parser.
+#
+# The Initial Developer of the Original Code is
+# the Mozilla Foundation.
+# Portions created by the Initial Developer are Copyright (C) 2011
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Kyle Huey <me@kylehuey.com>
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+""" A WebIDL parser. """
+
+from ply import lex, yacc
+import re
+
+# Machinery
+
def parseInt(literal):
    """Evaluate a WebIDL integer literal: decimal, hex ('0x'/'0X'), or
    legacy octal (a leading '0'), with an optional leading minus sign."""
    text = literal
    negative = text[0] == '-'
    if negative:
        text = text[1:]

    if text[0] == '0' and len(text) > 1:
        if text[1] in ('x', 'X'):
            magnitude = int(text[2:], 16)
        else:
            magnitude = int(text[1:], 8)
    else:
        magnitude = int(text, 10)

    return -magnitude if negative else magnitude
+
+# Magic for creating enums
def M_add_class_attribs(attribs):
    """Return a type factory (usable as a metaclass) that installs each
    (value, name) pair from |attribs| as a class attribute."""
    def make_class(name, bases, dict_):
        for value, key in attribs:
            dict_[key] = value
        return type(name, bases, dict_)
    return make_class
+
+def enum(*names):
+    # Build a read-only enum-like object: each entry in |names| becomes a
+    # class attribute whose value is its index (Python 2 metaclass magic).
+    class Foo(object):
+        __metaclass__ = M_add_class_attribs(enumerate(names))
+        def __setattr__(self, name, value):  # this makes it read-only
+            raise NotImplementedError
+    return Foo()
+
class WebIDLError(Exception):
    """An error (or warning) raised while processing WebIDL, carrying the
    source location it refers to."""

    def __init__(self, message, location, warning=False):
        self.message = message
        self.location = location
        self.warning = warning

    def __str__(self):
        severity = 'warning' if self.warning else 'error'
        separator = ", " if self.location else ""
        return "%s: %s%s%s" % (severity, self.message, separator,
                               self.location)
+
class Location(object):
    """A position (file, line, column) in WebIDL source, with the source
    line and column resolved lazily from the lexer data."""
    _line = None

    def __init__(self, lexer, lineno, lexpos, filename):
        self._lineno = lineno
        self._lexpos = lexpos
        self._lexdata = lexer.lexdata
        self._file = filename if filename else "<unknown>"

    def __eq__(self, other):
        # Equal when they denote the same offset in the same file.
        return self._lexpos == other._lexpos and \
               self._file == other._file

    def resolve(self):
        # Compute the source line text and column number on first use.
        if self._line:
            return

        lineStart = self._lexdata.rfind('\n', 0, self._lexpos) + 1
        lineEnd = self._lexdata.find('\n', self._lexpos, self._lexpos + 80)
        self._line = self._lexdata[lineStart:lineEnd]
        self._colno = self._lexpos - lineStart

    def pointerline(self):
        # A caret under the offending column, e.g. "   ^".
        return " " * self._colno + "^"

    def get(self):
        self.resolve()
        return "%s line %s:%s" % (self._file, self._lineno, self._colno)

    def __str__(self):
        self.resolve()
        return "%s line %s:%s\n%s\n%s" % (self._file, self._lineno, self._colno,
                                          self._line, self.pointerline())
+
+class BuiltinLocation(object):
+    # Stand-in "location" for objects that have no WebIDL source position
+    # (e.g. builtin types); just echoes the descriptive text it was given.
+    def __init__(self, text):
+        self.msg = text
+
+    def get(self):
+        return self.msg
+
+    def __str__(self):
+        return self.get()
+
+
+# Data Model
+
+class IDLObject(object):
+    # Root of the IDL object model: everything parsed carries a location.
+    def __init__(self, location):
+        self.location = location
+
+    def filename(self):
+        # The file this object was parsed from.
+        return self.location._file
+
+    # Type predicates; subclasses override the ones that apply to them.
+    def isInterface(self):
+        return False
+
+    def isEnum(self):
+        return False
+
+    def isCallback(self):
+        return False
+
+    def isType(self):
+        return False
+
+    def addExtendedAttributes(self, attrs):
+        # Subclasses that accept extended attributes must override this.
+        assert False # Override me!
+
class IDLScope(IDLObject):
    """An IDL naming scope: maps identifier names to the objects they
    denote, and detects (or resolves) conflicting definitions."""

    def __init__(self, location, parentScope, identifier):
        IDLObject.__init__(self, location)

        self.parentScope = parentScope
        if identifier:
            assert isinstance(identifier, IDLIdentifier)
            self._name = identifier
        else:
            self._name = None

        self._dict = {}

    def __str__(self):
        return self.QName()

    def QName(self):
        # Fully-qualified name; the anonymous (global) scope is "::".
        if self._name:
            return self._name.QName() + "::"
        return "::"

    def ensureUnique(self, identifier, object):
        """Record |object| under |identifier| in this scope.  On a name
        collision, delegate to resolveIdentifierConflict, which may merge
        the definitions (e.g. method overloads) or throw."""
        assert isinstance(identifier, IDLUnresolvedIdentifier)
        assert not object or isinstance(object, IDLObjectWithIdentifier)
        assert not object or object.identifier == identifier

        if identifier.name in self._dict:
            if not object:
                return

            # ensureUnique twice with the same object is not allowed
            assert object != self._dict[identifier.name]

            replacement = self.resolveIdentifierConflict(self, identifier,
                                                         self._dict[identifier.name],
                                                         object)
            self._dict[identifier.name] = replacement
            return

        assert object

        self._dict[identifier.name] = object

    def resolveIdentifierConflict(self, scope, identifier, originalObject, newObject):
        # Default to throwing, derived classes can override.
        conflictdesc = "\n\t%s at %s\n\t%s at %s" % \
          (originalObject, originalObject.location, newObject, newObject.location)

        # Fixed: the scope name's closing quote was missing from the message.
        raise WebIDLError(
            "Multiple unresolvable definitions of identifier '%s' in scope '%s'%s"
            % (identifier.name, str(self), conflictdesc), "")

    def _lookupIdentifier(self, identifier):
        # Raises KeyError when the name is not bound in this scope.
        return self._dict[identifier.name]

    def lookupIdentifier(self, identifier):
        assert isinstance(identifier, IDLIdentifier)
        assert identifier.scope == self
        return self._lookupIdentifier(identifier)
+
class IDLIdentifier(IDLObject):
    """A resolved identifier: a name bound in a known scope."""

    def __init__(self, location, scope, name):
        IDLObject.__init__(self, location)

        self.name = name
        assert isinstance(scope, IDLScope)
        self.scope = scope

    def __str__(self):
        return self.QName()

    def QName(self):
        return self.scope.QName() + self.name

    def __hash__(self):
        return self.QName().__hash__()

    def __eq__(self, other):
        # Fixed: call QName() on both sides.  Previously the bound method
        # |self.QName| (not its result) was compared to a string, so this
        # always returned False.
        return self.QName() == other.QName()

    def object(self):
        return self.scope.lookupIdentifier(self)
+
+class IDLUnresolvedIdentifier(IDLObject):
+    # A name as written in the source, not yet bound to any scope.
+    def __init__(self, location, name, allowDoubleUnderscore = False):
+        IDLObject.__init__(self, location)
+
+        assert len(name) > 0
+
+        if name[:2] == "__" and not allowDoubleUnderscore:
+            raise WebIDLError("Identifiers beginning with __ are reserved",
+                              location)
+        # NOTE(review): a single leading '_' is stripped (presumably the
+        # WebIDL escape for reserved words), but only when
+        # allowDoubleUnderscore is false -- confirm that gating is intended.
+        if name[0] == '_' and not allowDoubleUnderscore:
+            name = name[1:]
+        if name in ["prototype", "constructor", "toString"]:
+            raise WebIDLError("Cannot use reserved identifier '%s'" % (name),
+                              location)
+
+        self.name = name
+
+    def __str__(self):
+        return self.QName()
+
+    def QName(self):
+        return "<unresolved scope>::" + self.name
+
+    def resolve(self, scope, object):
+        # Bind this name in |scope| (enforcing uniqueness) and return the
+        # resulting IDLIdentifier; also point |object| at it if provided.
+        assert isinstance(scope, IDLScope)
+        assert not object or isinstance(object, IDLObjectWithIdentifier)
+        assert not object or object.identifier == self
+
+        scope.ensureUnique(self, object)
+
+        identifier = IDLIdentifier(self.location, scope, self.name)
+        if object:
+            object.identifier = identifier
+        return identifier
+
+class IDLObjectWithIdentifier(IDLObject):
+    # A named IDL object; resolves its identifier immediately when a parent
+    # scope is supplied, otherwise leaves it unresolved for later.
+    def __init__(self, location, parentScope, identifier):
+        IDLObject.__init__(self, location)
+
+        assert isinstance(identifier, IDLUnresolvedIdentifier)
+
+        self.identifier = identifier
+
+        if parentScope:
+            self.resolve(parentScope)
+
+    def resolve(self, parentScope):
+        # Binds self.identifier in parentScope (replacing it with the
+        # resulting IDLIdentifier via IDLUnresolvedIdentifier.resolve).
+        assert isinstance(parentScope, IDLScope)
+        assert isinstance(self.identifier, IDLUnresolvedIdentifier)
+        self.identifier.resolve(parentScope, self)
+
+class IDLObjectWithScope(IDLObjectWithIdentifier, IDLScope):
+    # A named object that is itself a naming scope (e.g. an interface).
+    def __init__(self, location, parentScope, identifier):
+        assert isinstance(identifier, IDLUnresolvedIdentifier)
+
+        IDLObjectWithIdentifier.__init__(self, location, parentScope, identifier)
+        IDLScope.__init__(self, location, parentScope, self.identifier)
+
class IDLParentPlaceholder(IDLObjectWithIdentifier):
    """Forward reference to a parent interface; replaced by the real
    interface object when finish() is called."""

    def __init__(self, location, identifier):
        assert isinstance(identifier, IDLUnresolvedIdentifier)
        IDLObjectWithIdentifier.__init__(self, location, None, identifier)

    def finish(self, scope):
        """Resolve this placeholder in |scope|, raising WebIDLError when
        the named type does not exist there."""
        try:
            scope._lookupIdentifier(self.identifier)
        except KeyError:
            # Fixed: only a missing name is a user error; the previous bare
            # except also swallowed unrelated exceptions.
            raise WebIDLError("Unresolved type '%s'." % self.identifier, self.location)

        iface = self.identifier.resolve(scope, None)
        return scope.lookupIdentifier(iface)
+
+class IDLInterface(IDLObjectWithScope):
+    def __init__(self, location, parentScope, name, parent, members):
+        assert isinstance(parentScope, IDLScope)
+        assert isinstance(name, IDLUnresolvedIdentifier)
+        assert not parent or isinstance(parent, IDLParentPlaceholder)
+
+        # The parent stays a placeholder until finish() resolves it.
+        self.parent = parent
+        self._callback = False
+
+        self.members = list(members) # clone the list
+        assert iter(self.members) # Assert it's iterable
+
+        IDLObjectWithScope.__init__(self, location, parentScope, name)
+
+    def __str__(self):
+        # E.g. "Interface 'Node'".
+        return "Interface '%s'" % self.identifier.name
+
+    def resolveIdentifierConflict(self, scope, identifier, originalObject, newObject):
+        assert isinstance(scope, IDLScope)
+        assert isinstance(originalObject, IDLInterfaceMember)
+        assert isinstance(newObject, IDLInterfaceMember)
+
+        if originalObject.tag != IDLInterfaceMember.Tags.Method or \
+           newObject.tag != IDLInterfaceMember.Tags.Method:
+            # Call the base class method, which will throw
+            IDLScope.resolveIdentifierConflict(self, identifier, originalObject,
+                                               newObject)
+            assert False # Not reached
+
+        retval = originalObject.addOverload(newObject)
+        self.members.remove(newObject)
+        return retval
+
+    def finish(self, scope):
+        assert not self.parent or isinstance(self.parent, IDLParentPlaceholder)
+        parent = self.parent.finish(scope) if self.parent else None
+        assert not parent or isinstance(parent, IDLInterface)
+
+        self.parent = parent
+
+        assert iter(self.members)
+
+        if self.parent:
+            assert iter(self.parent.members)
+
+            members = list(self.parent.members)
+            members.extend(self.members)
+            self.members = members
+
+        for member in self.members:
+            member.finish(scope)
+
+        SpecialType = enum(
+            'NamedGetter',
+            'NamedSetter',
+            'NamedCreator',
+            'NamedDeleter',
+            'IndexedGetter',
+            'IndexedSetter',
+            'IndexedCreator',
+            'IndexedDeleter'
+        )
+
+        specialMembersSeen = [False for i in range(8)]
+
+        members = list(self.members)
+        for member in members:
+            # Resolve members in our scope (that were not previously resolved!)
+            if not self.parent or not member in self.parent.members:
+                member.resolve(self)
+
+            if member.tag == IDLInterfaceMember.Tags.Method:
+                if member.isGetter():
+                    if member.isNamed():
+                        if specialMembersSeen[SpecialType.NamedGetter]:
+                            raise WebIDLError("Multiple named getters on %s" % (self),
+                                              self.location)
+                        specialMembersSeen[SpecialType.NamedGetter] = True
+                    else:
+                        assert member.isIndexed()
+                        if specialMembersSeen[SpecialType.IndexedGetter]:
+                            raise WebIDLError("Multiple indexed getters on %s" % (self),
+                                              self.location)
+                        specialMembersSeen[SpecialType.IndexedGetter] = True
+                if member.isSetter():
+                    if member.isNamed():
+                        if specialMembersSeen[SpecialType.NamedSetter]:
+                            raise WebIDLError("Multiple named setters on %s" % (self),
+                                              self.location)
+                        specialMembersSeen[SpecialType.NamedSetter] = True
+                    else:
+                        assert member.isIndexed()
+                        if specialMembersSeen[SpecialType.IndexedSetter]:
+                            raise WebIDLError("Multiple indexed setters on %s" % (self),
+                                              self.location)
+                        specialMembersSeen[SpecialType.IndexedSetter] = True
+                if member.isCreator():
+                    if member.isNamed():
+                        if specialMembersSeen[SpecialType.NamedCreator]:
+                            raise WebIDLError("Multiple named creators on %s" % (self),
+                                              self.location)
+                        specialMembersSeen[SpecialType.NamedCreator] = True
+                    else:
+                        assert member.isIndexed()
+                        if specialMembersSeen[SpecialType.IndexedCreator]:
+                            raise WebIDLError("Multiple indexed creators on %s" % (self),
+                                              self.location)
+                        specialMembersSeen[SpecialType.IndexedCreator] = True
+                if member.isDeleter():
+                    if member.isNamed():
+                        if specialMembersSeen[SpecialType.NamedDeleter]:
+                            raise WebIDLError("Multiple named deleters on %s" % (self),
+                                              self.location)
+                        specialMembersSeen[SpecialType.NamedDeleter] = True
+                    else:
+                        assert member.isIndexed()
+                        if specialMembersSeen[SpecialType.IndexedDeleter]:
+                            raise WebIDLError("Multiple indexed Deleters on %s" % (self),
+                                              self.location)
+                        specialMembersSeen[SpecialType.IndexedDeleter] = True
+
+    def isInterface(self):
+        # This object is an interface declaration (overrides the base's False).
+        return True
+
+    def setCallback(self, value):
+        # Record whether this interface was declared with the 'callback' keyword.
+        self._callback = value
+
+    def isCallback(self):
+        # True when setCallback(True) was called on this interface.
+        return self._callback
+
+    def addExtendedAttributes(self, attrs):
+        # Extended attributes on interfaces are currently ignored (not rejected).
+        pass
+
+    def inheritanceDepth(self):
+        # Number of ancestors on the inheritance chain (0 for a root interface).
+        depth = 0
+        parent = self.parent
+        while parent:
+            depth = depth + 1
+            parent = parent.parent
+        return depth
+
+    def hasInterfaceObject(self):
+        # Every interface currently gets an interface object on the global;
+        # [NoInterfaceObject] is not yet honored here.
+        return True
+
+class IDLEnum(IDLObjectWithIdentifier):
+    # A WebIDL 'enum' definition: a named, ordered set of string values.
+    def __init__(self, location, parentScope, name, values):
+        assert isinstance(parentScope, IDLScope)
+        assert isinstance(name, IDLUnresolvedIdentifier)
+
+        # WebIDL forbids repeated enumeration values.
+        if len(values) != len(set(values)):
+            raise WebIDLError("Enum %s has multiple identical strings" % name.name, location)
+
+        IDLObjectWithIdentifier.__init__(self, location, parentScope, name)
+        self._values = values
+
+    def values(self):
+        # The enum's string values, in declaration order.
+        return self._values
+
+    def finish(self, scope):
+        # Enums reference no other definitions; nothing to resolve.
+        pass
+
+    def isEnum(self):
+        return True
+
+    def addExtendedAttributes(self, attrs):
+        # No extended attributes are supported on enums.
+        assert len(attrs) == 0
+
+class IDLType(IDLObject):
+    # Base class for all WebIDL types.  The is*() predicates default to
+    # "not that kind of type"; subclasses override the ones that apply.
+    Tags = enum(
+        # The integer types
+        'int8',
+        'uint8',
+        'int16',
+        'uint16',
+        'int32',
+        'uint32',
+        'int64',
+        'uint64',
+        # Additional primitive types
+        'bool',
+        'float',
+        'double',
+        # Other types
+        'any',
+        'domstring',
+        'object',
+        'date',
+        'void',
+        # Funny stuff
+        'interface',
+        'dictionary',
+        'enum'
+        )
+
+    def __init__(self, location, name):
+        IDLObject.__init__(self, location)
+        self.name = name
+        # True only for instances registered in BuiltinTypes.
+        self.builtin = False
+
+    def __eq__(self, other):
+        # NOTE(review): __ne__/__hash__ are not defined, so '!=' falls back
+        # to identity under Python 2 — confirm callers only use '=='.
+        return other and self.name == other.name and self.builtin == other.builtin
+
+    def __str__(self):
+        return str(self.name)
+
+    def isType(self):
+        return True
+
+    def nullable(self):
+        return False
+
+    def isPrimitive(self):
+        return False
+
+    def isString(self):
+        return False
+
+    def isVoid(self):
+        # Matches by name; the builtin Void type is constructed with name "Void".
+        return self.name == "Void"
+
+    def isSequence(self):
+        return False
+
+    def isArray(self):
+        return False
+
+    def isArrayBuffer(self):
+        return False
+
+    def isDictionary(self):
+        return False
+
+    def isInterface(self):
+        return False
+
+    def isComplete(self):
+        # A complete type needs no identifier resolution; IDLUnresolvedType
+        # overrides this to return False until complete() is called.
+        return True
+
+    def tag(self):
+        assert False # Override me!
+
+    def addExtendedAttributes(self, attrs):
+        assert len(attrs) == 0
+
+class IDLUnresolvedType(IDLType):
+    """
+        A placeholder for a type named by a not-yet-resolved identifier.
+        complete() swaps it for the real type, or for an IDLWrapperType
+        around the interface/enum the identifier names.
+    """
+
+    def __init__(self, location, name):
+        IDLType.__init__(self, location, name)
+
+    def isComplete(self):
+        return False
+
+    def complete(self, scope):
+        obj = None
+        try:
+            obj = scope._lookupIdentifier(self.name)
+        except:
+            # NOTE(review): bare except — any lookup failure (even unrelated
+            # errors) is reported as an unresolved type.
+            raise WebIDLError("Unresolved type '%s'." % self.name, self.location)
+
+        assert obj
+        if obj.isType():
+            return obj
+
+        # Called for its side effect of resolving the identifier in 'scope';
+        # the returned name itself is unused.
+        name = self.name.resolve(scope, None)
+        return IDLWrapperType(self.location, obj)
+
+class IDLNullableType(IDLType):
+    # 'T?' — a nullable wrapper that forwards most predicates to the inner
+    # type.  Nesting ('T??'), void, and 'any?' are ruled out by assertions.
+    def __init__(self, location, innerType):
+        assert not innerType.isVoid()
+        assert not innerType.nullable()
+        assert not innerType == BuiltinTypes[IDLBuiltinType.Types.any]
+
+        IDLType.__init__(self, location, innerType.name)
+        self.inner = innerType
+        self.builtin = False
+
+    def __eq__(self, other):
+        return isinstance(other, IDLNullableType) and self.inner == other.inner
+
+    def __str__(self):
+        return self.inner.__str__() + "OrNull"
+
+    def nullable(self):
+        return True
+
+    def isPrimitive(self):
+        return self.inner.isPrimitive()
+
+    def isString(self):
+        return self.inner.isString()
+
+    def isVoid(self):
+        # Void can never be nullable (asserted in __init__).
+        return False
+
+    def isSequence(self):
+        return self.inner.isSequence()
+
+    def isArray(self):
+        return self.inner.isArray()
+
+    def isDictionary(self):
+        return self.inner.isDictionary()
+
+    def isInterface(self):
+        return self.inner.isInterface()
+
+    def isEnum(self):
+        return self.inner.isEnum()
+
+    def tag(self):
+        # Nullability does not change the underlying tag.
+        return self.inner.tag()
+
+class IDLSequenceType(IDLType):
+    def __init__(self, location, parameterType):
+        assert not parameterType.isVoid()
+
+        IDLType.__init__(self, location, parameterType.name)
+        self.inner = parameterType
+        self.builtin = False
+
+    def __eq__(self, other):
+        return isinstance(other, IDLSequenceType) and self.inner == other.inner
+
+    def __str__(self):
+        return self.inner.__str__() + "Sequence"
+
+    def nullable(self):
+        return False
+
+    def isPrimitive(self):
+        return self.inner.isPrimitive()
+
+    def isString(self):
+        return self.inner.isString()
+
+    def isVoid(self):
+        return False
+
+    def isSequence(self):
+        return True
+
+    def isArray(self):
+        return self.inner.isArray()
+
+    def isDictionary(self):
+        return self.inner.isDictionary()
+
+    def isInterface(self):
+        return self.inner.isInterface()
+
+    def isEnum(self):
+        return self.inner.isEnum();
+
+    def tag(self):
+        return self.inner.tag()
+
+class IDLArrayType(IDLType):
+    # 'T[]' — an array type.  Sequences and dictionaries may not be array
+    # element types, which is why several forwarding predicates below can
+    # assert the inner type is not of that kind.
+    def __init__(self, location, parameterType):
+        assert not parameterType.isVoid()
+        if parameterType.isSequence():
+            raise WebIDLError("Array type cannot parameterize over a sequence type",
+                              location)
+        if parameterType.isDictionary():
+            raise WebIDLError("Array type cannot parameterize over a dictionary type",
+                              location)
+
+        IDLType.__init__(self, location, parameterType.name)
+        self.inner = parameterType
+        self.builtin = False
+
+    def __eq__(self, other):
+        return isinstance(other, IDLArrayType) and self.inner == other.inner
+
+    def __str__(self):
+        return self.inner.__str__() + "Array"
+
+    def nullable(self):
+        return False
+
+    def isPrimitive(self):
+        return self.inner.isPrimitive()
+
+    def isString(self):
+        return self.inner.isString()
+
+    def isVoid(self):
+        return False
+
+    def isSequence(self):
+        # Always False: __init__ rejects sequence element types.
+        assert not self.inner.isSequence()
+        return self.inner.isSequence()
+
+    def isArray(self):
+        return True
+
+    def isDictionary(self):
+        # Always False: __init__ rejects dictionary element types.
+        assert not self.inner.isDictionary()
+        return self.inner.isDictionary()
+
+    def isInterface(self):
+        return self.inner.isInterface()
+
+    def isEnum(self):
+        return self.inner.isEnum()
+
+    def tag(self):
+        return self.inner.tag()
+
+class IDLTypedefType(IDLType, IDLObjectWithIdentifier):
+    # A 'typedef': both a type (forwarding to the aliased inner type) and a
+    # named object that must be resolved in its parent scope.
+    def __init__(self, location, innerType, name):
+        IDLType.__init__(self, location, innerType.name)
+
+        identifier = IDLUnresolvedIdentifier(location, name)
+
+        IDLObjectWithIdentifier.__init__(self, location, None, identifier)
+
+        self.inner = innerType
+        # Overwrites the inner type's name set by IDLType.__init__ with the
+        # typedef's own name.
+        self.name = name
+        self.builtin = False
+
+    def __eq__(self, other):
+        return isinstance(other, IDLTypedefType) and self.inner == other.inner
+
+    def __str__(self):
+        return self.identifier.name
+
+    def nullable(self):
+        return self.inner.nullable()
+
+    def isPrimitive(self):
+        return self.inner.isPrimitive()
+
+    def isString(self):
+        return self.inner.isString()
+
+    def isVoid(self):
+        return self.inner.isVoid()
+
+    def isSequence(self):
+        return self.inner.isSequence()
+
+    def isArray(self):
+        return self.inner.isArray()
+
+    def isDictionary(self):
+        return self.inner.isDictionary()
+
+    def isInterface(self):
+        return self.inner.isInterface()
+
+    def resolve(self, parentScope):
+        # Register the typedef's identifier in its parent scope.
+        assert isinstance(parentScope, IDLScope)
+        IDLObjectWithIdentifier.resolve(self, parentScope)
+
+    def tag(self):
+        return self.inner.tag()
+
+class IDLWrapperType(IDLType):
+    def __init__(self, location, inner):
+        IDLType.__init__(self, location, inner.identifier.name)
+        self.inner = inner
+        self.name = inner.identifier
+        self.builtin = False
+
+    def __eq__(self, other):
+        return other and self.name == other.name and self.builtin == other.builtin
+
+    def __str__(self):
+        return str(self.name.name)
+
+    def nullable(self):
+        return False
+
+    def isPrimitive(self):
+        return False
+
+    def isString(self):
+        return False
+
+    def isVoid(self):
+        return False
+
+    def isSequence(self):
+        return False
+
+    def isArray(self):
+        return False
+
+    def isDictionary(self):
+        return False
+
+    def isInterface(self):
+        return isinstance(self.inner, IDLInterface)
+
+    def isEnum(self):
+        return isinstance(self.inner, IDLEnum)
+
+    def isComplete(self):
+        return True
+
+    def tag(self):
+        if isInterface():
+            return IDLType.Tags.interface
+        elif isEnum():
+            return IDLType.Tags.enum
+        else:
+            assert False
+
+class IDLBuiltinType(IDLType):
+    # A primitive/builtin WebIDL type.  Singletons for each Types value live
+    # in the module-level BuiltinTypes dict.
+
+    Types = enum(
+        # The integer types
+        'byte',
+        'octet',
+        'short',
+        'unsigned_short',
+        'long',
+        'unsigned_long',
+        'long_long',
+        'unsigned_long_long',
+        # Additional primitive types
+        'boolean',
+        'float',
+        'double',
+        # Other types
+        'any',
+        'domstring',
+        'object',
+        'date',
+        'void',
+        # Funny stuff
+        'ArrayBuffer'
+        )
+
+    # Maps each builtin to its IDLType.Tags value used by code generators.
+    TagLookup = {
+            Types.byte: IDLType.Tags.int8,
+            Types.octet: IDLType.Tags.uint8,
+            Types.short: IDLType.Tags.int16,
+            Types.unsigned_short: IDLType.Tags.uint16,
+            Types.long: IDLType.Tags.int32,
+            Types.unsigned_long: IDLType.Tags.uint32,
+            Types.long_long: IDLType.Tags.int64,
+            Types.unsigned_long_long: IDLType.Tags.uint64,
+            Types.boolean: IDLType.Tags.bool,
+            Types.float: IDLType.Tags.float,
+            Types.double: IDLType.Tags.double,
+            Types.any: IDLType.Tags.any,
+            Types.domstring: IDLType.Tags.domstring,
+            Types.object: IDLType.Tags.object,
+            Types.date: IDLType.Tags.date,
+            Types.void: IDLType.Tags.void,
+            Types.ArrayBuffer: IDLType.Tags.interface
+        }
+
+    def __init__(self, location, name, type):
+        IDLType.__init__(self, location, name)
+        self.builtin = True
+        self.type = type
+
+    def isPrimitive(self):
+        # Relies on the Types enum ordering: everything up to and including
+        # 'double' is primitive.
+        return self.type <= IDLBuiltinType.Types.double
+
+    def isString(self):
+        return self.type == IDLBuiltinType.Types.domstring
+
+    def isInteger(self):
+        # Relies on enum ordering: the integer types are declared first.
+        return self.type <= IDLBuiltinType.Types.unsigned_long_long
+
+    def isArrayBuffer(self):
+        return self.type == IDLBuiltinType.Types.ArrayBuffer
+
+    def isInterface(self):
+        # ArrayBuffers are interface types per the TypedArray spec,
+        # but we handle them as builtins because SpiderMonkey implements
+        # ArrayBuffers.
+        return self.type == IDLBuiltinType.Types.ArrayBuffer
+
+    def isFloat(self):
+        return self.type == IDLBuiltinType.Types.float or \
+               self.type == IDLBuiltinType.Types.double
+
+    def isAny(self):
+        return self.type == IDLBuiltinType.Types.any
+
+    def tag(self):
+        return IDLBuiltinType.TagLookup[self.type]
+
+# Singleton IDLBuiltinType instance for each IDLBuiltinType.Types value.
+# All code compares against these instances rather than constructing new ones.
+BuiltinTypes = {
+      IDLBuiltinType.Types.byte:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Byte",
+                         IDLBuiltinType.Types.byte),
+      IDLBuiltinType.Types.octet:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Octet",
+                         IDLBuiltinType.Types.octet),
+      IDLBuiltinType.Types.short:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Short",
+                         IDLBuiltinType.Types.short),
+      IDLBuiltinType.Types.unsigned_short:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedShort",
+                         IDLBuiltinType.Types.unsigned_short),
+      IDLBuiltinType.Types.long:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Long",
+                         IDLBuiltinType.Types.long),
+      IDLBuiltinType.Types.unsigned_long:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedLong",
+                         IDLBuiltinType.Types.unsigned_long),
+      IDLBuiltinType.Types.long_long:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "LongLong",
+                         IDLBuiltinType.Types.long_long),
+      IDLBuiltinType.Types.unsigned_long_long:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedLongLong",
+                         IDLBuiltinType.Types.unsigned_long_long),
+      IDLBuiltinType.Types.boolean:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Boolean",
+                         IDLBuiltinType.Types.boolean),
+      IDLBuiltinType.Types.float:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float",
+                         IDLBuiltinType.Types.float),
+      IDLBuiltinType.Types.double:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Double",
+                         IDLBuiltinType.Types.double),
+      IDLBuiltinType.Types.any:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Any",
+                         IDLBuiltinType.Types.any),
+      IDLBuiltinType.Types.domstring:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "String",
+                         IDLBuiltinType.Types.domstring),
+      IDLBuiltinType.Types.object:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Object",
+                         IDLBuiltinType.Types.object),
+      IDLBuiltinType.Types.date:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Date",
+                         IDLBuiltinType.Types.date),
+      IDLBuiltinType.Types.void:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Void",
+                         IDLBuiltinType.Types.void),
+      IDLBuiltinType.Types.ArrayBuffer:
+          IDLBuiltinType(BuiltinLocation("<builtin type>"), "ArrayBuffer",
+                         IDLBuiltinType.Types.ArrayBuffer)
+    }
+
+
+# Inclusive (min, max) value range for each WebIDL integer type.
+integerTypeSizes = {
+        IDLBuiltinType.Types.byte: (-128, 127),
+        IDLBuiltinType.Types.octet:  (0, 255),
+        IDLBuiltinType.Types.short: (-32768, 32767),
+        IDLBuiltinType.Types.unsigned_short: (0, 65535),
+        IDLBuiltinType.Types.long: (-2147483648, 2147483647),
+        IDLBuiltinType.Types.unsigned_long: (0, 4294967295),
+        IDLBuiltinType.Types.long_long: (-9223372036854775808,
+                                         9223372036854775807),
+        IDLBuiltinType.Types.unsigned_long_long: (0, 18446744073709551615)
+    }
+
+def matchIntegerValueToType(value):
+    # Return a builtin integer type whose range contains 'value', or None.
+    # NOTE(review): iterates the dict in arbitrary order, so this returns
+    # *some* fitting type, not necessarily the smallest one.
+    for type, extremes in integerTypeSizes.items():
+        (min, max) = extremes
+        if value <= max and value >= min:
+            return BuiltinTypes[type]
+
+    return None
+
+def checkDistinguishability(argset1, argset2):
+    # TODO: stub — overload distinguishability (WebIDL "distinguishable
+    # types") is not implemented yet; only the argument types are checked.
+    assert isinstance(argset1, list) and isinstance(argset2, list)
+
+class IDLValue(IDLObject):
+    def __init__(self, location, type, value):
+        IDLObject.__init__(self, location)
+        self.type = type
+        assert isinstance(type, IDLType)
+
+        self.value = value
+
+    def coerceToType(self, type, location):
+        if type == self.type:
+            return self # Nothing to do
+
+        # If the type allows null, rerun this matching on the inner type
+        if type.nullable():
+            innerValue = self.coerceToType(type.inner, location)
+            return IDLValue(self.location, type, innerValue.value)
+
+        # Else, see if we can coerce to 'type'.
+        if self.type.isInteger():
+            if not self.type.isInteger():
+                raise WebIDLError("Cannot coerce type %s to type %s." %
+                                  (self.type, type), location)
+
+            # We're both integer types.  See if we fit.
+
+            (min, max) = integerTypeSizes[type.type]
+            if self.value <= max and self.value >= min:
+                # Promote
+                return IDLValue(self.location, type, self.value)
+            else:
+                raise WebIDLError("Value %s is out of range for type %s." %
+                                  (self.value, type), location)
+        else:
+            pass
+
+        assert False # Not implemented!
+
+class IDLNullValue(IDLObject):
+    # The literal 'null'; only coercible to nullable types.
+    def __init__(self, location):
+        IDLObject.__init__(self, location)
+        self.type = None
+        self.value = None
+
+    def coerceToType(self, type, location):
+        if not isinstance(type, IDLNullableType):
+            raise WebIDLError("Cannot coerce null value to type %s." % type,
+                              location)
+
+        # Produce a fresh null value carrying the nullable target type.
+        nullValue = IDLNullValue(self.location)
+        nullValue.type = type
+        return nullValue
+        
+
+class IDLInterfaceMember(IDLObjectWithIdentifier):
+    # Base class for things that can appear inside an interface body:
+    # constants, attributes, and methods, discriminated by 'tag'.
+
+    Tags = enum(
+        'Const',
+        'Attr',
+        'Method'
+    )
+
+    def __init__(self, location, identifier, tag):
+        IDLObjectWithIdentifier.__init__(self, location, None, identifier)
+        self.tag = tag
+
+    def isMethod(self):
+        return self.tag == IDLInterfaceMember.Tags.Method
+
+    def isAttr(self):
+        return self.tag == IDLInterfaceMember.Tags.Attr
+
+    def isConst(self):
+        return self.tag == IDLInterfaceMember.Tags.Const
+
+class IDLConst(IDLInterfaceMember):
+    # A 'const' interface member; its value is coerced to the declared type
+    # at construction time (raising WebIDLError on mismatch).
+    def __init__(self, location, identifier, type, value):
+        IDLInterfaceMember.__init__(self, location, identifier,
+                                    IDLInterfaceMember.Tags.Const)
+
+        assert isinstance(type, IDLType)
+        self.type = type
+
+        # The value might not match the type
+        coercedValue = value.coerceToType(self.type, location)
+        assert coercedValue
+
+        self.value = coercedValue
+
+    def __str__(self):
+        return "'%s' const '%s'" % (self.type, self.identifier)
+
+    def finish(self, scope):
+        # Const types must already be complete (builtin) by parse time.
+        assert self.type.isComplete()
+
+class IDLAttribute(IDLInterfaceMember):
+    # An 'attribute' interface member, optionally 'readonly' or 'inherit'
+    # (the two are mutually exclusive).
+    def __init__(self, location, identifier, type, readonly, inherit):
+        IDLInterfaceMember.__init__(self, location, identifier,
+                                    IDLInterfaceMember.Tags.Attr)
+
+        assert isinstance(type, IDLType)
+        self.type = type
+        self.readonly = readonly
+        self.inherit = inherit
+
+        if readonly and inherit:
+            raise WebIDLError("An attribute cannot be both 'readonly' and 'inherit'",
+                              self.location)
+
+    def __str__(self):
+        return "'%s' attribute '%s'" % (self.type, self.identifier)
+
+    def finish(self, scope):
+        # Replace an unresolved attribute type with the real one.
+        if not self.type.isComplete():
+            type = self.type.complete(scope)
+
+            assert not isinstance(type, IDLUnresolvedType)
+            assert not isinstance(type.name, IDLUnresolvedIdentifier)
+            self.type = type
+
+class IDLArgument(IDLObjectWithIdentifier):
+    # A single operation argument; variadic arguments must also be optional.
+    def __init__(self, location, identifier, type, optional=False, variadic=False):
+        IDLObjectWithIdentifier.__init__(self, location, None, identifier)
+
+        assert isinstance(type, IDLType)
+        self.type = type
+
+        self.optional = optional
+        self.variadic = variadic
+
+        assert not variadic or optional
+
+    def addExtendedAttributes(self, attrs):
+        # No extended attributes are supported on arguments.
+        assert len(attrs) == 0
+
+class IDLCallbackType(IDLType, IDLObjectWithScope):
+    def __init__(self, location, parentScope, identifier, returnType, arguments):
+        assert isinstance(returnType, IDLType)
+
+        IDLType.__init__(self, location, identifier.name)
+
+        self._returnType = returnType
+        # Clone the list
+        self._arguments = list(arguments)
+
+        IDLObjectWithScope.__init__(self, location, parentScope, identifier)
+
+        for (returnType, arguments) in self.signatures():
+            for argument in arguments:
+                argument.resolve(self)
+
+    def isCallback(self):
+        return True
+
+    def signatures(self):
+        return [(self._returnType, self._arguments)]
+
+    def finish(self, scope):
+        if not self._returnType.isComplete():
+            type = returnType.complete(scope)
+
+            assert not isinstance(type, IDLUnresolvedType)
+            assert not isinstance(type.name, IDLUnresolvedIdentifier)
+            self._returnType = type
+
+        for argument in self._arguments:
+            if argument.type.isComplete():
+                continue
+
+            type = argument.type.complete(scope)
+
+            assert not isinstance(type, IDLUnresolvedType)
+            assert not isinstance(type.name, IDLUnresolvedIdentifier)
+            argument.type = type
+
+class IDLMethod(IDLInterfaceMember, IDLScope):
+
+    Special = enum(
+        'None',
+        'Getter',
+        'Setter',
+        'Creator',
+        'Deleter',
+        'LegacyCaller',
+        'Stringifier',
+        'Static'
+    )
+
+    TypeSuffixModifier = enum(
+        'None',
+        'QMark',
+        'Brackets'
+    )
+
+    NamedOrIndexed = enum(
+        'Neither',
+        'Named',
+        'Indexed'
+    )
+
+    def __init__(self, location, identifier, returnType, arguments,
+                 static, getter, setter, creator, deleter, specialType, legacycaller,
+                 stringifier):
+        IDLInterfaceMember.__init__(self, location, identifier,
+                                    IDLInterfaceMember.Tags.Method)
+
+        self._hasOverloads = False
+
+        assert isinstance(returnType, IDLType)
+        self._returnType = [returnType]
+
+        assert isinstance(static, bool)
+        self._static = static
+        assert isinstance(getter, bool)
+        self._getter = getter
+        assert isinstance(setter, bool)
+        self._setter = setter
+        assert isinstance(creator, bool)
+        self._creator = creator
+        assert isinstance(deleter, bool)
+        self._deleter = deleter
+        assert isinstance(legacycaller, bool)
+        self._legacycaller = legacycaller
+        assert isinstance(stringifier, bool)
+        self._stringifier = stringifier
+        self._specialType = specialType
+
+        # Clone the list
+        self._arguments = [list(arguments)]
+
+        self.assertSignatureConstraints()
+
+    def __str__(self):
+        return "Method '%s'" % self.identifier
+
+    def assertSignatureConstraints(self):
+        if self._getter or self._deleter:
+            assert len(self._arguments) == 1
+            assert self._arguments[0][0].type == BuiltinTypes[IDLBuiltinType.Types.domstring] or \
+                   self._arguments[0][0].type == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]
+            assert not self._arguments[0][0].optional and not self._arguments[0][0].variadic
+            assert not self._returnType[0].isVoid()
+
+        if self._setter or self._creator:
+            assert len(self._arguments[0]) == 2
+            assert self._arguments[0][0].type == BuiltinTypes[IDLBuiltinType.Types.domstring] or \
+                   self._arguments[0][0].type == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]
+            assert not self._arguments[0][0].optional and not self._arguments[0][0].variadic
+            assert not self._arguments[0][1].optional and not self._arguments[0][1].variadic
+            assert self._arguments[0][1].type == self._returnType[0]
+
+        if self._stringifier:
+            assert len(self._arguments[0]) == 0
+            assert self._returnType[0] == BuiltinTypes[IDLBuiltinType.Types.domstring]
+
+        inOptionalArguments = False
+        sawVariadicArgument = False
+
+        assert len(self._arguments) == 1
+        arguments = self._arguments[0]
+
+        for argument in arguments:
+            # Only the last argument can be variadic
+            assert not sawVariadicArgument
+            # Once we see an optional argument, there can't be any non-optional
+            # arguments.
+            if inOptionalArguments:
+                assert argument.optional
+            inOptionalArguments = argument.optional
+            sawVariadicArgument = argument.variadic
+
+    def isStatic(self):
+        return self._static
+
+    def isGetter(self):
+        return self._getter
+
+    def isSetter(self):
+        return self._setter
+
+    def isCreator(self):
+        return self._creator
+
+    def isDeleter(self):
+        return self._deleter
+
+    def isNamed(self):
+        assert self._specialType == IDLMethod.NamedOrIndexed.Named or \
+               self._specialType == IDLMethod.NamedOrIndexed.Indexed
+        return self._specialType == IDLMethod.NamedOrIndexed.Named
+
+    def isIndexed(self):
+        assert self._specialType == IDLMethod.NamedOrIndexed.Named or \
+               self._specialType == IDLMethod.NamedOrIndexed.Indexed
+        return self._specialType == IDLMethod.NamedOrIndexed.Indexed
+
+    def isLegacycaller(self):
+        return self._legacycaller
+
+    def isStringifier(self):
+        return self._stringifier
+
+    def hasOverloads(self):
+        return self._hasOverloads
+
+    def resolve(self, parentScope):
+        assert isinstance(parentScope, IDLScope)
+        IDLObjectWithIdentifier.resolve(self, parentScope)
+        IDLScope.__init__(self, self.location, parentScope, self.identifier)
+        for (returnType, arguments) in self.signatures():
+            for argument in arguments:
+                argument.resolve(self)
+
+    def addOverload(self, method):
+        checkDistinguishability(self._arguments, method._arguments)
+
+        assert len(method._returnType) == 1
+        assert len(method._arguments) == 1
+
+        self._returnType.extend(method._returnType)
+        self._arguments.extend(method._arguments)
+
+        self._hasOverloads = True
+
+        if self.isStatic() != method.isStatic():
+            raise WebIDLError("Overloaded identifier %s appears with different values of the 'static' attribute" % method1.identifier,
+                              method.location)
+
+        if self.isLegacycaller() != method.isLegacycaller():
+            raise WebIDLError("Overloaded identifier %s appears with different values of the 'legacycaller' attribute" % method1.identifier,
+                              method.location)
+
+        # Can't overload special things!
+        assert not self.isGetter()
+        assert not method.isGetter()
+        assert not self.isSetter()
+        assert not method.isSetter()
+        assert not self.isCreator()
+        assert not method.isCreator()
+        assert not self.isDeleter()
+        assert not method.isDeleter()
+        assert not self.isStringifier()
+        assert not method.isStringifier()
+
+        return self
+
+    def signatures(self):
+        assert len(self._returnType) == len(self._arguments)
+        return zip(self._returnType, self._arguments)
+
+    def finish(self, scope):
+        for index, returnType in enumerate(self._returnType):
+            if returnType.isComplete():
+                continue
+
+            type = returnType.complete(scope)
+
+            assert not isinstance(type, IDLUnresolvedType)
+            assert not isinstance(type.name, IDLUnresolvedIdentifier)
+            self._returnType[index] = type
+
+        for arguments in self._arguments:
+            for argument in arguments:
+                if argument.type.isComplete():
+                    continue
+
+                type = argument.type.complete(scope)
+
+                assert not isinstance(type, IDLUnresolvedType)
+                assert not isinstance(type.name, IDLUnresolvedIdentifier)
+                argument.type = type
+
+# Parser
+
+class Tokenizer(object):
+    # PLY lexer definition for WebIDL.  Keyword token names are appended to
+    # this list from the 'keywords' table below.
+    tokens = [
+        "INTEGER",
+        "FLOAT",
+        "IDENTIFIER",
+        "STRING",
+        "WHITESPACE",
+        "BRACESWITHSTUFF",
+        "OTHER"
+        ]
+
+    def t_INTEGER(self, t):
+        r'-?(0([0-7]+|[Xx][0-9A-Fa-f]+)?|[1-9][0-9]*)'
+        # Decimal, octal (leading 0) or hex (0x) integer literal; parseInt
+        # (defined elsewhere in this file) handles the radix.
+        try:
+            t.value = parseInt(t.value)
+        except:
+            raise WebIDLError("Invalid integer literal",
+                              Location(lexer=self.lexer,
+                                       lineno=self.lexer.lineno,
+                                       lexpos=self.lexer.lexpos,
+                                       filename=self._filename))
+        return t
+
+    def t_FLOAT(self, t):
+        r'-?(([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)([Ee][+-]?[0-9]+)?|[0-9]+[Ee][+-]?[0-9]+)'
+        # Float literals are recognized but deliberately unimplemented at
+        # this revision; hitting one aborts with an assertion.
+        assert False
+        return t
+
+    def t_IDENTIFIER(self, t):
+        r'[A-Z_a-z][0-9A-Z_a-z]*'
+        # Reclassify reserved words via the keywords table; everything else
+        # stays an IDENTIFIER.
+        t.type = self.keywords.get(t.value, 'IDENTIFIER')
+        return t
+
+    def t_STRING(self, t):
+        r'"[^"]*"'
+        # Strip the surrounding quotes; no escape sequences are processed.
+        t.value = t.value[1:-1]
+        return t
+
+    def t_WHITESPACE(self, t):
+        r'[\t\n\r ]+|[\t\n\r ]*((//[^\n]*|/\*.*?\*/)[\t\n\r ]*)+'
+        # Whitespace and //-style or /* */-style comments are discarded
+        # (returning nothing drops the token in PLY).
+        pass
+
+    def t_ELLIPSIS(self, t):
+        r'\.\.\.'
+        # '...' maps to the ELLIPSIS keyword token via the keywords table.
+        t.type = self.keywords.get(t.value)
+        return t
+
+    def t_BRACESWITHSTUFF(self, t):
+        r'\[[^\]]+?\]'
+        # A whole '[...]' extended-attribute blob is lexed as one token.
+        return t
+
+    def t_OTHER(self, t):
+        r'[^\t\n\r 0-9A-Z_a-z]'
+        # Single punctuation character: reclassified via the keywords table
+        # (e.g. ';' -> SEMICOLON), otherwise passed through as OTHER.
+        t.type = self.keywords.get(t.value, 'OTHER')
+        return t
+
+    keywords = {
+        "module": "MODULE",
+        "interface": "INTERFACE",
+        "partial": "PARTIAL",
+        "dictionary": "DICTIONARY",
+        "exception": "EXCEPTION",
+        "enum": "ENUM",
+        "callback": "CALLBACK",
+        "typedef": "TYPEDEF",
+        "implements": "IMPLEMENTS",
+        "const": "CONST",
+        "null": "NULL",
+        "true": "TRUE",
+        "false": "FALSE",
+        "stringifier": "STRINGIFIER",
+        "attribute": "ATTRIBUTE",
+        "readonly": "READONLY",
+        "inherit": "INHERIT",
+        "static": "STATIC",
+        "getter": "GETTER",
+        "setter": "SETTER",
+        "creator": "CREATOR",
+        "deleter": "DELETER",
+        "legacycaller": "LEGACYCALLER",
+        "optional": "OPTIONAL",
+        "...": "ELLIPSIS",
+        "::": "SCOPE",
+        "Date": "DATE",
+        "DOMString": "DOMSTRING",
+        "any": "ANY",
+        "boolean": "BOOLEAN",
+        "byte": "BYTE",
+        "double": "DOUBLE",
+        "float": "FLOAT_",
+        "long": "LONG",
+        "object": "OBJECT",
+        "octet": "OCTET",
+        "optional": "OPTIONAL",
+        "sequence": "SEQUENCE",
+        "short": "SHORT",
+        "unsigned": "UNSIGNED",
+        "void": "VOID",
+        ":": "COLON",
+        ";": "SEMICOLON",
+        "{": "LBRACE",
+        "}": "RBRACE",
+        "(": "LPAREN",
+        ")": "RPAREN",
+        "[": "LBRACKET",
+        "]": "RBRACKET",
+        "?": "QUESTIONMARK",
+        ",": "COMMA",
+        "=": "EQUALS",
+        "<": "LT",
+        ">": "GT",
+        "ArrayBuffer": "ARRAYBUFFER"
+        }
+
+    tokens.extend(keywords.values())
+
+    def t_error(self, t):
+        raise WebIDLError("Unrecognized Input",
+               Location(lexer=self.lexer,
+                        lineno=self.lexer.lineno,
+                        lexpos=self.lexer.lexpos,
+                        filename = self.filename))
+
+    def __init__(self):
+        self.lexer = lex.lex(object=self,
+                             lextab='webidllex',
+                             reflags=re.DOTALL)
+
class Parser(Tokenizer):
    # LALR parser for WebIDL built on PLY yacc.  The docstring of every
    # p_* method below IS the grammar production PLY compiles, so those
    # docstrings are load-bearing and must not be reworded.

    def getLocation(self, p, i):
        # Source location of symbol i of production p, for error reporting.
        return Location(self.lexer, p.lineno(i), p.lexpos(i), self._filename)

    def globalScope(self):
        # The single global IDLScope all productions resolve against.
        return self._globalScope

    # The p_Foo functions here must match the WebIDL spec's grammar.
    # It's acceptable to split things at '|' boundaries.
    def p_Definitions(self, p):
        """ 
            Definitions : ExtendedAttributeList Definition Definitions
        """
        if p[2]:
            p[0] = [p[2]]
            p[2].addExtendedAttributes(p[1])
        else:
            # A None definition (unimplemented construct) must not carry
            # extended attributes.
            assert not p[1]
            p[0] = []

        p[0].extend(p[3])

    def p_DefinitionsEmpty(self, p):
        """
            Definitions :
        """
        p[0] = []

    def p_Definition(self, p):
        """
            Definition : CallbackOrInterface
                       | PartialInterface
                       | Dictionary
                       | Exception
                       | Enum
                       | Typedef
                       | ImplementsStatement
        """
        p[0] = p[1]
        assert p[1] # We might not have implemented something ...

    def p_CallbackOrInterfaceCallback(self, p):
        """
            CallbackOrInterface : CALLBACK CallbackRestOrInterface
        """
        if p[2].isInterface():
            assert isinstance(p[2], IDLInterface)
            p[2].setCallback(True)

        p[0] = p[2]

    def p_CallbackOrInterfaceInterface(self, p):
        """
            CallbackOrInterface : Interface
        """
        p[0] = p[1]

    def p_CallbackRestOrInterface(self, p):
        """
            CallbackRestOrInterface : CallbackRest
                                    | Interface
        """
        assert p[1]
        p[0] = p[1]

    def p_Interface(self, p):
        """
            Interface : INTERFACE IDENTIFIER Inheritance LBRACE InterfaceMembers RBRACE SEMICOLON
        """
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])

        members = p[5]
        p[0] = IDLInterface(location, self.globalScope(), identifier, p[3], members)

    def p_PartialInterface(self, p):
        """
            PartialInterface : PARTIAL INTERFACE IDENTIFIER LBRACE InterfaceMembers RBRACE SEMICOLON
        """
        # Parsed but not implemented yet: production yields None.
        pass

    def p_Inheritance(self, p):
        """
            Inheritance : COLON ScopedName
        """
        # Parent interface may not be defined yet; resolved later.
        p[0] = IDLParentPlaceholder(self.getLocation(p, 2), p[2])

    def p_InheritanceEmpty(self, p):
        """
            Inheritance :
        """
        pass

    def p_InterfaceMembers(self, p):
        """
            InterfaceMembers : ExtendedAttributeList InterfaceMember InterfaceMembers
        """
        p[0] = [p[2]] if p[2] else []
        assert not p[1] # Not implemented yet!
        p[0].extend(p[3])

    def p_InterfaceMembersEmpty(self, p):
        """
            InterfaceMembers :
        """
        p[0] = []

    def p_InterfaceMember(self, p):
        """
            InterfaceMember : Const
                            | AttributeOrOperation
        """
        p[0] = p[1]

    def p_Dictionary(self, p):
        """
            Dictionary : DICTIONARY IDENTIFIER Inheritance LBRACE DictionaryMembers RBRACE SEMICOLON
        """
        # Dictionaries not implemented yet.
        pass

    def p_DictionaryMembers(self, p):
        """
            DictionaryMembers : ExtendedAttributeList DictionaryMember DictionaryMembers
                             |
        """
        pass

    def p_DictionaryMember(self, p):
        """
            DictionaryMember : Type IDENTIFIER DefaultValue SEMICOLON
        """
        pass

    def p_DefaultValue(self, p):
        """
            DefaultValue : EQUALS ConstValue
                         |
        """
        pass

    def p_Exception(self, p):
        """
            Exception : EXCEPTION IDENTIFIER Inheritance LBRACE ExceptionMembers RBRACE SEMICOLON
        """
        # Exceptions not implemented yet.
        pass

    def p_Enum(self, p):
        """
            Enum : ENUM IDENTIFIER LBRACE EnumValueList RBRACE SEMICOLON
        """
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])

        values = p[4]
        assert values
        p[0] = IDLEnum(location, self.globalScope(), identifier, values)

    def p_EnumValueList(self, p):
        """
            EnumValueList : STRING EnumValues
        """
        p[0] = [p[1]]
        p[0].extend(p[2])

    def p_EnumValues(self, p):
        """
            EnumValues : COMMA STRING EnumValues
        """
        p[0] = [p[2]]
        p[0].extend(p[3])

    def p_EnumValuesEmpty(self, p):
        """
            EnumValues :
        """
        p[0] = []

    def p_CallbackRest(self, p):
        """
            CallbackRest : IDENTIFIER EQUALS ReturnType LPAREN ArgumentList RPAREN SEMICOLON
        """
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
        p[0] = IDLCallbackType(self.getLocation(p, 1), self.globalScope(),
                               identifier, p[3], p[5])

    def p_ExceptionMembers(self, p):
        """
            ExceptionMembers : ExtendedAttributeList ExceptionMember ExceptionMembers
                             |
        """
        pass

    def p_Typedef(self, p):
        """
            Typedef : TYPEDEF Type IDENTIFIER SEMICOLON
        """
        # Typedefs resolve into the global scope immediately.
        typedef = IDLTypedefType(self.getLocation(p, 1), p[2], p[3])
        typedef.resolve(self.globalScope())
        p[0] = typedef

    def p_ImplementsStatement(self, p):
        """
            ImplementsStatement : ScopedName IMPLEMENTS ScopedName SEMICOLON
        """
        # "A implements B" not implemented yet.
        pass

    def p_Const(self, p):
        """
            Const : CONST ConstType IDENTIFIER EQUALS ConstValue SEMICOLON
        """
        location = self.getLocation(p, 1)
        type = p[2]
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3])
        value = p[5]
        p[0] = IDLConst(location, identifier, type, value)

    def p_ConstValueBoolean(self, p):
        """
            ConstValue : BooleanLiteral
        """
        location = self.getLocation(p, 1)
        booleanType = BuiltinTypes[IDLBuiltinType.Types.boolean]
        p[0] = IDLValue(location, booleanType, p[1])

    def p_ConstValueInteger(self, p):
        """
            ConstValue : INTEGER
        """
        location = self.getLocation(p, 1)

        # We don't know ahead of time what type the integer literal is.
        # Determine the smallest type it could possibly fit in and use that.
        integerType = matchIntegerValueToType(p[1])
        if integerType == None:
            raise WebIDLError("Integer literal out of range", location)

        p[0] = IDLValue(location, integerType, p[1])

    def p_ConstValueFloat(self, p):
        """
            ConstValue : FLOAT
        """
        # Float constants not implemented yet.
        assert False
        pass

    def p_ConstValueString(self, p):
        """
            ConstValue : STRING
        """
        # String constants not implemented yet.
        assert False
        pass

    def p_ConstValueNull(self, p):
        """
            ConstValue : NULL
        """
        p[0] = IDLNullValue(self.getLocation(p, 1))

    def p_BooleanLiteralTrue(self, p):
        """
            BooleanLiteral : TRUE
        """
        p[0] = True

    def p_BooleanLiteralFalse(self, p):
        """
            BooleanLiteral : FALSE
        """
        p[0] = False

    def p_AttributeOrOperationStringifier(self, p):
        """
            AttributeOrOperation : STRINGIFIER StringifierAttributeOrOperation
        """
        assert False # Not implemented
        pass

    def p_AttributeOrOperation(self, p):
        """
            AttributeOrOperation : Attribute
                                 | Operation
        """
        p[0] = p[1]

    def p_StringifierAttributeOrOperation(self, p):
        """
            StringifierAttributeOrOperation : Attribute
                                            | OperationRest
                                            | SEMICOLON
        """
        pass

    def p_Attribute(self, p):
        """
            Attribute : Inherit ReadOnly ATTRIBUTE AttributeType IDENTIFIER SEMICOLON
        """
        location = self.getLocation(p, 3)
        inherit = p[1]
        readonly = p[2]
        type = p[4]
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 5), p[5])
        p[0] = IDLAttribute(location, identifier, type, readonly, inherit)

    def p_ReadOnly(self, p):
        """
            ReadOnly : READONLY
        """
        p[0] = True

    def p_ReadOnlyEmpty(self, p):
        """
            ReadOnly :
        """
        p[0] = False

    def p_Inherit(self, p):
        """
            Inherit : INHERIT
        """
        p[0] = True

    def p_InheritEmpty(self, p):
        """
            Inherit :
        """
        p[0] = False

    def p_Operation(self, p):
        """
            Operation : Qualifiers OperationRest
        """
        qualifiers = p[1]

        # Disallow duplicates in the qualifier set
        if not len(set(qualifiers)) == len(qualifiers):
            raise WebIDLError("Duplicate qualifiers are not allowed",
                              self.getLocation(p, 1))

        static = True if IDLMethod.Special.Static in p[1] else False
        # If static is there that's all that's allowed.  This is disallowed
        # by the parser, so we can assert here.
        assert not static or len(qualifiers) == 1

        getter = True if IDLMethod.Special.Getter in p[1] else False
        setter = True if IDLMethod.Special.Setter in p[1] else False
        creator = True if IDLMethod.Special.Creator in p[1] else False
        deleter = True if IDLMethod.Special.Deleter in p[1] else False
        legacycaller = True if IDLMethod.Special.LegacyCaller in p[1] else False

        if getter or deleter:
            if setter or creator:
                raise WebIDLError("getter and deleter are incompatible with setter and creator",
                                  self.getLocation(p, 1))

        (returnType, identifier, arguments) = p[2]

        assert isinstance(returnType, IDLType)

        specialType = IDLMethod.NamedOrIndexed.Neither

        # Validate special-operation signatures: getters/deleters take one
        # DOMString (named) or unsigned long (indexed) argument ...
        if getter or deleter:
            if len(arguments) != 1:
                raise WebIDLError("%s has wrong number of arguments" %
                                  ("getter" if getter else "deleter"),
                                  self.getLocation(p, 2))
            argType = arguments[0].type
            if argType == BuiltinTypes[IDLBuiltinType.Types.domstring]:
                specialType = IDLMethod.NamedOrIndexed.Named
            elif argType == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]:
                specialType = IDLMethod.NamedOrIndexed.Indexed
            else:
                raise WebIDLError("%s has wrong argument type (must be DOMString or UnsignedLong)" %
                                  ("getter" if getter else "deleter"),
                                  arguments[0].location)
            if arguments[0].optional or arguments[0].variadic:
                raise WebIDLError("%s cannot have %s argument" %
                                  ("getter" if getter else "deleter",
                                   "optional" if arguments[0].optional else "variadic"),
                                   arguments[0].location)
            if returnType.isVoid():
                raise WebIDLError("%s cannot have void return type" %
                                  ("getter" if getter else "deleter"),
                                  self.getLocation(p, 2))
        # ... setters/creators take that key plus the value, whose type must
        # match the return type.
        if setter or creator:
            if len(arguments) != 2:
                raise WebIDLError("%s has wrong number of arguments" %
                                  ("setter" if setter else "creator"),
                                  self.getLocation(p, 2))
            argType = arguments[0].type
            if argType == BuiltinTypes[IDLBuiltinType.Types.domstring]:
                specialType = IDLMethod.NamedOrIndexed.Named
            elif argType == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]:
                specialType = IDLMethod.NamedOrIndexed.Indexed
            else:
                raise WebIDLError("%s has wrong argument type (must be DOMString or UnsignedLong)" %
                                  ("setter" if setter else "creator"),
                                  arguments[0].location)
            if arguments[0].optional or arguments[0].variadic:
                raise WebIDLError("%s cannot have %s argument" %
                                  ("setter" if setter else "creator",
                                   "optional" if arguments[0].optional else "variadic"),
                                   arguments[0].location)
            if arguments[1].optional or arguments[1].variadic:
                raise WebIDLError("%s cannot have %s argument" %
                                  ("setter" if setter else "creator",
                                   "optional" if arguments[1].optional else "variadic"),
                                   arguments[1].location)
            if returnType.isVoid():
                raise WebIDLError("%s cannot have void return type" %
                                  ("setter" if setter else "creator"),
                                  self.getLocation(p, 2))
            if not arguments[1].type == returnType:
                raise WebIDLError("%s return type and second argument type must match" %
                                  ("setter" if setter else "creator"),
                                  self.getLocation(p, 2))

        inOptionalArguments = False
        variadicArgument = False
        for argument in arguments:
            # Only the last argument can be variadic
            if variadicArgument:
                raise WebIDLError("Only the last argument can be variadic",
                                  variadicArgument.location)
            # Once we see an optional argument, there can't be any non-optional
            # arguments.
            if inOptionalArguments and not argument.optional:
                raise WebIDLError("Cannot have a non-optional argument following an optional argument",
                                  argument.location)
            inOptionalArguments = argument.optional
            variadicArgument = argument if argument.variadic else None

        # identifier might be None.  This is only permitted for special methods.
        # NB: Stringifiers are handled elsewhere.
        if not identifier:
            if not getter and not setter and not creator and \
               not deleter and not legacycaller:
                raise WebIDLError("Identifier required for non-special methods",
                                  self.getLocation(p, 2))

            # Synthesize a reserved "__namedgetter"-style identifier;
            # allowDoubleUnderscore bypasses the usual reserved-name check.
            location = BuiltinLocation("<auto-generated-identifier>")
            identifier = IDLUnresolvedIdentifier(location, "__%s%s%s%s%s%s" %
                ("named" if specialType == IDLMethod.NamedOrIndexed.Named else \
                 "indexed" if specialType == IDLMethod.NamedOrIndexed.Indexed else "",
                 "getter" if getter else "",
                 "setter" if setter else "",
                 "deleter" if deleter else "",
                 "creator" if creator else "",
                 "legacycaller" if legacycaller else ""), allowDoubleUnderscore=True)

        method = IDLMethod(self.getLocation(p, 2), identifier, returnType, arguments,
                           static, getter, setter, creator, deleter, specialType,
                           legacycaller, False)
        p[0] = method

    def p_QualifiersStatic(self, p):
        """
            Qualifiers : STATIC
        """
        p[0] = [IDLMethod.Special.Static]

    def p_QualifiersSpecials(self, p):
        """
            Qualifiers : Specials
        """
        p[0] = p[1]

    def p_Specials(self, p):
        """
            Specials : Special Specials
        """
        p[0] = [p[1]]
        p[0].extend(p[2])

    def p_SpecialsEmpty(self, p):
        """
            Specials :
        """
        p[0] = []

    def p_SpecialGetter(self, p):
        """
            Special : GETTER
        """
        p[0] = IDLMethod.Special.Getter

    def p_SpecialSetter(self, p):
        """
            Special : SETTER
        """
        p[0] = IDLMethod.Special.Setter

    def p_SpecialCreator(self, p):
        """
            Special : CREATOR
        """
        p[0] = IDLMethod.Special.Creator

    def p_SpecialDeleter(self, p):
        """
            Special : DELETER
        """
        p[0] = IDLMethod.Special.Deleter

    def p_SpecialLegacyCaller(self, p):
        """
            Special : LEGACYCALLER
        """
        p[0] = IDLMethod.Special.LegacyCaller

    def p_OperationRest(self, p):
        """
            OperationRest : ReturnType OptionalIdentifier LPAREN ArgumentList RPAREN SEMICOLON
        """
        # Tuple unpacked by p_Operation: (returnType, identifier, arguments).
        p[0] = (p[1], p[2], p[4])

    def p_OptionalIdentifier(self, p):
        """
            OptionalIdentifier : IDENTIFIER
        """
        p[0] = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])

    def p_OptionalIdentifierEmpty(self, p):
        """
            OptionalIdentifier :
        """
        pass

    def p_ArgumentList(self, p):
        """
            ArgumentList : Argument Arguments
        """
        p[0] = [p[1]] if p[1] else []
        p[0].extend(p[2])

    def p_ArgumentListEmpty(self, p):
        """
            ArgumentList :
        """
        p[0] = []

    def p_Arguments(self, p):
        """
            Arguments : COMMA Argument Arguments
        """
        p[0] = [p[2]] if p[2] else []
        p[0].extend(p[3])

    def p_ArgumentsEmpty(self, p):
        """
            Arguments :
        """
        p[0] = []

    def p_Argument(self, p):
        """
            Argument : ExtendedAttributeList Optional Type Ellipsis IDENTIFIER
        """
        t = p[3]
        assert isinstance(t, IDLType)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 5), p[5])

        optional = p[2]
        variadic = p[4]

        if variadic:
            if optional:
                raise WebIDLError("Variadic arguments should not be marked optional.",
                                  self.getLocation(p, 2))
            # A variadic argument is implicitly optional.
            optional = variadic

        p[0] = IDLArgument(self.getLocation(p, 5), identifier, t, optional, variadic)
        p[0].addExtendedAttributes(p[1])

    def p_Optional(self, p):
        """
            Optional : OPTIONAL
        """
        p[0] = True

    def p_OptionalEmpty(self, p):
        """
            Optional :
        """
        p[0] = False

    def p_Ellipsis(self, p):
        """
            Ellipsis : ELLIPSIS
        """
        p[0] = True

    def p_EllipsisEmpty(self, p):
        """
            Ellipsis :
        """
        p[0] = False

    def p_ExceptionMember(self, p):
        """
            ExceptionMember : Const
                            | ExceptionField
        """
        pass

    def p_ExceptionField(self, p):
        """
            ExceptionField : AttributeType IDENTIFIER SEMICOLON
        """
        pass

    def p_ExtendedAttributeList(self, p):
        """
            ExtendedAttributeList : BRACESWITHSTUFF
        """
        text = p[1][1:-1]

        # Parse the extended attribute list
        # NOTE(review): the regex below is empty and the result is discarded —
        # extended-attribute parsing is clearly unimplemented at this point.
        _extendedAttributeRegex = re.compile(r'')

        pass

    def p_ExtendedAttributeListEmpty(self, p):
        """
            ExtendedAttributeList :
        """
        p[0] = []

    def p_Other(self, p):
        """
            Other : INTEGER
                  | FLOAT
                  | IDENTIFIER
                  | STRING
                  | OTHER
                  | ELLIPSIS
                  | COLON
                  | SCOPE
                  | SEMICOLON
                  | LT
                  | EQUALS
                  | GT
                  | QUESTIONMARK
                  | DATE
                  | DOMSTRING
                  | ANY
                  | ATTRIBUTE
                  | BOOLEAN
                  | BYTE
                  | LEGACYCALLER
                  | CONST
                  | CREATOR
                  | DELETER
                  | DOUBLE
                  | EXCEPTION
                  | FALSE
                  | FLOAT_
                  | GETTER
                  | IMPLEMENTS
                  | INHERIT
                  | INTERFACE
                  | LONG
                  | MODULE
                  | NULL
                  | OBJECT
                  | OCTET
                  | OPTIONAL
                  | SEQUENCE
                  | SETTER
                  | SHORT
                  | STATIC
                  | STRINGIFIER
                  | TRUE
                  | TYPEDEF
                  | UNSIGNED
                  | VOID
        """
        pass

    def p_OtherOrComma(self, p):
        """
            OtherOrComma : Other
                         | COMMA
        """
        pass

    def p_TypeAttributeType(self, p):
        """
            Type : AttributeType
        """
        p[0] = p[1]

    def p_TypeSequenceType(self, p):
        """
            Type : SequenceType
        """
        p[0] = p[1]

    def p_SequenceType(self, p):
        """
            SequenceType : SEQUENCE LT Type GT Null
        """
        innerType = p[3]
        type = IDLSequenceType(self.getLocation(p, 1), innerType)
        if p[5]:
            # Trailing '?' wraps the sequence in a nullable type.
            type = IDLNullableType(self.getLocation(p, 5), type)
        p[0] = type

    def p_AttributeTypePrimitive(self, p):
        """
            AttributeType : PrimitiveOrStringType TypeSuffix
                          | ARRAYBUFFER TypeSuffix
                          | OBJECT TypeSuffix
                          | ANY TypeSuffixStartingWithArray
        """
        if p[1] == "object":
            type = BuiltinTypes[IDLBuiltinType.Types.object]
        elif p[1] == "any":
            type = BuiltinTypes[IDLBuiltinType.Types.any]
        elif p[1] == "ArrayBuffer":
            type = BuiltinTypes[IDLBuiltinType.Types.ArrayBuffer]
        else:
            type = BuiltinTypes[p[1]]

        # Apply '?' and '[]' suffixes outermost-last, in source order.
        for (modifier, modifierLocation) in p[2]:
            assert modifier == IDLMethod.TypeSuffixModifier.QMark or \
                   modifier == IDLMethod.TypeSuffixModifier.Brackets

            if modifier == IDLMethod.TypeSuffixModifier.QMark:
                type = IDLNullableType(modifierLocation, type)
            elif modifier == IDLMethod.TypeSuffixModifier.Brackets:
                type = IDLArrayType(modifierLocation, type)

        p[0] = type

    def p_AttributeTypeScopedName(self, p):
        """
            AttributeType : ScopedName TypeSuffix
        """
        assert isinstance(p[1], IDLUnresolvedIdentifier)

        # NOTE(review): p[2] (the TypeSuffix) is ignored on this path —
        # presumably '?'/'[]' on named types is unsupported here; confirm.
        try:
            # If the name is already known, use the resolved object; a bare
            # except covers the lookup throwing for unknown identifiers.
            if self.globalScope()._lookupIdentifier(p[1]):
                obj = self.globalScope()._lookupIdentifier(p[1])
                if obj.isType():
                    p[0] = obj
                else:
                    p[0] = IDLWrapperType(self.getLocation(p, 1), p[1])
                return
        except:
            pass

        # Unknown name: leave unresolved, to be completed in finish().
        p[0] = IDLUnresolvedType(self.getLocation(p, 1), p[1])

    def p_AttributeTypeDate(self, p):
        """
            AttributeType : DATE TypeSuffix
        """
        # Date not implemented yet.
        assert False
        pass

    def p_ConstType(self, p):
        """
            ConstType : PrimitiveOrStringType Null
        """
        type = BuiltinTypes[p[1]]
        if p[2]:
            type = IDLNullableType(self.getLocation(p, 1), type)
        p[0] = type

    def p_PrimitiveOrStringTypeUint(self, p):
        """
            PrimitiveOrStringType : UnsignedIntegerType
        """
        p[0] = p[1]

    def p_PrimitiveOrStringTypeBoolean(self, p):
        """
            PrimitiveOrStringType : BOOLEAN
        """
        p[0] = IDLBuiltinType.Types.boolean

    def p_PrimitiveOrStringTypeByte(self, p):
        """
            PrimitiveOrStringType : BYTE
        """
        p[0] = IDLBuiltinType.Types.byte

    def p_PrimitiveOrStringTypeOctet(self, p):
        """
            PrimitiveOrStringType : OCTET
        """
        p[0] = IDLBuiltinType.Types.octet

    def p_PrimitiveOrStringTypeFloat(self, p):
        """
            PrimitiveOrStringType : FLOAT
        """
        # NOTE(review): FLOAT is the float-*literal* token; the 'float'
        # keyword is tokenized as FLOAT_ (see Tokenizer.keywords), so this
        # production likely should read FLOAT_ — confirm against the spec
        # grammar before changing, since p_* docstrings define the grammar.
        p[0] = IDLBuiltinType.Types.float

    def p_PrimitiveOrStringTypeDouble(self, p):
        """
            PrimitiveOrStringType : DOUBLE
        """
        p[0] = IDLBuiltinType.Types.double

    def p_PrimitiveOrStringTypeDOMString(self, p):
        """
            PrimitiveOrStringType : DOMSTRING
        """
        p[0] = IDLBuiltinType.Types.domstring

    def p_UnsignedIntegerTypeUnsigned(self, p):
        """
            UnsignedIntegerType : UNSIGNED IntegerType
        """
        p[0] = p[2] + 1 # Adding one to a given signed integer type
                        # gets you the unsigned type.

    def p_UnsignedIntegerType(self, p):
        """
            UnsignedIntegerType : IntegerType
        """
        p[0] = p[1]

    def p_IntegerTypeShort(self, p):
        """
            IntegerType : SHORT
        """
        p[0] = IDLBuiltinType.Types.short

    def p_IntegerTypeLong(self, p):
        """
            IntegerType : LONG OptionalLong
        """
        if p[2]:
            p[0] = IDLBuiltinType.Types.long_long
        else:
            p[0] = IDLBuiltinType.Types.long

    def p_OptionalLong(self, p):
        """
            OptionalLong : LONG
        """
        p[0] = True

    def p_OptionalLongEmpty(self, p):
        """
            OptionalLong :
        """
        p[0] = False

    def p_TypeSuffixBrackets(self, p):
        """
            TypeSuffix : LBRACKET RBRACKET TypeSuffix
        """
        p[0] = [(IDLMethod.TypeSuffixModifier.Brackets, self.getLocation(p, 1))]
        p[0].extend(p[3])

    def p_TypeSuffixQMark(self, p):
        """
            TypeSuffix : QUESTIONMARK TypeSuffixStartingWithArray
        """
        p[0] = [(IDLMethod.TypeSuffixModifier.QMark, self.getLocation(p, 1))]
        p[0].extend(p[2])

    def p_TypeSuffixEmpty(self, p):
        """
            TypeSuffix :
        """
        p[0] = []

    def p_TypeSuffixStartingWithArray(self, p):
        """
            TypeSuffixStartingWithArray : LBRACKET RBRACKET TypeSuffix
        """
        p[0] = [(IDLMethod.TypeSuffixModifier.Brackets, self.getLocation(p, 1))]
        p[0].extend(p[3])

    def p_TypeSuffixStartingWithArrayEmpty(self, p):
        """
            TypeSuffixStartingWithArray :
        """
        p[0] = []

    def p_Null(self, p):
        """
            Null : QUESTIONMARK
                 |
        """
        if len(p) > 1:
            p[0] = True
        else:
            p[0] = False

    def p_ReturnTypeType(self, p):
        """
            ReturnType : Type
        """
        p[0] = p[1]

    def p_ReturnTypeVoid(self, p):
        """
            ReturnType : VOID
        """
        p[0] = BuiltinTypes[IDLBuiltinType.Types.void]

    def p_ScopedName(self, p):
        """
            ScopedName : AbsoluteScopedName
                       | RelativeScopedName
        """
        p[0] = p[1]

    def p_AbsoluteScopedName(self, p):
        """
            AbsoluteScopedName : SCOPE IDENTIFIER ScopedNameParts
        """
        # '::Name' paths not implemented yet.
        assert False
        pass

    def p_RelativeScopedName(self, p):
        """
            RelativeScopedName : IDENTIFIER ScopedNameParts
        """
        assert not p[2] # Not implemented!

        p[0] = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])

    def p_ScopedNameParts(self, p):
        """
            ScopedNameParts : SCOPE IDENTIFIER ScopedNameParts
        """
        assert False
        pass

    def p_ScopedNamePartsEmpty(self, p):
        """
            ScopedNameParts :
        """
        p[0] = None

    def p_ExtendedAttributeNoArgs(self, p):
        """
            ExtendedAttributeNoArgs : IDENTIFIER
        """
        pass

    def p_ExtendedAttributeArgList(self, p):
        """
            ExtendedAttributeArgList : IDENTIFIER LPAREN ArgumentList RPAREN
        """
        pass

    def p_ExtendedAttributeIdent(self, p):
        """
            ExtendedAttributeIdent : IDENTIFIER EQUALS IDENTIFIER
        """
        pass

    def p_ExtendedAttributeScopedName(self, p):
        """
            ExtendedAttributeScopedName : IDENTIFIER EQUALS ScopedName
        """
        pass

    def p_ExtendedAttributeNamedArgList(self, p):
        """
            ExtendedAttributeNamedArgList : IDENTIFIER EQUALS IDENTIFIER LPAREN ArgumentList RPAREN
        """
        pass

    def p_error(self, p):
        # p is None when yacc hits end-of-input with an incomplete parse.
        if not p:
            raise WebIDLError("Syntax Error at end of file. Possibly due to missing semicolon(;), braces(}) or both", None)
        else:
            raise WebIDLError("invalid syntax", Location(self.lexer, p.lineno, p.lexpos, self._filename))

    def __init__(self):
        Tokenizer.__init__(self)
        # Build the LALR tables from the p_* docstrings above.
        self.parser = yacc.yacc(module=self,
                                tabmodule='webidlyacc',
                                errorlog=yacc.NullLogger())
        self._globalScope = IDLScope(BuiltinLocation("<Global Scope>"), None, None)
        self._productions = []

        # Prime the parser with the WebIDL-defined builtin typedefs; their
        # productions are deliberately not kept in self._productions.
        self._filename = "<builtin>"
        self.lexer.input(Parser._builtins)
        self._filename = None

        self.parser.parse(lexer=self.lexer)

    def parse(self, t, filename=None):
        # Parse one IDL string, accumulating its productions; filename is
        # only used for diagnostics.
        self.lexer.input(t)

        #for tok in iter(self.lexer.token, None):
        #    print tok

        self._filename = filename
        self._productions.extend(self.parser.parse(lexer=self.lexer))
        self._filename = None

    def finish(self):
        # Second pass: resolve forward references and complete types on
        # everything parsed so far.
        for production in self._productions:
            production.finish(self.globalScope())

        return self._productions

    def reset(self):
        # A fresh Parser is the cheapest way to get a clean global scope.
        return Parser()

    # Builtin IDL defined by WebIDL
    _builtins = """
        typedef unsigned long long DOMTimeStamp;
    """
new file mode 100644
--- /dev/null
+++ b/dom/bindings/parser/__init__.py
@@ -0,0 +1,1 @@
# Only the WebIDL module is part of this package's public interface.
__all__ = ['WebIDL']
new file mode 100644
--- /dev/null
+++ b/dom/bindings/parser/ply/COPYING
@@ -0,0 +1,28 @@
+Copyright (C) 2001-2009,
+David M. Beazley (Dabeaz LLC)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright notice,
+  this list of conditions and the following disclaimer.  
+* Redistributions in binary form must reproduce the above copyright notice, 
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.  
+* Neither the name of the David Beazley or Dabeaz LLC may be used to
+  endorse or promote products derived from this software without
+  specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
new file mode 100644
--- /dev/null
+++ b/dom/bindings/parser/ply/README
@@ -0,0 +1,9 @@
+David Beazley's PLY (Python Lex-Yacc)
+http://www.dabeaz.com/ply/
+
+Licensed under BSD.
+
+This directory contains just the code and license from PLY version 3.3;
+the full distribution (see the URL) also contains examples, tests,
+documentation, and a longer README.
+
new file mode 100644
--- /dev/null
+++ b/dom/bindings/parser/ply/ply/__init__.py
@@ -0,0 +1,4 @@
+# PLY package
+# Author: David Beazley (dave@dabeaz.com)
+
+__all__ = ['lex','yacc']
new file mode 100644
--- /dev/null
+++ b/dom/bindings/parser/ply/ply/lex.py
@@ -0,0 +1,1058 @@
+# -----------------------------------------------------------------------------
+# ply: lex.py
+#
+# Copyright (C) 2001-2009,
+# David M. Beazley (Dabeaz LLC)
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.  
+# * Redistributions in binary form must reproduce the above copyright notice, 
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.  
+# * Neither the name of the David Beazley or Dabeaz LLC may be used to
+#   endorse or promote products derived from this software without
+#  specific prior written permission. 
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# -----------------------------------------------------------------------------
+
__version__    = "3.3"       # PLY release this file was vendored from
__tabversion__ = "3.2"       # Version of table file used

import re, sys, types, copy, os
+
# This tuple contains known string types
try:
    # Python 2.x: distinct byte-string and unicode-string types
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.x: types.StringType is gone; str and bytes cover both cases
    StringTypes = (str, bytes)
+
# Extract the code object of a function in a way that works on both
# Python 3 (f.__code__) and Python 2 (f.func_code).
if sys.version_info[0] >= 3:
    def func_code(f):
        return f.__code__
else:
    def func_code(f):
        return f.func_code
+
# This regular expression is used to match valid token names.
# Note: digits are permitted in any position, including the first.
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
+
+# Exception thrown when invalid token encountered and no default error
+# handler is defined.
+
class LexError(Exception):
    """Raised when an illegal character is encountered and no default
    error handler (t_error rule) is defined."""
    def __init__(self, message, s):
        Exception.__init__(self, message)   # populates self.args = (message,)
        self.text = s                       # the remaining unscanned input
+
+# Token class.  This class is used to represent the tokens produced.
class LexToken(object):
    """A single token produced by the lexer.  Attributes (type, value,
    lineno, lexpos) are assigned by the lexing engine after creation."""
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return "LexToken(%s,%r,%d,%d)" % (self.type, self.value, self.lineno, self.lexpos)
+
+# This object is a stand-in for a logging object created by the 
+# logging module.  
+
class PlyLogger(object):
    """Minimal stand-in for a logging.Logger that writes prefixed
    messages to a file-like object."""

    def __init__(self, f):
        self.f = f  # any object with a write() method

    def _emit(self, prefix, msg, args):
        # All severity levels funnel through one formatting path.
        self.f.write(prefix + (msg % args) + "\n")

    def critical(self, msg, *args, **kwargs):
        self._emit("", msg, args)

    def warning(self, msg, *args, **kwargs):
        self._emit("WARNING: ", msg, args)

    def error(self, msg, *args, **kwargs):
        self._emit("ERROR: ", msg, args)

    # Lower-severity levels behave exactly like critical (no prefix).
    info = critical
    debug = critical
+
+# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    # Every attribute access (e.g. .error, .warning, .info) returns the
    # object itself, and calling it also returns itself, so arbitrarily
    # chained logging calls all silently succeed and produce no output.
    def __getattribute__(self,name):
        return self
    def __call__(self,*args,**kwargs):
        return self
+
+# -----------------------------------------------------------------------------
+#                        === Lexing Engine ===
+#
+# The following Lexer class implements the lexer runtime.   There are only
+# a few public methods and attributes:
+#
+#    input()          -  Store a new string in the lexer
+#    token()          -  Get the next token
+#    clone()          -  Clone the lexer
+#
+#    lineno           -  Current line number
+#    lexpos           -  Current position in the input string
+# -----------------------------------------------------------------------------
+
class Lexer:
    """Lexer runtime.  Public interface:

       input()   - Store a new string in the lexer
       token()   - Get the next token
       clone()   - Clone the lexer

       lineno    - Current line number
       lexpos    - Current position in the input string
    """
    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re,findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = "INITIAL"     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ""           # Ignored characters
        self.lexliterals = ""         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = 0          # Optimized mode

    def clone(self, object=None):
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object.  In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.

        if object:
            newtab = {}
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object, f[0].__name__), f[1]))
                    # BUGFIX: this append belongs inside the loop over ritem.
                    # It previously sat one level out, so only the LAST master
                    # regex of each state survived cloning (the same
                    # mis-indentation was fixed in later upstream PLY).
                    newre.append((cre, newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = {}
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object, ef.__name__)
            c.lexmodule = object
        return c

    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self, tabfile, outputdir=""):
        if isinstance(tabfile, types.ModuleType):
            return
        basetabfilename = tabfile.split(".")[-1]
        filename = os.path.join(outputdir, basetabfilename) + ".py"
        tf = open(filename, "w")
        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
        tf.write("_tabversion   = %s\n" % repr(__version__))
        tf.write("_lextokens    = %s\n" % repr(self.lextokens))
        tf.write("_lexreflags   = %s\n" % repr(self.lexreflags))
        tf.write("_lexliterals  = %s\n" % repr(self.lexliterals))
        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))

        tabre = {}
        # Collect all functions in the initial state
        initial = self.lexstatere["INITIAL"]
        initialfuncs = []
        for part in initial:
            for f in part[1]:
                if f and f[0]:
                    initialfuncs.append(f)

        for key, lre in self.lexstatere.items():
            titem = []
            for i in range(len(lre)):
                titem.append((self.lexstateretext[key][i], _funcs_to_names(lre[i][1], self.lexstaterenames[key][i])))
            tabre[key] = titem

        tf.write("_lexstatere   = %s\n" % repr(tabre))
        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))

        taberr = {}
        for key, ef in self.lexstateerrorf.items():
            if ef:
                taberr[key] = ef.__name__
            else:
                taberr[key] = None
        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
        tf.close()

    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self, tabfile, fdict):
        if isinstance(tabfile, types.ModuleType):
            lextab = tabfile
        else:
            if sys.version_info[0] < 3:
                exec("import %s as lextab" % tabfile)
            else:
                env = {}
                exec("import %s as lextab" % tabfile, env, env)
                lextab = env['lextab']

        if getattr(lextab, "_tabversion", "0.0") != __version__:
            raise ImportError("Inconsistent PLY version")

        self.lextokens      = lextab._lextokens
        self.lexreflags     = lextab._lexreflags
        self.lexliterals    = lextab._lexliterals
        self.lexstateinfo   = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere     = {}
        self.lexstateretext = {}
        for key, lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for i in range(len(lre)):
                titem.append((re.compile(lre[i][0], lextab._lexreflags | re.VERBOSE), _names_to_funcs(lre[i][1], fdict)))
                txtitem.append(lre[i][0])
            self.lexstatere[key] = titem
            self.lexstateretext[key] = txtitem
        self.lexstateerrorf = {}
        for key, ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[key] = fdict[ef]
        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self, s):
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c, StringTypes):
            raise ValueError("Expected a string")
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self, state):
        if not state in self.lexstatere:
            raise ValueError("Undefined state")
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state, "")
        self.lexerrorf = self.lexstateerrorf.get(state, None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self, state):
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self, n):
        self.lexpos += n

    # ------------------------------------------------------------
    # token() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible.  Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        # Make local copies of frequently referenced attributes
        lexpos    = self.lexpos
        lexlen    = self.lexlen
        lexignore = self.lexignore
        lexdata   = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre, lexindexfunc in self.lexre:
                m = lexre.match(lexdata, lexpos)
                if not m: continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                i = m.lastindex
                func, tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If token is processed by a function, call it

                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos    = self.lexpos         # This is here in case user has updated lexpos.
                    lexignore = self.lexignore      # This is here in case there was a state change
                    break

                # Verify type of the token.  If not in the token map, raise an error
                if not self.lexoptimize:
                    if not newtok.type in self.lextokens:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func_code(func).co_filename, func_code(func).co_firstlineno,
                            func.__name__, newtok.type), lexdata[lexpos:])

                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = "error"
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok: continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])

        # Past end of input: bump lexpos so repeated calls keep returning None,
        # and diagnose the never-got-input case explicitly.
        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError("No input string given with input()")
        return None

    # Iterator interface
    def __iter__(self):
        return self

    def next(self):
        t = self.token()
        if t is None:
            raise StopIteration
        return t

    __next__ = next
+
+# -----------------------------------------------------------------------------
+#                           ==== Lex Builder ===
+#
+# The functions and classes below are used to collect lexing information
+# and build a Lexer object from it.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# get_caller_module_dict()
+#
+# This function returns a dictionary containing all of the symbols defined within
+# a caller further down the call stack.  This is used to get the environment
+# associated with the yacc() call if none was provided.
+# -----------------------------------------------------------------------------
+
def get_caller_module_dict(levels):
    # The raise/except dance obtains a traceback object, from which the
    # caller's stack frame can be reached without sys._getframe().
    try:
        raise RuntimeError
    except RuntimeError:
        e,b,t = sys.exc_info()
        f = t.tb_frame
        # Walk up 'levels' frames from this function's own frame.
        while levels > 0:
            f = f.f_back
            levels -= 1
        # Merge globals with locals; locals take precedence when the two
        # dictionaries differ (i.e. the caller is not at module scope).
        ldict = f.f_globals.copy()
        if f.f_globals != f.f_locals:
            ldict.update(f.f_locals)

        return ldict
+
+# -----------------------------------------------------------------------------
+# _funcs_to_names()
+#
+# Given a list of regular expression functions, this converts it to a list
+# suitable for output to a table file
+# -----------------------------------------------------------------------------
+
+def _funcs_to_names(funclist,namelist):
+    result = []
+    for f,name in zip(funclist,namelist):
+         if f and f[0]:
+             result.append((name, f[1]))
+         else:
+             result.append(f)
+    return result
+
+# -----------------------------------------------------------------------------
+# _names_to_funcs()
+#
+# Given a list of regular expression function names, this converts it back to
+# functions.
+# -----------------------------------------------------------------------------
+
+def _names_to_funcs(namelist,fdict):
+     result = []
+     for n in namelist:
+          if n and n[0]:
+              result.append((fdict[n[0]],n[1]))
+          else:
+              result.append(n)
+     return result
+
+# -----------------------------------------------------------------------------
+# _form_master_re()
+#
+# This function takes a list of all of the regex components and attempts to
+# form the master regular expression.  Given limitations in the Python re
+# module, it may be necessary to break the master regex into separate expressions.
+# -----------------------------------------------------------------------------
+
def _form_master_re(relist,reflags,ldict,toknames):
    # NOTE(review): callers unpack three values from this function, but the
    # empty-list case returns a bare [] which would fail to unpack --
    # presumably unreachable because the recursion below never passes an
    # empty slice (m is forced to at least 1).
    if not relist: return []
    regex = "|".join(relist)
    try:
        lexre = re.compile(regex,re.VERBOSE | reflags)

        # Build the index to function map for the matching engine
        lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
        lexindexnames = lexindexfunc[:]

        for f,i in lexre.groupindex.items():
            handle = ldict.get(f,None)
            if type(handle) in (types.FunctionType, types.MethodType):
                lexindexfunc[i] = (handle,toknames[f])
                lexindexnames[i] = f
            elif handle is not None:
                lexindexnames[i] = f
                # String rules named t_ignore_* match text but emit no token
                # (both slots None); all other string rules emit their token.
                if f.find("ignore_") > 0:
                    lexindexfunc[i] = (None,None)
                else:
                    lexindexfunc[i] = (None, toknames[f])

        return [(lexre,lexindexfunc)],[regex],[lexindexnames]
    except Exception:
        # re.compile refused the combined pattern (e.g. too many groups);
        # split the rule list in half and build multiple master regexes.
        m = int(len(relist)/2)
        if m == 0: m = 1
        llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
        rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
        return llist+rlist, lre+rre, lnames+rnames
+
+# -----------------------------------------------------------------------------
+# def _statetoken(s,names)
+#
+# Given a declaration name s of the form "t_" and a dictionary whose keys are
+# state names, this function returns a tuple (states,tokenname) where states
+# is a tuple of state names and tokenname is the name of the token.  For example,
+# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
+# -----------------------------------------------------------------------------
+
+def _statetoken(s,names):
+    nonstate = 1
+    parts = s.split("_")
+    for i in range(1,len(parts)):
+         if not parts[i] in names and parts[i] != 'ANY': break
+    if i > 1:
+       states = tuple(parts[1:i])
+    else:
+       states = ('INITIAL',)
+
+    if 'ANY' in states:
+       states = tuple(names)
+
+    tokenname = "_".join(parts[i:])
+    return (states,tokenname)
+
+
+# -----------------------------------------------------------------------------
+# LexerReflect()
+#
+# This class represents information needed to build a lexer as extracted from a
+# user's input file.
+# -----------------------------------------------------------------------------
+class LexerReflect(object):
+    def __init__(self,ldict,log=None,reflags=0):
+        self.ldict      = ldict
+        self.error_func = None
+        self.tokens     = []
+        self.reflags    = reflags
+        self.stateinfo  = { 'INITIAL' : 'inclusive'}
+        self.files      = {}
+        self.error      = 0
+
+        if log is None:
+            self.log = PlyLogger(sys.stderr)
+        else:
+            self.log = log
+
+    # Get all of the basic information
    def get_all(self):
        # Order matters: get_states() fills self.stateinfo, which
        # get_rules() consults when bucketing t_ rules by state.
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()
+        
+    # Validate all of the information
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        # self.error was set to 1 by any validator that logged an error.
        return self.error
+
+    # Get the tokens map
+    def get_tokens(self):
+        tokens = self.ldict.get("tokens",None)
+        if not tokens:
+            self.log.error("No token list is defined")
+            self.error = 1
+            return
+
+        if not isinstance(tokens,(list, tuple)):
+            self.log.error("tokens must be a list or tuple")
+            self.error = 1
+            return
+        
+        if not tokens:
+            self.log.error("tokens is empty")
+            self.error = 1
+            return
+
+        self.tokens = tokens
+
+    # Validate the tokens
+    def validate_tokens(self):
+        terminals = {}
+        for n in self.tokens:
+            if not _is_identifier.match(n):
+                self.log.error("Bad token name '%s'",n)
+                self.error = 1
+            if n in terminals:
+                self.log.warning("Token '%s' multiply defined", n)
+            terminals[n] = 1
+
+    # Get the literals specifier
    def get_literals(self):
        # An absent "literals" declaration means no literal characters.
        self.literals = self.ldict.get("literals","")
+
+    # Validate literals
+    def validate_literals(self):
+        try:
+            for c in self.literals:
+                if not isinstance(c,StringTypes) or len(c) > 1:
+                    self.log.error("Invalid literal %s. Must be a single character", repr(c))
+                    self.error = 1
+                    continue
+
+        except TypeError:
+            self.log.error("Invalid literals specification. literals must be a sequence of characters")
+            self.error = 1
+
+    def get_states(self):
+        self.states = self.ldict.get("states",None)
+        # Build statemap
+        if self.states:
+             if not isinstance(self.states,(tuple,list)):
+                  self.log.error("states must be defined as a tuple or list")
+                  self.error = 1
+             else:
+                  for s in self.states:
+                        if not isinstance(s,tuple) or len(s) != 2:
+                               self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
+                               self.error = 1
+                               continue
+                        name, statetype = s
+                        if not isinstance(name,StringTypes):
+                               self.log.error("State name %s must be a string", repr(name))
+                               self.error = 1
+                               continue
+                        if not (statetype == 'inclusive' or statetype == 'exclusive'):
+                               self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
+                               self.error = 1
+                               continue
+                        if name in self.stateinfo:
+                               self.log.error("State '%s' already defined",name)
+                               self.error = 1
+                               continue
+                        self.stateinfo[name] = statetype
+
+    # Get all of the symbols with a t_ prefix and sort them into various
+    # categories (functions, strings, error functions, and ignore characters)
+
    def get_rules(self):
        # Collect every t_* symbol and sort it into function rules, string
        # rules, error handlers, and ignore strings, bucketed per state.
        tsymbols = [f for f in self.ldict if f[:2] == 't_' ]

        # Now build up a list of functions and a list of strings

        self.toknames = { }        # Mapping of symbols to token names
        self.funcsym =  { }        # Symbols defined as functions
        self.strsym =   { }        # Symbols defined as strings
        self.ignore   = { }        # Ignore strings by state
        self.errorf   = { }        # Error functions by state

        for s in self.stateinfo:
             self.funcsym[s] = []
             self.strsym[s] = []

        if len(tsymbols) == 0:
            self.log.error("No rules of the form t_rulename are defined")
            self.error = 1
            return

        for f in tsymbols:
            t = self.ldict[f]
            # _statetoken splits e.g. "t_foo_bar_SPAM" into its state
            # prefix and token name (see the helper above).
            states, tokname = _statetoken(f,self.stateinfo)
            self.toknames[f] = tokname

            if hasattr(t,"__call__"):
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'ignore':
                    # t_ignore must be a plain string, not a function.
                    line = func_code(t).co_firstlineno
                    file = func_code(t).co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
                    self.error = 1
                else:
                    for s in states:
                        self.funcsym[s].append((f,t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if "\\" in t:
                        self.log.warning("%s contains a literal backslash '\\'",f)

                elif tokname == 'error':
                    # t_error must be a function, not a string.
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = 1
                else:
                    for s in states:
                        self.strsym[s].append((f,t))
            else:
                self.log.error("%s not defined as a function or string", f)
                self.error = 1

        # Sort the functions by line number (source order decides rule
        # precedence for function rules)
        for f in self.funcsym.values():
            if sys.version_info[0] < 3:
                f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
            else:
                # Python 3.0
                f.sort(key=lambda x: func_code(x[1]).co_firstlineno)

        # Sort the strings by regular expression length, longest first,
        # so longer literals win over their prefixes
        for s in self.strsym.values():
            if sys.version_info[0] < 3:
                s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
            else:
                # Python 3.0
                s.sort(key=lambda x: len(x[1]),reverse=True)
+
+    # Validate all of the t_rules collected 
+    def validate_rules(self):
+        for state in self.stateinfo:
+            # Validate all rules defined by functions
+
+            
+
+            for fname, f in self.funcsym[state]:
+                line = func_code(f).co_firstlineno
+                file = func_code(f).co_filename
+                self.files[file] = 1
+
+                tokname = self.toknames[fname]
+                if isinstance(f, types.MethodType):
+                    reqargs = 2
+                else:
+                    reqargs = 1
+                nargs = func_code(f).co_argcount
+                if nargs > reqargs:
+                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
+                    self.error = 1
+                    continue
+
+                if nargs < reqargs:
+                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
+                    self.error = 1
+                    continue
+
+                if not f.__doc__:
+                    self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
+                    self.error = 1
+                    continue
+
+                try:
+                    c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
+                    if c.match(""):
+                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
+                        self.error = 1
+                except re.error:
+                    _etype, e, _etrace = sys.exc_info()
+                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
+                    if '#' in f.__doc__:
+                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
+                    self.error = 1
+
+            # Validate all rules defined by strings
+            for name,r in self.strsym[state]:
+                tokname = self.toknames[name]
+                if tokname == 'error':
+                    self.log.error("Rule '%s' must be defined as a function", name)
+                    self.error = 1
+                    continue
+
+                if not tokname in self.tokens and tokname.find("ignore_") < 0:
+                    self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
+                    self.error = 1
+                    continue
+
+                try:
+                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
+                    if (c.match("")):
+                         self.log.error("Regular expression for rule '%s' matches empty string",name)
+                         self.error = 1
+                except re.error:
+                    _etype, e, _etrace = sys.exc_info()
+                    self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
+                    if '#' in r:
+                         self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
+                    self.error = 1
+
+            if not self.funcsym[state] and not self.strsym[state]:
+                self.log.error("No rules defined for state '%s'",state)
+                self.error = 1
+
+            # Validate the error function
+            efunc = self.errorf.get(state,None)
+            if efunc:
+                f = efunc
+                line = func_code(f).co_firstlineno
+                file = func_code(f).co_filename
+                self.files[file] = 1
+
+                if isinstance(f, types.MethodType):
+                    reqargs = 2
+                else:
+                    reqargs = 1
+                nargs = func_code(f).co_argcount
+                if nargs > reqargs:
+                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
+                    self.error = 1
+
+                if nargs < reqargs:
+                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
+                    self.error = 1
+
+        for f in self.files:
+            self.validate_file(f)
+
+
+    # -----------------------------------------------------------------------------
+    # validate_file()
+    #
+    # This checks to see if there are duplicated t_rulename() functions or strings
+    # in the parser input file.  This is done using a simple regular expression
+    # match on each line in the given file.  
+    # -----------------------------------------------------------------------------
+
+    def validate_file(self,filename):
+        import os.path
+        base,ext = os.path.splitext(filename)
+        if ext != '.py': return         # No idea what the file is. Return OK
+
+        try:
+            f = open(filename)
+            lines = f.readlines()
+            f.close()
+        except IOError:
+            return                      # Couldn't find the file.  Don't worry about it
+
+        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
+        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
+
+        counthash = { }
+        linen = 1
+        for l in lines:
+            m = fre.match(l)
+            if not m:
+                m = sre.match(l)
+            if m:
+                name = m.group(1)
+                prev = counthash.get(name)
+                if not prev:
+                    counthash[name] = linen
+                else:
+                    self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
+                    self.error = 1
+            linen += 1
+            
+# -----------------------------------------------------------------------------
+# lex(module)
+#
+# Build all of the regular expression rules from definitions in the supplied module
+# -----------------------------------------------------------------------------
+def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
+    global lexer
+    ldict = None
+    stateinfo  = { 'INITIAL' : 'inclusive'}
+    lexobj = Lexer()
+    lexobj.lexoptimize = optimize
+    global token,input
+
+    if errorlog is None:
+        errorlog = PlyLogger(sys.stderr)
+
+    if debug:
+        if debuglog is None:
+            debuglog = PlyLogger(sys.stderr)
+
+    # Get the module dictionary used for the lexer
+    if object: module = object
+
+    if module:
+        _items = [(k,getattr(module,k)) for k in dir(module)]
+        ldict = dict(_items)
+    else:
+        ldict = get_caller_module_dict(2)
+
+    # Collect parser information from the dictionary
+    linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
+    linfo.get_all()
+    if not optimize:
+        if linfo.validate_all():
+            raise SyntaxError("Can't build lexer")
+
+    if optimize and lextab:
+        try:
+            lexobj.readtab(lextab,ldict)
+            token = lexobj.token
+            input = lexobj.input
+            lexer = lexobj
+            return lexobj
+
+        except ImportError:
+            pass
+
+    # Dump some basic debugging information
+    if debug:
+        debuglog.info("lex: tokens   = %r", linfo.tokens)
+        debuglog.info("lex: literals = %r", linfo.literals)
+        debuglog.info("lex: states   = %r", linfo.stateinfo)
+
+    # Build a dictionary of valid token names
+    lexobj.lextokens = { }
+    for n in linfo.tokens:
+        lexobj.lextokens[n] = 1
+
+    # Get literals specification
+    if isinstance(linfo.literals,(list,tuple)):
+        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
+    else:
+        lexobj.lexliterals = linfo.literals
+
+    # Get the stateinfo dictionary
+    stateinfo = linfo.stateinfo
+
+    regexs = { }
+    # Build the master regular expressions
+    for state in stateinfo:
+        regex_list = []
+
+        # Add rules defined by functions first
+        for fname, f in linfo.funcsym[state]:
+            line = func_code(f).co_firstlineno
+            file = func_code(f).co_filename
+            regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
+            if debug:
+                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
+
+        # Now add all of the simple rules
+        for name,r in linfo.strsym[state]:
+            regex_list.append("(?P<%s>%s)" % (name,r))
+            if debug:
+                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
+
+        regexs[state] = regex_list
+
+    # Build the master regular expressions
+
+    if debug:
+        debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
+
+    for state in regexs:
+        lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
+        lexobj.lexstatere[state] = lexre
+        lexobj.lexstateretext[state] = re_text
+        lexobj.lexstaterenames[state] = re_names
+        if debug:
+            for i in range(len(re_text)):
+                debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
+
+    # For inclusive states, we need to add the regular expressions from the INITIAL state
+    for state,stype in stateinfo.items():
+        if state != "INITIAL" and stype == 'inclusive':
+             lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
+             lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
+             lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
+
+    lexobj.lexstateinfo = stateinfo
+    lexobj.lexre = lexobj.lexstatere["INITIAL"]
+    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
+    lexobj.lexreflags = reflags
+
+    # Set up ignore variables
+    lexobj.lexstateignore = linfo.ignore
+    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
+
+    # Set up error functions
+    lexobj.lexstateerrorf = linfo.errorf
+    lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
+    if not lexobj.lexerrorf:
+        errorlog.warning("No t_error rule is defined")
+
+    # Check state information for ignore and error rules
+    for s,stype in stateinfo.items():
+        if stype == 'exclusive':
+              if not s in linfo.errorf:
+                   errorlog.warning("No error rule is defined for exclusive state '%s'", s)
+              if not s in linfo.ignore and lexobj.lexignore:
+                   errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
+        elif stype == 'inclusive':
+              if not s in linfo.errorf:
+                   linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
+              if not s in linfo.ignore:
+                   linfo.ignore[s] = linfo.ignore.get("INITIAL","")
+
+    # Create global versions of the token() and input() functions
+    token = lexobj.token
+    input = lexobj.input
+    lexer = lexobj
+
+    # If in optimize mode, we write the lextab
+    if lextab and optimize:
+        lexobj.writetab(lextab,outputdir)
+
+    return lexobj
+
+# -----------------------------------------------------------------------------
+# runmain()
+#
+# This runs the lexer as a main program
+# -----------------------------------------------------------------------------
+
+def runmain(lexer=None,data=None):
+    if not data:
+        try:
+            filename = sys.argv[1]
+            f = open(filename)
+            data = f.read()
+            f.close()
+        except IndexError:
+            sys.stdout.write("Reading from standard input (type EOF to end):\n")
+            data = sys.stdin.read()
+
+    if lexer:
+        _input = lexer.input
+    else:
+        _input = input
+    _input(data)
+    if lexer:
+        _token = lexer.token
+    else:
+        _token = token
+
+    while 1:
+        tok = _token()
+        if not tok: break
+        sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
+
+# -----------------------------------------------------------------------------
+# @TOKEN(regex)
+#
+# This decorator function can be used to set the regular expression on a function
+# when its docstring might need to be set in an alternative way
+# -----------------------------------------------------------------------------
+
+def TOKEN(r):
+    def set_doc(f):
+        if hasattr(r,"__call__"):
+            f.__doc__ = r.__doc__
+        else:
+            f.__doc__ = r
+        return f
+    return set_doc
+
+# Alternative spelling of the TOKEN decorator
+Token = TOKEN
+
new file mode 100644
--- /dev/null
+++ b/dom/bindings/parser/ply/ply/yacc.py
@@ -0,0 +1,3276 @@
+# -----------------------------------------------------------------------------
+# ply: yacc.py
+#
+# Copyright (C) 2001-2009,
+# David M. Beazley (Dabeaz LLC)
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.  
+# * Redistributions in binary form must reproduce the above copyright notice, 
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.  
+# * Neither the name of the David Beazley or Dabeaz LLC may be used to
+#   endorse or promote products derived from this software without
+#  specific prior written permission. 
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# -----------------------------------------------------------------------------
+#
+# This implements an LR parser that is constructed from grammar rules defined
+# as Python functions. The grammar is specified by supplying the BNF inside
+# Python documentation strings.  The inspiration for this technique was borrowed
+# from John Aycock's Spark parsing system.  PLY might be viewed as a cross between
+# Spark and the GNU bison utility.
+#
+# The current implementation is only somewhat object-oriented. The
+# LR parser itself is defined in terms of an object (which allows multiple
+# parsers to co-exist).  However, most of the variables used during table
+# construction are defined in terms of global variables.  Users shouldn't
+# notice unless they are trying to define multiple parsers at the same
+# time using threads (in which case they should have their head examined).
+#
+# This implementation supports both SLR and LALR(1) parsing.  LALR(1)
+# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
+# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
+# Techniques, and Tools" (The Dragon Book).  LALR(1) has since been replaced
+# by the more efficient DeRemer and Pennello algorithm.
+#
+# :::::::: WARNING :::::::
+#
+# Construction of LR parsing tables is fairly complicated and expensive.
+# To make this module run fast, a *LOT* of work has been put into
+# optimization---often at the expense of readability and what might be
+# considered good Python "coding style."   Modify the code at your
+# own risk!
+# ----------------------------------------------------------------------------
+
+__version__    = "3.3"
+__tabversion__ = "3.2"       # Table version
+
+#-----------------------------------------------------------------------------
+#                     === User configurable parameters ===
+#
+# Change these to modify the default behavior of yacc (if you wish)
+#-----------------------------------------------------------------------------
+
+yaccdebug   = 1                # Debugging mode.  If set, yacc generates a
+                               # 'parser.out' file in the current directory
+
+debug_file  = 'parser.out'     # Default name of the debugging file
+tab_module  = 'parsetab'       # Default name of the table module
+default_lr  = 'LALR'           # Default LR table generation method
+
+error_count = 3                # Number of symbols that must be shifted to leave recovery mode
+
+yaccdevel   = 0                # Set to True if developing yacc.  This turns off optimized
+                               # implementations of certain functions.
+
+resultlimit = 40               # Size limit of results when running in debug mode.
+
+pickle_protocol = 0            # Protocol to use when writing pickle files
+
+import re, types, sys, os.path
+
+# Compatibility function for python 2.6/3.0
+if sys.version_info[0] < 3:
+    def func_code(f):
+        return f.func_code
+else:
+    def func_code(f):
+        return f.__code__
+
+# Compatibility
+try:
+    MAXINT = sys.maxint
+except AttributeError:
+    MAXINT = sys.maxsize
+
+# Python 2.x/3.0 compatibility.
+def load_ply_lex():
+    if sys.version_info[0] < 3:
+        import lex
+    else:
+        import ply.lex as lex
+    return lex
+
+# This object is a stand-in for a logging object created by the 
+# logging module.   PLY will use this by default to create things
+# such as the parser.out file.  If a user wants more detailed
+# information, they can create their own logging object and pass
+# it into PLY.
+
+class PlyLogger(object):
+    def __init__(self,f):
+        self.f = f
+    def debug(self,msg,*args,**kwargs):
+        self.f.write((msg % args) + "\n")
+    info     = debug
+
+    def warning(self,msg,*args,**kwargs):
+        self.f.write("WARNING: "+ (msg % args) + "\n")
+
+    def error(self,msg,*args,**kwargs):
+        self.f.write("ERROR: " + (msg % args) + "\n")
+
+    critical = debug
+
+# Null logger is used when no output is generated. Does nothing.
+class NullLogger(object):
+    def __getattribute__(self,name):
+        return self
+    def __call__(self,*args,**kwargs):
+        return self
+        
+# Exception raised for yacc-related errors
+class YaccError(Exception):   pass
+
+# Format the result message that the parser produces when running in debug mode.
+def format_result(r):
+    repr_str = repr(r)
+    if '\n' in repr_str: repr_str = repr(repr_str)
+    if len(repr_str) > resultlimit:
+        repr_str = repr_str[:resultlimit]+" ..."
+    result = "<%s @ 0x%x> (%s)" % (type(r).__name__,id(r),repr_str)
+    return result
+
+
+# Format stack entries when the parser is running in debug mode
+def format_stack_entry(r):
+    repr_str = repr(r)
+    if '\n' in repr_str: repr_str = repr(repr_str)
+    if len(repr_str) < 16:
+        return repr_str
+    else:
+        return "<%s @ 0x%x>" % (type(r).__name__,id(r))
+
+#-----------------------------------------------------------------------------
+#                        ===  LR Parsing Engine ===
+#
+# The following classes are used for the LR parser itself.  These are not
+# used during table construction and are independent of the actual LR
+# table generation algorithm
+#-----------------------------------------------------------------------------
+
+# This class is used to hold non-terminal grammar symbols during parsing.
+# It normally has the following attributes set:
+#        .type       = Grammar symbol type
+#        .value      = Symbol value
+#        .lineno     = Starting line number
+#        .endlineno  = Ending line number (optional, set automatically)
+#        .lexpos     = Starting lex position
+#        .endlexpos  = Ending lex position (optional, set automatically)
+
+class YaccSymbol:
+    def __str__(self):    return self.type
+    def __repr__(self):   return str(self)
+
+# This class is a wrapper around the objects actually passed to each
+# grammar rule.   Index lookup and assignment actually assign the
+# .value attribute of the underlying YaccSymbol object.
+# The lineno() method returns the line number of a given
+# item (or 0 if not defined).   The linespan() method returns
+# a tuple of (startline,endline) representing the range of lines
+# for a symbol.  The lexspan() method returns a tuple (lexpos,endlexpos)
+# representing the range of positional information for a symbol.
+
+class YaccProduction:
+    def __init__(self,s,stack=None):
+        self.slice = s
+        self.stack = stack
+        self.lexer = None
+        self.parser= None
+    def __getitem__(self,n):
+        if n >= 0: return self.slice[n].value
+        else: return self.stack[n].value
+
+    def __setitem__(self,n,v):
+        self.slice[n].value = v
+
+    def __getslice__(self,i,j):
+        return [s.value for s in self.slice[i:j]]
+
+    def __len__(self):
+        return len(self.slice)
+
+    def lineno(self,n):
+        return getattr(self.slice[n],"lineno",0)
+
+    def set_lineno(self,n,lineno):
+        self.slice[n].lineno = lineno
+
+    def linespan(self,n):
+        startline = getattr(self.slice[n],"lineno",0)
+        endline = getattr(self.slice[n],"endlineno",startline)
+        return startline,endline
+
+    def lexpos(self,n):
+        return getattr(self.slice[n],"lexpos",0)
+
+    def lexspan(self,n):
+        startpos = getattr(self.slice[n],"lexpos",0)
+        endpos = getattr(self.slice[n],"endlexpos",startpos)
+        return startpos,endpos
+
+    def error(self):
+       raise SyntaxError
+
+
+# -----------------------------------------------------------------------------
+#                               == LRParser ==
+#
+# The LR Parsing engine.
+# -----------------------------------------------------------------------------
+
+class LRParser:
+    def __init__(self,lrtab,errorf):
+        self.productions = lrtab.lr_productions
+        self.action      = lrtab.lr_action
+        self.goto        = lrtab.lr_goto
+        self.errorfunc   = errorf
+
+    def errok(self):
+        self.errorok     = 1
+
+    def restart(self):
+        del self.statestack[:]
+        del self.symstack[:]
+        sym = YaccSymbol()
+        sym.type = '$end'
+        self.symstack.append(sym)
+        self.statestack.append(0)
+
+    def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
+        if debug or yaccdevel:
+            if isinstance(debug,int):
+                debug = PlyLogger(sys.stderr)
+            return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
+        elif tracking:
+            return self.parseopt(input,lexer,debug,tracking,tokenfunc)
+        else:
+            return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
+        
+
+    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    # parsedebug().
+    #
+    # This is the debugging enabled version of parse().  All changes made to the
+    # parsing engine should be made here.   For the non-debugging version,
+    # copy this code to a method parseopt() and delete all of the sections
+    # enclosed in:
+    #
+    #      #--! DEBUG
+    #      statements
+    #      #--! DEBUG
+    #
+    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+    def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
+        lookahead = None                 # Current lookahead symbol
+        lookaheadstack = [ ]             # Stack of lookahead symbols
+        actions = self.action            # Local reference to action table (to avoid lookup on self.)
+        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
+        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
+        pslice  = YaccProduction(None)   # Production object passed to grammar rules
+        errorcount = 0                   # Used during error recovery 
+
+        # --! DEBUG
+        debug.info("PLY: PARSE DEBUG START")
+        # --! DEBUG
+
+        # If no lexer was given, we will try to use the lex module
+        if not lexer:
+            lex = load_ply_lex()
+            lexer = lex.lexer
+
+        # Set up the lexer and parser objects on pslice
+        pslice.lexer = lexer
+        pslice.parser = self
+
+        # If input was supplied, pass to lexer
+        if input is not None:
+            lexer.input(input)
+
+        if tokenfunc is None:
+           # Tokenize function
+           get_token = lexer.token
+        else:
+           get_token = tokenfunc
+
+        # Set up the state and symbol stacks
+
+        statestack = [ ]                # Stack of parsing states
+        self.statestack = statestack
+        symstack   = [ ]                # Stack of grammar symbols
+        self.symstack = symstack
+
+        pslice.stack = symstack         # Put in the production
+        errtoken   = None               # Err token
+
+        # The start state is assumed to be (0,$end)
+
+        statestack.append(0)
+        sym = YaccSymbol()
+        sym.type = "$end"
+        symstack.append(sym)
+        state = 0
+        while 1:
+            # Get the next symbol on the input.  If a lookahead symbol
+            # is already set, we just use that. Otherwise, we'll pull
+            # the next token off of the lookaheadstack or from the lexer
+
+            # --! DEBUG
+            debug.debug('')
+            debug.debug('State  : %s', state)
+            # --! DEBUG
+
+            if not lookahead:
+                if not lookaheadstack:
+                    lookahead = get_token()     # Get the next token
+                else:
+                    lookahead = lookaheadstack.pop()
+                if not lookahead:
+                    lookahead = YaccSymbol()
+                    lookahead.type = "$end"
+
+            # --! DEBUG
+            debug.debug('Stack  : %s',
+                        ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
+            # --! DEBUG
+
+            # Check the action table
+            ltype = lookahead.type
+            t = actions[state].get(ltype)
+
+            if t is not None:
+                if t > 0:
+                    # shift a symbol on the stack
+                    statestack.append(t)
+                    state = t
+                    
+                    # --! DEBUG
+                    debug.debug("Action : Shift and goto state %s", t)
+                    # --! DEBUG
+
+                    symstack.append(lookahead)
+                    lookahead = None
+
+                    # Decrease error count on successful shift
+                    if errorcount: errorcount -=1
+                    continue
+
+                if t < 0:
+                    # reduce a symbol on the stack, emit a production
+                    p = prod[-t]
+                    pname = p.name
+                    plen  = p.len
+
+                    # Get production function
+                    sym = YaccSymbol()
+                    sym.type = pname       # Production name
+                    sym.value = None
+
+                    # --! DEBUG
+                    if plen:
+                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
+                    else:
+                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
+                        
+                    # --! DEBUG
+
+                    if plen:
+                        targ = symstack[-plen-1:]
+                        targ[0] = sym
+
+                        # --! TRACKING
+                        if tracking:
+                           t1 = targ[1]
+                           sym.lineno = t1.lineno
+                           sym.lexpos = t1.lexpos
+                           t1 = targ[-1]
+                           sym.endlineno = getattr(t1,"endlineno",t1.lineno)
+                           sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
+
+                        # --! TRACKING
+
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+                        # The code enclosed in this section is duplicated 
+                        # below as a performance optimization.  Make sure
+                        # changes get made in both locations.
+
+                        pslice.slice = targ
+                        
+                        try:
+                            # Call the grammar rule with our special slice object
+                            del symstack[-plen:]
+                            del statestack[-plen:]
+                            p.callable(pslice)
+                            # --! DEBUG
+                            debug.info("Result : %s", format_result(pslice[0]))
+                            # --! DEBUG
+                            symstack.append(sym)
+                            state = goto[statestack[-1]][pname]
+                            statestack.append(state)
+                        except SyntaxError:
+                            # If an error was set. Enter error recovery state
+                            lookaheadstack.append(lookahead)
+                            symstack.pop()
+                            statestack.pop()
+                            state = statestack[-1]
+                            sym.type = 'error'
+                            lookahead = sym
+                            errorcount = error_count
+                            self.errorok = 0
+                        continue
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    
+                    else:
+
+                        # --! TRACKING
+                        if tracking:
+                           sym.lineno = lexer.lineno
+                           sym.lexpos = lexer.lexpos
+                        # --! TRACKING
+
+                        targ = [ sym ]
+
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+                        # The code enclosed in this section is duplicated 
+                        # above as a performance optimization.  Make sure
+                        # changes get made in both locations.
+
+                        pslice.slice = targ
+
+                        try:
+                            # Call the grammar rule with our special slice object
+                            p.callable(pslice)
+                            # --! DEBUG
+                            debug.info("Result : %s", format_result(pslice[0]))
+                            # --! DEBUG
+                            symstack.append(sym)
+                            state = goto[statestack[-1]][pname]
+                            statestack.append(state)
+                        except SyntaxError:
+                            # If an error was set. Enter error recovery state
+                            lookaheadstack.append(lookahead)
+                            symstack.pop()
+                            statestack.pop()
+                            state = statestack[-1]
+                            sym.type = 'error'
+                            lookahead = sym
+                            errorcount = error_count
+                            self.errorok = 0
+                        continue
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+                if t == 0:
+                    n = symstack[-1]
+                    result = getattr(n,"value",None)
+                    # --! DEBUG
+                    debug.info("Done   : Returning %s", format_result(result))
+                    debug.info("PLY: PARSE DEBUG END")
+                    # --! DEBUG
+                    return result
+
+            if t == None:
+
+                # --! DEBUG
+                debug.error('Error  : %s',
+                            ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
+                # --! DEBUG
+
+                # We have some kind of parsing error here.  To handle
+                # this, we are going to push the current token onto
+                # the tokenstack and replace it with an 'error' token.
+                # If there are any synchronization rules, they may
+                # catch it.
+                #
+                # In addition to pushing the error token, we call call
+                # the user defined p_error() function if this is the
+                # first syntax error.  This function is only called if
+                # errorcount == 0.
+                if errorcount == 0 or self.errorok:
+                    errorcount = error_count
+                    self.errorok = 0
+                    errtoken = lookahead
+                    if errtoken.type == "$end":
+                        errtoken = None               # End of file!
+                    if self.errorfunc:
+                        global errok,token,restart
+                        errok = self.errok        # Set some special functions available in error recovery
+                        token = get_token
+                        restart = self.restart
+                        if errtoken and not hasattr(errtoken,'lexer'):
+                            errtoken.lexer = lexer
+                        tok = self.errorfunc(errtoken)
+                        del errok, token, restart   # Delete special functions
+
+                        if self.errorok:
+                            # User must have done some kind of panic
+                            # mode recovery on their own.  The
+                            # returned token is the next lookahead
+                            lookahead = tok
+                            errtoken = None
+                            continue
+                    else:
+                        if errtoken:
+                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
+                            else: lineno = 0
+                            if lineno:
+                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
+                            else:
+                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
+                        else:
+                            sys.stderr.write("yacc: Parse error in input. EOF\n")
+                            return
+
+                else:
+                    errorcount = error_count
+
+                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
+                # entire parse has been rolled back and we're completely hosed.   The token is
+                # discarded and we just keep going.
+
+                if len(statestack) <= 1 and lookahead.type != "$end":
+                    lookahead = None
+                    errtoken = None
+                    state = 0
+                    # Nuke the pushback stack
+                    del lookaheadstack[:]
+                    continue
+
+                # case 2: the statestack has a couple of entries on it, but we're
+                # at the end of the file. nuke the top entry and generate an error token
+
+                # Start nuking entries on the stack
+                if lookahead.type == "$end":
+                    # Whoa. We're really hosed here. Bail out
+                    return
+
+                if lookahead.type != 'error':
+                    sym = symstack[-1]
+                    if sym.type == 'error':
+                        # Hmmm. Error is on top of stack, we'll just nuke input
+                        # symbol and continue
+                        lookahead = None
+                        continue
+                    t = YaccSymbol()
+                    t.type = 'error'
+                    if hasattr(lookahead,"lineno"):
+                        t.lineno = lookahead.lineno
+                    t.value = lookahead
+                    lookaheadstack.append(lookahead)
+                    lookahead = t
+                else:
+                    symstack.pop()
+                    statestack.pop()
+                    state = statestack[-1]       # Potential bug fix
+
+                continue
+
+            # Call an error function here
+            raise RuntimeError("yacc: internal parser error!!!\n")
+
+    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    # parseopt().
+    #
+    # Optimized version of parse() method.  DO NOT EDIT THIS CODE DIRECTLY.
+    # Edit the debug version above, then copy any modifications to the method
+    # below while removing #--! DEBUG sections.
+    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+
+    def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
+        """Run the LR parse loop and return the start symbol's value.
+
+        Optimized twin of the debug parse method above: behaviorally
+        identical, but with the #--! DEBUG instrumentation removed.
+        Per the banner above, do not edit this copy directly; change the
+        debug version first and mirror the change here.
+        """
+        lookahead = None                 # Current lookahead symbol
+        lookaheadstack = [ ]             # Stack of lookahead symbols
+        actions = self.action            # Local reference to action table (to avoid lookup on self.)
+        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
+        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
+        pslice  = YaccProduction(None)   # Production object passed to grammar rules
+        errorcount = 0                   # Used during error recovery 
+
+        # If no lexer was given, we will try to use the lex module
+        if not lexer:
+            lex = load_ply_lex()
+            lexer = lex.lexer
+        
+        # Set up the lexer and parser objects on pslice
+        pslice.lexer = lexer
+        pslice.parser = self
+
+        # If input was supplied, pass to lexer
+        if input is not None:
+            lexer.input(input)
+
+        if tokenfunc is None:
+           # Tokenize function
+           get_token = lexer.token
+        else:
+           get_token = tokenfunc
+
+        # Set up the state and symbol stacks
+
+        statestack = [ ]                # Stack of parsing states
+        self.statestack = statestack
+        symstack   = [ ]                # Stack of grammar symbols
+        self.symstack = symstack
+
+        pslice.stack = symstack         # Put in the production
+        errtoken   = None               # Err token
+
+        # The start state is assumed to be (0,$end)
+
+        statestack.append(0)
+        sym = YaccSymbol()
+        sym.type = '$end'
+        symstack.append(sym)
+        state = 0
+        while 1:
+            # Get the next symbol on the input.  If a lookahead symbol
+            # is already set, we just use that. Otherwise, we'll pull
+            # the next token off of the lookaheadstack or from the lexer
+
+            if not lookahead:
+                if not lookaheadstack:
+                    lookahead = get_token()     # Get the next token
+                else:
+                    lookahead = lookaheadstack.pop()
+                if not lookahead:
+                    lookahead = YaccSymbol()
+                    lookahead.type = '$end'
+
+            # Check the action table
+            ltype = lookahead.type
+            t = actions[state].get(ltype)
+
+            if t is not None:
+                if t > 0:
+                    # shift a symbol on the stack
+                    statestack.append(t)
+                    state = t
+
+                    symstack.append(lookahead)
+                    lookahead = None
+
+                    # Decrease error count on successful shift
+                    if errorcount: errorcount -=1
+                    continue
+
+                if t < 0:
+                    # reduce a symbol on the stack, emit a production
+                    p = prod[-t]
+                    pname = p.name
+                    plen  = p.len
+
+                    # Get production function
+                    sym = YaccSymbol()
+                    sym.type = pname       # Production name
+                    sym.value = None
+
+                    if plen:
+                        targ = symstack[-plen-1:]
+                        targ[0] = sym
+
+                        # --! TRACKING
+                        if tracking:
+                           t1 = targ[1]
+                           sym.lineno = t1.lineno
+                           sym.lexpos = t1.lexpos
+                           t1 = targ[-1]
+                           sym.endlineno = getattr(t1,"endlineno",t1.lineno)
+                           sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
+
+                        # --! TRACKING
+
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+                        # The code enclosed in this section is duplicated 
+                        # below as a performance optimization.  Make sure
+                        # changes get made in both locations.
+
+                        pslice.slice = targ
+                        
+                        try:
+                            # Call the grammar rule with our special slice object
+                            del symstack[-plen:]
+                            del statestack[-plen:]
+                            p.callable(pslice)
+                            symstack.append(sym)
+                            state = goto[statestack[-1]][pname]
+                            statestack.append(state)
+                        except SyntaxError:
+                            # If an error was set. Enter error recovery state
+                            lookaheadstack.append(lookahead)
+                            symstack.pop()
+                            statestack.pop()
+                            state = statestack[-1]
+                            sym.type = 'error'
+                            lookahead = sym
+                            errorcount = error_count
+                            self.errorok = 0
+                        continue
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    
+                    else:
+
+                        # --! TRACKING
+                        if tracking:
+                           sym.lineno = lexer.lineno
+                           sym.lexpos = lexer.lexpos
+                        # --! TRACKING
+
+                        targ = [ sym ]
+
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+                        # The code enclosed in this section is duplicated 
+                        # above as a performance optimization.  Make sure
+                        # changes get made in both locations.
+
+                        pslice.slice = targ
+
+                        try:
+                            # Call the grammar rule with our special slice object
+                            p.callable(pslice)
+                            symstack.append(sym)
+                            state = goto[statestack[-1]][pname]
+                            statestack.append(state)
+                        except SyntaxError:
+                            # If an error was set. Enter error recovery state
+                            lookaheadstack.append(lookahead)
+                            symstack.pop()
+                            statestack.pop()
+                            state = statestack[-1]
+                            sym.type = 'error'
+                            lookahead = sym
+                            errorcount = error_count
+                            self.errorok = 0
+                        continue
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+                if t == 0:
+                    n = symstack[-1]
+                    return getattr(n,"value",None)
+
+            # No table entry for this lookahead in this state -> syntax error.
+            if t == None:
+
+                # We have some kind of parsing error here.  To handle
+                # this, we are going to push the current token onto
+                # the tokenstack and replace it with an 'error' token.
+                # If there are any synchronization rules, they may
+                # catch it.
+                #
+                # In addition to pushing the error token, we call
+                # the user defined p_error() function if this is the
+                # first syntax error.  This function is only called if
+                # errorcount == 0.
+                if errorcount == 0 or self.errorok:
+                    errorcount = error_count
+                    self.errorok = 0
+                    errtoken = lookahead
+                    if errtoken.type == '$end':
+                        errtoken = None               # End of file!
+                    if self.errorfunc:
+                        global errok,token,restart
+                        errok = self.errok        # Set some special functions available in error recovery
+                        token = get_token
+                        restart = self.restart
+                        if errtoken and not hasattr(errtoken,'lexer'):
+                            errtoken.lexer = lexer
+                        tok = self.errorfunc(errtoken)
+                        del errok, token, restart   # Delete special functions
+
+                        if self.errorok:
+                            # User must have done some kind of panic
+                            # mode recovery on their own.  The
+                            # returned token is the next lookahead
+                            lookahead = tok
+                            errtoken = None
+                            continue
+                    else:
+                        if errtoken:
+                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
+                            else: lineno = 0
+                            if lineno:
+                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
+                            else:
+                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
+                        else:
+                            sys.stderr.write("yacc: Parse error in input. EOF\n")
+                            return
+
+                else:
+                    errorcount = error_count
+
+                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
+                # entire parse has been rolled back and we're completely hosed.   The token is
+                # discarded and we just keep going.
+
+                if len(statestack) <= 1 and lookahead.type != '$end':
+                    lookahead = None
+                    errtoken = None
+                    state = 0
+                    # Nuke the pushback stack
+                    del lookaheadstack[:]
+                    continue
+
+                # case 2: the statestack has a couple of entries on it, but we're
+                # at the end of the file. nuke the top entry and generate an error token
+
+                # Start nuking entries on the stack
+                if lookahead.type == '$end':
+                    # Whoa. We're really hosed here. Bail out
+                    return
+
+                if lookahead.type != 'error':
+                    sym = symstack[-1]
+                    if sym.type == 'error':
+                        # Hmmm. Error is on top of stack, we'll just nuke input
+                        # symbol and continue
+                        lookahead = None
+                        continue
+                    t = YaccSymbol()
+                    t.type = 'error'
+                    if hasattr(lookahead,"lineno"):
+                        t.lineno = lookahead.lineno
+                    t.value = lookahead
+                    lookaheadstack.append(lookahead)
+                    lookahead = t
+                else:
+                    symstack.pop()
+                    statestack.pop()
+                    state = statestack[-1]       # Potential bug fix
+
+                continue
+
+            # Call an error function here
+            raise RuntimeError("yacc: internal parser error!!!\n")
+
+    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    # parseopt_notrack().
+    #
+    # Optimized version of parseopt() with line number tracking removed. 
+    # DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
+    # code in the #--! TRACKING sections
+    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+    def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
+        """Run the LR parse loop and return the start symbol's value.
+
+        Same as parseopt() but with the #--! TRACKING (line/position
+        bookkeeping) sections removed; the 'tracking' argument is kept
+        only for signature compatibility and is never read in this body.
+        Per the banner above, do not edit this copy directly.
+        """
+        lookahead = None                 # Current lookahead symbol
+        lookaheadstack = [ ]             # Stack of lookahead symbols
+        actions = self.action            # Local reference to action table (to avoid lookup on self.)
+        goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
+        prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
+        pslice  = YaccProduction(None)   # Production object passed to grammar rules
+        errorcount = 0                   # Used during error recovery 
+
+        # If no lexer was given, we will try to use the lex module
+        if not lexer:
+            lex = load_ply_lex()
+            lexer = lex.lexer
+        
+        # Set up the lexer and parser objects on pslice
+        pslice.lexer = lexer
+        pslice.parser = self
+
+        # If input was supplied, pass to lexer
+        if input is not None:
+            lexer.input(input)
+
+        if tokenfunc is None:
+           # Tokenize function
+           get_token = lexer.token
+        else:
+           get_token = tokenfunc
+
+        # Set up the state and symbol stacks
+
+        statestack = [ ]                # Stack of parsing states
+        self.statestack = statestack
+        symstack   = [ ]                # Stack of grammar symbols
+        self.symstack = symstack
+
+        pslice.stack = symstack         # Put in the production
+        errtoken   = None               # Err token
+
+        # The start state is assumed to be (0,$end)
+
+        statestack.append(0)
+        sym = YaccSymbol()
+        sym.type = '$end'
+        symstack.append(sym)
+        state = 0
+        while 1:
+            # Get the next symbol on the input.  If a lookahead symbol
+            # is already set, we just use that. Otherwise, we'll pull
+            # the next token off of the lookaheadstack or from the lexer
+
+            if not lookahead:
+                if not lookaheadstack:
+                    lookahead = get_token()     # Get the next token
+                else:
+                    lookahead = lookaheadstack.pop()
+                if not lookahead:
+                    lookahead = YaccSymbol()
+                    lookahead.type = '$end'
+
+            # Check the action table
+            ltype = lookahead.type
+            t = actions[state].get(ltype)
+
+            if t is not None:
+                if t > 0:
+                    # shift a symbol on the stack
+                    statestack.append(t)
+                    state = t
+
+                    symstack.append(lookahead)
+                    lookahead = None
+
+                    # Decrease error count on successful shift
+                    if errorcount: errorcount -=1
+                    continue
+
+                if t < 0:
+                    # reduce a symbol on the stack, emit a production
+                    p = prod[-t]
+                    pname = p.name
+                    plen  = p.len
+
+                    # Get production function
+                    sym = YaccSymbol()
+                    sym.type = pname       # Production name
+                    sym.value = None
+
+                    if plen:
+                        targ = symstack[-plen-1:]
+                        targ[0] = sym
+
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+                        # The code enclosed in this section is duplicated 
+                        # below as a performance optimization.  Make sure
+                        # changes get made in both locations.
+
+                        pslice.slice = targ
+                        
+                        try:
+                            # Call the grammar rule with our special slice object
+                            del symstack[-plen:]
+                            del statestack[-plen:]
+                            p.callable(pslice)
+                            symstack.append(sym)
+                            state = goto[statestack[-1]][pname]
+                            statestack.append(state)
+                        except SyntaxError:
+                            # If an error was set. Enter error recovery state
+                            lookaheadstack.append(lookahead)
+                            symstack.pop()
+                            statestack.pop()
+                            state = statestack[-1]
+                            sym.type = 'error'
+                            lookahead = sym
+                            errorcount = error_count
+                            self.errorok = 0
+                        continue
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    
+                    else:
+
+                        targ = [ sym ]
+
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+                        # The code enclosed in this section is duplicated 
+                        # above as a performance optimization.  Make sure
+                        # changes get made in both locations.
+
+                        pslice.slice = targ
+
+                        try:
+                            # Call the grammar rule with our special slice object
+                            p.callable(pslice)
+                            symstack.append(sym)
+                            state = goto[statestack[-1]][pname]
+                            statestack.append(state)
+                        except SyntaxError:
+                            # If an error was set. Enter error recovery state
+                            lookaheadstack.append(lookahead)
+                            symstack.pop()
+                            statestack.pop()
+                            state = statestack[-1]
+                            sym.type = 'error'
+                            lookahead = sym
+                            errorcount = error_count
+                            self.errorok = 0
+                        continue
+                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+                if t == 0:
+                    n = symstack[-1]
+                    return getattr(n,"value",None)
+
+            # No table entry for this lookahead in this state -> syntax error.
+            if t == None:
+
+                # We have some kind of parsing error here.  To handle
+                # this, we are going to push the current token onto
+                # the tokenstack and replace it with an 'error' token.
+                # If there are any synchronization rules, they may
+                # catch it.
+                #
+                # In addition to pushing the error token, we call
+                # the user defined p_error() function if this is the
+                # first syntax error.  This function is only called if
+                # errorcount == 0.
+                if errorcount == 0 or self.errorok:
+                    errorcount = error_count
+                    self.errorok = 0
+                    errtoken = lookahead
+                    if errtoken.type == '$end':
+                        errtoken = None               # End of file!
+                    if self.errorfunc:
+                        global errok,token,restart
+                        errok = self.errok        # Set some special functions available in error recovery
+                        token = get_token
+                        restart = self.restart
+                        if errtoken and not hasattr(errtoken,'lexer'):
+                            errtoken.lexer = lexer
+                        tok = self.errorfunc(errtoken)
+                        del errok, token, restart   # Delete special functions
+
+                        if self.errorok:
+                            # User must have done some kind of panic
+                            # mode recovery on their own.  The
+                            # returned token is the next lookahead
+                            lookahead = tok
+                            errtoken = None
+                            continue
+                    else:
+                        if errtoken:
+                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
+                            else: lineno = 0
+                            if lineno:
+                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
+                            else:
+                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
+                        else:
+                            sys.stderr.write("yacc: Parse error in input. EOF\n")
+                            return
+
+                else:
+                    errorcount = error_count
+
+                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
+                # entire parse has been rolled back and we're completely hosed.   The token is
+                # discarded and we just keep going.
+
+                if len(statestack) <= 1 and lookahead.type != '$end':
+                    lookahead = None
+                    errtoken = None
+                    state = 0
+                    # Nuke the pushback stack
+                    del lookaheadstack[:]
+                    continue
+
+                # case 2: the statestack has a couple of entries on it, but we're
+                # at the end of the file. nuke the top entry and generate an error token
+
+                # Start nuking entries on the stack
+                if lookahead.type == '$end':
+                    # Whoa. We're really hosed here. Bail out
+                    return
+
+                if lookahead.type != 'error':
+                    sym = symstack[-1]
+                    if sym.type == 'error':
+                        # Hmmm. Error is on top of stack, we'll just nuke input
+                        # symbol and continue
+                        lookahead = None
+                        continue
+                    t = YaccSymbol()
+                    t.type = 'error'
+                    if hasattr(lookahead,"lineno"):
+                        t.lineno = lookahead.lineno
+                    t.value = lookahead
+                    lookaheadstack.append(lookahead)
+                    lookahead = t
+                else:
+                    symstack.pop()
+                    statestack.pop()
+                    state = statestack[-1]       # Potential bug fix
+
+                continue
+
+            # Call an error function here
+            raise RuntimeError("yacc: internal parser error!!!\n")
+
+# -----------------------------------------------------------------------------
+#                          === Grammar Representation ===
+#
+# The following functions, classes, and variables are used to represent and
+# manipulate the rules that make up a grammar. 
+# -----------------------------------------------------------------------------
+
+import re
+
+# regex matching identifiers
+# NOTE(review): this also accepts names that start with a digit and names
+# containing '-', i.e. it is broader than Python identifiers -- presumably
+# intentional for grammar symbol names; confirm against call sites.
+_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
+
+# -----------------------------------------------------------------------------
+# class Production:
+#
+# This class stores the raw information about a single production or grammar rule.
+# A grammar rule refers to a specification such as this:
+#
+#       expr : expr PLUS term 
+#
+# Here are the basic attributes defined on all productions
+#
+#       name     - Name of the production.  For example 'expr'
+#       prod     - A list of symbols on the right side ['expr','PLUS','term']
+#       prec     - Production precedence level
+#       number   - Production number.
+#       func     - Function that executes on reduce
+#       file     - File where production function is defined
+#       lineno   - Line number where production function is defined
+#
+# The following attributes are defined or optional.
+#
+#       len       - Length of the production (number of symbols on right hand side)
+#       usyms     - Set of unique symbols found in the production
+# -----------------------------------------------------------------------------
+
+class Production(object):
+    """Raw representation of a single grammar rule; the attributes are
+    described in the comment block immediately above this class."""
+    # Class-level reduction counter; presumably incremented elsewhere when
+    # this rule is reduced (used for unused-rule reporting) -- TODO confirm.
+    reduced = 0
+    def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0):
+        self.name     = name
+        self.prod     = tuple(prod)
+        self.number   = number
+        self.func     = func
+        self.callable = None
+        self.file     = file
+        self.line     = line
+        self.prec     = precedence
+
+        # Internal settings used during table construction
+        
+        self.len  = len(self.prod)   # Length of the production
+
+        # Create a list of unique production symbols used in the production
+        self.usyms = [ ]             
+        for s in self.prod:
+            if s not in self.usyms:
+                self.usyms.append(s)
+
+        # List of all LR items for the production
+        self.lr_items = []
+        self.lr_next = None
+
+        # Create a string representation
+        if self.prod:
+            self.str = "%s -> %s" % (self.name," ".join(self.prod))
+        else:
+            self.str = "%s -> <empty>" % self.name
+
+    def __str__(self):
+        return self.str
+
+    def __repr__(self):
+        return "Production("+str(self)+")"
+
+    def __len__(self):
+        return len(self.prod)
+
+    # Python 2 truth protocol: a Production is always truthy, even when empty.
+    def __nonzero__(self):
+        return 1
+
+    def __getitem__(self,index):
+        return self.prod[index]
+            
+    # Return the nth lr_item from the production (or None if at the end)
+    def lr_item(self,n):
+        if n > len(self.prod): return None
+        p = LRItem(self,n)
+
+        # Precompute the list of productions immediately following.  Hack. Remove later
+        # NOTE(review): relies on a global Prodnames mapping defined elsewhere
+        # in this module.
+        try:
+            p.lr_after = Prodnames[p.prod[n+1]]
+        except (IndexError,KeyError):
+            p.lr_after = []
+        try:
+            p.lr_before = p.prod[n-1]
+        except IndexError:
+            p.lr_before = None
+
+        return p
+    
+    # Bind the production function name to a callable
+    def bind(self,pdict):
+        if self.func:
+            self.callable = pdict[self.func]
+
+# This class serves as a minimal standin for Production objects when
+# reading table data from files.   It only contains information
+# actually used by the LR parsing engine, plus some additional
+# debugging information.
+class MiniProduction(object):
+    """Minimal stand-in for Production when table data is read from files;
+    holds only what the LR engine needs (name, len, bound callable) plus
+    file/line debugging info."""
+    def __init__(self,str,name,len,func,file,line):
+        self.name     = name
+        self.len      = len
+        self.func     = func
+        self.callable = None
+        self.file     = file
+        self.line     = line
+        self.str      = str
+    def __str__(self):
+        return self.str
+    def __repr__(self):
+        return "MiniProduction(%s)" % self.str
+
+    # Bind the production function name to a callable
+    def bind(self,pdict):
+        if self.func:
+            self.callable = pdict[self.func]
+
+
+# -----------------------------------------------------------------------------
+# class LRItem
+#
+# This class represents a specific stage of parsing a production rule.  For
+# example: 
+#
+#       expr : expr . PLUS term 
+#
+# In the above, the "." represents the current location of the parse.  Here
+# are its basic attributes:
+#
+#       name       - Name of the production.  For example 'expr'
+#       prod       - A list of symbols on the right side ['expr','.', 'PLUS','term']
+#       number     - Production number.
+#
+#       lr_next      Next LR item. Example, if we are ' expr -> expr . PLUS term'
+#                    then lr_next refers to 'expr -> expr PLUS . term'
+#       lr_index   - LR item index (location of the ".") in the prod list.
+#       lookaheads - LALR lookahead symbols for this item
+#       len        - Length of the production (number of symbols on right hand side)
+#       lr_after    - List of all productions that immediately follow
+#       lr_before   - Grammar symbol immediately before
+# -----------------------------------------------------------------------------
+
+class LRItem(object):
+    # An LR item is production p with a dot at position n, e.g.
+    # "expr -> expr . PLUS term".  The dot is stored as a literal "."
+    # inserted into a copy of the rhs symbol tuple.
+    def __init__(self,p,n):
+        self.name       = p.name
+        self.prod       = list(p.prod)
+        self.number     = p.number
+        self.lr_index   = n             # Position of the dot in self.prod
+        self.lookaheads = { }           # LALR lookaheads, filled in later
+        self.prod.insert(n,".")
+        self.prod       = tuple(self.prod)
+        self.len        = len(self.prod)
+        self.usyms      = p.usyms       # Unique symbols, shared with the production
+
+    def __str__(self):
+        # e.g. "expr -> expr . PLUS term"; empty productions print "<empty>".
+        if self.prod:
+            s = "%s -> %s" % (self.name," ".join(self.prod))
+        else:
+            s = "%s -> <empty>" % self.name
+        return s
+
+    def __repr__(self):
+        return "LRItem("+str(self)+")"
+
+# -----------------------------------------------------------------------------
+# rightmost_terminal()
+#
+# Return the rightmost terminal from a list of symbols.  Used in add_production()
+# -----------------------------------------------------------------------------
+def rightmost_terminal(symbols, terminals):
+    # Scan the rhs from right to left and return the first symbol that is a
+    # terminal; None if the production contains no terminal at all.  Used by
+    # add_production() to derive a rule's default precedence.
+    i = len(symbols) - 1
+    while i >= 0:
+        if symbols[i] in terminals:
+            return symbols[i]
+        i -= 1
+    return None
+
+# -----------------------------------------------------------------------------
+#                           === GRAMMAR CLASS ===
+#
+# The following class represents the contents of the specified grammar along
+# with various computed properties such as first sets, follow sets, LR items, etc.
+# This data is used for critical parts of the table generation process later.
+# -----------------------------------------------------------------------------
+
+# Raised for errors detected in the grammar specification itself
+# (bad rule names, duplicate rules, unknown precedence, etc.).
+class GrammarError(YaccError): pass
+
+class Grammar(object):
+    # Complete representation of the input grammar: production list,
+    # terminal/nonterminal usage tables, precedence rules, and the computed
+    # FIRST/FOLLOW sets consumed later by the LR table generator.
+    def __init__(self,terminals):
+        self.Productions  = [None]  # A list of all of the productions.  The first
+                                    # entry is always reserved for the purpose of
+                                    # building an augmented grammar
+
+        self.Prodnames    = { }     # A dictionary mapping the names of nonterminals to a list of all
+                                    # productions of that nonterminal.
+
+        self.Prodmap      = { }     # A dictionary that is only used to detect duplicate
+                                    # productions.
+
+        self.Terminals    = { }     # A dictionary mapping the names of terminal symbols to a
+                                    # list of the rules where they are used.
+
+        for term in terminals:
+            self.Terminals[term] = []
+
+        # 'error' is the implicit error-recovery token; always present.
+        self.Terminals['error'] = []
+
+        self.Nonterminals = { }     # A dictionary mapping names of nonterminals to a list
+                                    # of rule numbers where they are used.
+
+        self.First        = { }     # A dictionary of precomputed FIRST(x) symbols
+
+        self.Follow       = { }     # A dictionary of precomputed FOLLOW(x) symbols
+
+        self.Precedence   = { }     # Precedence rules for each terminal. Contains tuples of the
+                                    # form ('right',level) or ('nonassoc', level) or ('left',level)
+
+        self.UsedPrecedence = { }   # Precedence rules that were actually used by the grammar.
+                                    # This is only used to provide error checking and to generate
+                                    # a warning about unused precedence rules.
+
+        self.Start = None           # Starting symbol for the grammar
+
+
+    def __len__(self):
+        # Number of productions, including the reserved augmented slot 0.
+        return len(self.Productions)
+
+    def __getitem__(self,index):
+        # grammar[i] is production i; grammar[0] is the augmented S' rule.
+        return self.Productions[index]
+
+    # -----------------------------------------------------------------------------
+    # set_precedence()
+    #
+    # Sets the precedence for a given terminal. assoc is the associativity such as
+    # 'left','right', or 'nonassoc'.  level is a numeric level.
+    #
+    # -----------------------------------------------------------------------------
+
+    def set_precedence(self,term,assoc,level):
+        # Must be called before any productions exist, because add_production()
+        # consults Precedence to assign each rule its level.
+        assert self.Productions == [None],"Must call set_precedence() before add_production()"
+        if term in self.Precedence:
+            raise GrammarError("Precedence already specified for terminal '%s'" % term)
+        if assoc not in ['left','right','nonassoc']:
+            raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
+        self.Precedence[term] = (assoc,level)
+
+    # -----------------------------------------------------------------------------
+    # add_production()
+    #
+    # Given an action function, this function assembles a production rule and
+    # computes its precedence level.
+    #
+    # The production rule is supplied as a list of symbols.   For example,
+    # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
+    # symbols ['expr','PLUS','term'].
+    #
+    # Precedence is determined by the precedence of the right-most non-terminal
+    # or the precedence of a terminal specified by %prec.
+    #
+    # A variety of error checks are performed to make sure production symbols
+    # are valid and that %prec is used correctly.
+    # -----------------------------------------------------------------------------
+
+    def add_production(self,prodname,syms,func=None,file='',line=0):
+
+        if prodname in self.Terminals:
+            raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
+        if prodname == 'error':
+            raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
+        if not _is_identifier.match(prodname):
+            raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))
+
+        # Look for literal tokens
+        for n,s in enumerate(syms):
+            if s[0] in "'\"":
+                 # eval() turns the quoted spec literal (e.g. "'+'") into the
+                 # bare character; input is the grammar author's own rule text.
+                 try:
+                     c = eval(s)
+                     if (len(c) > 1):
+                          raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
+                     if not c in self.Terminals:
+                          self.Terminals[c] = []
+                     syms[n] = c
+                     continue
+                 except SyntaxError:
+                     pass
+            if not _is_identifier.match(s) and s != '%prec':
+                raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))
+
+        # Determine the precedence level
+        if '%prec' in syms:
+            if syms[-1] == '%prec':
+                raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
+            if syms[-2] != '%prec':
+                raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
+            precname = syms[-1]
+            prodprec = self.Precedence.get(precname,None)
+            if not prodprec:
+                raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
+            else:
+                self.UsedPrecedence[precname] = 1
+            del syms[-2:]     # Drop %prec from the rule
+        else:
+            # If no %prec, precedence is determined by the rightmost terminal symbol
+            precname = rightmost_terminal(syms,self.Terminals)
+            prodprec = self.Precedence.get(precname,('right',0))
+
+        # See if the rule is already in the rulemap
+        # NOTE: 'map' shadows the builtin here; it is just the dedup key string.
+        map = "%s -> %s" % (prodname,syms)
+        if map in self.Prodmap:
+            m = self.Prodmap[map]
+            raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
+                               "Previous definition at %s:%d" % (m.file, m.line))
+
+        # From this point on, everything is valid.  Create a new Production instance
+        pnumber  = len(self.Productions)
+        if not prodname in self.Nonterminals:
+            self.Nonterminals[prodname] = [ ]
+
+        # Add the production number to Terminals and Nonterminals
+        for t in syms:
+            if t in self.Terminals:
+                self.Terminals[t].append(pnumber)
+            else:
+                if not t in self.Nonterminals:
+                    self.Nonterminals[t] = [ ]
+                self.Nonterminals[t].append(pnumber)
+
+        # Create a production and add it to the list of productions
+        p = Production(pnumber,prodname,syms,prodprec,func,file,line)
+        self.Productions.append(p)
+        self.Prodmap[map] = p
+
+        # Add to the global productions list
+        try:
+            self.Prodnames[prodname].append(p)
+        except KeyError:
+            self.Prodnames[prodname] = [ p ]
+        return 0
+
+    # -----------------------------------------------------------------------------
+    # set_start()
+    #
+    # Sets the starting symbol and creates the augmented grammar.  Production
+    # rule 0 is S' -> start where start is the start symbol.
+    # -----------------------------------------------------------------------------
+
+    def set_start(self,start=None):
+        # Default start symbol: the lhs of the first user-defined production.
+        if not start:
+            start = self.Productions[1].name
+        if start not in self.Nonterminals:
+            raise GrammarError("start symbol %s undefined" % start)
+        self.Productions[0] = Production(0,"S'",[start])
+        self.Nonterminals[start].append(0)
+        self.Start = start
+
+    # -----------------------------------------------------------------------------
+    # find_unreachable()
+    #
+    # Find all of the nonterminal symbols that can't be reached from the starting
+    # symbol.  Returns a list of nonterminals that can't be reached.
+    # -----------------------------------------------------------------------------
+
+    def find_unreachable(self):
+
+        # Mark all symbols that are reachable from a symbol s
+        # (recursive depth-first marking over the production graph).
+        def mark_reachable_from(s):
+            if reachable[s]:
+                # We've already reached symbol s.
+                return
+            reachable[s] = 1
+            for p in self.Prodnames.get(s,[]):
+                for r in p.prod:
+                    mark_reachable_from(r)
+
+        reachable   = { }
+        for s in list(self.Terminals) + list(self.Nonterminals):
+            reachable[s] = 0
+
+        mark_reachable_from( self.Productions[0].prod[0] )
+
+        return [s for s in list(self.Nonterminals)
+                        if not reachable[s]]
+
+    # -----------------------------------------------------------------------------
+    # infinite_cycles()
+    #
+    # This function looks at the various parsing rules and tries to detect
+    # infinite recursion cycles (grammar rules where there is no possible way
+    # to derive a string of only terminals).
+    # -----------------------------------------------------------------------------
+
+    def infinite_cycles(self):
+        # Fixed-point computation of which symbols can derive a terminal string.
+        terminates = {}
+
+        # Terminals:
+        for t in self.Terminals:
+            terminates[t] = 1
+
+        terminates['$end'] = 1
+
+        # Nonterminals:
+
+        # Initialize to false:
+        for n in self.Nonterminals:
+            terminates[n] = 0
+
+        # Then propagate termination until no change:
+        while 1:
+            some_change = 0
+            for (n,pl) in self.Prodnames.items():
+                # Nonterminal n terminates iff any of its productions terminates.
+                for p in pl:
+                    # Production p terminates iff all of its rhs symbols terminate.
+                    for s in p.prod:
+                        if not terminates[s]:
+                            # The symbol s does not terminate,
+                            # so production p does not terminate.
+                            p_terminates = 0
+                            break
+                    else:
+                        # didn't break from the loop,
+                        # so every symbol s terminates
+                        # so production p terminates.
+                        p_terminates = 1
+
+                    if p_terminates:
+                        # symbol n terminates!
+                        if not terminates[n]:
+                            terminates[n] = 1
+                            some_change = 1
+                        # Don't need to consider any more productions for this n.
+                        break
+
+            if not some_change:
+                break
+
+        infinite = []
+        for (s,term) in terminates.items():
+            if not term:
+                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
+                    # s is used-but-not-defined, and we've already warned of that,
+                    # so it would be overkill to say that it's also non-terminating.
+                    pass
+                else:
+                    infinite.append(s)
+
+        return infinite
+
+
+    # -----------------------------------------------------------------------------
+    # undefined_symbols()
+    #
+    # Find all symbols that were used the grammar, but not defined as tokens or
+    # grammar rules.  Returns a list of tuples (sym, prod) where sym in the symbol
+    # and prod is the production where the symbol was used.
+    # -----------------------------------------------------------------------------
+    def undefined_symbols(self):
+        result = []
+        for p in self.Productions:
+            if not p: continue
+
+            for s in p.prod:
+                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
+                    result.append((s,p))
+        return result
+
+    # -----------------------------------------------------------------------------
+    # unused_terminals()
+    #
+    # Find all terminals that were defined, but not used by the grammar.  Returns
+    # a list of all symbols.
+    # -----------------------------------------------------------------------------
+    def unused_terminals(self):
+        unused_tok = []
+        for s,v in self.Terminals.items():
+            # v is the list of rule numbers using s; empty means unused.
+            if s != 'error' and not v:
+                unused_tok.append(s)
+
+        return unused_tok
+
+    # ------------------------------------------------------------------------------
+    # unused_rules()
+    #
+    # Find all grammar rules that were defined,  but not used (maybe not reachable)
+    # Returns a list of productions.
+    # ------------------------------------------------------------------------------
+
+    def unused_rules(self):
+        unused_prod = []
+        for s,v in self.Nonterminals.items():
+            if not v:
+                # Report only the first production of the unused nonterminal.
+                p = self.Prodnames[s][0]
+                unused_prod.append(p)
+        return unused_prod
+
+    # -----------------------------------------------------------------------------
+    # unused_precedence()
+    #
+    # Returns a list of tuples (term,precedence) corresponding to precedence
+    # rules that were never used by the grammar.  term is the name of the terminal
+    # on which precedence was applied and precedence is a string such as 'left' or
+    # 'right' corresponding to the type of precedence.
+    # -----------------------------------------------------------------------------
+
+    def unused_precedence(self):
+        unused = []
+        for termname in self.Precedence:
+            if not (termname in self.Terminals or termname in self.UsedPrecedence):
+                unused.append((termname,self.Precedence[termname][0]))
+
+        return unused
+
+    # -------------------------------------------------------------------------
+    # _first()
+    #
+    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
+    #
+    # During execution of compute_first1, the result may be incomplete.
+    # Afterward (e.g., when called from compute_follow()), it will be complete.
+    # -------------------------------------------------------------------------
+    def _first(self,beta):
+
+        # We are computing First(x1,x2,x3,...,xn)
+        result = [ ]
+        for x in beta:
+            x_produces_empty = 0
+
+            # Add all the non-<empty> symbols of First[x] to the result.
+            for f in self.First[x]:
+                if f == '<empty>':
+                    x_produces_empty = 1
+                else:
+                    if f not in result: result.append(f)
+
+            if x_produces_empty:
+                # We have to consider the next x in beta,
+                # i.e. stay in the loop.
+                pass
+            else:
+                # We don't have to consider any further symbols in beta.
+                break
+        else:
+            # There was no 'break' from the loop,
+            # so x_produces_empty was true for all x in beta,
+            # so beta produces empty as well.
+            result.append('<empty>')
+
+        return result
+
+    # -------------------------------------------------------------------------
+    # compute_first()
+    #
+    # Compute the value of FIRST1(X) for all symbols
+    # -------------------------------------------------------------------------
+    def compute_first(self):
+        # Memoized: a non-empty self.First means it was computed already.
+        if self.First:
+            return self.First
+
+        # Terminals:
+        for t in self.Terminals:
+            self.First[t] = [t]
+
+        self.First['$end'] = ['$end']
+
+        # Nonterminals:
+
+        # Initialize to the empty set:
+        for n in self.Nonterminals:
+            self.First[n] = []
+
+        # Then propagate symbols until no change:
+        while 1:
+            some_change = 0
+            for n in self.Nonterminals:
+                for p in self.Prodnames[n]:
+                    for f in self._first(p.prod):
+                        if f not in self.First[n]:
+                            self.First[n].append( f )
+                            some_change = 1
+            if not some_change:
+                break
+
+        return self.First
+
+    # ---------------------------------------------------------------------
+    # compute_follow()
+    #
+    # Computes all of the follow sets for every non-terminal symbol.  The
+    # follow set is the set of all symbols that might follow a given
+    # non-terminal.  See the Dragon book, 2nd Ed. p. 189.
+    # ---------------------------------------------------------------------
+    def compute_follow(self,start=None):
+        # If already computed, return the result
+        if self.Follow:
+            return self.Follow
+
+        # If first sets not computed yet, do that first.
+        if not self.First:
+            self.compute_first()
+
+        # Add '$end' to the follow list of the start symbol
+        for k in self.Nonterminals:
+            self.Follow[k] = [ ]
+
+        if not start:
+            start = self.Productions[1].name
+
+        self.Follow[start] = [ '$end' ]
+
+        while 1:
+            didadd = 0
+            for p in self.Productions[1:]:
+                # Here is the production set
+                for i in range(len(p.prod)):
+                    B = p.prod[i]
+                    if B in self.Nonterminals:
+                        # Okay. We got a non-terminal in a production
+                        fst = self._first(p.prod[i+1:])
+                        hasempty = 0
+                        for f in fst:
+                            if f != '<empty>' and f not in self.Follow[B]:
+                                self.Follow[B].append(f)
+                                didadd = 1
+                            if f == '<empty>':
+                                hasempty = 1
+                        if hasempty or i == (len(p.prod)-1):
+                            # Add elements of follow(a) to follow(b)
+                            for f in self.Follow[p.name]:
+                                if f not in self.Follow[B]:
+                                    self.Follow[B].append(f)
+                                    didadd = 1
+            if not didadd: break
+        return self.Follow
+
+
+    # -----------------------------------------------------------------------------
+    # build_lritems()
+    #
+    # This function walks the list of productions and builds a complete set of the
+    # LR items.  The LR items are stored in two ways:  First, they are uniquely
+    # numbered and placed in the list _lritems.  Second, a linked list of LR items
+    # is built for each production.  For example:
+    #
+    #   E -> E PLUS E
+    #
+    # Creates the list
+    #
+    #  [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
+    # -----------------------------------------------------------------------------
+
+    def build_lritems(self):
+        for p in self.Productions:
+            # lastlri starts as the Production itself, so p.lr_next points at
+            # the first item; each item then chains to the next via lr_next.
+            lastlri = p
+            i = 0
+            lr_items = []
+            while 1:
+                if i > len(p):
+                    lri = None
+                else:
+                    lri = LRItem(p,i)
+                    # Precompute the list of productions immediately following
+                    try:
+                        lri.lr_after = self.Prodnames[lri.prod[i+1]]
+                    except (IndexError,KeyError):
+                        lri.lr_after = []
+                    try:
+                        lri.lr_before = lri.prod[i-1]
+                    except IndexError:
+                        lri.lr_before = None
+
+                # A None item terminates the chain (dot past the end).
+                lastlri.lr_next = lri
+                if not lri: break
+                lr_items.append(lri)
+                lastlri = lri
+                i += 1
+            p.lr_items = lr_items
+
+# -----------------------------------------------------------------------------
+#                            == Class LRTable ==
+#
+# This class represents a basic table of LR parsing information.  
+# Methods for generating the tables are not defined here.  They are defined
+# in the derived class LRGeneratedTable.
+# -----------------------------------------------------------------------------
+
+# Raised when a stored parsing table was generated by an
+# incompatible version of this module (see __tabversion__).
+class VersionError(YaccError): pass
+
+class LRTable(object):
+    # Base container for LR parsing tables: the action and goto tables, the
+    # production list, and the parsing method.  It can be populated either by
+    # importing a generated parsetab module or by unpickling a table file.
+    def __init__(self):
+        self.lr_action = None
+        self.lr_goto = None
+        self.lr_productions = None
+        self.lr_method = None
+
+    def read_table(self,module):
+        # 'module' may be an already-imported module object or a module name.
+        if isinstance(module,types.ModuleType):
+            parsetab = module
+        else:
+            if sys.version_info[0] < 3:
+                exec("import %s as parsetab" % module)
+            else:
+                # Python 3: exec() cannot inject locals into this scope, so
+                # import into a scratch namespace and pull the module out.
+                env = { }
+                exec("import %s as parsetab" % module, env, env)
+                parsetab = env['parsetab']
+
+        if parsetab._tabversion != __tabversion__:
+            raise VersionError("yacc table file version is out of date")
+
+        self.lr_action = parsetab._lr_action
+        self.lr_goto = parsetab._lr_goto
+
+        self.lr_productions = []
+        for p in parsetab._lr_productions:
+            self.lr_productions.append(MiniProduction(*p))
+
+        self.lr_method = parsetab._lr_method
+        # The signature lets callers detect a stale table for a changed grammar.
+        return parsetab._lr_signature
+
+    def read_pickle(self,filename):
+        try:
+            import cPickle as pickle
+        except ImportError:
+            import pickle
+
+        # NOTE(review): if VersionError is raised below, in_f is never closed;
+        # a try/finally (or with-block) would be safer -- kept as upstream.
+        in_f = open(filename,"rb")
+
+        tabversion = pickle.load(in_f)
+        if tabversion != __tabversion__:
+            raise VersionError("yacc table file version is out of date")
+        # Loads must happen in the exact order the fields were pickled.
+        self.lr_method = pickle.load(in_f)
+        signature      = pickle.load(in_f)
+        self.lr_action = pickle.load(in_f)
+        self.lr_goto   = pickle.load(in_f)
+        productions    = pickle.load(in_f)
+
+        self.lr_productions = []
+        for p in productions:
+            self.lr_productions.append(MiniProduction(*p))
+
+        in_f.close()
+        return signature
+
+    # Bind all production function names to callable objects in pdict
+    def bind_callables(self,pdict):
+        for p in self.lr_productions:
+            p.bind(pdict)
+    
+# -----------------------------------------------------------------------------
+#                           === LR Generator ===
+#
+# The following classes and functions are used to generate LR parsing tables on 
+# a grammar.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# digraph()
+# traverse()
+#
+# The following two functions are used to compute set valued functions
+# of the form:
+#
+#     F(x) = F'(x) U U{F(y) | x R y}
+#
+# This is used to compute the values of Read() sets as well as FOLLOW sets
+# in LALR(1) generation.
+#
+# Inputs:  X    - An input set
+#          R    - A relation
+#          FP   - Set-valued function
+# ------------------------------------------------------------------------------
+
+def digraph(X,R,FP):
+    # Driver for the digraph algorithm: computes, for every x in X,
+    # F(x) = FP(x) U union{ F(y) | x R y }.  N tracks traversal depth
+    # (0 = unvisited); traverse() does the actual SCC-aware walk.
+    N = { }
+    for x in X:
+       N[x] = 0
+    stack = []
+    F = { }
+    for x in X:
+        if N[x] == 0: traverse(x,N,stack,F,X,R,FP)
+    return F
+
+def traverse(x,N,stack,F,X,R,FP):
+    # Tarjan-style depth-first traversal used by digraph().  When a strongly
+    # connected component is detected (N[x] == d on exit), every member of
+    # the SCC receives the same F set and is marked finished with MAXINT.
+    stack.append(x)
+    d = len(stack)
+    N[x] = d
+    F[x] = FP(x)             # F(X) <- F'(x)
+
+    rel = R(x)               # Get y's related to x
+    for y in rel:
+        if N[y] == 0:
+             traverse(y,N,stack,F,X,R,FP)
+        N[x] = min(N[x],N[y])
+        for a in F.get(y,[]):
+            if a not in F[x]: F[x].append(a)
+    if N[x] == d:
+       N[stack[-1]] = MAXINT
+       F[stack[-1]] = F[x]
+       element = stack.pop()
+       while element != x:
+           N[stack[-1]] = MAXINT
+           F[stack[-1]] = F[x]
+           element = stack.pop()
+
+# Raised for errors encountered during LR/LALR table construction.
+class LALRError(YaccError): pass
+
+# -----------------------------------------------------------------------------
+#                             == LRGeneratedTable ==
+#
+# This class implements the LR table generation algorithm.  There are no
+# public methods except for write()
+# -----------------------------------------------------------------------------
+
+class LRGeneratedTable(LRTable):
+    # Build the LR parsing tables for 'grammar' using the SLR or LALR method;
+    # conflicts found along the way are recorded, and messages go to 'log'.
+    def __init__(self,grammar,method='LALR',log=None):
+        if method not in ['SLR','LALR']:
+            raise LALRError("Unsupported method %s" % method)
+
+        self.grammar = grammar
+        self.lr_method = method
+
+        # Set up the logger
+        if not log:
+            log = NullLogger()
+        self.log = log
+
+        # Internal attributes
+        self.lr_action     = {}        # Action table
+        self.lr_goto       = {}        # Goto table
+        self.lr_productions  = grammar.Productions    # Copy of grammar Production array
+        self.lr_goto_cache = {}        # Cache of computed gotos
+        self.lr0_cidhash   = {}        # Cache of closures
+
+        self._add_count    = 0         # Internal counter used to detect cycles
+
+        # Diagnostic information filled in by the table generator
+        self.sr_conflict   = 0
+        self.rr_conflict   = 0
+        self.conflicts     = []        # List of conflicts
+
+        self.sr_conflicts  = []
+        self.rr_conflicts  = []
+
+        # Build the tables
+        self.grammar.build_lritems()
+        self.grammar.compute_first()
+        self.grammar.compute_follow()
+        self.lr_parse_table()
+
+    # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
+
+    def lr0_closure(self,I):
+        # Standard LR(0) closure: for every item with a dot before a
+        # nonterminal, add that nonterminal's initial items.  _add_count
+        # stamps each production so it is added at most once per call.
+        self._add_count += 1
+
+        # Add everything in I to J
+        J = I[:]
+        didadd = 1
+        while didadd:
+            didadd = 0
+            # Appending to J while iterating is intentional: newly added
+            # items are picked up either in this pass or the next.
+            for j in J:
+                for x in j.lr_after:
+                    if getattr(x,"lr0_added",0) == self._add_count: continue
+                    # Add B --> .G to J
+                    J.append(x.lr_next)
+                    x.lr0_added = self._add_count
+                    didadd = 1
+
+        return J
+
+    # Compute the LR(0) goto function goto(I,X) where I is a set
+    # of LR(0) items and X is a grammar symbol.   This function is written
+    # in a way that guarantees uniqueness of the generated goto sets
+    # (i.e. the same goto set will never be returned as two different Python
+    # objects).  With uniqueness, we can later do fast set comparisons using
+    # id(obj) instead of element-wise comparison.
+
+    def lr0_goto(self,I,x):
+        # First we look for a previously cached entry
+        g = self.lr_goto_cache.get((id(I),x),None)
+        if g: return g
+
+        # Now we generate the goto set in a way that guarantees uniqueness
+        # of the result.  lr_goto_cache[x] is the root of a trie keyed by the
+        # id()s of the successor items; the '$end' leaf holds the final set,
+        # so identical goto sets always resolve to the same list object.
+
+        s = self.lr_goto_cache.get(x,None)
+        if not s:
+            s = { }
+            self.lr_goto_cache[x] = s
+
+        gs = [ ]
+        for p in I:
+            n = p.lr_next
+            if n and n.lr_before == x:
+                s1 = s.get(id(n),None)
+                if not s1:
+                    s1 = { }
+                    s[id(n)] = s1
+                gs.append(n)
+                s = s1
+        g = s.get('$end',None)
+        if not g:
+            if gs:
+                g = self.lr0_closure(gs)
+                s['$end'] = g
+            else:
+                s['$end'] = gs
+        self.lr_goto_cache[(id(I),x)] = g
+        return g
+
+    # Compute the LR(0) sets of item function
+    # Compute the canonical collection of LR(0) item sets, starting from the
+    # closure of the augmented production.  Returns the list C of item sets;
+    # lr0_cidhash maps id(itemset) -> state number.
+    def lr0_items(self):
+
+        C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
+        i = 0
+        for I in C:
+            self.lr0_cidhash[id(I)] = i
+            i += 1
+
+        # Loop over the items in C and each grammar symbols
+        # (C grows while this loop runs; the index-based loop handles that).
+        i = 0
+        while i < len(C):
+            I = C[i]
+            i += 1
+
+            # Collect all of the symbols that could possibly be in the goto(I,X) sets
+            asyms = { }
+            for ii in I:
+                for s in ii.usyms:
+                    asyms[s] = None
+
+            for x in asyms:
+                g = self.lr0_goto(I,x)
+                if not g:  continue
+                if id(g) in self.lr0_cidhash: continue
+                self.lr0_cidhash[id(g)] = len(C)
+                C.append(g)
+
+        return C
+
+    # -----------------------------------------------------------------------------
+    #                       ==== LALR(1) Parsing ====
+    #
+    # LALR(1) parsing is almost exactly the same as SLR except that instead of
+    # relying upon Follow() sets when performing reductions, a more selective
+    # lookahead set that incorporates the state of the LR(0) machine is utilized.
+    # Thus, we mainly just have to focus on calculating the lookahead sets.
+    #
+    # The method used here is due to DeRemer and Pennello (1982).
+    #
+    # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
+    #     Lookahead Sets", ACM Transactions on Programming Languages and Systems,
+    #     Vol. 4, No. 4, Oct. 1982, pp. 615-649
+    #
+    # Further details can also be found in:
+    #
+    #  J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
+    #      McGraw-Hill Book Company, (1985).
+    #
+    # -----------------------------------------------------------------------------
+
+    # -----------------------------------------------------------------------------
+    # compute_nullable_nonterminals()
+    #
+    # Creates a dictionary containing all of the non-terminals that might produce
+    # an empty production.
+    # -----------------------------------------------------------------------------
+
+    def compute_nullable_nonterminals(self):
+        """Return a dict whose keys are the nonterminals that can derive the
+        empty string.  Computed as a fixed point: iterate over all productions
+        until no new nullable nonterminal is discovered."""
+        nullable = {}
+        num_nullable = 0
+        while 1:
+           for p in self.grammar.Productions[1:]:
+               if p.len == 0:
+                    nullable[p.name] = 1
+                    continue
+               # for/else: if the loop completes without break, every symbol
+               # on the right-hand side is already known nullable.
+               for t in p.prod:
+                    if not t in nullable: break
+               else:
+                    nullable[p.name] = 1
+           if len(nullable) == num_nullable: break
+           num_nullable = len(nullable)
+        return nullable
+
+    # -----------------------------------------------------------------------------
+    # find_nonterminal_transitions(C)
+    #
+    # Given a set of LR(0) items, this function finds all of the non-terminal
+    # transitions.    These are transitions in which a dot appears immediately before
+    # a non-terminal.   Returns a list of tuples of the form (state,N) where state
+    # is the state number and N is the nonterminal symbol.
+    #
+    # The input C is the set of LR(0) items.
+    # -----------------------------------------------------------------------------
+
+    def find_nonterminal_transitions(self,C):
+         """Return the list of (state, N) nonterminal transitions in C,
+         i.e. items in which the dot sits immediately before nonterminal N."""
+         trans = []
+         for state in range(len(C)):
+             for p in C[state]:
+                 if p.lr_index < p.len - 1:
+                      t = (state,p.prod[p.lr_index+1])
+                      if t[1] in self.grammar.Nonterminals:
+                            if t not in trans: trans.append(t)
+             # NOTE(review): dead store -- `state` is reassigned by the for
+             # loop on the next iteration; this line has no effect.
+             state = state + 1
+         return trans
+
+    # -----------------------------------------------------------------------------
+    # dr_relation()
+    #
+    # Computes the DR(p,A) relationships for non-terminal transitions.  The input
+    # is a tuple (state,N) where state is a number and N is a nonterminal symbol.
+    #
+    # Returns a list of terminals.
+    # -----------------------------------------------------------------------------
+
+    def dr_relation(self,C,trans,nullable):
+        """Return DR(state, N): the terminals directly readable after taking
+        the nonterminal transition `trans` = (state, N) in the LR(0) machine."""
+        # NOTE(review): `dr_set` and the `nullable` parameter are unused in
+        # this body (kept to match the digraph callback signature).
+        dr_set = { }
+        state,N = trans
+        terms = []
+
+        g = self.lr0_goto(C[state],N)
+        for p in g:
+           if p.lr_index < p.len - 1:
+               a = p.prod[p.lr_index+1]
+               if a in self.grammar.Terminals:
+                   if a not in terms: terms.append(a)
+
+        # This extra bit is to handle the start state
+        if state == 0 and N == self.grammar.Productions[0].prod[0]:
+           terms.append('$end')
+
+        return terms
+
+    # -----------------------------------------------------------------------------
+    # reads_relation()
+    #
+    # Computes the READS() relation (p,A) READS (t,C).
+    # -----------------------------------------------------------------------------
+
+    def reads_relation(self,C, trans, empty):
+        """Return the list of transitions (j, a) read from `trans` = (state, N)
+        where `a` is a nullable symbol (member of `empty`) that follows the dot
+        in state j = goto(state, N)."""
+        # Look for empty transitions
+        rel = []
+        state, N = trans
+
+        g = self.lr0_goto(C[state],N)
+        j = self.lr0_cidhash.get(id(g),-1)
+        for p in g:
+            if p.lr_index < p.len - 1:
+                 a = p.prod[p.lr_index + 1]
+                 if a in empty:
+                      rel.append((j,a))
+
+        return rel
+
+    # -----------------------------------------------------------------------------
+    # compute_lookback_includes()
+    #
+    # Determines the lookback and includes relations
+    #
+    # LOOKBACK:
+    #
+    # This relation is determined by running the LR(0) state machine forward.
+    # For example, starting with a production "N : . A B C", we run it forward
+    # to obtain "N : A B C ."   We then build a relationship between this final
+    # state and the starting state.   These relationships are stored in a dictionary
+    # lookdict.
+    #
+    # INCLUDES:
+    #
+    # Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
+    #
+    # This relation is used to determine non-terminal transitions that occur
+    # inside of other non-terminal transition states.   (p,A) INCLUDES (p', B)
+    # if the following holds:
+    #
+    #       B -> LAT, where T -> epsilon and p' -L-> p
+    #
+    # L is essentially a prefix (which may be empty), T is a suffix that must be
+    # able to derive an empty string.  State p' must lead to state p with the string L.
+    #
+    # -----------------------------------------------------------------------------
+
+    def compute_lookback_includes(self,C,trans,nullable):
+        """Return (lookdict, includedict) for the nonterminal transitions in
+        `trans` (see the LOOKBACK/INCLUDES description in the banner above)."""
+
+        lookdict = {}          # Dictionary of lookback relations
+        includedict = {}       # Dictionary of include relations
+
+        # Make a dictionary of non-terminal transitions
+        dtrans = {}
+        for t in trans:
+            dtrans[t] = 1
+
+        # Loop over all transitions and compute lookbacks and includes
+        for state,N in trans:
+            lookb = []
+            includes = []
+            for p in C[state]:
+                if p.name != N: continue
+
+                # Okay, we have a name match.  We now follow the production all the way
+                # through the state machine until we get the . on the right hand side
+
+                lr_index = p.lr_index
+                j = state
+                while lr_index < p.len - 1:
+                     lr_index = lr_index + 1
+                     t = p.prod[lr_index]
+
+                     # Check to see if this symbol and state are a non-terminal transition
+                     if (j,t) in dtrans:
+                           # Yes.  Okay, there is some chance that this is an includes relation
+                           # the only way to know for certain is whether the rest of the
+                           # production derives empty
+
+                           # while/else: the else runs only when the inner loop
+                           # did NOT break, i.e. everything after t is nullable.
+                           li = lr_index + 1
+                           while li < p.len:
+                                if p.prod[li] in self.grammar.Terminals: break      # No forget it
+                                if not p.prod[li] in nullable: break
+                                li = li + 1
+                           else:
+                                # Appears to be a relation between (j,t) and (state,N)
+                                includes.append((j,t))
+
+                     g = self.lr0_goto(C[j],t)               # Go to next set
+                     j = self.lr0_cidhash.get(id(g),-1)     # Go to next state
+
+                # When we get here, j is the final state, now we have to locate the production
+                for r in C[j]:
+                     if r.name != p.name: continue
+                     if r.len != p.len:   continue
+                     i = 0
+                     # This loop is comparing a production ". A B C" with "A B C ."
+                     while i < r.lr_index:
+                          if r.prod[i] != p.prod[i+1]: break
+                          i = i + 1
+                     else:
+                          lookb.append((j,r))
+            for i in includes:
+                 if not i in includedict: includedict[i] = []
+                 includedict[i].append((state,N))
+            lookdict[(state,N)] = lookb
+
+        return lookdict,includedict
+
+    # -----------------------------------------------------------------------------
+    # compute_read_sets()
+    #
+    # Given a set of LR(0) items, this function computes the read sets.
+    #
+    # Inputs:  C        =  Set of LR(0) items
+    #          ntrans   = Set of nonterminal transitions
+    #          nullable = Set of empty transitions
+    #
+    # Returns a set containing the read sets
+    # -----------------------------------------------------------------------------
+
+    def compute_read_sets(self,C, ntrans, nullable):
+        """Compute Read(p, A) for each nonterminal transition via the digraph
+        algorithm, with DR as the base function and READS as the relation.
+        Note: `nullable` is passed as reads_relation's `empty` argument."""
+        FP = lambda x: self.dr_relation(C,x,nullable)
+        R =  lambda x: self.reads_relation(C,x,nullable)
+        F = digraph(ntrans,R,FP)
+        return F
+
+    # -----------------------------------------------------------------------------
+    # compute_follow_sets()
+    #
+    # Given a set of LR(0) items, a set of non-terminal transitions, a readset,
+    # and an include set, this function computes the follow sets
+    #
+    # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
+    #
+    # Inputs:
+    #            ntrans     = Set of nonterminal transitions
+    #            readsets   = Readset (previously computed)
+    #            inclsets   = Include sets (previously computed)
+    #
+    # Returns a set containing the follow sets
+    # -----------------------------------------------------------------------------
+
+    def compute_follow_sets(self,ntrans,readsets,inclsets):
+         """Compute Follow(p, A) via the digraph algorithm, seeding from the
+         read sets and propagating along the INCLUDES relation."""
+         FP = lambda x: readsets[x]
+         R  = lambda x: inclsets.get(x,[])
+         F = digraph(ntrans,R,FP)
+         return F
+
+    # -----------------------------------------------------------------------------
+    # add_lookaheads()
+    #
+    # Attaches the lookahead symbols to grammar rules.
+    #
+    # Inputs:    lookbacks         -  Set of lookback relations
+    #            followset         -  Computed follow set
+    #
+    # This function directly attaches the lookaheads to productions contained
+    # in the lookbacks set
+    # -----------------------------------------------------------------------------
+
+    def add_lookaheads(self,lookbacks,followset):
+        """For every lookback (state, production), merge the follow set of the
+        corresponding transition into production.lookaheads[state] (mutates
+        the production objects in place; duplicates are skipped)."""
+        for trans,lb in lookbacks.items():
+            # Loop over productions in lookback
+            for state,p in lb:
+                 if not state in p.lookaheads:
+                      p.lookaheads[state] = []
+                 f = followset.get(trans,[])
+                 for a in f:
+                      if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
+
+    # -----------------------------------------------------------------------------
+    # add_lalr_lookaheads()
+    #
+    # This function does all of the work of adding lookahead information for use
+    # with LALR parsing
+    # -----------------------------------------------------------------------------
+
+    def add_lalr_lookaheads(self,C):
+        """Run the full DeRemer/Pennello pipeline over the LR(0) item sets C
+        and attach LALR(1) lookaheads to the grammar's productions."""
+        # Determine all of the nullable nonterminals
+        nullable = self.compute_nullable_nonterminals()
+
+        # Find all non-terminal transitions
+        trans = self.find_nonterminal_transitions(C)
+
+        # Compute read sets
+        readsets = self.compute_read_sets(C,trans,nullable)
+
+        # Compute lookback/includes relations
+        lookd, included = self.compute_lookback_includes(C,trans,nullable)
+
+        # Compute LALR FOLLOW sets
+        followsets = self.compute_follow_sets(trans,readsets,included)
+
+        # Add all of the lookaheads
+        self.add_lookaheads(lookd,followsets)
+
+    # -----------------------------------------------------------------------------
+    # lr_parse_table()
+    #
+    # This function constructs the parse tables for SLR or LALR
+    # -----------------------------------------------------------------------------
+    def lr_parse_table(self):
+        """Build the LR action/goto tables into self.lr_action/self.lr_goto.
+
+        Action encoding, per (state, terminal): a positive value means shift
+        and go to that state; a negative value means reduce by that rule
+        number; 0 means accept; None means error (nonassoc conflict).
+        Conflicts found along the way are recorded in self.sr_conflicts and
+        self.rr_conflicts and logged via self.log.
+        """
+        Productions = self.grammar.Productions
+        Precedence  = self.grammar.Precedence
+        goto   = self.lr_goto         # Goto array
+        action = self.lr_action       # Action array
+        log    = self.log             # Logger for output
+
+        actionp = { }                 # Action production array (temporary)
+        
+        log.info("Parsing method: %s", self.lr_method)
+
+        # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
+        # This determines the number of states
+
+        C = self.lr0_items()
+
+        # LALR uses per-state lookaheads; SLR falls back to Follow sets below.
+        if self.lr_method == 'LALR':
+            self.add_lalr_lookaheads(C)
+
+        # Build the parser table, state by state
+        st = 0
+        for I in C:
+            # Loop over each production in I
+            actlist = [ ]              # List of actions
+            st_action  = { }
+            st_actionp = { }
+            st_goto    = { }
+            log.info("")
+            log.info("state %d", st)
+            log.info("")
+            for p in I:
+                log.info("    (%d) %s", p.number, str(p))
+            log.info("")
+
+            for p in I:
+                    if p.len == p.lr_index + 1:
+                        if p.name == "S'":
+                            # Start symbol. Accept!
+                            st_action["$end"] = 0
+                            st_actionp["$end"] = p
+                        else:
+                            # We are at the end of a production.  Reduce!
+                            if self.lr_method == 'LALR':
+                                laheads = p.lookaheads[st]
+                            else:
+                                laheads = self.grammar.Follow[p.name]
+                            for a in laheads:
+                                actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
+                                r = st_action.get(a,None)
+                                if r is not None:
+                                    # Whoa. Have a shift/reduce or reduce/reduce conflict
+                                    if r > 0:
+                                        # Need to decide on shift or reduce here
+                                        # By default we favor shifting. Need to add
+                                        # some precedence rules here.
+                                        sprec,slevel = Productions[st_actionp[a].number].prec
+                                        rprec,rlevel = Precedence.get(a,('right',0))
+                                        if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
+                                            # We really need to reduce here.
+                                            st_action[a] = -p.number
+                                            st_actionp[a] = p
+                                            if not slevel and not rlevel:
+                                                log.info("  ! shift/reduce conflict for %s resolved as reduce",a)
+                                                self.sr_conflicts.append((st,a,'reduce'))
+                                            Productions[p.number].reduced += 1
+                                        elif (slevel == rlevel) and (rprec == 'nonassoc'):
+                                            # nonassoc at equal precedence: make it an error
+                                            st_action[a] = None
+                                        else:
+                                            # Hmmm. Guess we'll keep the shift
+                                            if not rlevel:
+                                                log.info("  ! shift/reduce conflict for %s resolved as shift",a)
+                                                self.sr_conflicts.append((st,a,'shift'))
+                                    elif r < 0:
+                                        # Reduce/reduce conflict.   In this case, we favor the rule
+                                        # that was defined first in the grammar file
+                                        oldp = Productions[-r]
+                                        pp = Productions[p.number]
+                                        if oldp.line > pp.line:
+                                            st_action[a] = -p.number
+                                            st_actionp[a] = p
+                                            chosenp,rejectp = pp,oldp
+                                            Productions[p.number].reduced += 1
+                                            Productions[oldp.number].reduced -= 1
+                                        else:
+                                            chosenp,rejectp = oldp,pp
+                                        self.rr_conflicts.append((st,chosenp,rejectp))
+                                        log.info("  ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
+                                    else:
+                                        raise LALRError("Unknown conflict in state %d" % st)
+                                else:
+                                    st_action[a] = -p.number
+                                    st_actionp[a] = p
+                                    Productions[p.number].reduced += 1
+                    else:
+                        i = p.lr_index
+                        a = p.prod[i+1]       # Get symbol right after the "."
+                        if a in self.grammar.Terminals:
+                            g = self.lr0_goto(I,a)
+                            j = self.lr0_cidhash.get(id(g),-1)
+                            if j >= 0:
+                                # We are in a shift state
+                                actlist.append((a,p,"shift and go to state %d" % j))
+                                r = st_action.get(a,None)
+                                if r is not None:
+                                    # Whoa have a shift/reduce or shift/shift conflict
+                                    if r > 0:
+                                        if r != j:
+                                            raise LALRError("Shift/shift conflict in state %d" % st)
+                                    elif r < 0:
+                                        # Do a precedence check.
+                                        #   -  if precedence of reduce rule is higher, we reduce.
+                                        #   -  if precedence of reduce is same and left assoc, we reduce.
+                                        #   -  otherwise we shift
+                                        rprec,rlevel = Productions[st_actionp[a].number].prec
+                                        sprec,slevel = Precedence.get(a,('right',0))
+                                        if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
+                                            # We decide to shift here... highest precedence to shift
+                                            Productions[st_actionp[a].number].reduced -= 1
+                                            st_action[a] = j
+                                            st_actionp[a] = p
+                                            if not rlevel:
+                                                log.info("  ! shift/reduce conflict for %s resolved as shift",a)
+                                                self.sr_conflicts.append((st,a,'shift'))
+                                        elif (slevel == rlevel) and (rprec == 'nonassoc'):
+                                            st_action[a] = None
+                                        else:
+                                            # Hmmm. Guess we'll keep the reduce
+                                            if not slevel and not rlevel:
+                                                log.info("  ! shift/reduce conflict for %s resolved as reduce",a)
+                                                self.sr_conflicts.append((st,a,'reduce'))
+
+                                    else:
+                                        raise LALRError("Unknown conflict in state %d" % st)
+                                else:
+                                    st_action[a] = j
+                                    st_actionp[a] = p
+
+            # Print the actions associated with each terminal
+            _actprint = { }
+            for a,p,m in actlist:
+                if a in st_action:
+                    if p is st_actionp[a]:
+                        log.info("    %-15s %s",a,m)
+                        _actprint[(a,m)] = 1
+            log.info("")
+            # Print the actions that were not used. (debugging)
+            not_used = 0
+            for a,p,m in actlist:
+                if a in st_action:
+                    if p is not st_actionp[a]:
+                        if not (a,m) in _actprint:
+                            log.debug("  ! %-15s [ %s ]",a,m)
+                            not_used = 1
+                            _actprint[(a,m)] = 1
+            if not_used:
+                log.debug("")
+
+            # Construct the goto table for this state
+
+            nkeys = { }
+            for ii in I:
+                for s in ii.usyms:
+                    if s in self.grammar.Nonterminals:
+                        nkeys[s] = None
+            for n in nkeys:
+                g = self.lr0_goto(I,n)
+                j = self.lr0_cidhash.get(id(g),-1)
+                if j >= 0:
+                    st_goto[n] = j
+                    log.info("    %-30s shift and go to state %d",n,j)
+
+            action[st] = st_action
+            actionp[st] = st_actionp
+            goto[st] = st_goto
+            st += 1
+
+
+    # -----------------------------------------------------------------------------
+    # write()
+    #
+    # This function writes the LR parsing tables to a file
+    # -----------------------------------------------------------------------------
+
+    def write_table(self,modulename,outputdir='',signature=""):
+        """Write the LR tables as an importable Python module
+        <outputdir>/<basemodulename>.py containing _tabversion, _lr_method,
+        _lr_signature, _lr_action, _lr_goto and _lr_productions.
+
+        On IOError the error is reported to stderr and the method returns
+        without raising.
+        """
+        basemodulename = modulename.split(".")[-1]
+        filename = os.path.join(outputdir,basemodulename) + ".py"
+        try:
+            f = open(filename,"w")
+
+            f.write("""
+# %s
+# This file is automatically generated. Do not edit.
+_tabversion = %r
+
+_lr_method = %r
+
+_lr_signature = %r
+    """ % (filename, __tabversion__, self.lr_method, signature))
+
+            # Change smaller to 0 to go back to original tables
+            smaller = 1
+
+            # Factor out names to try and make smaller
+            # (group action entries by terminal name so each name is emitted
+            # once, alongside parallel state/value lists)
+            if smaller:
+                items = { }
+
+                for s,nd in self.lr_action.items():
+                   for name,v in nd.items():
+                      i = items.get(name)
+                      if not i:
+                         i = ([],[])
+                         items[name] = i
+                      i[0].append(s)
+                      i[1].append(v)
+
+                f.write("\n_lr_action_items = {")
+                for k,v in items.items():
+                    f.write("%r:([" % k)
+                    for i in v[0]:
+                        f.write("%r," % i)
+                    f.write("],[")
+                    for i in v[1]:
+                        f.write("%r," % i)
+
+                    f.write("]),")
+                f.write("}\n")
+
+                f.write("""
+_lr_action = { }
+for _k, _v in _lr_action_items.items():
+   for _x,_y in zip(_v[0],_v[1]):
+      if not _x in _lr_action:  _lr_action[_x] = { }
+      _lr_action[_x][_k] = _y
+del _lr_action_items
+""")
+
+            else:
+                f.write("\n_lr_action = { ");
+                for k,v in self.lr_action.items():
+                    f.write("(%r,%r):%r," % (k[0],k[1],v))
+                f.write("}\n");
+
+            if smaller:
+                # Factor out names to try and make smaller
+                # (same name-grouping scheme as the action table above)
+                items = { }
+
+                for s,nd in self.lr_goto.items():
+                   for name,v in nd.items():
+                      i = items.get(name)
+                      if not i:
+                         i = ([],[])
+                         items[name] = i
+                      i[0].append(s)
+                      i[1].append(v)
+
+                f.write("\n_lr_goto_items = {")
+                for k,v in items.items():
+                    f.write("%r:([" % k)
+                    for i in v[0]:
+                        f.write("%r," % i)
+                    f.write("],[")
+                    for i in v[1]:
+                        f.write("%r," % i)
+
+                    f.write("]),")
+                f.write("}\n")
+
+                f.write("""
+_lr_goto = { }
+for _k, _v in _lr_goto_items.items():
+   for _x,_y in zip(_v[0],_v[1]):
+       if not _x in _lr_goto: _lr_goto[_x] = { }
+       _lr_goto[_x][_k] = _y
+del _lr_goto_items
+""")
+            else:
+                f.write("\n_lr_goto = { ");
+                for k,v in self.lr_goto.items():
+                    f.write("(%r,%r):%r," % (k[0],k[1],v))
+                f.write("}\n");
+
+            # Write production table
+            f.write("_lr_productions = [\n")
+            for p in self.lr_productions:
+                if p.func:
+                    f.write("  (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
+                else:
+                    f.write("  (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
+            f.write("]\n")
+            f.close()
+
+        except IOError:
+            # sys.exc_info()[1] fetches the exception instance without
+            # `except ... as e` syntax -- presumably for Python 2/3
+            # source compatibility (TODO confirm).
+            e = sys.exc_info()[1]
+            sys.stderr.write("Unable to create '%s'\n" % filename)
+            sys.stderr.write(str(e)+"\n")
+            return
+
+
+    # -----------------------------------------------------------------------------
+    # pickle_table()
+    #
+    # This function pickles the LR parsing tables to a supplied file object
+    # -----------------------------------------------------------------------------
+
+    def pickle_table(self,filename,signature=""):
+        """Pickle the LR tables to `filename` as a sequence of dumps:
+        tab version, method, signature, action table, goto table, and a
+        flattened production list."""
+        # Prefer the faster C implementation when available (Python 2).
+        try:
+            import cPickle as pickle
+        except ImportError:
+            import pickle
+        outf = open(filename,"wb")
+        pickle.dump(__tabversion__,outf,pickle_protocol)
+        pickle.dump(self.lr_method,outf,pickle_protocol)
+        pickle.dump(signature,outf,pickle_protocol)
+        pickle.dump(self.lr_action,outf,pickle_protocol)
+        pickle.dump(self.lr_goto,outf,pickle_protocol)
+
+        # Productions are flattened to plain tuples so the pickle does not
+        # depend on the Production class.
+        outp = []
+        for p in self.lr_productions:
+            if p.func:
+                outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
+            else:
+                outp.append((str(p),p.name,p.len,None,None,None))
+        pickle.dump(outp,outf,pickle_protocol)
+        outf.close()
+
+# -----------------------------------------------------------------------------
+#                            === INTROSPECTION ===
+#
+# The following functions and classes are used to implement the PLY
+# introspection features followed by the yacc() function itself.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# get_caller_module_dict()
+#
+# This function returns a dictionary containing all of the symbols defined within
+# a caller further down the call stack.  This is used to get the environment
+# associated with the yacc() call if none was provided.
+# -----------------------------------------------------------------------------
+
+def get_caller_module_dict(levels):
+    """Return the merged globals+locals of the frame `levels` steps up the
+    call stack.  A RuntimeError is raised and immediately caught purely to
+    obtain a traceback whose tb_frame gives access to the current frame."""
+    try:
+        raise RuntimeError
+    except RuntimeError:
+        e,b,t = sys.exc_info()
+        f = t.tb_frame
+        while levels > 0:
+            f = f.f_back                   
+            levels -= 1
+        ldict = f.f_globals.copy()
+        # When the target frame is not module level, locals shadow globals.
+        if f.f_globals != f.f_locals:
+            ldict.update(f.f_locals)
+
+        return ldict
+
+# -----------------------------------------------------------------------------
+# parse_grammar()
+#
+# This takes a raw grammar rule string and parses it into production data
+# -----------------------------------------------------------------------------
+def parse_grammar(doc,file,line):
+    """Parse the docstring `doc` of a rule function into a list of
+    (file, line, prodname, syms) tuples.
+
+    `file`/`line` locate the docstring in its source file (line numbers are
+    1-based from `line`).  Rules look like "name : sym sym ..." with '|'
+    continuing the previous rule.  Raises SyntaxError on malformed rules.
+    """
+    grammar = []
+    # Split the doc string into lines
+    pstrings = doc.splitlines()
+    lastp = None
+    dline = line
+    for ps in pstrings:
+        dline += 1
+        p = ps.split()
+        if not p: continue
+        try:
+            if p[0] == '|':
+                # This is a continuation of a previous rule
+                if not lastp:
+                    raise SyntaxError("%s:%d: Misplaced '|'" % (file,dline))
+                prodname = lastp
+                syms = p[1:]
+            else:
+                prodname = p[0]
+                lastp = prodname
+                syms   = p[2:]
+                assign = p[1]
+                if assign != ':' and assign != '::=':
+                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,dline))
+
+            grammar.append((file,dline,prodname,syms))
+        except SyntaxError:
+            raise
+        except Exception:
+            # e.g. IndexError from a one-token line -- rewrap as SyntaxError
+            raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,dline,ps.strip()))
+
+    return grammar
+
+# -----------------------------------------------------------------------------
+# ParserReflect()
+#
+# This class represents information extracted for building a parser including
+# start symbol, error function, tokens, precedence list, action functions,
+# etc.
+# -----------------------------------------------------------------------------
+class ParserReflect(object):
+    def __init__(self,pdict,log=None):
+        self.pdict      = pdict