Bug 1024056 - Simple ASCII lexical analyzer. r=nfroyd
authorHonza Bambas <honzab.moz@firemni.cz>
Mon, 27 Jul 2015 05:07:00 -0400
changeset 254759 397fd842ab54edaa4dd51c3fd7286dc51f809fdb
parent 254758 df33f085e1a6bdd2faefe155c21c077a3b18c607
child 254770 2ea0af589ebd830f4a4c99ae1d2f46e428801322
push id62843
push userryanvm@gmail.com
push dateMon, 27 Jul 2015 16:07:49 +0000
treeherdermozilla-inbound@397fd842ab54 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersnfroyd
bugs1024056
milestone42.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1024056 - Simple ASCII lexical analyzer. r=nfroyd
xpcom/ds/Tokenizer.cpp
xpcom/ds/Tokenizer.h
xpcom/ds/moz.build
xpcom/tests/gtest/TestTokenizer.cpp
xpcom/tests/gtest/moz.build
new file mode 100644
--- /dev/null
+++ b/xpcom/ds/Tokenizer.cpp
@@ -0,0 +1,389 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "Tokenizer.h"
+
+#include "nsUnicharUtils.h"
+#include "mozilla/CheckedInt.h"
+
+namespace mozilla {
+
+static const char sWhitespaces[] = " \t";
+
+// Constructs a tokenizer over aSource.  All internal iterators point into
+// aSource's buffer, so the caller must keep aSource alive (and unchanged)
+// for the lifetime of this Tokenizer.
+Tokenizer::Tokenizer(const nsACString& aSource)
+  : mPastEof(false)
+  , mHasFailed(false)
+  , mWhitespaces(sWhitespaces)
+{
+  aSource.BeginReading(mCursor);
+  // Record/rollback positions start at the beginning of the input.
+  mRecord = mRollback = mCursor;
+  aSource.EndReading(mEnd);
+}
+
+// Reads the next token unconditionally.  Returns false only when a previous
+// call has already consumed the EOF token; otherwise stores the token in
+// aToken, advances the cursor past it, and returns true.
+bool
+Tokenizer::Next(Token& aToken)
+{
+  if (!HasInput()) {
+    mHasFailed = true;
+    return false;
+  }
+
+  // Remember where this token began so Rollback() can undo this read.
+  mRollback = mCursor;
+  mCursor = Parse(aToken);
+  mPastEof = aToken.Type() == TOKEN_EOF;
+  mHasFailed = false;
+  return true;
+}
+
+// Conditionally consumes the next token: only if its type equals aTokenType
+// is the cursor advanced (and the token stored in aResult).  On a type
+// mismatch the read position is left untouched and false is returned.
+bool
+Tokenizer::Check(const TokenType aTokenType, Token& aResult)
+{
+  if (!HasInput()) {
+    mHasFailed = true;
+    return false;
+  }
+
+  // Parse() is side-effect free; 'next' is only committed on a match.
+  nsACString::const_char_iterator next = Parse(aResult);
+  if (aTokenType != aResult.Type()) {
+    mHasFailed = true;
+    return false;
+  }
+
+  mRollback = mCursor;
+  mCursor = next;
+  mPastEof = aResult.Type() == TOKEN_EOF;
+  return true;
+}
+
+// Conditionally consumes the next token: only if it compares equal to aToken
+// (type and, where applicable, value — see Token::Equals) is the cursor
+// advanced.  Otherwise the read position stays put and false is returned.
+bool
+Tokenizer::Check(const Token& aToken)
+{
+  if (!HasInput()) {
+    mHasFailed = true;
+    return false;
+  }
+
+  Token parsed;
+  nsACString::const_char_iterator next = Parse(parsed);
+  if (!aToken.Equals(parsed)) {
+    mHasFailed = true;
+    return false;
+  }
+
+  mRollback = mCursor;
+  mCursor = next;
+  mPastEof = parsed.Type() == TOKEN_EOF;
+  return true;
+}
+
+// Returns true when the most recent Next()/Check*() call failed (type or
+// value mismatch, or an attempt to read past the EOF token).
+bool
+Tokenizer::HasFailed() const
+{
+  return mHasFailed;
+}
+
+// Consumes any run of whitespace tokens (characters listed in mWhitespaces).
+// A no-op when the next token is not whitespace.
+void
+Tokenizer::SkipWhites()
+{
+  if (!CheckWhite()) {
+    return;
+  }
+
+  // Preserve the rollback point of the *first* whitespace so a subsequent
+  // Rollback() undoes the entire skip, not just the last CheckWhite().
+  nsACString::const_char_iterator rollback = mRollback;
+  while (CheckWhite()) {
+  }
+
+  // The loop necessarily ended with a failed CheckWhite(); that failure is
+  // expected and must not be reported by HasFailed().
+  mHasFailed = false;
+  mRollback = rollback;
+}
+
+// Consumes a single input character iff the user-supplied classifier returns
+// true for it.  The classifier is never called at or past the end of input.
+bool
+Tokenizer::CheckChar(bool (*aClassifier)(const char aChar))
+{
+  if (!aClassifier) {
+    MOZ_ASSERT(false);
+    return false;
+  }
+
+  if (!HasInput() || mCursor == mEnd) {
+    mHasFailed = true;
+    return false;
+  }
+
+  if (!aClassifier(*mCursor)) {
+    mHasFailed = true;
+    return false;
+  }
+
+  // Single raw-character consumption; bypasses Parse() entirely.
+  mRollback = mCursor;
+  ++mCursor;
+  mHasFailed = false;
+  return true;
+}
+
+// Reverts the cursor to where it was before the last successful parse
+// operation.  Only one step of undo is kept, hence the assertion against
+// calling this twice in a row (or before anything has been parsed).
+void
+Tokenizer::Rollback()
+{
+  MOZ_ASSERT(mCursor > mRollback || mPastEof,
+             "Tokenizer::Rollback() cannot use twice or before any parsing");
+
+  // Un-reading the EOF token makes the input readable again.
+  mPastEof = false;
+  mHasFailed = false;
+  mCursor = mRollback;
+}
+
+// Marks the start of a recorded substring for a later Claim().  With
+// INCLUDE_LAST the recording starts at the previous token's start position
+// (mRollback); with EXCLUDE_LAST it starts at the current cursor.
+void
+Tokenizer::Record(ClaimInclusion aInclude)
+{
+  mRecord = aInclude == INCLUDE_LAST
+    ? mRollback
+    : mCursor;
+}
+
+
+// Copies the substring recorded since the last Record() call into aResult.
+// EXCLUDE_LAST ends the substring before the last parsed token (mRollback),
+// INCLUDE_LAST ends it at the current cursor.
+void
+Tokenizer::Claim(nsACString& aResult, ClaimInclusion aInclusion)
+{
+  nsACString::const_char_iterator close = aInclusion == EXCLUDE_LAST
+    ? mRollback
+    : mCursor;
+  aResult.Assign(Substring(mRecord, close));
+}
+
+// protected
+
+// Returns true while there is still something to read, i.e. until the EOF
+// token itself has been consumed by a parsing operation.
+bool
+Tokenizer::HasInput() const
+{
+  return !mPastEof;
+}
+
+// The core lexer: classifies the character at mCursor, scans forward to the
+// token's end, fills aToken, and returns the iterator one past the token.
+// Deliberately const / side-effect free — callers decide whether to commit
+// the returned position to mCursor.
+nsACString::const_char_iterator
+Tokenizer::Parse(Token& aToken) const
+{
+  if (mCursor == mEnd) {
+    aToken = Token::EndOfFile();
+    return mEnd;
+  }
+
+  nsACString::const_char_iterator next = mCursor;
+
+  enum State {
+    PARSE_INTEGER,
+    PARSE_WORD,
+    PARSE_CRLF,
+    PARSE_LF,
+    PARSE_WS,
+    PARSE_CHAR,
+  } state;
+
+  // Pick the state from the first character; note word-first wins over
+  // number, so "test123" lexes as a single word token.
+  if (IsWordFirst(*next)) {
+    state = PARSE_WORD;
+  } else if (IsNumber(*next)) {
+    state = PARSE_INTEGER;
+  } else if (*next == '\r') {
+    state = PARSE_CRLF;
+  } else if (*next == '\n') {
+    state = PARSE_LF;
+  } else if (strchr(mWhitespaces, *next)) { // not UTF-8 friendly?
+    state = PARSE_WS;
+  } else {
+    state = PARSE_CHAR;
+  }
+
+  // CheckedInt64 turns decimal overflow into an invalid value rather than UB.
+  mozilla::CheckedInt64 resultingNumber = 0;
+
+  while (next < mEnd) {
+    switch (state) {
+    case PARSE_INTEGER:
+      // Keep it simple for now
+      resultingNumber *= 10;
+      resultingNumber += static_cast<int64_t>(*next - '0');
+
+      ++next;
+      if (IsEnd(next) || !IsNumber(*next)) {
+        if (!resultingNumber.isValid()) {
+          // Overflowed int64 — report an error token for the whole digit run.
+          aToken = Token::Error();
+        } else {
+          aToken = Token::Number(resultingNumber.value());
+        }
+        return next;
+      }
+      break;
+
+    case PARSE_WORD:
+      ++next;
+      if (IsEnd(next) || !IsWord(*next)) {
+        aToken = Token::Word(Substring(mCursor, next));
+        return next;
+      }
+      break;
+
+    case PARSE_CRLF:
+      ++next;
+      if (!IsEnd(next) && *next == '\n') { // LF is optional
+        ++next;
+      }
+      aToken = Token::NewLine();
+      return next;
+
+    case PARSE_LF:
+      ++next;
+      aToken = Token::NewLine();
+      return next;
+
+    case PARSE_WS:
+      ++next;
+      aToken = Token::Whitespace();
+      return next;
+
+    case PARSE_CHAR:
+      ++next;
+      aToken = Token::Char(*mCursor);
+      return next;
+    } // switch (state)
+  } // while (next < end)
+
+  // Defensive: each state returns before 'next' reaches mEnd (the IsEnd()
+  // checks above fire first), so this is normally unreachable; aToken would
+  // keep its default TOKEN_UNKNOWN type here.
+  return next;
+}
+
+// True when the iterator has reached the end of the input buffer.
+bool
+Tokenizer::IsEnd(const nsACString::const_char_iterator& caret) const
+{
+  return caret == mEnd;
+}
+
+// True for characters that may start a word token: letters (detected as
+// characters whose lower- and upper-case forms differ) and underscore.
+// NOTE(review): for chars >= 0x80 the cast to uint32_t sign-extends; the
+// existing TODO about proper unicode handling covers this.
+bool
+Tokenizer::IsWordFirst(const char aInput) const
+{
+  // TODO: make this fully work with unicode
+  return (ToLowerCase(static_cast<uint32_t>(aInput)) !=
+          ToUpperCase(static_cast<uint32_t>(aInput))) ||
+          '_' == aInput;
+}
+
+// True for characters allowed inside a word: word-start chars plus digits.
+bool
+Tokenizer::IsWord(const char aInput) const
+{
+  return IsWordFirst(aInput) || IsNumber(aInput);
+}
+
+// True for ASCII decimal digits.
+bool
+Tokenizer::IsNumber(const char aInput) const
+{
+  // TODO: are there unicode numbers?
+  return aInput >= '0' && aInput <= '9';
+}
+
+// Tokenizer::Token
+//
+// Static factory functions — each builds a Token of the given type, filling
+// only the value member relevant to that type.
+
+// static
+Tokenizer::Token
+Tokenizer::Token::Word(const nsACString& aValue)
+{
+  Token t;
+  t.mType = TOKEN_WORD;
+  t.mWord = aValue;
+  return t;
+}
+
+// static
+Tokenizer::Token
+Tokenizer::Token::Char(const char aValue)
+{
+  Token t;
+  t.mType = TOKEN_CHAR;
+  t.mChar = aValue;
+  return t;
+}
+
+// static
+Tokenizer::Token
+Tokenizer::Token::Number(const int64_t aValue)
+{
+  Token t;
+  t.mType = TOKEN_INTEGER;
+  t.mInteger = aValue;
+  return t;
+}
+
+// static
+Tokenizer::Token
+Tokenizer::Token::Whitespace()
+{
+  Token t;
+  t.mType = TOKEN_WS;
+  // mChar deliberately cleared: AsChar() is legal on TOKEN_WS tokens.
+  t.mChar = '\0';
+  return t;
+}
+
+// static
+Tokenizer::Token
+Tokenizer::Token::NewLine()
+{
+  Token t;
+  t.mType = TOKEN_EOL;
+  return t;
+}
+
+// static
+Tokenizer::Token
+Tokenizer::Token::EndOfFile()
+{
+  Token t;
+  t.mType = TOKEN_EOF;
+  return t;
+}
+
+// static
+Tokenizer::Token
+Tokenizer::Token::Error()
+{
+  Token t;
+  t.mType = TOKEN_ERROR;
+  return t;
+}
+
+// Two tokens are equal when their types match and, for value-carrying types
+// (integer, word, char), their values match too.  Value-less types (WS, EOL,
+// EOF, ERROR) compare equal on type alone.
+bool
+Tokenizer::Token::Equals(const Token& aOther) const
+{
+  if (mType != aOther.mType) {
+    return false;
+  }
+
+  switch (mType) {
+  case TOKEN_INTEGER:
+    return AsInteger() == aOther.AsInteger();
+  case TOKEN_WORD:
+    return AsString() == aOther.AsString();
+  case TOKEN_CHAR:
+    return AsChar() == aOther.AsChar();
+  default:
+    return true;
+  }
+}
+
+// Value accessor for TOKEN_CHAR (and TOKEN_WS, which carries '\0').
+char
+Tokenizer::Token::AsChar() const
+{
+  MOZ_ASSERT(mType == TOKEN_CHAR || mType == TOKEN_WS);
+  return mChar;
+}
+
+// Value accessor for TOKEN_WORD; returns a copy of the word text.
+nsCString
+Tokenizer::Token::AsString() const
+{
+  MOZ_ASSERT(mType == TOKEN_WORD);
+  return mWord;
+}
+
+// Value accessor for TOKEN_INTEGER.
+int64_t
+Tokenizer::Token::AsInteger() const
+{
+  MOZ_ASSERT(mType == TOKEN_INTEGER);
+  return mInteger;
+}
+
+} // mozilla
new file mode 100644
--- /dev/null
+++ b/xpcom/ds/Tokenizer.h
@@ -0,0 +1,225 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Tokenizer_h__
+#define Tokenizer_h__
+
+#include "nsString.h"
+
+namespace mozilla {
+
+/**
+ * This is a simple implementation of a lexical analyzer or maybe better
+ * called a tokenizer.  It doesn't allow any user dictionaries or
+ * user-defined token types.
+ *
+ * It is limited only to ASCII input for now. UTF-8 or any other input
+ * encoding must yet be implemented.
+ */
+class Tokenizer {
+public:
+  /**
+   * The analyzer works with elements in the input cut to a sequence of token
+   * where each token has an elementary type
+   */
+  enum TokenType {
+    TOKEN_UNKNOWN,
+    TOKEN_ERROR,
+    TOKEN_INTEGER,
+    TOKEN_WORD,
+    TOKEN_CHAR,
+    TOKEN_WS,
+    TOKEN_EOL,
+    TOKEN_EOF
+  };
+
+  /**
+   * Class holding the type and the value of a token.  It can be manually created
+   * to allow checks against it via methods of Tokenizer or is a result of some of
+   * the Tokenizer's methods.
+   */
+  class Token {
+    TokenType mType;
+    nsCString mWord;
+    char mChar;
+    int64_t mInteger;
+
+  public:
+    Token() : mType(TOKEN_UNKNOWN), mChar(0), mInteger(0) {}
+
+    // Static constructors of tokens by type and value
+    static Token Word(const nsACString& aWord);
+    static Token Char(const char aChar);
+    static Token Number(const int64_t aNumber);
+    static Token Whitespace();
+    static Token NewLine();
+    static Token EndOfFile();
+    static Token Error();
+
+    // Compares the two tokens: the types must be identical and, for the
+    // value-carrying types (integer, word, char), the values must be equal
+    // as well.
+    bool Equals(const Token& aOther) const;
+
+    TokenType Type() const { return mType; }
+    char AsChar() const;
+    nsCString AsString() const;
+    int64_t AsInteger() const;
+  };
+
+public:
+  explicit Tokenizer(const nsACString& aSource);
+
+  /**
+   * Some methods are collecting the input as it is being parsed to obtain
+   * a substring between particular syntax boundaries defined by any recursive
+   * descent parser or simple parser the Tokenizer is used to read the input for.
+   */
+  enum ClaimInclusion {
+    /**
+     * Include resulting (or passed) token of the last lexical analyzer operation in the result.
+     */
+    INCLUDE_LAST,
+    /**
+     * Do not include it.
+     */
+    EXCLUDE_LAST
+  };
+
+  /**
+   * When there is still anything to read from the input, tokenize it, store the token type
+   * and value to aToken result and shift the cursor past this just parsed token.  Each call
+   * to Next() reads another token from the input and shifts the cursor.
+   * Returns false if we have passed the end of the input.
+   */
+  MOZ_WARN_UNUSED_RESULT
+  bool Next(Token& aToken);
+
+  /**
+   * Parse the token on the input read cursor position, check its type is equal to aTokenType
+   * and if so, put it into aResult, shift the cursor and return true.  Otherwise, leave
+   * the input read cursor position intact and return false.
+   */
+  MOZ_WARN_UNUSED_RESULT
+  bool Check(const TokenType aTokenType, Token& aResult);
+  /**
+   * Same as above method, just compares both token type and token value passed in aToken.
+   * When both the type and the value equals, shift the cursor and return true.  Otherwise
+   * return false.
+   */
+  MOZ_WARN_UNUSED_RESULT
+  bool Check(const Token& aToken);
+
+  /**
+   * Returns true when the last Check*() call has returned false or when we've read past
+   * the end of the input string.
+   */
+  MOZ_WARN_UNUSED_RESULT
+  bool HasFailed() const;
+
+  /**
+   * Skips any occurrence of whitespaces specified in mWhitespaces member.
+   */
+  void SkipWhites();
+
+  // These are mostly shortcuts for the Check() methods above.
+
+  /**
+   * Check whitespace character is present.
+   */
+  MOZ_WARN_UNUSED_RESULT
+  bool CheckWhite() { return Check(Token::Whitespace()); }
+  /**
+   * Check there is a single character on the read cursor position.  If so, shift the read
+   * cursor position and return true.  Otherwise false.
+   */
+  MOZ_WARN_UNUSED_RESULT
+  bool CheckChar(const char aChar) { return Check(Token::Char(aChar)); }
+  /**
+   * This is a customizable version of CheckChar.  aClassifier is a function called with
+   * value of the character on the current input read position.  If this user function
+   * returns true, read cursor is shifted and true returned.  Otherwise false.
+   * The user classification function is not called when we are at or past the end and
+   * false is immediately returned.
+   */
+  MOZ_WARN_UNUSED_RESULT
+  bool CheckChar(bool (*aClassifier)(const char aChar));
+  /**
+   * Shortcut for direct word check.
+   */
+  template <size_t N>
+  MOZ_WARN_UNUSED_RESULT
+  bool CheckWord(const char (&aWord)[N]) { return Check(Token::Word(nsLiteralCString(aWord))); }
+  /**
+   * Checks \r, \n or \r\n.
+   */
+  MOZ_WARN_UNUSED_RESULT
+  bool CheckEOL() { return Check(Token::NewLine()); }
+  /**
+   * Checks we are at the end of the input string reading.  If so, shift past the end
+   * and returns true.  Otherwise does nothing and returns false.
+   */
+  MOZ_WARN_UNUSED_RESULT
+  bool CheckEOF() { return Check(Token::EndOfFile()); }
+
+  /**
+   * Returns the read cursor position back as it was before the last call of any parsing
+   * method of Tokenizer (Next, Check*, Skip*) so that the last operation can be repeated.
+   * Rollback cannot be used multiple times, it only reverts the last successful parse
+   * operation.  It also cannot be used before any parsing operation has been called
+   * on the Tokenizer.
+   */
+  void Rollback();
+
+  /**
+   * Start the process of recording.  Based on aInclude value the beginning of the recorded
+   * sub-string is at the current position (EXCLUDE_LAST) or at the position before the last
+   * parsed token (INCLUDE_LAST).
+   */
+  void Record(ClaimInclusion aInclude = EXCLUDE_LAST);
+  /**
+   * Claim result of the record started with Record() call before.  Depending on aInclude
+   * the ending of the sub-string result includes or excludes the last parsed or checked
+   * token.
+   */
+  void Claim(nsACString& aResult, ClaimInclusion aInclude = EXCLUDE_LAST);
+
+protected:
+  // Returns true until the EOF token has been read, i.e. while there is
+  // still input to consume.
+  bool HasInput() const;
+  // Main parsing function, it doesn't shift the read cursor, just returns the next
+  // token position.
+  nsACString::const_char_iterator Parse(Token& aToken) const;
+  // Is read cursor at the end?
+  bool IsEnd(const nsACString::const_char_iterator& caret) const;
+  // Is read cursor on a character that is a word start?
+  bool IsWordFirst(const char aInput) const;
+  // Is read cursor on a character that is an in-word letter?
+  bool IsWord(const char aInput) const;
+  // Is read cursor on a character that is a valid number?
+  // TODO - support multiple radix
+  bool IsNumber(const char aInput) const;
+
+private:
+  Tokenizer() = delete;
+
+  // true iff we have already read the EOF token
+  bool mPastEof;
+  // true iff the last Check*() call has returned false; reset to false by
+  // Rollback() and by any successful parse operation
+  bool mHasFailed;
+
+  // Customizable list of whitespaces
+  char const* mWhitespaces;
+
+  // All these point to the original buffer passed to the Tokenizer
+  nsACString::const_char_iterator mRecord; // Position where the recorded sub-string for Claim() is
+  nsACString::const_char_iterator mRollback; // Position of the previous token start
+  nsACString::const_char_iterator mCursor; // Position of the current (actually next to read) token start
+  nsACString::const_char_iterator mEnd; // End of the input position
+};
+
+} // mozilla
+
+#endif // Tokenizer_h__
--- a/xpcom/ds/moz.build
+++ b/xpcom/ds/moz.build
@@ -57,16 +57,17 @@ EXPORTS += [
     'nsSupportsArray.h',
     'nsSupportsPrimitives.h',
     'nsVariant.h',
     'nsWhitespaceTokenizer.h',
 ]
 
 EXPORTS.mozilla += [
     'StickyTimeDuration.h',
+    'Tokenizer.h',
 ]
 
 UNIFIED_SOURCES += [
     'nsArray.cpp',
     'nsAtomService.cpp',
     'nsAtomTable.cpp',
     'nsCRT.cpp',
     'nsHashPropertyBag.cpp',
@@ -74,16 +75,17 @@ UNIFIED_SOURCES += [
     'nsObserverList.cpp',
     'nsObserverService.cpp',
     'nsProperties.cpp',
     'nsStringEnumerator.cpp',
     'nsSupportsArray.cpp',
     'nsSupportsArrayEnumerator.cpp',
     'nsSupportsPrimitives.cpp',
     'nsVariant.cpp',
+    'Tokenizer.cpp',
 ]
 
 # These two files cannot be built in unified mode because they use the
 # PL_ARENA_CONST_ALIGN_MASK macro with plarena.h.
 SOURCES += [
     'nsPersistentProperties.cpp',
     'nsStaticNameTable.cpp',
 ]
new file mode 100644
--- /dev/null
+++ b/xpcom/tests/gtest/TestTokenizer.cpp
@@ -0,0 +1,292 @@
+#include "mozilla/Tokenizer.h"
+#include "gtest/gtest.h"
+
+using namespace mozilla;
+
+// Classifier for Tokenizer::CheckChar: arithmetic operator characters.
+static bool IsOperator(char const c)
+{
+  return c == '+' || c == '*';
+}
+
+// Classifier for Tokenizer::CheckChar: characters legal in an HTTP header
+// field name (alphanumerics, underscore, hyphen).
+static bool HttpHeaderCharacter(char const c)
+{
+  return (c >= 'a' && c <= 'z') ||
+         (c >= 'A' && c <= 'Z') ||
+         (c >= '0' && c <= '9') ||
+         (c == '_') ||
+         (c == '-');
+}
+
+// End-to-end exercise: parse a realistic HTTP response with CheckWord/
+// CheckChar/SkipWhites plus Record()/Claim() for substring extraction.
+TEST(Tokenizer, HTTPResponse)
+{
+  Tokenizer::Token t;
+
+  // Real life test, HTTP response
+
+  Tokenizer p(NS_LITERAL_CSTRING(
+    "HTTP/1.0 304 Not modified\r\n"
+    "ETag: hallo\r\n"
+    "Content-Length: 16\r\n"
+    "\r\n"
+    "This is the body"));
+
+  EXPECT_TRUE(p.CheckWord("HTTP"));
+  EXPECT_TRUE(p.CheckChar('/'));
+  EXPECT_TRUE(p.Check(Tokenizer::TOKEN_INTEGER, t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_INTEGER);
+  EXPECT_TRUE(t.AsInteger() == 1);
+  EXPECT_TRUE(p.CheckChar('.'));
+  EXPECT_TRUE(p.Check(Tokenizer::TOKEN_INTEGER, t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_INTEGER);
+  EXPECT_TRUE(t.AsInteger() == 0);
+  p.SkipWhites();
+
+  EXPECT_TRUE(p.Check(Tokenizer::TOKEN_INTEGER, t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_INTEGER);
+  EXPECT_TRUE(t.AsInteger() == 304);
+  p.SkipWhites();
+
+  // Claim everything up to (but excluding) the EOL token: the status text.
+  p.Record();
+  while (p.Next(t) && t.Type() != Tokenizer::TOKEN_EOL);
+  EXPECT_FALSE(p.HasFailed());
+  nsAutoCString h;
+  p.Claim(h);
+  EXPECT_TRUE(h == "Not modified");
+
+  p.Record();
+  while (p.CheckChar(HttpHeaderCharacter));
+  p.Claim(h, Tokenizer::INCLUDE_LAST);
+  EXPECT_TRUE(h == "ETag");
+  p.SkipWhites();
+  EXPECT_TRUE(p.CheckChar(':'));
+  p.SkipWhites();
+  p.Record();
+  while (p.Next(t) && t.Type() != Tokenizer::TOKEN_EOL);
+  EXPECT_FALSE(p.HasFailed());
+  p.Claim(h);
+  EXPECT_TRUE(h == "hallo");
+
+  p.Record();
+  while (p.CheckChar(HttpHeaderCharacter));
+  p.Claim(h, Tokenizer::INCLUDE_LAST);
+  EXPECT_TRUE(h == "Content-Length");
+  p.SkipWhites();
+  EXPECT_TRUE(p.CheckChar(':'));
+  p.SkipWhites();
+  EXPECT_TRUE(p.Check(Tokenizer::TOKEN_INTEGER, t));
+  EXPECT_TRUE(t.AsInteger() == 16);
+  EXPECT_TRUE(p.CheckEOL());
+
+  EXPECT_TRUE(p.CheckEOL());
+
+  // The body runs to EOF; the Next() loop stops by returning false there.
+  p.Record();
+  while (p.Next(t) && t.Type() != Tokenizer::TOKEN_EOF);
+  nsAutoCString b;
+  p.Claim(b);
+  EXPECT_TRUE(b == "This is the body");
+}
+
+// Synthetic walk through every Tokenizer feature: Next/Check variants,
+// Rollback semantics, Record/Claim inclusion modes, CR/LF handling and EOF.
+TEST(Tokenizer, Main)
+{
+  Tokenizer::Token t;
+
+  // Synthetic code-specific test
+
+  Tokenizer p(NS_LITERAL_CSTRING("test123 ,15  \t*\r\n%xx,-15\r\r"));
+
+  EXPECT_TRUE(p.Next(t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_WORD);
+  EXPECT_TRUE(t.AsString() == "test123");
+
+  // A default-constructed token (TOKEN_UNKNOWN) never matches.
+  Tokenizer::Token u;
+  EXPECT_FALSE(p.Check(u));
+
+  EXPECT_FALSE(p.CheckChar('!'));
+
+  EXPECT_FALSE(p.Check(Tokenizer::Token::Number(123)));
+
+  EXPECT_TRUE(p.CheckWhite());
+
+  EXPECT_TRUE(p.CheckChar(','));
+
+  EXPECT_TRUE(p.Check(Tokenizer::Token::Number(15)));
+
+  p.Rollback();
+  EXPECT_TRUE(p.Check(Tokenizer::Token::Number(15)));
+
+  p.Rollback();
+  EXPECT_TRUE(p.Next(t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_INTEGER);
+  EXPECT_TRUE(t.AsInteger() == 15);
+
+  EXPECT_FALSE(p.CheckChar(IsOperator));
+
+  EXPECT_TRUE(p.CheckWhite());
+
+  p.SkipWhites();
+
+  EXPECT_FALSE(p.CheckWhite());
+
+  // Rollback after SkipWhites() undoes the whole skipped run.
+  p.Rollback();
+
+  EXPECT_TRUE(p.CheckWhite());
+  EXPECT_TRUE(p.CheckWhite());
+
+  p.Record(Tokenizer::EXCLUDE_LAST);
+
+  EXPECT_TRUE(p.CheckChar(IsOperator));
+
+  p.Rollback();
+
+  EXPECT_TRUE(p.Next(t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_CHAR);
+  EXPECT_TRUE(t.AsChar() == '*');
+
+  EXPECT_TRUE(p.Next(t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_EOL);
+
+  EXPECT_TRUE(p.Next(t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_CHAR);
+  EXPECT_TRUE(t.AsChar() == '%');
+
+  nsAutoCString claim;
+  p.Claim(claim, Tokenizer::EXCLUDE_LAST);
+  EXPECT_TRUE(claim == "*\r\n");
+  p.Claim(claim, Tokenizer::INCLUDE_LAST);
+  EXPECT_TRUE(claim == "*\r\n%");
+
+  p.Rollback();
+  EXPECT_TRUE(p.CheckChar('%'));
+
+  p.Record(Tokenizer::INCLUDE_LAST);
+
+  EXPECT_FALSE(p.CheckWord("xy"));
+
+  EXPECT_TRUE(p.CheckWord("xx"));
+
+
+  p.Claim(claim, Tokenizer::INCLUDE_LAST);
+  EXPECT_TRUE(claim == "%xx");
+
+  EXPECT_TRUE(p.Next(t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_CHAR);
+  EXPECT_TRUE(t.AsChar() == ',');
+
+  // '-' is lexed as a char token; negative numbers are not a single token.
+  EXPECT_TRUE(p.CheckChar('-'));
+
+  EXPECT_TRUE(p.Next(t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_INTEGER);
+  EXPECT_TRUE(t.AsInteger() == 15);
+
+  EXPECT_TRUE(p.Next(t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_EOL);
+
+  EXPECT_TRUE(p.Next(t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_EOL);
+
+  EXPECT_TRUE(p.Next(t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_EOF);
+
+  EXPECT_FALSE(p.Next(t));
+
+  // Even the EOF token can be rolled back and re-read.
+  p.Rollback();
+  EXPECT_TRUE(p.Next(t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_EOF);
+
+  EXPECT_FALSE(p.Next(t));
+
+  p.Rollback();
+  EXPECT_TRUE(p.CheckEOF());
+
+  EXPECT_FALSE(p.CheckEOF());
+}
+
+// Words may contain digits after the first letter ("test123" is one word).
+TEST(Tokenizer, SingleWord)
+{
+  // Single word with numbers in it test
+
+  Tokenizer p(NS_LITERAL_CSTRING("test123"));
+
+  EXPECT_TRUE(p.CheckWord("test123"));
+  EXPECT_TRUE(p.CheckEOF());
+}
+
+// A digit-leading input is an integer token, never a word.
+TEST(Tokenizer, EndingAfterNumber)
+{
+  // An end handling after a number
+
+  Tokenizer p(NS_LITERAL_CSTRING("123"));
+
+  EXPECT_FALSE(p.CheckWord("123"));
+  EXPECT_TRUE(p.Check(Tokenizer::Token::Number(123)));
+  EXPECT_TRUE(p.CheckEOF());
+}
+
+// A digit run overflowing int64 yields TOKEN_ERROR but still consumes the
+// whole run (the next token is EOF).
+TEST(Tokenizer, BadInteger)
+{
+  Tokenizer::Token t;
+
+  // A bad integer test
+
+  Tokenizer p(NS_LITERAL_CSTRING("189234891274981758617846178651647620587135"));
+
+  EXPECT_TRUE(p.Next(t));
+  EXPECT_TRUE(t.Type() == Tokenizer::TOKEN_ERROR);
+  EXPECT_TRUE(p.CheckEOF());
+}
+
+// Check(TokenType, Token&) must not move the cursor on a type mismatch.
+TEST(Tokenizer, CheckExpectedTokenValue)
+{
+  Tokenizer::Token t;
+
+  // Check expected token value test
+
+  Tokenizer p(NS_LITERAL_CSTRING("blue velvet"));
+
+  EXPECT_FALSE(p.Check(Tokenizer::TOKEN_INTEGER, t));
+
+  EXPECT_TRUE(p.Check(Tokenizer::TOKEN_WORD, t));
+  EXPECT_TRUE(t.AsString() == "blue");
+
+  EXPECT_FALSE(p.Check(Tokenizer::TOKEN_WORD, t));
+
+  EXPECT_TRUE(p.CheckWhite());
+
+  EXPECT_TRUE(p.Check(Tokenizer::TOKEN_WORD, t));
+  EXPECT_TRUE(t.AsString() == "velvet");
+
+  EXPECT_TRUE(p.CheckEOF());
+
+  EXPECT_FALSE(p.Next(t));
+}
+
+// HasFailed() reflects the outcome of the most recent parse operation only.
+TEST(Tokenizer, HasFailed)
+{
+  Tokenizer::Token t;
+
+  // HasFailed test
+
+  Tokenizer p1(NS_LITERAL_CSTRING("a b"));
+
+  while (p1.Next(t) && t.Type() != Tokenizer::TOKEN_CHAR);
+  EXPECT_TRUE(p1.HasFailed());
+
+
+  Tokenizer p2(NS_LITERAL_CSTRING("a b"));
+
+  EXPECT_FALSE(p2.CheckChar('c'));
+  EXPECT_TRUE(p2.HasFailed());
+  EXPECT_TRUE(p2.CheckChar(HttpHeaderCharacter));
+  EXPECT_FALSE(p2.HasFailed());
+  p2.SkipWhites();
+  EXPECT_FALSE(p2.HasFailed());
+  EXPECT_TRUE(p2.Next(t));
+  EXPECT_FALSE(p2.HasFailed());
+  EXPECT_TRUE(p2.Next(t));
+  EXPECT_FALSE(p2.HasFailed());
+  EXPECT_FALSE(p2.CheckChar('c'));
+  EXPECT_TRUE(p2.HasFailed());
+
+  while (p2.Next(t) && t.Type() != Tokenizer::TOKEN_CHAR);
+  EXPECT_TRUE(p2.HasFailed());
+}
--- a/xpcom/tests/gtest/moz.build
+++ b/xpcom/tests/gtest/moz.build
@@ -17,15 +17,16 @@ UNIFIED_SOURCES += [
     'TestStorageStream.cpp',
     'TestStrings.cpp',
     'TestStringStream.cpp',
     'TestSynchronization.cpp',
     'TestTArray.cpp',
     'TestThreadPool.cpp',
     'TestThreads.cpp',
     'TestTimeStamp.cpp',
+    'TestTokenizer.cpp',
     'TestUTF.cpp',
     'TestXPIDLString.cpp',
 ]
 
 FINAL_LIBRARY = 'xul-gtest'
 
 FAIL_ON_WARNINGS = True