Bug 1514273 - Update in-tree copies of compare-locales and Fluent to 5.1 and 0.10. r=stas, a=RyanVM
author: Axel Hecht <axel@pike.org>
date: Fri, 14 Dec 2018 19:31:11 +0000
changeset 509134: 91223ab6d08ea1bdf128c79f3eb05b5ccd1c69f7
parent 509133: 25eb46d5f2df3b5b7df76096386dcbe00f51ef6d
child 509135: 4044649d4c675c21b4c88bbd5f94187f870d0419
push id: 1905
push user: ffxbld-merge
push date: Mon, 21 Jan 2019 12:33:13 +0000
treeherder: mozilla-release@c2fca1944d8c
reviewers: stas, RyanVM
bugs: 1514273
milestone: 65.0
Bug 1514273 - Update in-tree copies of compare-locales and Fluent to 5.1 and 0.10. r=stas, a=RyanVM

Differential Revision: https://phabricator.services.mozilla.com/D14574
third_party/python/compare-locales/compare_locales/__init__.py
third_party/python/compare-locales/compare_locales/checks/android.py
third_party/python/compare-locales/compare_locales/checks/fluent.py
third_party/python/compare-locales/compare_locales/merge.py
third_party/python/compare-locales/compare_locales/parser/__init__.py
third_party/python/compare-locales/compare_locales/parser/android.py
third_party/python/compare-locales/compare_locales/parser/base.py
third_party/python/compare-locales/compare_locales/parser/defines.py
third_party/python/compare-locales/compare_locales/parser/fluent.py
third_party/python/compare-locales/compare_locales/parser/ini.py
third_party/python/compare-locales/compare_locales/serializer.py
third_party/python/compare-locales/compare_locales/tests/serializer/test_android.py
third_party/python/compare-locales/compare_locales/tests/test_ftl.py
third_party/python/fluent/fluent/syntax/ast.py
third_party/python/fluent/fluent/syntax/errors.py
third_party/python/fluent/fluent/syntax/parser.py
third_party/python/fluent/fluent/syntax/serializer.py
third_party/python/fluent/fluent/syntax/stream.py
--- a/third_party/python/compare-locales/compare_locales/__init__.py
+++ b/third_party/python/compare-locales/compare_locales/__init__.py
@@ -1,1 +1,1 @@
-version = "5.0.2"
+version = "5.1.0"
--- a/third_party/python/compare-locales/compare_locales/checks/android.py
+++ b/third_party/python/compare-locales/compare_locales/checks/android.py
@@ -8,17 +8,17 @@ from __future__ import unicode_literals
 import re
 from xml.dom import minidom
 
 from .base import Checker
 from ..parser.android import textContent
 
 
 class AndroidChecker(Checker):
-    pattern = re.compile('(.*/)?strings.*\\.xml$')
+    pattern = re.compile('(.*)?strings.*\\.xml$')
 
     def check(self, refEnt, l10nEnt):
         '''Given the reference and localized Entities, performs checks.
 
         This is a generator yielding tuples of
         - "warning" or "error", depending on what should be reported,
         - tuple of line, column info for the error within the string
         - description string to be shown in the report
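
The relaxed pattern no longer requires a directory separator immediately before the "strings" part of the file name. A minimal sketch of the difference; the file names are illustrative, not taken from the patch:

    import re

    old = re.compile('(.*/)?strings.*\\.xml$')
    new = re.compile('(.*)?strings.*\\.xml$')

    # Matched by both: the basename starts with "strings".
    assert old.match('values-de/strings.xml')
    assert new.match('values-de/strings.xml')

    # Only the relaxed pattern matches when "strings" appears later in the name.
    assert not old.match('values-de/localized_strings.xml')
    assert new.match('values-de/localized_strings.xml')
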
--- a/third_party/python/compare-locales/compare_locales/checks/fluent.py
+++ b/third_party/python/compare-locales/compare_locales/checks/fluent.py
@@ -24,18 +24,18 @@ class FluentChecker(Checker):
         def collect_message_references(node):
             if isinstance(node, ftl.MessageReference):
                 # The key is the name of the referenced message and it will
                 # be used in set algebra to find missing and obsolete
                 # references. The value is the node itself and its span
                 # will be used to pinpoint the error.
                 refs[node.id.name] = node
             if isinstance(node, ftl.TermReference):
-                # Same for terms
-                refs[node.id.name] = node
+                # Same for terms, store them as -term.id
+                refs['-' + node.id.name] = node
             # BaseNode.traverse expects this function to return the node.
             return node
 
         entry.traverse(collect_message_references)
         return refs
 
     def check_values(self, ref_entry, l10n_entry):
         '''Verify that values match, either both have a value or none.'''
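
Storing term references under '-name' keeps them in a separate namespace from message references when the reference and localized sets are compared. A rough sketch of that set algebra, with hypothetical collected keys standing in for the AST nodes:

    ref_refs = {'menu-about': '<MessageReference>', '-brand-name': '<TermReference>'}
    l10n_refs = {'menu-about': '<MessageReference>'}

    missing_refs = set(ref_refs) - set(l10n_refs)    # {'-brand-name'}
    obsolete_refs = set(l10n_refs) - set(ref_refs)   # set()
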
--- a/third_party/python/compare-locales/compare_locales/merge.py
+++ b/third_party/python/compare-locales/compare_locales/merge.py
@@ -16,16 +16,17 @@ to newest instead.
 '''
 
 from collections import OrderedDict, defaultdict
 from codecs import encode
 import six
 
 
 from compare_locales import parser as cl
+from compare_locales.parser.base import StickyEntry
 from compare_locales.compare.utils import AddRemove
 
 
 class MergeNotSupportedError(ValueError):
     pass
 
 
 def merge_channels(name, resources):
@@ -86,32 +87,19 @@ def merge_two(newer, older, keep_newer=T
     The values in the dict are the newer ones by default, too.
     If `keep_newer` is False, the values will be taken from the older
     dict.
     '''
     diff = AddRemove()
     diff.set_left(newer.keys())
     diff.set_right(older.keys())
 
-    def get_entity(key):
-        if keep_newer:
-            default, backup = newer, older
-        else:
-            default, backup = older, newer
-
-        entity = default.get(key, None)
-
-        # Always prefer the newer version.
-        if entity is not None:
-            return entity
-
-        return backup.get(key)
-
     # Create a flat sequence of all entities in order reported by AddRemove.
-    contents = [(key, get_entity(key)) for _, key in diff]
+    get_entity = get_newer_entity if keep_newer else get_older_entity
+    contents = [(key, get_entity(newer, older, key)) for _, key in diff]
 
     def prune(acc, cur):
         _, entity = cur
         if entity is None:
             # Prune Nones which stand for duplicated comments.
             return acc
 
         if len(acc) and isinstance(entity, cl.Whitespace):
@@ -125,10 +113,31 @@ def merge_two(newer, older, keep_newer=T
 
         acc.append(cur)
         return acc
 
     pruned = six.moves.reduce(prune, contents, [])
     return OrderedDict(pruned)
 
 
+def get_newer_entity(newer, older, key):
+    entity = newer.get(key, None)
+
+    # Always prefer the newer version.
+    if entity is not None:
+        return entity
+
+    return older.get(key)
+
+
+def get_older_entity(newer, older, key):
+    entity = older.get(key, None)
+
+    # If we don't have an older version, or it's a StickyEntry,
+    # get a newer version
+    if entity is None or isinstance(entity, StickyEntry):
+        return newer.get(key)
+
+    return entity
+
+
 def serialize_legacy_resource(entities):
     return "".join((entity.all for entity in entities))
--- a/third_party/python/compare-locales/compare_locales/parser/__init__.py
+++ b/third_party/python/compare-locales/compare_locales/parser/__init__.py
@@ -3,17 +3,17 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import
 from __future__ import unicode_literals
 import re
 
 from .base import (
     CAN_NONE, CAN_COPY, CAN_SKIP, CAN_MERGE,
-    EntityBase, Entity, Comment, OffsetComment, Junk, Whitespace,
+    Entry, Entity, Comment, OffsetComment, Junk, Whitespace,
     BadEntity, Parser,
 )
 from .android import (
     AndroidParser
 )
 from .defines import (
     DefinesParser, DefinesInstruction
 )
@@ -30,17 +30,17 @@ from .po import (
     PoParser
 )
 from .properties import (
     PropertiesParser, PropertiesEntity
 )
 
 __all__ = [
     "CAN_NONE", "CAN_COPY", "CAN_SKIP", "CAN_MERGE",
-    "Junk", "EntityBase", "Entity", "Whitespace", "Comment", "OffsetComment",
+    "Junk", "Entry", "Entity", "Whitespace", "Comment", "OffsetComment",
     "BadEntity", "Parser",
     "AndroidParser",
     "DefinesParser", "DefinesInstruction",
     "DTDParser", "DTDEntity",
     "FluentParser", "FluentComment", "FluentEntity",
     "FluentMessage", "FluentTerm",
     "IniParser", "IniSection",
     "PoParser",
--- a/third_party/python/compare-locales/compare_locales/parser/android.py
+++ b/third_party/python/compare-locales/compare_locales/parser/android.py
@@ -14,18 +14,18 @@ from __future__ import absolute_import
 from __future__ import unicode_literals
 
 import re
 from xml.dom import minidom
 from xml.dom.minidom import Node
 
 from .base import (
     CAN_SKIP,
-    EntityBase, Entity, Comment, Junk, Whitespace,
-    LiteralEntity,
+    Entity, Comment, Junk, Whitespace,
+    StickyEntry, LiteralEntity,
     Parser
 )
 
 
 class AndroidEntity(Entity):
     def __init__(
         self, ctx, pre_comment, white_space, node, all, key, raw_val, val
     ):
@@ -109,20 +109,27 @@ class XMLComment(NodeMixin, Comment):
     def val(self):
         return self._val_literal
 
     @property
     def key(self):
         return None
 
 
-class DocumentWrapper(NodeMixin, EntityBase):
-    def __init__(self, all):
+# DocumentWrapper is sticky in serialization.
+# Always keep the one from the reference document.
+class DocumentWrapper(NodeMixin, StickyEntry):
+    def __init__(self, key, all):
         self._all_literal = all
         self._val_literal = all
+        self._key_literal = key
+
+    @property
+    def key(self):
+        return self._key_literal
 
 
 class XMLJunk(Junk):
     def __init__(self, all):
         super(XMLJunk, self).__init__(None, (0, 0))
         self._all_literal = all
 
     @property
@@ -171,18 +178,26 @@ class AndroidParser(Parser):
         except Exception:
             yield XMLJunk(contents)
             return
         if doc.documentElement.nodeName != 'resources':
             yield XMLJunk(doc.toxml())
             return
         root_children = doc.documentElement.childNodes
         if not only_localizable:
+            attributes = ''.join(
+                ' {}="{}"'.format(attr_name, attr_value)
+                for attr_name, attr_value in
+                doc.documentElement.attributes.items()
+            )
             yield DocumentWrapper(
-                '<?xml version="1.0" encoding="utf-8"?>\n<resources>'
+                '<?xml?><resources>',
+                '<?xml version="1.0" encoding="utf-8"?>\n<resources{}>'.format(
+                    attributes
+                )
             )
         child_num = 0
         while child_num < len(root_children):
             node = root_children[child_num]
             if node.nodeType == Node.COMMENT_NODE:
                 current_comment, child_num = self.handleComment(
                     node, root_children, child_num
                 )
@@ -222,17 +237,17 @@ class AndroidParser(Parser):
             else:
                 if not only_localizable:
                     if current_comment:
                         yield current_comment
                     if white_space:
                         yield white_space
             child_num += 1
         if not only_localizable:
-            yield DocumentWrapper('</resources>\n')
+            yield DocumentWrapper('</resources>', '</resources>\n')
 
     def handleElement(self, element, current_comment, white_space):
         if element.nodeName == 'string' and element.hasAttribute('name'):
             return AndroidEntity(
                 self.ctx,
                 current_comment,
                 white_space,
                 element,
--- a/third_party/python/compare-locales/compare_locales/parser/base.py
+++ b/third_party/python/compare-locales/compare_locales/parser/base.py
@@ -27,17 +27,17 @@ CAN_COPY = 1
 # Remove broken entities from localization
 # Without CAN_MERGE, en-US is not good to use for localization.
 CAN_SKIP = 2
 # Add missing and broken entities from the reference to localization
 # This effectively means that en-US is good to use for localized files.
 CAN_MERGE = 4
 
 
-class EntityBase(object):
+class Entry(object):
     '''
     Abstraction layer for a localizable entity.
     Currently supported are grammars of the form:
 
     1: entity definition
     2: entity key (name)
     3: entity value
 
@@ -119,17 +119,24 @@ class EntityBase(object):
         value = self.re_br.sub('\n', self.val)
         value = self.re_sgml.sub('', value)
         return len(value.split())
 
     def equals(self, other):
         return self.key == other.key and self.val == other.val
 
 
-class Entity(EntityBase):
+class StickyEntry(Entry):
+    """Subclass of Entry to use in for syntax fragments
+    which should always be overwritten in the serializer.
+    """
+    pass
+
+
+class Entity(Entry):
     @property
     def localized(self):
         '''Is this entity localized.
 
         Always true for monolingual files.
         In bilingual files, this is a dynamic property.
         '''
         return True
@@ -179,17 +186,17 @@ class LiteralEntity(Entity):
 
 class PlaceholderEntity(LiteralEntity):
     """Subclass of Entity to be removed in merges.
     """
     def __init__(self, key):
         super(PlaceholderEntity, self).__init__(key, "", "\nplaceholder\n")
 
 
-class Comment(EntityBase):
+class Comment(Entry):
     def __init__(self, ctx, span):
         self.ctx = ctx
         self.span = span
         self.val_span = None
         self._val_cache = None
 
     @property
     def key(self):
@@ -258,17 +265,17 @@ class Junk(object):
     @property
     def val(self):
         return self.all
 
     def __repr__(self):
         return self.key
 
 
-class Whitespace(EntityBase):
+class Whitespace(Entry):
     '''Entity-like object representing an empty file with whitespace,
     if allowed
     '''
     def __init__(self, ctx, span):
         self.ctx = ctx
         self.span = self.key_span = self.val_span = span
 
     def __repr__(self):
--- a/third_party/python/compare-locales/compare_locales/parser/defines.py
+++ b/third_party/python/compare-locales/compare_locales/parser/defines.py
@@ -3,22 +3,22 @@
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import
 from __future__ import unicode_literals
 import re
 
 from .base import (
     CAN_COPY,
-    EntityBase, OffsetComment, Junk, Whitespace,
+    Entry, OffsetComment, Junk, Whitespace,
     Parser
 )
 
 
-class DefinesInstruction(EntityBase):
+class DefinesInstruction(Entry):
     '''Entity-like object representing processing instructions in inc files
     '''
     def __init__(self, ctx, span, val_span):
         self.ctx = ctx
         self.span = span
         self.key_span = self.val_span = val_span
 
     def __repr__(self):
--- a/third_party/python/compare-locales/compare_locales/parser/fluent.py
+++ b/third_party/python/compare-locales/compare_locales/parser/fluent.py
@@ -6,23 +6,23 @@ from __future__ import absolute_import
 from __future__ import unicode_literals
 import re
 
 from fluent.syntax import FluentParser as FTLParser
 from fluent.syntax import ast as ftl
 from fluent.syntax.serializer import serialize_comment
 from .base import (
     CAN_SKIP,
-    EntityBase, Entity, Comment, Junk, Whitespace,
+    Entry, Entity, Comment, Junk, Whitespace,
     LiteralEntity,
     Parser
 )
 
 
-class FluentAttribute(EntityBase):
+class FluentAttribute(Entry):
     ignored_fields = ['span']
 
     def __init__(self, entity, attr_node):
         self.ctx = entity.ctx
         self.attr = attr_node
         self.key_span = (attr_node.id.span.start, attr_node.id.span.end)
         self.val_span = (attr_node.value.span.start, attr_node.value.span.end)
 
@@ -39,26 +39,32 @@ class FluentEntity(Entity):
 
     def __init__(self, ctx, entry):
         start = entry.span.start
         end = entry.span.end
 
         self.ctx = ctx
         self.span = (start, end)
 
-        self.key_span = (entry.id.span.start, entry.id.span.end)
+        if isinstance(entry, ftl.Term):
+            # Terms don't have their '-' as part of the id, use the prior
+            # character
+            self.key_span = (entry.id.span.start - 1, entry.id.span.end)
+        else:
+            # Message
+            self.key_span = (entry.id.span.start, entry.id.span.end)
 
         if entry.value is not None:
             self.val_span = (entry.value.span.start, entry.value.span.end)
         else:
             self.val_span = None
 
         self.entry = entry
 
-        # EntityBase instances are expected to have pre_comment. It's used by
+        # Entry instances are expected to have pre_comment. It's used by
         # other formats to associate a Comment with an Entity. FluentEntities
         # don't need it because message comments are part of the entry AST and
         # are not separate Comment instances.
         self.pre_comment = None
 
     @property
     def root_node(self):
         '''AST node at which to start traversal for count_words.
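
With the widened key span, a parsed Term now reports its key with the leading dash, matching the '-name' keys the Fluent checker uses earlier in this patch. A small sketch against the updated parser; the file name only serves to select the Fluent parser:

    from compare_locales.parser import getParser

    p = getParser('brand.ftl')
    p.readContents(b'-brand-name = Firefox\nabout = About { -brand-name }\n')

    assert [entity.key for entity in p] == ['-brand-name', 'about']
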
--- a/third_party/python/compare-locales/compare_locales/parser/ini.py
+++ b/third_party/python/compare-locales/compare_locales/parser/ini.py
@@ -2,22 +2,22 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import
 from __future__ import unicode_literals
 import re
 
 from .base import (
-    EntityBase, OffsetComment,
+    Entry, OffsetComment,
     Parser
 )
 
 
-class IniSection(EntityBase):
+class IniSection(Entry):
     '''Entity-like object representing sections in ini files
     '''
     def __init__(self, ctx, span, val_span):
         self.ctx = ctx
         self.span = span
         self.key_span = self.val_span = val_span
 
     def __repr__(self):
--- a/third_party/python/compare-locales/compare_locales/serializer.py
+++ b/third_party/python/compare-locales/compare_locales/serializer.py
@@ -50,17 +50,18 @@ def serialize(filename, reference, old_l
     '''
     try:
         parser = getParser(filename)
     except UserWarning:
         raise SerializationNotSupportedError(
             'Unsupported file format ({}).'.format(filename))
     # create template, whitespace and all
     placeholders = [
-        placeholder(entry) for entry in reference
+        placeholder(entry)
+        for entry in reference
         if not isinstance(entry, Junk)
     ]
     ref_mapping = {
         entry.key: entry
         for entry in reference
         if isinstance(entry, Entity)
     }
     # strip obsolete strings
--- a/third_party/python/compare-locales/compare_locales/tests/serializer/test_android.py
+++ b/third_party/python/compare-locales/compare_locales/tests/serializer/test_android.py
@@ -175,8 +175,44 @@ class TestAndroidDuplicateComment(Helper
 <?xml version="1.0" encoding="utf-8"?>
 <resources>
 
     <!-- Label used in the contextmenu shown when long-pressing on a link -->
     <string name="contextmenu_link_share">translation</string>
   </resources>
 """
         )
+
+
+class TestAndroidTools(Helper, unittest.TestCase):
+    name = 'strings.xml'
+    reference_content = (
+        """\
+<resources xmlns:tools="http://schemas.android.com/tools">
+    <string name="app_tagline">Take your passwords everywhere.</string>
+    <string name="search_your_entries" tools:ignore="ExtraTranslation">"""
+        "search your entries"
+        """</string>
+</resources>
+""")
+
+    def test_namespaced_document(self):
+        self._test(
+            """\
+<?xml version="1.0" encoding="utf-8"?>
+<resources>
+    <string name="app_tagline">Localized tag line</string>
+  </resources>
+""",
+            {
+                "search_your_entries": "Looking for Entries"
+            },
+            (
+                """\
+<?xml version="1.0" encoding="utf-8"?>
+<resources xmlns:tools="http://schemas.android.com/tools">
+    <string name="app_tagline">Localized tag line</string>
+    <string name="search_your_entries" tools:ignore="ExtraTranslation">"""
+                "Looking for Entries"
+                """</string>
+</resources>
+""")
+        )
--- a/third_party/python/compare-locales/compare_locales/tests/test_ftl.py
+++ b/third_party/python/compare-locales/compare_locales/tests/test_ftl.py
@@ -77,68 +77,68 @@ h =
         self.assertEqual(g.count_words(), 3)
         self.assertEqual(h.count_words(), 10)
 
     def test_simple_message(self):
         self.parser.readContents(b'a = A')
 
         [a] = list(self.parser)
         self.assertEqual(a.key, 'a')
-        self.assertEqual(a.val, 'A')
+        self.assertEqual(a.raw_val, 'A')
         self.assertEqual(a.all, 'a = A')
         attributes = list(a.attributes)
         self.assertEqual(len(attributes), 0)
 
     def test_complex_message(self):
         self.parser.readContents(b'abc = A { $arg } B { msg } C')
 
         [abc] = list(self.parser)
         self.assertEqual(abc.key, 'abc')
-        self.assertEqual(abc.val, 'A { $arg } B { msg } C')
+        self.assertEqual(abc.raw_val, 'A { $arg } B { msg } C')
         self.assertEqual(abc.all, 'abc = A { $arg } B { msg } C')
 
     def test_multiline_message(self):
         self.parser.readContents(b'''\
 abc =
     A
     B
     C
 ''')
 
         [abc] = list(self.parser)
         self.assertEqual(abc.key, 'abc')
-        self.assertEqual(abc.val, 'A\n    B\n    C')
+        self.assertEqual(abc.raw_val, '    A\n    B\n    C')
         self.assertEqual(abc.all, 'abc =\n    A\n    B\n    C')
 
     def test_message_with_attribute(self):
         self.parser.readContents(b'''\
 abc = ABC
     .attr = Attr
 ''')
 
         [abc] = list(self.parser)
         self.assertEqual(abc.key, 'abc')
-        self.assertEqual(abc.val, 'ABC')
+        self.assertEqual(abc.raw_val, 'ABC')
         self.assertEqual(abc.all, 'abc = ABC\n    .attr = Attr')
 
     def test_message_with_attribute_and_no_value(self):
         self.parser.readContents(b'''\
 abc
     .attr = Attr
 ''')
 
         [abc] = list(self.parser)
         self.assertEqual(abc.key, 'abc')
-        self.assertEqual(abc.val, None)
+        self.assertEqual(abc.raw_val, None)
         self.assertEqual(abc.all, 'abc\n    .attr = Attr')
         attributes = list(abc.attributes)
         self.assertEqual(len(attributes), 1)
         attr = attributes[0]
         self.assertEqual(attr.key, 'attr')
-        self.assertEqual(attr.val, 'Attr')
+        self.assertEqual(attr.raw_val, 'Attr')
 
     def test_non_localizable(self):
         self.parser.readContents(b'''\
 ### Resource Comment
 
 foo = Foo
 
 ## Group Comment
@@ -159,33 +159,33 @@ baz = Baz
         self.assertEqual(entity.all, '### Resource Comment')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.FluentMessage))
-        self.assertEqual(entity.val, 'Foo')
+        self.assertEqual(entity.raw_val, 'Foo')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity,  parser.FluentComment))
         self.assertEqual(entity.all, '## Group Comment')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.FluentTerm))
-        self.assertEqual(entity.val, 'Bar')
+        self.assertEqual(entity.raw_val, 'Bar')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity,  parser.FluentComment))
         self.assertEqual(entity.all, '##')
@@ -199,17 +199,17 @@ baz = Baz
         self.assertEqual(entity.all, '# Standalone Comment')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.FluentMessage))
-        self.assertEqual(entity.val, 'Baz')
+        self.assertEqual(entity.raw_val, 'Baz')
         self.assertEqual(entity.entry.comment.content, 'Baz Comment')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n')
 
         with self.assertRaises(StopIteration):
             next(entities)
@@ -239,17 +239,17 @@ baz = Baz
         self.assertEqual(entity.all, '// Resource Comment')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.FluentEntity))
-        self.assertEqual(entity.val, 'Foo')
+        self.assertEqual(entity.raw_val, 'Foo')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity,  parser.FluentComment))
         self.assertEqual(
@@ -258,17 +258,17 @@ baz = Baz
         )
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.FluentEntity))
-        self.assertEqual(entity.val, 'Bar')
+        self.assertEqual(entity.raw_val, 'Bar')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity,  parser.FluentComment))
         self.assertEqual(entity.all, '[[ Another Section ]]')
@@ -282,17 +282,17 @@ baz = Baz
         self.assertEqual(entity.all, '// Standalone Comment')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.FluentEntity))
-        self.assertEqual(entity.val, 'Baz')
+        self.assertEqual(entity.raw_val, 'Baz')
         self.assertEqual(entity.entry.comment.content, 'Baz Comment')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
         self.assertEqual(entity.all, '\n')
 
         with self.assertRaises(StopIteration):
             next(entities)
@@ -308,17 +308,17 @@ baz = Baz
 # Standalone Comment
 ''')
         entities = self.parser.walk()
 
         entity = next(entities)
         # ensure that fluent comments are FluentComments and Comments
         self.assertTrue(isinstance(entity,  parser.FluentComment))
 
-        # now test the actual .val values
+        # now test the actual .val values, .raw_val is None
         self.assertTrue(isinstance(entity,   parser.Comment))
         self.assertEqual(entity.val, 'Legacy Comment')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
 
         entity = next(entities)
         self.assertTrue(isinstance(entity,   parser.Comment))
@@ -357,29 +357,29 @@ msg = value
         entities = self.parser.walk()
 
         entity = next(entities)
         self.assertTrue(isinstance(entity,  parser.FluentComment))
         self.assertEqual(entity.val, 'Comment')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
-        self.assertEqual(entity.val, '\n\n')
+        self.assertEqual(entity.raw_val, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity,  parser.Junk))
-        self.assertEqual(entity.val, 'Line of junk')
+        self.assertEqual(entity.raw_val, 'Line of junk')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
-        self.assertEqual(entity.val, '\n\n')
+        self.assertEqual(entity.raw_val, '\n\n')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.FluentEntity))
-        self.assertEqual(entity.val, 'value')
+        self.assertEqual(entity.raw_val, 'value')
         self.assertEqual(entity.entry.comment.content, 'Comment')
 
         entity = next(entities)
         self.assertTrue(isinstance(entity, parser.Whitespace))
-        self.assertEqual(entity.val, '\n')
+        self.assertEqual(entity.raw_val, '\n')
 
         with self.assertRaises(StopIteration):
             next(entities)
--- a/third_party/python/fluent/fluent/syntax/ast.py
+++ b/third_party/python/fluent/fluent/syntax/ast.py
@@ -1,20 +1,20 @@
 from __future__ import unicode_literals
 import sys
 import json
 
 
-def to_json(value):
+def to_json(value, fn=None):
     if isinstance(value, BaseNode):
-        return value.to_json()
+        return value.to_json(fn)
     if isinstance(value, list):
-        return list(map(to_json, value))
+        return list(to_json(item, fn) for item in value)
     if isinstance(value, tuple):
-        return list(map(to_json, value))
+        return list(to_json(item, fn) for item in value)
     else:
         return value
 
 
 def from_json(value):
     if isinstance(value, dict):
         cls = getattr(sys.modules[__name__], value['type'])
         args = {
@@ -114,25 +114,25 @@ class BaseNode(object):
                     if not scalars_equal(elem1, elem2, ignored_fields):
                         return False
 
             elif not scalars_equal(field1, field2, ignored_fields):
                 return False
 
         return True
 
-    def to_json(self):
+    def to_json(self, fn=None):
         obj = {
-            name: to_json(value)
+            name: to_json(value, fn)
             for name, value in vars(self).items()
         }
         obj.update(
             {'type': self.__class__.__name__}
         )
-        return obj
+        return fn(obj) if fn else obj
 
     def __str__(self):
         return json.dumps(self.to_json())
 
 
 class SyntaxNode(BaseNode):
     """Base class for AST nodes which can have Spans."""
 
@@ -202,18 +202,19 @@ class Placeable(PatternElement):
         self.expression = expression
 
 
 class Expression(SyntaxNode):
     """An abstract base class for expressions."""
 
 
 class StringLiteral(Expression):
-    def __init__(self, value, **kwargs):
+    def __init__(self, raw, value, **kwargs):
         super(StringLiteral, self).__init__(**kwargs)
+        self.raw = raw
         self.value = value
 
 
 class NumberLiteral(Expression):
     def __init__(self, value, **kwargs):
         super(NumberLiteral, self).__init__(**kwargs)
         self.value = value
 
@@ -231,16 +232,22 @@ class TermReference(Expression):
 
 
 class VariableReference(Expression):
     def __init__(self, id, **kwargs):
         super(VariableReference, self).__init__(**kwargs)
         self.id = id
 
 
+class FunctionReference(Expression):
+    def __init__(self, id, **kwargs):
+        super(FunctionReference, self).__init__(**kwargs)
+        self.id = id
+
+
 class SelectExpression(Expression):
     def __init__(self, selector, variants, **kwargs):
         super(SelectExpression, self).__init__(**kwargs)
         self.selector = selector
         self.variants = variants
 
 
 class AttributeExpression(Expression):
@@ -319,21 +326,16 @@ class GroupComment(BaseComment):
         super(GroupComment, self).__init__(content, **kwargs)
 
 
 class ResourceComment(BaseComment):
     def __init__(self, content=None, **kwargs):
         super(ResourceComment, self).__init__(content, **kwargs)
 
 
-class Function(Identifier):
-    def __init__(self, name, **kwargs):
-        super(Function, self).__init__(name, **kwargs)
-
-
 class Junk(SyntaxNode):
     def __init__(self, content=None, annotations=None, **kwargs):
         super(Junk, self).__init__(**kwargs)
         self.content = content
         self.annotations = annotations or []
 
     def add_annotation(self, annot):
         self.annotations.append(annot)
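
The new optional fn hook is applied to every node's JSON dict, children first, and its return value replaces that dict in the output. A minimal sketch of a visitor built on top of it:

    from fluent.syntax import FluentParser

    parser = FluentParser(with_spans=False)
    resource = parser.parse('hello = Hello, world!\n')

    seen = []

    def record_type(obj):
        # Called bottom-up with each node's JSON dict; whatever it returns
        # ends up in the serialized tree.
        seen.append(obj['type'])
        return obj

    json_tree = resource.to_json(record_type)
    assert json_tree['type'] == 'Resource'
    assert 'Message' in seen and 'TextElement' in seen
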
--- a/third_party/python/fluent/fluent/syntax/errors.py
+++ b/third_party/python/fluent/fluent/syntax/errors.py
@@ -16,22 +16,22 @@ def get_error_message(code, args):
     if code == 'E0003':
         return 'Expected token: "{}"'.format(args[0])
     if code == 'E0004':
         return 'Expected a character from range: "{}"'.format(args[0])
     if code == 'E0005':
         msg = 'Expected message "{}" to have a value or attributes'
         return msg.format(args[0])
     if code == 'E0006':
-        msg = 'Expected term "{}" to have a value'
+        msg = 'Expected term "-{}" to have a value'
         return msg.format(args[0])
     if code == 'E0007':
         return 'Keyword cannot end with a whitespace'
     if code == 'E0008':
-        return 'The callee has to be a simple, upper-case identifier'
+        return 'The callee has to be an upper-case identifier or a term'
     if code == 'E0009':
         return 'The key has to be a simple identifier'
     if code == 'E0010':
         return 'Expected one of the variants to be marked as default (*)'
     if code == 'E0011':
         return 'Expected at least one variant after "->"'
     if code == 'E0012':
         return 'Expected value'
@@ -39,28 +39,30 @@ def get_error_message(code, args):
         return 'Expected variant key'
     if code == 'E0014':
         return 'Expected literal'
     if code == 'E0015':
         return 'Only one variant can be marked as default (*)'
     if code == 'E0016':
         return 'Message references cannot be used as selectors'
     if code == 'E0017':
-        return 'Variants cannot be used as selectors'
+        return 'Terms cannot be used as selectors'
     if code == 'E0018':
         return 'Attributes of messages cannot be used as selectors'
     if code == 'E0019':
         return 'Attributes of terms cannot be used as placeables'
     if code == 'E0020':
         return 'Unterminated string expression'
     if code == 'E0021':
         return 'Positional arguments must not follow named arguments'
     if code == 'E0022':
         return 'Named arguments must be unique'
-    if code == 'E0023':
-        return 'VariantLists are only allowed inside of other VariantLists.'
     if code == 'E0024':
         return 'Cannot access variants of a message.'
     if code == 'E0025':
-        return 'Unknown escape sequence: {}'.format(args[0])
+        return 'Unknown escape sequence: \\{}.'.format(args[0])
     if code == 'E0026':
         return 'Invalid Unicode escape sequence: {}'.format(args[0])
+    if code == 'E0027':
+        return 'Unbalanced closing brace in TextElement.'
+    if code == 'E0028':
+        return 'Expected an inline expression'
     return code
--- a/third_party/python/fluent/fluent/syntax/parser.py
+++ b/third_party/python/fluent/fluent/syntax/parser.py
@@ -1,24 +1,24 @@
 from __future__ import unicode_literals
 import re
 from . import ast
 from .stream import EOF, EOL, FluentParserStream
 from .errors import ParseError
 
 
 def with_span(fn):
-    def decorated(self, ps, *args):
+    def decorated(self, ps, *args, **kwargs):
         if not self.with_spans:
-            return fn(self, ps, *args)
+            return fn(self, ps, *args, **kwargs)
 
         start = ps.index
-        node = fn(self, ps, *args)
+        node = fn(self, ps, *args, **kwargs)
 
-        # Don't re-add the span if the node already has it.  This may happen
+        # Don't re-add the span if the node already has it. This may happen
         # when one decorated function calls another decorated function.
         if node.span is not None:
             return node
 
         end = ps.index
         node.add_span(start, end)
         return node
 
@@ -40,20 +40,18 @@ class FluentParser(object):
             entry = self.get_entry_or_junk(ps)
             blank_lines = ps.skip_blank_block()
 
             # Regular Comments require special logic. Comments may be attached
             # to Messages or Terms if they are followed immediately by them.
             # However they should parse as standalone when they're followed by
             # Junk. Consequently, we only attach Comments once we know that the
             # Message or the Term parsed successfully.
-            if (
-                isinstance(entry, ast.Comment)
-                and blank_lines == 0 and ps.current_char
-            ):
+            if isinstance(entry, ast.Comment) and len(blank_lines) == 0 \
+                    and ps.current_char:
                 # Stash the comment and decide what to do with it
                 # in the next pass.
                 last_comment = entry
                 continue
 
             if last_comment is not None:
                 if isinstance(entry, (ast.Message, ast.Term)):
                     entry.comment = last_comment
@@ -156,17 +154,17 @@ class FluentParser(object):
         content = ''
 
         while True:
             ch = ps.take_char(lambda x: x != EOL)
             while ch:
                 content += ch
                 ch = ps.take_char(lambda x: x != EOL)
 
-            if ps.is_next_line_zero_four_comment(skip=False):
+            if ps.is_next_line_zero_four_comment():
                 content += ps.current_char
                 ps.next()
                 ps.expect_char('/')
                 ps.expect_char('/')
                 ps.take_char(lambda x: x == ' ')
             else:
                 break
 
@@ -200,17 +198,17 @@ class FluentParser(object):
 
             if ps.current_char != EOL:
                 ps.expect_char(' ')
                 ch = ps.take_char(lambda x: x != EOL)
                 while ch:
                     content += ch
                     ch = ps.take_char(lambda x: x != EOL)
 
-            if ps.is_next_line_comment(skip=False, level=level):
+            if ps.is_next_line_comment(level=level):
                 content += ps.current_char
                 ps.next()
             else:
                 break
 
         if level == 0:
             return ast.Comment(content)
         elif level == 1:
@@ -232,98 +230,88 @@ class FluentParser(object):
 
         # A Section without a comment is like an empty Group Comment.
         # Semantically it ends the previous group and starts a new one.
         return ast.GroupComment('')
 
     @with_span
     def get_message(self, ps):
         id = self.get_identifier(ps)
-
         ps.skip_blank_inline()
-        pattern = None
 
         # XXX Syntax 0.4 compat
         if ps.current_char == '=':
             ps.next()
-
-            if ps.is_value_start(skip=True):
-                pattern = self.get_pattern(ps)
+            value = self.maybe_get_pattern(ps)
+        else:
+            value = None
 
-        if ps.is_next_line_attribute_start(skip=True):
-            attrs = self.get_attributes(ps)
-        else:
-            attrs = None
+        attrs = self.get_attributes(ps)
 
-        if pattern is None and attrs is None:
+        if value is None and len(attrs) == 0:
             raise ParseError('E0005', id.name)
 
-        return ast.Message(id, pattern, attrs)
+        return ast.Message(id, value, attrs)
 
     @with_span
     def get_term(self, ps):
-        id = self.get_term_identifier(ps)
+        ps.expect_char('-')
+        id = self.get_identifier(ps)
 
         ps.skip_blank_inline()
         ps.expect_char('=')
 
-        if ps.is_value_start(skip=True):
-            value = self.get_value(ps)
-        else:
+        # Syntax 0.8 compat: VariantLists are supported but deprecated. They
+        # can only be found as values of Terms. Nested VariantLists are not
+        # allowed.
+        value = self.maybe_get_variant_list(ps) or self.maybe_get_pattern(ps)
+        if value is None:
             raise ParseError('E0006', id.name)
 
-        if ps.is_next_line_attribute_start(skip=True):
-            attrs = self.get_attributes(ps)
-        else:
-            attrs = None
-
+        attrs = self.get_attributes(ps)
         return ast.Term(id, value, attrs)
 
     @with_span
     def get_attribute(self, ps):
         ps.expect_char('.')
 
         key = self.get_identifier(ps)
 
         ps.skip_blank_inline()
         ps.expect_char('=')
 
-        if ps.is_value_start(skip=True):
-            value = self.get_pattern(ps)
-            return ast.Attribute(key, value)
+        value = self.maybe_get_pattern(ps)
+        if value is None:
+            raise ParseError('E0012')
 
-        raise ParseError('E0012')
+        return ast.Attribute(key, value)
+
 
     def get_attributes(self, ps):
         attrs = []
+        ps.peek_blank()
 
-        while True:
+        while ps.is_attribute_start():
+            ps.skip_to_peek()
             attr = self.get_attribute(ps)
             attrs.append(attr)
+            ps.peek_blank()
 
-            if not ps.is_next_line_attribute_start(skip=True):
-                break
         return attrs
 
     @with_span
     def get_identifier(self, ps):
         name = ps.take_id_start()
         ch = ps.take_id_char()
         while ch:
             name += ch
             ch = ps.take_id_char()
 
         return ast.Identifier(name)
 
-    @with_span
-    def get_term_identifier(self, ps):
-        ps.expect_char('-')
-        id = self.get_identifier(ps)
-        return ast.Identifier('-{}'.format(id.name))
-
     def get_variant_key(self, ps):
         ch = ps.current_char
 
         if ch is EOF:
             raise ParseError('E0013')
 
         cc = ord(ch)
         if ((cc >= 48 and cc <= 57) or cc == 45):  # 0-9, -
@@ -344,38 +332,40 @@ class FluentParser(object):
         ps.expect_char('[')
         ps.skip_blank()
 
         key = self.get_variant_key(ps)
 
         ps.skip_blank()
         ps.expect_char(']')
 
-        if ps.is_value_start(skip=True):
-            value = self.get_value(ps)
-            return ast.Variant(key, value, default_index)
+        value = self.maybe_get_pattern(ps)
+        if value is None:
+            raise ParseError('E0012')
 
-        raise ParseError('E0012')
+        return ast.Variant(key, value, default_index)
+
 
     def get_variants(self, ps):
         variants = []
         has_default = False
 
-        while True:
+        ps.skip_blank()
+        while ps.is_variant_start():
             variant = self.get_variant(ps, has_default)
 
             if variant.default:
                 has_default = True
 
             variants.append(variant)
+            ps.expect_line_end()
+            ps.skip_blank()
 
-            if not ps.is_next_line_variant_start(skip=False):
-                break
-
-            ps.skip_blank()
+        if len(variants) == 0:
+            raise ParseError('E0011')
 
         if not has_default:
             raise ParseError('E0010')
 
         return variants
 
     def get_digits(self, ps):
         num = ''
@@ -402,318 +392,416 @@ class FluentParser(object):
 
         if ps.current_char == '.':
             num += '.'
             ps.next()
             num += self.get_digits(ps)
 
         return ast.NumberLiteral(num)
 
-    @with_span
-    def get_value(self, ps):
-        if ps.current_char == '{':
+    def maybe_get_pattern(self, ps):
+        '''Parse an inline or a block Pattern, or None
+
+        maybe_get_pattern distinguishes between patterns which start on the
+        same line as the identifier (aka inline singleline patterns and inline
+        multiline patterns), and patterns which start on a new line (aka block
+        patterns). The distinction is important for the dedentation logic: the
+        indent of the first line of a block pattern must be taken into account
+        when calculating the maximum common indent.
+        '''
+        ps.peek_blank_inline()
+        if ps.is_value_start():
+            ps.skip_to_peek()
+            return self.get_pattern(ps, is_block=False)
+
+        ps.peek_blank_block()
+        if ps.is_value_continuation():
+            ps.skip_to_peek()
+            return self.get_pattern(ps, is_block=True)
+
+        return None
+
+    def maybe_get_variant_list(self, ps):
+        '''Parse a VariantList, or None
+
+        Deprecated in Syntax 0.8. VariantLists are only allowed as values of
+        Terms. Values of Messages, Attributes and Variants must be Patterns.
+        This method is only used in get_term.
+        '''
+        ps.peek_blank()
+        if ps.current_peek == '{':
+            start = ps.peek_offset
             ps.peek()
             ps.peek_blank_inline()
-            if ps.is_next_line_variant_start(skip=False):
-                return self.get_variant_list(ps)
-            ps.reset_peek()
+            if ps.current_peek == EOL:
+                ps.peek_blank()
+                if ps.is_variant_start():
+                    ps.reset_peek(start)
+                    ps.skip_to_peek()
+                    return self.get_variant_list(ps)
 
-        return self.get_pattern(ps)
+        ps.reset_peek()
+        return None
 
     @with_span
     def get_variant_list(self, ps):
         ps.expect_char('{')
-        ps.skip_blank_inline()
-        ps.expect_line_end()
-        ps.skip_blank()
         variants = self.get_variants(ps)
-        ps.expect_line_end()
-        ps.skip_blank()
         ps.expect_char('}')
         return ast.VariantList(variants)
 
     @with_span
-    def get_pattern(self, ps):
+    def get_pattern(self, ps, is_block):
         elements = []
+        if is_block:
+            # A block pattern is a pattern which starts on a new line. Measure
+            # the indent of this first line for the dedentation logic.
+            blank_start = ps.index
+            first_indent = ps.skip_blank_inline()
+            elements.append(self.Indent(first_indent, blank_start, ps.index))
+            common_indent_length = len(first_indent)
+        else:
+            common_indent_length = float('infinity')
+
 
         while ps.current_char:
-            ch = ps.current_char
+            if ps.current_char == EOL:
+                blank_start = ps.index
+                blank_lines = ps.peek_blank_block()
+                if ps.is_value_continuation():
+                    ps.skip_to_peek()
+                    indent = ps.skip_blank_inline()
+                    common_indent_length = min(common_indent_length, len(indent))
+                    elements.append(self.Indent(blank_lines + indent, blank_start, ps.index))
+                    continue
 
-            # The end condition for get_pattern's while loop is a newline
-            # which is not followed by a valid pattern continuation.
-            if ch == EOL and not ps.is_next_line_value(skip=False):
+                # The end condition for get_pattern's while loop is a newline
+                # which is not followed by a valid pattern continuation.
+                ps.reset_peek()
                 break
 
-            if ch == '{':
+            if ps.current_char == '}':
+                raise ParseError('E0027')
+
+            if ps.current_char == '{':
                 element = self.get_placeable(ps)
             else:
                 element = self.get_text_element(ps)
+
             elements.append(element)
 
-        # Trim trailing whitespace.
-        last_element = elements[-1]
+        dedented = self.dedent(elements, common_indent_length)
+        return ast.Pattern(dedented)
+
+    class Indent(ast.SyntaxNode):
+        def __init__(self, value, start, end):
+            super(FluentParser.Indent, self).__init__()
+            self.value = value
+            self.add_span(start, end)
+
+    def dedent(self, elements, common_indent):
+        '''Dedent a list of elements by removing the maximum common indent from
+        the beginning of text lines. The common indent is calculated in
+        get_pattern.
+        '''
+        trimmed = []
+
+        for element in elements:
+            if isinstance(element, ast.Placeable):
+                trimmed.append(element)
+                continue
+
+            if isinstance(element, self.Indent):
+                # Strip the common indent.
+                element.value = element.value[:len(element.value) - common_indent]
+                if len(element.value) == 0:
+                    continue
+
+            prev = trimmed[-1] if len(trimmed) > 0 else None
+            if isinstance(prev, ast.TextElement):
+                # Join adjacent TextElements by replacing them with their sum.
+                sum = ast.TextElement(prev.value + element.value)
+                if self.with_spans:
+                    sum.add_span(prev.span.start, element.span.end)
+                trimmed[-1] = sum
+                continue
+
+            if isinstance(element, self.Indent):
+                # If the indent hasn't been merged into a preceding
+                # TextElement, convert it into a new TextElement.
+                text_element = ast.TextElement(element.value)
+                if self.with_spans:
+                    text_element.add_span(element.span.start, element.span.end)
+                element = text_element
+
+            trimmed.append(element)
+
+        # Trim trailing whitespace from the Pattern.
+        last_element = trimmed[-1] if len(trimmed) > 0 else None
         if isinstance(last_element, ast.TextElement):
             last_element.value = last_element.value.rstrip(' \t\n\r')
             if last_element.value == "":
-                elements.pop()
+                trimmed.pop()
 
-        return ast.Pattern(elements)
+        return trimmed
 
     @with_span
     def get_text_element(self, ps):
         buf = ''
 
         while ps.current_char:
             ch = ps.current_char
 
-            if ch == '{':
+            if ch == '{' or ch == '}':
                 return ast.TextElement(buf)
 
             if ch == EOL:
-                if not ps.is_next_line_value(skip=False):
-                    return ast.TextElement(buf)
-
-                ps.next()
-                ps.skip_blank_inline()
-
-                buf += EOL
-                continue
-
-            if ch == '\\':
-                ps.next()
-                buf += self.get_escape_sequence(ps)
-                continue
+                return ast.TextElement(buf)
 
             buf += ch
             ps.next()
 
         return ast.TextElement(buf)
 
-    def get_escape_sequence(self, ps, specials=('{', '\\')):
+    def get_escape_sequence(self, ps):
         next = ps.current_char
 
-        if next in specials:
+        if next == '\\' or next == '"':
             ps.next()
-            return '\\{}'.format(next)
+            return '\\{}'.format(next), next
 
         if next == 'u':
-            sequence = ''
-            ps.next()
+            return self.get_unicode_escape_sequence(ps, next, 4)
 
-            for _ in range(4):
-                ch = ps.take_hex_digit()
-                if not ch:
-                    raise ParseError('E0026', sequence + ps.current_char)
-                sequence += ch
-
-            return '\\u{}'.format(sequence)
+        if next == 'U':
+            return self.get_unicode_escape_sequence(ps, next, 6)
 
         raise ParseError('E0025', next)
 
+    def get_unicode_escape_sequence(self, ps, u, digits):
+        ps.expect_char(u)
+        sequence = ''
+        for _ in range(digits):
+            ch = ps.take_hex_digit()
+            if not ch:
+                raise ParseError('E0026', '\\{}{}{}'.format(u, sequence, ps.current_char))
+            sequence += ch
+
+        codepoint = int(sequence, 16)
+        if codepoint <= 0xD7FF or 0xE000 <= codepoint:
+            # It's a Unicode scalar value. The escape sequence is 4 or 6 digits
+            # long. Convert it to an 8-digit-long \UHHHHHHHH sequence and encode
+            # it as bytes, because in Python 3 decode is not available on str.
+            byte_sequence = "\\U{:08x}".format(codepoint).encode('utf-8')
+            unescaped = byte_sequence.decode('unicode-escape')
+        else:
+            # Escape sequences representing surrogate code points are
+            # well-formed but invalid in Fluent. Replace them with U+FFFD
+            # REPLACEMENT CHARACTER.
+            unescaped = '\uFFFD'
+
+        return '\\{}{}'.format(u, sequence), unescaped
+
     @with_span
     def get_placeable(self, ps):
         ps.expect_char('{')
+        ps.skip_blank()
         expression = self.get_expression(ps)
         ps.expect_char('}')
         return ast.Placeable(expression)
 
     @with_span
     def get_expression(self, ps):
-        ps.skip_blank()
-
-        selector = self.get_selector_expression(ps)
+        selector = self.get_inline_expression(ps)
 
         ps.skip_blank()
 
         if ps.current_char == '-':
             if ps.peek() != '>':
                 ps.reset_peek()
                 return selector
 
             if isinstance(selector, ast.MessageReference):
                 raise ParseError('E0016')
 
             if isinstance(selector, ast.AttributeExpression) \
-               and isinstance(selector.ref, ast.MessageReference):
+                   and isinstance(selector.ref, ast.MessageReference):
                 raise ParseError('E0018')
 
-            if isinstance(selector, ast.VariantExpression):
+            if isinstance(selector, ast.TermReference) \
+                    or isinstance(selector, ast.VariantExpression):
+                raise ParseError('E0017')
+
+            if isinstance(selector, ast.CallExpression) \
+                   and isinstance(selector.callee, ast.TermReference):
                 raise ParseError('E0017')
 
             ps.next()
             ps.next()
 
             ps.skip_blank_inline()
             ps.expect_line_end()
-            ps.skip_blank()
 
             variants = self.get_variants(ps)
-            ps.skip_blank()
-
-            if len(variants) == 0:
-                raise ParseError('E0011')
+            return ast.SelectExpression(selector, variants)
 
-            # VariantLists are only allowed in other VariantLists.
-            if any(isinstance(v.value, ast.VariantList) for v in variants):
-                raise ParseError('E0023')
-
-            return ast.SelectExpression(selector, variants)
-        elif (
-            isinstance(selector, ast.AttributeExpression)
-            and isinstance(selector.ref, ast.TermReference)
-        ):
+        if isinstance(selector, ast.AttributeExpression) \
+                and isinstance(selector.ref, ast.TermReference):
             raise ParseError('E0019')
 
-        ps.skip_blank()
+        if isinstance(selector, ast.CallExpression) \
+                and isinstance(selector.callee, ast.AttributeExpression):
+            raise ParseError('E0019')
 
         return selector
 
     @with_span
-    def get_selector_expression(self, ps):
+    def get_inline_expression(self, ps):
         if ps.current_char == '{':
             return self.get_placeable(ps)
 
-        literal = self.get_literal(ps)
+        expr = self.get_simple_expression(ps)
 
-        if not isinstance(literal, (ast.MessageReference, ast.TermReference)):
-            return literal
-
-        ch = ps.current_char
+        if isinstance(expr, (ast.NumberLiteral, ast.StringLiteral,
+                ast.VariableReference)):
+            return expr
 
-        if (ch == '.'):
-            ps.next()
-            attr = self.get_identifier(ps)
-            return ast.AttributeExpression(literal, attr)
+        if isinstance(expr, ast.MessageReference):
+            if ps.current_char == '.':
+                ps.next()
+                attr = self.get_identifier(ps)
+                return ast.AttributeExpression(expr, attr)
 
-        if (ch == '['):
-            ps.next()
-
-            if isinstance(literal, ast.MessageReference):
-                raise ParseError('E0024')
+            if ps.current_char == '(':
+                # It's a Function. Ensure it's all upper-case.
+                if not re.match('^[A-Z][A-Z_?-]*$', expr.id.name):
+                    raise ParseError('E0008')
+                func = ast.FunctionReference(expr.id)
+                if self.with_spans:
+                    func.add_span(expr.span.start, expr.span.end)
+                return ast.CallExpression(func, *self.get_call_arguments(ps))
 
-            key = self.get_variant_key(ps)
-            ps.expect_char(']')
-            return ast.VariantExpression(literal, key)
+            return expr
 
-        if (ch == '('):
-            ps.next()
-
-            if not re.match('^[A-Z][A-Z_?-]*$', literal.id.name):
-                raise ParseError('E0008')
+        if isinstance(expr, ast.TermReference):
+            if (ps.current_char == '['):
+                ps.next()
+                key = self.get_variant_key(ps)
+                ps.expect_char(']')
+                return ast.VariantExpression(expr, key)
 
-            positional, named = self.get_call_args(ps)
-            ps.expect_char(')')
+            if (ps.current_char == '.'):
+                ps.next()
+                attr = self.get_identifier(ps)
+                expr = ast.AttributeExpression(expr, attr)
 
-            func = ast.Function(literal.id.name)
-            if (self.with_spans):
-                func.add_span(literal.span.start, literal.span.end)
+            if (ps.current_char == '('):
+                return ast.CallExpression(expr, *self.get_call_arguments(ps))
 
-            return ast.CallExpression(func, positional, named)
+            return expr
 
-        return literal
+        raise ParseError('E0028')
 
     @with_span
-    def get_call_arg(self, ps):
-        exp = self.get_selector_expression(ps)
+    def get_simple_expression(self, ps):
+        if ps.is_number_start():
+            return self.get_number(ps)
+        if ps.current_char == '"':
+            return self.get_string(ps)
+        if ps.current_char == '$':
+            ps.next()
+            id = self.get_identifier(ps)
+            return ast.VariableReference(id)
+        if ps.current_char == '-':
+            ps.next()
+            id = self.get_identifier(ps)
+            return ast.TermReference(id)
+        if ps.is_identifier_start():
+            id = self.get_identifier(ps)
+            return ast.MessageReference(id)
+        raise ParseError('E0028')
+
+    @with_span
+    def get_call_argument(self, ps):
+        exp = self.get_inline_expression(ps)
 
         ps.skip_blank()
 
         if ps.current_char != ':':
             return exp
 
         if not isinstance(exp, ast.MessageReference):
             raise ParseError('E0009')
 
         ps.next()
         ps.skip_blank()
 
-        val = self.get_arg_val(ps)
+        value = self.get_literal(ps)
+        return ast.NamedArgument(exp.id, value)
 
-        return ast.NamedArgument(exp.id, val)
-
-    def get_call_args(self, ps):
+    def get_call_arguments(self, ps):
         positional = []
         named = []
         argument_names = set()
 
+        ps.expect_char('(')
         ps.skip_blank()
 
         while True:
             if ps.current_char == ')':
                 break
 
-            arg = self.get_call_arg(ps)
+            arg = self.get_call_argument(ps)
             if isinstance(arg, ast.NamedArgument):
                 if arg.name.name in argument_names:
                     raise ParseError('E0022')
                 named.append(arg)
                 argument_names.add(arg.name.name)
             elif len(argument_names) > 0:
                 raise ParseError('E0021')
             else:
                 positional.append(arg)
 
             ps.skip_blank()
 
             if ps.current_char == ',':
                 ps.next()
                 ps.skip_blank()
                 continue
-            else:
-                break
 
-        return positional, named
+            break
 
-    def get_arg_val(self, ps):
-        if ps.is_number_start():
-            return self.get_number(ps)
-        elif ps.current_char == '"':
-            return self.get_string(ps)
-        raise ParseError('E0012')
+        ps.expect_char(')')
+        return positional, named
 
     @with_span
     def get_string(self, ps):
-        val = ''
+        raw = ''
+        value = ''
 
         ps.expect_char('"')
 
-        ch = ps.take_char(lambda x: x != '"' and x != EOL)
-        while ch:
+        while True:
+            ch = ps.take_char(lambda x: x != '"' and x != EOL)
+            if not ch:
+                break
             if ch == '\\':
-                val += self.get_escape_sequence(ps, ('{', '\\', '"'))
+                sequence, unescaped = self.get_escape_sequence(ps)
+                raw += sequence
+                value += unescaped
             else:
-                val += ch
-            ch = ps.take_char(lambda x: x != '"' and x != EOL)
+                raw += ch
+                value += ch
 
         if ps.current_char == EOL:
             raise ParseError('E0020')
 
         ps.expect_char('"')
 
-        return ast.StringLiteral(val)
+        return ast.StringLiteral(raw, value)
 
     @with_span
     def get_literal(self, ps):
-        ch = ps.current_char
-
-        if ch is EOF:
-            raise ParseError('E0014')
-
-        if ch == '$':
-            ps.next()
-            id = self.get_identifier(ps)
-            return ast.VariableReference(id)
-
-        elif ps.is_identifier_start():
-            id = self.get_identifier(ps)
-            return ast.MessageReference(id)
-
-        elif ps.is_number_start():
+        if ps.is_number_start():
             return self.get_number(ps)
-
-        elif ch == '-':
-            id = self.get_term_identifier(ps)
-            return ast.TermReference(id)
-
-        elif ch == '"':
+        if ps.current_char == '"':
             return self.get_string(ps)
-
         raise ParseError('E0014')
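
The parser refactor above replaces get_selector_expression()/get_call_args() with get_inline_expression()/get_call_arguments(), and a call such as NUMBER($count) is now parsed into a CallExpression whose callee is a FunctionReference rather than the old ast.Function node. A minimal sketch of what that looks like from the public API, assuming the fluent.syntax.FluentParser entry point (not shown in this diff):

    from fluent.syntax import FluentParser, ast

    resource = FluentParser().parse('key = { NUMBER($count) }\n')
    call = resource.body[0].value.elements[0].expression

    # The callee is now a FunctionReference; positional/named arguments
    # come from get_call_arguments().
    assert isinstance(call, ast.CallExpression)
    assert isinstance(call.callee, ast.FunctionReference)
    assert call.callee.id.name == 'NUMBER'
    assert isinstance(call.positional[0], ast.VariableReference)
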
--- a/third_party/python/fluent/fluent/syntax/serializer.py
+++ b/third_party/python/fluent/fluent/syntax/serializer.py
@@ -38,17 +38,17 @@ class FluentSerializer(object):
                     state |= self.HAS_ENTRIES
 
         return "".join(parts)
 
     def serialize_entry(self, entry, state=0):
         if isinstance(entry, ast.Message):
             return serialize_message(entry)
         if isinstance(entry, ast.Term):
-            return serialize_message(entry)
+            return serialize_term(entry)
         if isinstance(entry, ast.Comment):
             if state & self.HAS_ENTRIES:
                 return "\n{}\n".format(serialize_comment(entry, "#"))
             return "{}\n".format(serialize_comment(entry, "#"))
         if isinstance(entry, ast.GroupComment):
             if state & self.HAS_ENTRIES:
                 return "\n{}\n".format(serialize_comment(entry, "##"))
             return "{}\n".format(serialize_comment(entry, "##"))
@@ -78,34 +78,49 @@ def serialize_junk(junk):
 
 
 def serialize_message(message):
     parts = []
 
     if message.comment:
         parts.append(serialize_comment(message.comment))
 
-    parts.append(serialize_identifier(message.id))
-    parts.append(" =")
+    parts.append("{} =".format(message.id.name))
 
     if message.value:
         parts.append(serialize_value(message.value))
 
     if message.attributes:
         for attribute in message.attributes:
             parts.append(serialize_attribute(attribute))
 
     parts.append("\n")
+    return ''.join(parts)
 
+
+def serialize_term(term):
+    parts = []
+
+    if term.comment:
+        parts.append(serialize_comment(term.comment))
+
+    parts.append("-{} =".format(term.id.name))
+    parts.append(serialize_value(term.value))
+
+    if term.attributes:
+        for attribute in term.attributes:
+            parts.append(serialize_attribute(attribute))
+
+    parts.append("\n")
     return ''.join(parts)
 
 
 def serialize_attribute(attribute):
     return "\n    .{} ={}".format(
-        serialize_identifier(attribute.id),
+        attribute.id.name,
         indent(serialize_value(attribute.value))
     )
 
 
 def serialize_value(value):
     if isinstance(value, ast.Pattern):
         return serialize_pattern(value)
     if isinstance(value, ast.VariantList):
@@ -138,79 +153,61 @@ def serialize_variant(variant):
         "   *" if variant.default else "    ",
         serialize_variant_key(variant.key),
         indent(serialize_value(variant.value))
     )
 
 
 def serialize_element(element):
     if isinstance(element, ast.TextElement):
-        return serialize_text_element(element)
+        return element.value
     if isinstance(element, ast.Placeable):
         return serialize_placeable(element)
     raise Exception('Unknown element type: {}'.format(type(element)))
 
 
-def serialize_text_element(text):
-    return text.value
-
-
 def serialize_placeable(placeable):
     expr = placeable.expression
 
     if isinstance(expr, ast.Placeable):
         return "{{{}}}".format(serialize_placeable(expr))
     if isinstance(expr, ast.SelectExpression):
         # Special-case select expressions to control the whitespace around the
         # opening and the closing brace.
         return "{{ {}}}".format(serialize_select_expression(expr))
     if isinstance(expr, ast.Expression):
         return "{{ {} }}".format(serialize_expression(expr))
 
 
 def serialize_expression(expression):
     if isinstance(expression, ast.StringLiteral):
-        return serialize_string_literal(expression)
+        return '"{}"'.format(expression.raw)
     if isinstance(expression, ast.NumberLiteral):
-        return serialize_number_literal(expression)
+        return expression.value
     if isinstance(expression, ast.MessageReference):
-        return serialize_message_reference(expression)
+        return expression.id.name
+    if isinstance(expression, ast.FunctionReference):
+        return expression.id.name
     if isinstance(expression, ast.TermReference):
-        return serialize_message_reference(expression)
+        return '-{}'.format(expression.id.name)
     if isinstance(expression, ast.VariableReference):
-        return serialize_variable_reference(expression)
+        return '${}'.format(expression.id.name)
     if isinstance(expression, ast.AttributeExpression):
         return serialize_attribute_expression(expression)
     if isinstance(expression, ast.VariantExpression):
         return serialize_variant_expression(expression)
     if isinstance(expression, ast.CallExpression):
         return serialize_call_expression(expression)
     if isinstance(expression, ast.SelectExpression):
         return serialize_select_expression(expression)
     if isinstance(expression, ast.Placeable):
         return serialize_placeable(expression)
     raise Exception('Unknown expression type: {}'.format(type(expression)))
 
 
-def serialize_string_literal(expr):
-    return "\"{}\"".format(expr.value)
-
-
-def serialize_number_literal(expr):
-    return expr.value
-
-
-def serialize_message_reference(expr):
-    return serialize_identifier(expr.id)
-
-
-def serialize_variable_reference(expr):
-    return "${}".format(serialize_identifier(expr.id))
-
-
 def serialize_select_expression(expr):
     parts = []
     selector = "{} ->".format(
         serialize_expression(expr.selector))
     parts.append(selector)
 
     for variant in expr.variants:
         parts.append(serialize_variant(variant))
@@ -218,70 +215,42 @@ def serialize_select_expression(expr):
     parts.append("\n")
 
     return "".join(parts)
 
 
 def serialize_attribute_expression(expr):
     return "{}.{}".format(
         serialize_expression(expr.ref),
-        serialize_identifier(expr.name),
+        expr.name.name,
     )
 
 
 def serialize_variant_expression(expr):
     return "{}[{}]".format(
         serialize_expression(expr.ref),
         serialize_variant_key(expr.key),
     )
 
 
 def serialize_call_expression(expr):
-    fun = serialize_function(expr.callee)
+    callee = serialize_expression(expr.callee)
     positional = ", ".join(
         serialize_expression(arg) for arg in expr.positional)
     named = ", ".join(
         serialize_named_argument(arg) for arg in expr.named)
     if len(expr.positional) > 0 and len(expr.named) > 0:
-        return '{}({}, {})'.format(fun, positional, named)
-    return '{}({})'.format(fun, positional or named)
-
-
-def serialize_call_argument(arg):
-    if isinstance(arg, ast.Expression):
-        return serialize_expression(arg)
-    if isinstance(arg, ast.NamedArgument):
-        return serialize_named_argument(arg)
+        return '{}({}, {})'.format(callee, positional, named)
+    return '{}({})'.format(callee, positional or named)
 
 
 def serialize_named_argument(arg):
     return "{}: {}".format(
-        serialize_identifier(arg.name),
-        serialize_argument_value(arg.value)
+        arg.name.name,
+        serialize_expression(arg.value)
     )
 
 
-def serialize_argument_value(argval):
-    if isinstance(argval, ast.StringLiteral):
-        return serialize_string_literal(argval)
-    if isinstance(argval, ast.NumberLiteral):
-        return serialize_number_literal(argval)
-    raise Exception('Unknown argument type: {}'.format(type(argval)))
-
-
-def serialize_identifier(identifier):
-    return identifier.name
-
-
-def serialize_variant_name(symbol):
-    return symbol.name
-
-
 def serialize_variant_key(key):
     if isinstance(key, ast.Identifier):
-        return serialize_identifier(key)
-    if isinstance(key, ast.NumberLiteral):
-        return serialize_number_literal(key)
-    raise Exception('Unknown variant key type: {}'.format(type(key)))
-
-
-def serialize_function(function):
-    return function.name
+        return key.name
+    else:
+        return serialize_expression(key)
--- a/third_party/python/fluent/fluent/syntax/stream.py
+++ b/third_party/python/fluent/fluent/syntax/stream.py
@@ -61,57 +61,59 @@ class ParserStream(object):
 EOL = '\n'
 EOF = None
 SPECIAL_LINE_START_CHARS = ('}', '.', '[', '*')
 
 
 class FluentParserStream(ParserStream):
     last_comment_zero_four_syntax = False
 
-    def skip_blank_inline(self):
-        while self.current_char == ' ':
-            self.next()
-
     def peek_blank_inline(self):
+        start = self.index + self.peek_offset
         while self.current_peek == ' ':
             self.peek()
+        return self.string[start:self.index + self.peek_offset]
 
-    def skip_blank_block(self):
-        line_count = 0
+    def skip_blank_inline(self):
+        blank = self.peek_blank_inline()
+        self.skip_to_peek()
+        return blank
+
+    def peek_blank_block(self):
+        blank = ""
         while True:
+            line_start = self.peek_offset
             self.peek_blank_inline()
 
             if self.current_peek == EOL:
-                self.skip_to_peek()
-                self.next()
-                line_count += 1
-            else:
-                self.reset_peek()
-                return line_count
+                blank += EOL
+                self.peek()
+                continue
 
-    def peek_blank_block(self):
-        while True:
-            line_start = self.peek_offset
+            if self.current_peek is EOF:
+                # Treat the blank line at EOF as a blank block.
+                return blank
 
-            self.peek_blank_inline()
+            # Any other char; reset to column 1 on this line.
+            self.reset_peek(line_start)
+            return blank
 
-            if self.current_peek == EOL:
-                self.peek()
-            else:
-                self.reset_peek(line_start)
-                break
-
-    def skip_blank(self):
-        while self.current_char in (" ", EOL):
-            self.next()
+    def skip_blank_block(self):
+        blank = self.peek_blank_block()
+        self.skip_to_peek()
+        return blank
 
     def peek_blank(self):
         while self.current_peek in (" ", EOL):
             self.peek()
 
+    def skip_blank(self):
+        self.peek_blank()
+        self.skip_to_peek()
+
     def expect_char(self, ch):
         if self.current_char == ch:
             self.next()
             return True
 
         raise ParseError('E0003', ch)
 
     def expect_line_end(self):
@@ -158,49 +160,50 @@ class FluentParserStream(ParserStream):
         return is_digit
 
     def is_char_pattern_continuation(self, ch):
         if ch is EOF:
             return False
 
         return ch not in SPECIAL_LINE_START_CHARS
 
-    def is_value_start(self, skip):
-        if skip is False:
-            raise NotImplementedError()
+    def is_value_start(self):
+        # Inline Patterns may start with any char.
+        return self.current_peek is not EOF and self.current_peek != EOL
 
+    def is_value_continuation(self):
+        column1 = self.peek_offset
         self.peek_blank_inline()
-        ch = self.current_peek
 
-        # Inline Patterns may start with any char.
-        if ch is not EOF and ch != EOL:
-            self.skip_to_peek()
+        if self.current_peek == '{':
+            self.reset_peek(column1)
             return True
 
-        return self.is_next_line_value(skip)
+        if self.peek_offset - column1 == 0:
+            return False
 
-    def is_next_line_zero_four_comment(self, skip):
-        if skip is True:
-            raise NotImplementedError()
+        if self.is_char_pattern_continuation(self.current_peek):
+            self.reset_peek(column1)
+            return True
 
+        return False
+
+    def is_next_line_zero_four_comment(self):
         if self.current_peek != EOL:
             return False
 
         is_comment = (self.peek(), self.peek()) == ('/', '/')
         self.reset_peek()
         return is_comment
 
     # -1 - any
     #  0 - comment
     #  1 - group comment
     #  2 - resource comment
-    def is_next_line_comment(self, skip, level=-1):
-        if skip is True:
-            raise NotImplementedError()
-
+    def is_next_line_comment(self, level=-1):
         if self.current_peek != EOL:
             return False
 
         i = 0
 
         while (i <= level or (level == -1 and i < 3)):
             if self.peek() != '#':
                 if i <= level and level != -1:
@@ -212,73 +215,29 @@ class FluentParserStream(ParserStream):
         # The first char after #, ## or ###.
         if self.peek() in (' ', EOL):
             self.reset_peek()
             return True
 
         self.reset_peek()
         return False
 
-    def is_next_line_variant_start(self, skip):
-        if skip is True:
-            raise NotImplementedError()
-
-        if self.current_peek != EOL:
-            return False
-
-        self.peek_blank()
-
+    def is_variant_start(self):
+        current_peek_offset = self.peek_offset
         if self.current_peek == '*':
             self.peek()
-
         if self.current_peek == '[' and self.peek() != '[':
-            self.reset_peek()
-            return True
-
-        self.reset_peek()
-        return False
-
-    def is_next_line_attribute_start(self, skip):
-        if skip is False:
-            raise NotImplementedError()
-
-        self.peek_blank()
-
-        if self.current_peek == '.':
-            self.skip_to_peek()
+            self.reset_peek(current_peek_offset)
             return True
 
-        self.reset_peek()
+        self.reset_peek(current_peek_offset)
         return False
 
-    def is_next_line_value(self, skip):
-        if self.current_peek != EOL:
-            return False
-
-        self.peek_blank_block()
-
-        ptr = self.peek_offset
-
-        self.peek_blank_inline()
-
-        if self.current_peek != "{":
-            if (self.peek_offset - ptr == 0):
-                self.reset_peek()
-                return False
-
-            if not self.is_char_pattern_continuation(self.current_peek):
-                self.reset_peek()
-                return False
-
-        if skip:
-            self.skip_to_peek()
-        else:
-            self.reset_peek()
-
-        return True
+    def is_attribute_start(self):
+        return self.current_peek == '.'
 
     def skip_to_next_entry_start(self, junk_start):
         last_newline = self.string.rfind(EOL, 0, self.index)
         if junk_start < last_newline:
             # Last seen newline is _after_ the junk start. It's safe to rewind
             # without the risk of resuming at the same broken entry.
             self.index = last_newline