summaryrefslogtreecommitdiffstats
path: root/third_party/python/fluent.syntax
diff options
context:
space:
mode:
Diffstat (limited to 'third_party/python/fluent.syntax')
-rw-r--r--third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/DESCRIPTION.rst24
-rw-r--r--third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/METADATA41
-rw-r--r--third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/RECORD14
-rw-r--r--third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/WHEEL6
-rw-r--r--third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/metadata.json1
-rw-r--r--third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/top_level.txt1
-rw-r--r--third_party/python/fluent.syntax/fluent/__init__.py1
-rw-r--r--third_party/python/fluent.syntax/fluent/syntax/__init__.py16
-rw-r--r--third_party/python/fluent.syntax/fluent/syntax/ast.py349
-rw-r--r--third_party/python/fluent.syntax/fluent/syntax/errors.py70
-rw-r--r--third_party/python/fluent.syntax/fluent/syntax/parser.py683
-rw-r--r--third_party/python/fluent.syntax/fluent/syntax/serializer.py233
-rw-r--r--third_party/python/fluent.syntax/fluent/syntax/stream.py282
-rw-r--r--third_party/python/fluent.syntax/fluent/syntax/visitor.py65
14 files changed, 1786 insertions, 0 deletions
diff --git a/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/DESCRIPTION.rst b/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000000..4f6444ed25
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/DESCRIPTION.rst
@@ -0,0 +1,24 @@
+``fluent.syntax`` |fluent.syntax|
+---------------------------------
+
+Read, write, and transform `Fluent`_ files.
+
+This package includes the parser, serializer, and traversal
+utilities like Visitor and Transformer. You’re looking for this package
+if you work on tooling for Fluent in Python.
+
+.. code-block:: python
+
+ >>> from fluent.syntax import parse, ast, serialize
+ >>> resource = parse("a-key = String to localize")
+ >>> resource.body[0].value.elements[0].value = "Localized string"
+ >>> serialize(resource)
+ 'a-key = Localized string\n'
+
+
+Find the full documentation on https://projectfluent.org/python-fluent/fluent.syntax/.
+
+.. _fluent: https://projectfluent.org/
+.. |fluent.syntax| image:: https://github.com/projectfluent/python-fluent/workflows/fluent.syntax/badge.svg
+
+
diff --git a/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/METADATA b/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/METADATA
new file mode 100644
index 0000000000..3a803aec82
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/METADATA
@@ -0,0 +1,41 @@
+Metadata-Version: 2.0
+Name: fluent.syntax
+Version: 0.18.1
+Summary: Localization library for expressive translations.
+Home-page: https://github.com/projectfluent/python-fluent
+Author: Mozilla
+Author-email: l10n-drivers@mozilla.org
+License: APL 2
+Keywords: fluent,localization,l10n
+Platform: UNKNOWN
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.5
+Description-Content-Type: text/x-rst
+
+``fluent.syntax`` |fluent.syntax|
+---------------------------------
+
+Read, write, and transform `Fluent`_ files.
+
+This package includes the parser, serializer, and traversal
+utilities like Visitor and Transformer. You’re looking for this package
+if you work on tooling for Fluent in Python.
+
+.. code-block:: python
+
+ >>> from fluent.syntax import parse, ast, serialize
+ >>> resource = parse("a-key = String to localize")
+ >>> resource.body[0].value.elements[0].value = "Localized string"
+ >>> serialize(resource)
+ 'a-key = Localized string\n'
+
+
+Find the full documentation on https://projectfluent.org/python-fluent/fluent.syntax/.
+
+.. _fluent: https://projectfluent.org/
+.. |fluent.syntax| image:: https://github.com/projectfluent/python-fluent/workflows/fluent.syntax/badge.svg
+
+
diff --git a/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/RECORD b/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/RECORD
new file mode 100644
index 0000000000..3260cdd71c
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/RECORD
@@ -0,0 +1,14 @@
+fluent/__init__.py,sha256=jv2YF__bseklT3OWEzlqJ5qE24c4aWd5F4r0TTjOrWQ,65
+fluent/syntax/__init__.py,sha256=ZRWtukW3DYmR3VkegVXxzKarqrx4Fv92iJO5W-evVB8,425
+fluent/syntax/ast.py,sha256=Jvd6PpR7u6E711jF1qZPRUulBqTRTtZRnRRADK1BRwI,10030
+fluent/syntax/errors.py,sha256=zena7CVTg8CvF52B4kadEuQUqQ3QSUyNm5lL3ynVxM0,2579
+fluent/syntax/parser.py,sha256=WVDbDiDwb1EJwlv_sgjH815tsvfnjpuyfL9lSlRWIIc,20194
+fluent/syntax/serializer.py,sha256=nERP9e_eMdfgx9B74tr2h05IO1lYFkRKZOeAtujpoz4,7606
+fluent/syntax/stream.py,sha256=Whap0UNpWo5-wOZBP3z34mRPMilWbsEPpxVFtAjcw1k,8015
+fluent/syntax/visitor.py,sha256=DISMs3y_rTaMZlaKylbxJ0rANmDzSbo22eZXcnb5TLA,2149
+fluent.syntax-0.18.1.dist-info/DESCRIPTION.rst,sha256=A33VWXJd9FFc9mg8QjsWPcprRJ7RazA0cYQyabC-A2M,794
+fluent.syntax-0.18.1.dist-info/METADATA,sha256=QptbKFmazXHoKtm4JWVb93AIh96sq5M2pF3D5qesZjU,1381
+fluent.syntax-0.18.1.dist-info/RECORD,,
+fluent.syntax-0.18.1.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110
+fluent.syntax-0.18.1.dist-info/metadata.json,sha256=5XyFJ0X2dc59aI7E3WrIFZ3aMs5Ir2K2n2IY3Z7wBrI,805
+fluent.syntax-0.18.1.dist-info/top_level.txt,sha256=E6y0EXb_8ntRq2470rEss448Ec6wP_-DI3zVECukrn0,7
diff --git a/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/WHEEL b/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/WHEEL
new file mode 100644
index 0000000000..7332a419cd
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.30.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/metadata.json b/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/metadata.json
new file mode 100644
index 0000000000..7b4494d18d
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["Development Status :: 3 - Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.5"], "description_content_type": "text/x-rst", "extensions": {"python.details": {"contacts": [{"email": "l10n-drivers@mozilla.org", "name": "Mozilla", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/projectfluent/python-fluent"}}}, "generator": "bdist_wheel (0.30.0)", "keywords": ["fluent", "localization", "l10n"], "license": "APL 2", "metadata_version": "2.0", "name": "fluent.syntax", "summary": "Localization library for expressive translations.", "test_requires": [{"requires": ["six"]}], "version": "0.18.1"} \ No newline at end of file
diff --git a/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/top_level.txt b/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/top_level.txt
new file mode 100644
index 0000000000..a3582d405a
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent.syntax-0.18.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+fluent
diff --git a/third_party/python/fluent.syntax/fluent/__init__.py b/third_party/python/fluent.syntax/fluent/__init__.py
new file mode 100644
index 0000000000..69e3be50da
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/__init__.py
@@ -0,0 +1 @@
+__path__ = __import__('pkgutil').extend_path(__path__, __name__)
diff --git a/third_party/python/fluent.syntax/fluent/syntax/__init__.py b/third_party/python/fluent.syntax/fluent/syntax/__init__.py
new file mode 100644
index 0000000000..0975b110b9
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/__init__.py
@@ -0,0 +1,16 @@
+from .parser import FluentParser
+from .serializer import FluentSerializer
+
+
+def parse(source, **kwargs):
+ """Create an ast.Resource from a Fluent Syntax source.
+ """
+ parser = FluentParser(**kwargs)
+ return parser.parse(source)
+
+
+def serialize(resource, **kwargs):
+ """Serialize an ast.Resource to a unicode string.
+ """
+ serializer = FluentSerializer(**kwargs)
+ return serializer.serialize(resource)
diff --git a/third_party/python/fluent.syntax/fluent/syntax/ast.py b/third_party/python/fluent.syntax/fluent/syntax/ast.py
new file mode 100644
index 0000000000..7ad5d611d6
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/ast.py
@@ -0,0 +1,349 @@
+# coding=utf-8
+from __future__ import unicode_literals
+import re
+import sys
+import json
+import six
+
+
+def to_json(value, fn=None):
+ if isinstance(value, BaseNode):
+ return value.to_json(fn)
+ if isinstance(value, list):
+ return list(to_json(item, fn) for item in value)
+ if isinstance(value, tuple):
+ return list(to_json(item, fn) for item in value)
+ else:
+ return value
+
+
+def from_json(value):
+ if isinstance(value, dict):
+ cls = getattr(sys.modules[__name__], value['type'])
+ args = {
+ k: from_json(v)
+ for k, v in value.items()
+ if k != 'type'
+ }
+ return cls(**args)
+ if isinstance(value, list):
+ return list(map(from_json, value))
+ else:
+ return value
+
+
+def scalars_equal(node1, node2, ignored_fields):
+ """Compare two nodes which are not lists."""
+
+ if type(node1) != type(node2):
+ return False
+
+ if isinstance(node1, BaseNode):
+ return node1.equals(node2, ignored_fields)
+
+ return node1 == node2
+
+
+class BaseNode(object):
+ """Base class for all Fluent AST nodes.
+
+ All productions described in the ASDL subclass BaseNode, including Span and
+ Annotation. Implements __str__, to_json and traverse.
+ """
+
+ def clone(self):
+ """Create a deep clone of the current node."""
+ def visit(value):
+ """Clone node and its descendants."""
+ if isinstance(value, BaseNode):
+ return value.clone()
+ if isinstance(value, list):
+ return [visit(child) for child in value]
+ if isinstance(value, tuple):
+ return tuple(visit(child) for child in value)
+ return value
+
+ # Use all attributes found on the node as kwargs to the constructor.
+ return self.__class__(
+ **{name: visit(value) for name, value in vars(self).items()}
+ )
+
+ def equals(self, other, ignored_fields=['span']):
+ """Compare two nodes.
+
+ Nodes are deeply compared on a field by field basis. If possible, False
+ is returned early. When comparing attributes and variants in
+ SelectExpressions, the order doesn't matter. By default, spans are not
+ taken into account.
+ """
+
+ self_keys = set(vars(self).keys())
+ other_keys = set(vars(other).keys())
+
+ if ignored_fields:
+ for key in ignored_fields:
+ self_keys.discard(key)
+ other_keys.discard(key)
+
+ if self_keys != other_keys:
+ return False
+
+ for key in self_keys:
+ field1 = getattr(self, key)
+ field2 = getattr(other, key)
+
+ # List-typed nodes are compared item-by-item. When comparing
+ # attributes and variants, the order of items doesn't matter.
+ if isinstance(field1, list) and isinstance(field2, list):
+ if len(field1) != len(field2):
+ return False
+
+ for elem1, elem2 in zip(field1, field2):
+ if not scalars_equal(elem1, elem2, ignored_fields):
+ return False
+
+ elif not scalars_equal(field1, field2, ignored_fields):
+ return False
+
+ return True
+
+ def to_json(self, fn=None):
+ obj = {
+ name: to_json(value, fn)
+ for name, value in vars(self).items()
+ }
+ obj.update(
+ {'type': self.__class__.__name__}
+ )
+ return fn(obj) if fn else obj
+
+ def __str__(self):
+ return json.dumps(self.to_json())
+
+
+class SyntaxNode(BaseNode):
+ """Base class for AST nodes which can have Spans."""
+
+ def __init__(self, span=None, **kwargs):
+ super(SyntaxNode, self).__init__(**kwargs)
+ self.span = span
+
+ def add_span(self, start, end):
+ self.span = Span(start, end)
+
+
+class Resource(SyntaxNode):
+ def __init__(self, body=None, **kwargs):
+ super(Resource, self).__init__(**kwargs)
+ self.body = body or []
+
+
+class Entry(SyntaxNode):
+ """An abstract base class for useful elements of Resource.body."""
+
+
+class Message(Entry):
+ def __init__(self, id, value=None, attributes=None,
+ comment=None, **kwargs):
+ super(Message, self).__init__(**kwargs)
+ self.id = id
+ self.value = value
+ self.attributes = attributes or []
+ self.comment = comment
+
+
+class Term(Entry):
+ def __init__(self, id, value, attributes=None,
+ comment=None, **kwargs):
+ super(Term, self).__init__(**kwargs)
+ self.id = id
+ self.value = value
+ self.attributes = attributes or []
+ self.comment = comment
+
+
+class Pattern(SyntaxNode):
+ def __init__(self, elements, **kwargs):
+ super(Pattern, self).__init__(**kwargs)
+ self.elements = elements
+
+
+class PatternElement(SyntaxNode):
+ """An abstract base class for elements of Patterns."""
+
+
+class TextElement(PatternElement):
+ def __init__(self, value, **kwargs):
+ super(TextElement, self).__init__(**kwargs)
+ self.value = value
+
+
+class Placeable(PatternElement):
+ def __init__(self, expression, **kwargs):
+ super(Placeable, self).__init__(**kwargs)
+ self.expression = expression
+
+
+class Expression(SyntaxNode):
+ """An abstract base class for expressions."""
+
+
+class Literal(Expression):
+ """An abstract base class for literals."""
+ def __init__(self, value, **kwargs):
+ super(Literal, self).__init__(**kwargs)
+ self.value = value
+
+ def parse(self):
+ return {'value': self.value}
+
+
+class StringLiteral(Literal):
+ def parse(self):
+ def from_escape_sequence(matchobj):
+ c, codepoint4, codepoint6 = matchobj.groups()
+ if c:
+ return c
+ codepoint = int(codepoint4 or codepoint6, 16)
+ if codepoint <= 0xD7FF or 0xE000 <= codepoint:
+ return six.unichr(codepoint)
+            # Escape sequences representing surrogate code points are
+ # well-formed but invalid in Fluent. Replace them with U+FFFD
+ # REPLACEMENT CHARACTER.
+ return '�'
+
+ value = re.sub(
+ r'\\(?:(\\|")|u([0-9a-fA-F]{4})|U([0-9a-fA-F]{6}))',
+ from_escape_sequence,
+ self.value
+ )
+ return {'value': value}
+
+
+class NumberLiteral(Literal):
+ def parse(self):
+ value = float(self.value)
+ decimal_position = self.value.find('.')
+ precision = 0
+ if decimal_position >= 0:
+ precision = len(self.value) - decimal_position - 1
+ return {
+ 'value': value,
+ 'precision': precision
+ }
+
+
+class MessageReference(Expression):
+ def __init__(self, id, attribute=None, **kwargs):
+ super(MessageReference, self).__init__(**kwargs)
+ self.id = id
+ self.attribute = attribute
+
+
+class TermReference(Expression):
+ def __init__(self, id, attribute=None, arguments=None, **kwargs):
+ super(TermReference, self).__init__(**kwargs)
+ self.id = id
+ self.attribute = attribute
+ self.arguments = arguments
+
+
+class VariableReference(Expression):
+ def __init__(self, id, **kwargs):
+ super(VariableReference, self).__init__(**kwargs)
+ self.id = id
+
+
+class FunctionReference(Expression):
+ def __init__(self, id, arguments, **kwargs):
+ super(FunctionReference, self).__init__(**kwargs)
+ self.id = id
+ self.arguments = arguments
+
+
+class SelectExpression(Expression):
+ def __init__(self, selector, variants, **kwargs):
+ super(SelectExpression, self).__init__(**kwargs)
+ self.selector = selector
+ self.variants = variants
+
+
+class CallArguments(SyntaxNode):
+ def __init__(self, positional=None, named=None, **kwargs):
+ super(CallArguments, self).__init__(**kwargs)
+ self.positional = [] if positional is None else positional
+ self.named = [] if named is None else named
+
+
+class Attribute(SyntaxNode):
+ def __init__(self, id, value, **kwargs):
+ super(Attribute, self).__init__(**kwargs)
+ self.id = id
+ self.value = value
+
+
+class Variant(SyntaxNode):
+ def __init__(self, key, value, default=False, **kwargs):
+ super(Variant, self).__init__(**kwargs)
+ self.key = key
+ self.value = value
+ self.default = default
+
+
+class NamedArgument(SyntaxNode):
+ def __init__(self, name, value, **kwargs):
+ super(NamedArgument, self).__init__(**kwargs)
+ self.name = name
+ self.value = value
+
+
+class Identifier(SyntaxNode):
+ def __init__(self, name, **kwargs):
+ super(Identifier, self).__init__(**kwargs)
+ self.name = name
+
+
+class BaseComment(Entry):
+ def __init__(self, content=None, **kwargs):
+ super(BaseComment, self).__init__(**kwargs)
+ self.content = content
+
+
+class Comment(BaseComment):
+ def __init__(self, content=None, **kwargs):
+ super(Comment, self).__init__(content, **kwargs)
+
+
+class GroupComment(BaseComment):
+ def __init__(self, content=None, **kwargs):
+ super(GroupComment, self).__init__(content, **kwargs)
+
+
+class ResourceComment(BaseComment):
+ def __init__(self, content=None, **kwargs):
+ super(ResourceComment, self).__init__(content, **kwargs)
+
+
+class Junk(SyntaxNode):
+ def __init__(self, content=None, annotations=None, **kwargs):
+ super(Junk, self).__init__(**kwargs)
+ self.content = content
+ self.annotations = annotations or []
+
+ def add_annotation(self, annot):
+ self.annotations.append(annot)
+
+
+class Span(BaseNode):
+ def __init__(self, start, end, **kwargs):
+ super(Span, self).__init__(**kwargs)
+ self.start = start
+ self.end = end
+
+
+class Annotation(SyntaxNode):
+ def __init__(self, code, arguments=None, message=None, **kwargs):
+ super(Annotation, self).__init__(**kwargs)
+ self.code = code
+ self.arguments = arguments or []
+ self.message = message
diff --git a/third_party/python/fluent.syntax/fluent/syntax/errors.py b/third_party/python/fluent.syntax/fluent/syntax/errors.py
new file mode 100644
index 0000000000..cd137871b8
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/errors.py
@@ -0,0 +1,70 @@
+from __future__ import unicode_literals
+
+
+class ParseError(Exception):
+ def __init__(self, code, *args):
+ self.code = code
+ self.args = args
+ self.message = get_error_message(code, args)
+
+
+def get_error_message(code, args):
+ if code == 'E00001':
+ return 'Generic error'
+ if code == 'E0002':
+ return 'Expected an entry start'
+ if code == 'E0003':
+ return 'Expected token: "{}"'.format(args[0])
+ if code == 'E0004':
+ return 'Expected a character from range: "{}"'.format(args[0])
+ if code == 'E0005':
+ msg = 'Expected message "{}" to have a value or attributes'
+ return msg.format(args[0])
+ if code == 'E0006':
+ msg = 'Expected term "-{}" to have a value'
+ return msg.format(args[0])
+ if code == 'E0007':
+ return 'Keyword cannot end with a whitespace'
+ if code == 'E0008':
+ return 'The callee has to be an upper-case identifier or a term'
+ if code == 'E0009':
+ return 'The argument name has to be a simple identifier'
+ if code == 'E0010':
+ return 'Expected one of the variants to be marked as default (*)'
+ if code == 'E0011':
+ return 'Expected at least one variant after "->"'
+ if code == 'E0012':
+ return 'Expected value'
+ if code == 'E0013':
+ return 'Expected variant key'
+ if code == 'E0014':
+ return 'Expected literal'
+ if code == 'E0015':
+ return 'Only one variant can be marked as default (*)'
+ if code == 'E0016':
+ return 'Message references cannot be used as selectors'
+ if code == 'E0017':
+ return 'Terms cannot be used as selectors'
+ if code == 'E0018':
+ return 'Attributes of messages cannot be used as selectors'
+ if code == 'E0019':
+ return 'Attributes of terms cannot be used as placeables'
+ if code == 'E0020':
+ return 'Unterminated string expression'
+ if code == 'E0021':
+ return 'Positional arguments must not follow named arguments'
+ if code == 'E0022':
+ return 'Named arguments must be unique'
+ if code == 'E0024':
+ return 'Cannot access variants of a message.'
+ if code == 'E0025':
+ return 'Unknown escape sequence: \\{}.'.format(args[0])
+ if code == 'E0026':
+ return 'Invalid Unicode escape sequence: {}.'.format(args[0])
+ if code == 'E0027':
+ return 'Unbalanced closing brace in TextElement.'
+ if code == 'E0028':
+ return 'Expected an inline expression'
+ if code == 'E0029':
+ return 'Expected simple expression as selector'
+ return code
diff --git a/third_party/python/fluent.syntax/fluent/syntax/parser.py b/third_party/python/fluent.syntax/fluent/syntax/parser.py
new file mode 100644
index 0000000000..6731136cef
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/parser.py
@@ -0,0 +1,683 @@
+from __future__ import unicode_literals
+import re
+from . import ast
+from .stream import EOF, EOL, FluentParserStream
+from .errors import ParseError
+
+
+def with_span(fn):
+ def decorated(self, ps, *args, **kwargs):
+ if not self.with_spans:
+ return fn(self, ps, *args, **kwargs)
+
+ start = ps.index
+ node = fn(self, ps, *args, **kwargs)
+
+ # Don't re-add the span if the node already has it. This may happen
+ # when one decorated function calls another decorated function.
+ if node.span is not None:
+ return node
+
+ end = ps.index
+ node.add_span(start, end)
+ return node
+
+ return decorated
+
+
+class FluentParser(object):
+ """This class is used to parse Fluent source content.
+
+ ``with_spans`` enables source information in the form of
+ :class:`.ast.Span` objects for each :class:`.ast.SyntaxNode`.
+ """
+ def __init__(self, with_spans=True):
+ self.with_spans = with_spans
+
+ def parse(self, source):
+ """Create a :class:`.ast.Resource` from a Fluent source.
+ """
+ ps = FluentParserStream(source)
+ ps.skip_blank_block()
+
+ entries = []
+ last_comment = None
+
+ while ps.current_char:
+ entry = self.get_entry_or_junk(ps)
+ blank_lines = ps.skip_blank_block()
+
+ # Regular Comments require special logic. Comments may be attached
+ # to Messages or Terms if they are followed immediately by them.
+ # However they should parse as standalone when they're followed by
+ # Junk. Consequently, we only attach Comments once we know that the
+ # Message or the Term parsed successfully.
+ if isinstance(entry, ast.Comment) and len(blank_lines) == 0 \
+ and ps.current_char:
+ # Stash the comment and decide what to do with it
+ # in the next pass.
+ last_comment = entry
+ continue
+
+ if last_comment is not None:
+ if isinstance(entry, (ast.Message, ast.Term)):
+ entry.comment = last_comment
+ if self.with_spans:
+ entry.span.start = entry.comment.span.start
+ else:
+ entries.append(last_comment)
+ # In either case, the stashed comment has been dealt with;
+ # clear it.
+ last_comment = None
+
+ entries.append(entry)
+
+ res = ast.Resource(entries)
+
+ if self.with_spans:
+ res.add_span(0, ps.index)
+
+ return res
+
+ def parse_entry(self, source):
+ """Parse the first :class:`.ast.Entry` in source.
+
+ Skip all encountered comments and start parsing at the first :class:`.ast.Message`
+ or :class:`.ast.Term` start. Return :class:`.ast.Junk` if the parsing is not successful.
+
+ Preceding comments are ignored unless they contain syntax errors
+ themselves, in which case :class:`.ast.Junk` for the invalid comment is returned.
+ """
+ ps = FluentParserStream(source)
+ ps.skip_blank_block()
+
+ while ps.current_char == '#':
+ skipped = self.get_entry_or_junk(ps)
+ if isinstance(skipped, ast.Junk):
+ # Don't skip Junk comments.
+ return skipped
+ ps.skip_blank_block()
+
+ return self.get_entry_or_junk(ps)
+
+ def get_entry_or_junk(self, ps):
+ entry_start_pos = ps.index
+
+ try:
+ entry = self.get_entry(ps)
+ ps.expect_line_end()
+ return entry
+ except ParseError as err:
+ error_index = ps.index
+ ps.skip_to_next_entry_start(entry_start_pos)
+ next_entry_start = ps.index
+ if next_entry_start < error_index:
+ # The position of the error must be inside of the Junk's span.
+ error_index = next_entry_start
+
+ # Create a Junk instance
+ slice = ps.string[entry_start_pos:next_entry_start]
+ junk = ast.Junk(slice)
+ if self.with_spans:
+ junk.add_span(entry_start_pos, next_entry_start)
+ annot = ast.Annotation(err.code, err.args, err.message)
+ annot.add_span(error_index, error_index)
+ junk.add_annotation(annot)
+ return junk
+
+ def get_entry(self, ps):
+ if ps.current_char == '#':
+ return self.get_comment(ps)
+
+ if ps.current_char == '-':
+ return self.get_term(ps)
+
+ if ps.is_identifier_start():
+ return self.get_message(ps)
+
+ raise ParseError('E0002')
+
+ @with_span
+ def get_comment(self, ps):
+ # 0 - comment
+ # 1 - group comment
+ # 2 - resource comment
+ level = -1
+ content = ''
+
+ while True:
+ i = -1
+ while ps.current_char == '#' \
+ and (i < (2 if level == -1 else level)):
+ ps.next()
+ i += 1
+
+ if level == -1:
+ level = i
+
+ if ps.current_char != EOL:
+ ps.expect_char(' ')
+ ch = ps.take_char(lambda x: x != EOL)
+ while ch:
+ content += ch
+ ch = ps.take_char(lambda x: x != EOL)
+
+ if ps.is_next_line_comment(level=level):
+ content += ps.current_char
+ ps.next()
+ else:
+ break
+
+ if level == 0:
+ return ast.Comment(content)
+ elif level == 1:
+ return ast.GroupComment(content)
+ elif level == 2:
+ return ast.ResourceComment(content)
+
+ @with_span
+ def get_message(self, ps):
+ id = self.get_identifier(ps)
+ ps.skip_blank_inline()
+ ps.expect_char('=')
+
+ value = self.maybe_get_pattern(ps)
+ attrs = self.get_attributes(ps)
+
+ if value is None and len(attrs) == 0:
+ raise ParseError('E0005', id.name)
+
+ return ast.Message(id, value, attrs)
+
+ @with_span
+ def get_term(self, ps):
+ ps.expect_char('-')
+ id = self.get_identifier(ps)
+
+ ps.skip_blank_inline()
+ ps.expect_char('=')
+
+ value = self.maybe_get_pattern(ps)
+ if value is None:
+ raise ParseError('E0006', id.name)
+
+ attrs = self.get_attributes(ps)
+ return ast.Term(id, value, attrs)
+
+ @with_span
+ def get_attribute(self, ps):
+ ps.expect_char('.')
+
+ key = self.get_identifier(ps)
+
+ ps.skip_blank_inline()
+ ps.expect_char('=')
+
+ value = self.maybe_get_pattern(ps)
+ if value is None:
+ raise ParseError('E0012')
+
+ return ast.Attribute(key, value)
+
+ def get_attributes(self, ps):
+ attrs = []
+ ps.peek_blank()
+
+ while ps.is_attribute_start():
+ ps.skip_to_peek()
+ attr = self.get_attribute(ps)
+ attrs.append(attr)
+ ps.peek_blank()
+
+ return attrs
+
+ @with_span
+ def get_identifier(self, ps):
+ name = ps.take_id_start()
+ ch = ps.take_id_char()
+ while ch:
+ name += ch
+ ch = ps.take_id_char()
+
+ return ast.Identifier(name)
+
+ def get_variant_key(self, ps):
+ ch = ps.current_char
+
+ if ch is EOF:
+ raise ParseError('E0013')
+
+ cc = ord(ch)
+ if ((cc >= 48 and cc <= 57) or cc == 45): # 0-9, -
+ return self.get_number(ps)
+
+ return self.get_identifier(ps)
+
+ @with_span
+ def get_variant(self, ps, has_default):
+ default_index = False
+
+ if ps.current_char == '*':
+ if has_default:
+ raise ParseError('E0015')
+ ps.next()
+ default_index = True
+
+ ps.expect_char('[')
+ ps.skip_blank()
+
+ key = self.get_variant_key(ps)
+
+ ps.skip_blank()
+ ps.expect_char(']')
+
+ value = self.maybe_get_pattern(ps)
+ if value is None:
+ raise ParseError('E0012')
+
+ return ast.Variant(key, value, default_index)
+
+ def get_variants(self, ps):
+ variants = []
+ has_default = False
+
+ ps.skip_blank()
+ while ps.is_variant_start():
+ variant = self.get_variant(ps, has_default)
+
+ if variant.default:
+ has_default = True
+
+ variants.append(variant)
+ ps.expect_line_end()
+ ps.skip_blank()
+
+ if len(variants) == 0:
+ raise ParseError('E0011')
+
+ if not has_default:
+ raise ParseError('E0010')
+
+ return variants
+
+ def get_digits(self, ps):
+ num = ''
+
+ ch = ps.take_digit()
+ while ch:
+ num += ch
+ ch = ps.take_digit()
+
+ if len(num) == 0:
+ raise ParseError('E0004', '0-9')
+
+ return num
+
+ @with_span
+ def get_number(self, ps):
+ num = ''
+
+ if ps.current_char == '-':
+ num += '-'
+ ps.next()
+
+ num += self.get_digits(ps)
+
+ if ps.current_char == '.':
+ num += '.'
+ ps.next()
+ num += self.get_digits(ps)
+
+ return ast.NumberLiteral(num)
+
+ def maybe_get_pattern(self, ps):
+ '''Parse an inline or a block Pattern, or None
+
+ maybe_get_pattern distinguishes between patterns which start on the
+        same line as the identifier (aka inline singleline patterns and inline
+ multiline patterns), and patterns which start on a new line (aka block
+ patterns). The distinction is important for the dedentation logic: the
+ indent of the first line of a block pattern must be taken into account
+ when calculating the maximum common indent.
+ '''
+ ps.peek_blank_inline()
+ if ps.is_value_start():
+ ps.skip_to_peek()
+ return self.get_pattern(ps, is_block=False)
+
+ ps.peek_blank_block()
+ if ps.is_value_continuation():
+ ps.skip_to_peek()
+ return self.get_pattern(ps, is_block=True)
+
+ return None
+
+ @with_span
+ def get_pattern(self, ps, is_block):
+ elements = []
+ if is_block:
+ # A block pattern is a pattern which starts on a new line. Measure
+ # the indent of this first line for the dedentation logic.
+ blank_start = ps.index
+ first_indent = ps.skip_blank_inline()
+ elements.append(self.Indent(first_indent, blank_start, ps.index))
+ common_indent_length = len(first_indent)
+ else:
+ common_indent_length = float('infinity')
+
+ while ps.current_char:
+ if ps.current_char == EOL:
+ blank_start = ps.index
+ blank_lines = ps.peek_blank_block()
+ if ps.is_value_continuation():
+ ps.skip_to_peek()
+ indent = ps.skip_blank_inline()
+ common_indent_length = min(common_indent_length, len(indent))
+ elements.append(self.Indent(blank_lines + indent, blank_start, ps.index))
+ continue
+
+ # The end condition for get_pattern's while loop is a newline
+ # which is not followed by a valid pattern continuation.
+ ps.reset_peek()
+ break
+
+ if ps.current_char == '}':
+ raise ParseError('E0027')
+
+ if ps.current_char == '{':
+ element = self.get_placeable(ps)
+ else:
+ element = self.get_text_element(ps)
+
+ elements.append(element)
+
+ dedented = self.dedent(elements, common_indent_length)
+ return ast.Pattern(dedented)
+
+ class Indent(ast.SyntaxNode):
+ def __init__(self, value, start, end):
+ super(FluentParser.Indent, self).__init__()
+ self.value = value
+ self.add_span(start, end)
+
+ def dedent(self, elements, common_indent):
+ '''Dedent a list of elements by removing the maximum common indent from
+ the beginning of text lines. The common indent is calculated in
+ get_pattern.
+ '''
+ trimmed = []
+
+ for element in elements:
+ if isinstance(element, ast.Placeable):
+ trimmed.append(element)
+ continue
+
+ if isinstance(element, self.Indent):
+ # Strip the common indent.
+ element.value = element.value[:len(element.value) - common_indent]
+ if len(element.value) == 0:
+ continue
+
+ prev = trimmed[-1] if len(trimmed) > 0 else None
+ if isinstance(prev, ast.TextElement):
+ # Join adjacent TextElements by replacing them with their sum.
+ sum = ast.TextElement(prev.value + element.value)
+ if self.with_spans:
+ sum.add_span(prev.span.start, element.span.end)
+ trimmed[-1] = sum
+ continue
+
+ if isinstance(element, self.Indent):
+ # If the indent hasn't been merged into a preceding
+ # TextElements, convert it into a new TextElement.
+ text_element = ast.TextElement(element.value)
+ if self.with_spans:
+ text_element.add_span(element.span.start, element.span.end)
+ element = text_element
+
+ trimmed.append(element)
+
+ # Trim trailing whitespace from the Pattern.
+ last_element = trimmed[-1] if len(trimmed) > 0 else None
+ if isinstance(last_element, ast.TextElement):
+ last_element.value = last_element.value.rstrip(' \t\n\r')
+ if last_element.value == "":
+ trimmed.pop()
+
+ return trimmed
+
+ @with_span
+ def get_text_element(self, ps):
+ buf = ''
+
+ while ps.current_char:
+ ch = ps.current_char
+
+ if ch == '{' or ch == '}':
+ return ast.TextElement(buf)
+
+ if ch == EOL:
+ return ast.TextElement(buf)
+
+ buf += ch
+ ps.next()
+
+ return ast.TextElement(buf)
+
+ def get_escape_sequence(self, ps):
+ next = ps.current_char
+
+ if next == '\\' or next == '"':
+ ps.next()
+ return '\\{}'.format(next)
+
+ if next == 'u':
+ return self.get_unicode_escape_sequence(ps, next, 4)
+
+ if next == 'U':
+ return self.get_unicode_escape_sequence(ps, next, 6)
+
+ raise ParseError('E0025', next)
+
+ def get_unicode_escape_sequence(self, ps, u, digits):
+ ps.expect_char(u)
+ sequence = ''
+ for _ in range(digits):
+ ch = ps.take_hex_digit()
+ if not ch:
+ raise ParseError('E0026', '\\{}{}{}'.format(u, sequence, ps.current_char))
+ sequence += ch
+
+ return '\\{}{}'.format(u, sequence)
+
+ @with_span
+ def get_placeable(self, ps):
+ ps.expect_char('{')
+ ps.skip_blank()
+ expression = self.get_expression(ps)
+ ps.expect_char('}')
+ return ast.Placeable(expression)
+
    @with_span
    def get_expression(self, ps):
        """Parse an expression: either a plain inline expression or a
        SelectExpression when the selector is followed by '->'.

        Raises ParseError E0016/E0017/E0018/E0029 for invalid selector
        kinds and E0019 for a term attribute used outside a selector.
        """
        selector = self.get_inline_expression(ps)

        ps.skip_blank()

        if ps.current_char == '-':
            # Only '->' starts a select expression; a lone '-' means the
            # expression ended and the peeked char belongs to what follows.
            if ps.peek() != '>':
                ps.reset_peek()
                return selector

            # Validate the selector kind before committing to the arrow.
            if isinstance(selector, ast.MessageReference):
                if selector.attribute is None:
                    raise ParseError('E0016')
                else:
                    raise ParseError('E0018')

            elif (
                isinstance(selector, ast.TermReference)
            ):
                # Term values can't select; only term attributes can.
                if selector.attribute is None:
                    raise ParseError('E0017')
            elif not (
                isinstance(selector, (
                    ast.StringLiteral,
                    ast.NumberLiteral,
                    ast.VariableReference,
                    ast.FunctionReference,
                ))
            ):
                raise ParseError('E0029')

            # Consume the two characters of '->'.
            ps.next()
            ps.next()

            ps.skip_blank_inline()
            ps.expect_line_end()

            variants = self.get_variants(ps)
            return ast.SelectExpression(selector, variants)

        # Term attributes may only appear as selectors, never as values.
        if (
            isinstance(selector, ast.TermReference)
            and selector.attribute is not None
        ):
            raise ParseError('E0019')

        return selector
+
+ @with_span
+ def get_inline_expression(self, ps):
+ if ps.current_char == '{':
+ return self.get_placeable(ps)
+
+ if ps.is_number_start():
+ return self.get_number(ps)
+
+ if ps.current_char == '"':
+ return self.get_string(ps)
+
+ if ps.current_char == '$':
+ ps.next()
+ id = self.get_identifier(ps)
+ return ast.VariableReference(id)
+
+ if ps.current_char == '-':
+ ps.next()
+ id = self.get_identifier(ps)
+ attribute = None
+ if ps.current_char == '.':
+ ps.next()
+ attribute = self.get_identifier(ps)
+ arguments = None
+ ps.peek_blank()
+ if ps.current_peek == '(':
+ ps.skip_to_peek()
+ arguments = self.get_call_arguments(ps)
+ return ast.TermReference(id, attribute, arguments)
+
+ if ps.is_identifier_start():
+ id = self.get_identifier(ps)
+ ps.peek_blank()
+
+ if ps.current_peek == '(':
+ # It's a Function. Ensure it's all upper-case.
+ if not re.match('^[A-Z][A-Z0-9_-]*$', id.name):
+ raise ParseError('E0008')
+ ps.skip_to_peek()
+ args = self.get_call_arguments(ps)
+ return ast.FunctionReference(id, args)
+
+ attribute = None
+ if ps.current_char == '.':
+ ps.next()
+ attribute = self.get_identifier(ps)
+
+ return ast.MessageReference(id, attribute)
+
+ raise ParseError('E0028')
+
+ @with_span
+ def get_call_argument(self, ps):
+ exp = self.get_inline_expression(ps)
+
+ ps.skip_blank()
+
+ if ps.current_char != ':':
+ return exp
+
+ if isinstance(exp, ast.MessageReference) and exp.attribute is None:
+ ps.next()
+ ps.skip_blank()
+
+ value = self.get_literal(ps)
+ return ast.NamedArgument(exp.id, value)
+
+ raise ParseError('E0009')
+
+ @with_span
+ def get_call_arguments(self, ps):
+ positional = []
+ named = []
+ argument_names = set()
+
+ ps.expect_char('(')
+ ps.skip_blank()
+
+ while True:
+ if ps.current_char == ')':
+ break
+
+ arg = self.get_call_argument(ps)
+ if isinstance(arg, ast.NamedArgument):
+ if arg.name.name in argument_names:
+ raise ParseError('E0022')
+ named.append(arg)
+ argument_names.add(arg.name.name)
+ elif len(argument_names) > 0:
+ raise ParseError('E0021')
+ else:
+ positional.append(arg)
+
+ ps.skip_blank()
+
+ if ps.current_char == ',':
+ ps.next()
+ ps.skip_blank()
+ continue
+
+ break
+
+ ps.expect_char(')')
+ return ast.CallArguments(positional, named)
+
+ @with_span
+ def get_string(self, ps):
+ value = ''
+
+ ps.expect_char('"')
+
+ while True:
+ ch = ps.take_char(lambda x: x != '"' and x != EOL)
+ if not ch:
+ break
+ if ch == '\\':
+ value += self.get_escape_sequence(ps)
+ else:
+ value += ch
+
+ if ps.current_char == EOL:
+ raise ParseError('E0020')
+
+ ps.expect_char('"')
+
+ return ast.StringLiteral(value)
+
+ @with_span
+ def get_literal(self, ps):
+ if ps.is_number_start():
+ return self.get_number(ps)
+ if ps.current_char == '"':
+ return self.get_string(ps)
+ raise ParseError('E0014')
diff --git a/third_party/python/fluent.syntax/fluent/syntax/serializer.py b/third_party/python/fluent.syntax/fluent/syntax/serializer.py
new file mode 100644
index 0000000000..7c1bb08727
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/serializer.py
@@ -0,0 +1,233 @@
+from __future__ import unicode_literals
+from . import ast
+
+
def indent_except_first_line(content):
    """Prefix every line after the first with the continuation indent."""
    lines = content.splitlines(True)
    return " ".join(lines)
+
+
def includes_new_line(elem):
    """True for a TextElement whose value spans more than one line."""
    if not isinstance(elem, ast.TextElement):
        return False
    return "\n" in elem.value
+
+
def is_select_expr(elem):
    """True for a Placeable that wraps a SelectExpression."""
    return (
        isinstance(elem, ast.Placeable)
        and isinstance(elem.expression, ast.SelectExpression)
    )
+
+
def should_start_on_new_line(pattern):
    """Decide whether a serialized pattern must begin on its own line.

    Multiline patterns (containing a select expression or a newline in
    their text) start on a new line, unless their first character would
    be mis-parsed as a variant/attribute marker ('[', '.', '*').
    """
    is_multiline = any(
        is_select_expr(elem) or includes_new_line(elem)
        for elem in pattern.elements
    )
    if not is_multiline:
        return False

    first_element = pattern.elements[0]
    if isinstance(first_element, ast.TextElement):
        if first_element.value[0] in ("[", ".", "*"):
            return False
    return True
+
+
class FluentSerializer(object):
    """FluentSerializer converts :class:`.ast.SyntaxNode` objects to unicode strings.

    `with_junk` controls if parse errors are written back or not.
    """
    # Bit flag recorded in `state`: at least one entry was already serialized,
    # so standalone comments need a separating blank line.
    HAS_ENTRIES = 1

    # Standalone comment entry types and the '#' prefix each serializes with.
    # Kept in the same check order as the original if/elif chain.
    _COMMENT_PREFIXES = (
        (ast.Comment, "#"),
        (ast.GroupComment, "##"),
        (ast.ResourceComment, "###"),
    )

    def __init__(self, with_junk=False):
        self.with_junk = with_junk

    def serialize(self, resource):
        "Serialize a :class:`.ast.Resource` to a string."
        if not isinstance(resource, ast.Resource):
            raise Exception('Unknown resource type: {}'.format(type(resource)))

        state = 0

        parts = []
        for entry in resource.body:
            if not isinstance(entry, ast.Junk) or self.with_junk:
                parts.append(self.serialize_entry(entry, state))
                if not state & self.HAS_ENTRIES:
                    state |= self.HAS_ENTRIES

        return "".join(parts)

    def serialize_entry(self, entry, state=0):
        "Serialize an :class:`.ast.Entry` to a string."
        if isinstance(entry, ast.Message):
            return serialize_message(entry)
        if isinstance(entry, ast.Term):
            return serialize_term(entry)
        # Table-driven replacement for three duplicated comment branches.
        for comment_type, prefix in self._COMMENT_PREFIXES:
            if isinstance(entry, comment_type):
                serialized = serialize_comment(entry, prefix)
                if state & self.HAS_ENTRIES:
                    # Separate the comment from the previous entry.
                    return "\n{}\n".format(serialized)
                return "{}\n".format(serialized)
        if isinstance(entry, ast.Junk):
            return serialize_junk(entry)
        raise Exception('Unknown entry type: {}'.format(type(entry)))
+
+
def serialize_comment(comment, prefix="#"):
    """Render a comment body, prefixing every line; ends with a newline.

    Blank lines carry the bare prefix with no trailing space.
    """
    lines = []
    for line in comment.content.split("\n"):
        lines.append(prefix if not line else "{} {}".format(prefix, line))
    prefixed = "\n".join(lines)
    # Add the trailing line break.
    return '{}\n'.format(prefixed)
+
+
def serialize_junk(junk):
    """Junk is written back verbatim; its content is the raw source slice."""
    return junk.content
+
+
def serialize_message(message):
    """Serialize a Message: optional comment, `id =`, value, attributes."""
    parts = []

    if message.comment:
        parts.append(serialize_comment(message.comment))

    parts.append("{} =".format(message.id.name))

    # A message's value is optional when it has attributes.
    if message.value:
        parts.append(serialize_pattern(message.value))

    for attribute in (message.attributes or []):
        parts.append(serialize_attribute(attribute))

    parts.append("\n")
    return ''.join(parts)
+
+
def serialize_term(term):
    """Serialize a Term: optional comment, `-id =`, value, attributes."""
    parts = []

    if term.comment:
        parts.append(serialize_comment(term.comment))

    # Terms always have a value, so no presence check as with messages.
    parts.append("-{} =".format(term.id.name))
    parts.append(serialize_pattern(term.value))

    for attribute in (term.attributes or []):
        parts.append(serialize_attribute(attribute))

    parts.append("\n")
    return ''.join(parts)
+
+
def serialize_attribute(attribute):
    """Serialize one `.name = value` attribute, indented under its entry."""
    value = indent_except_first_line(serialize_pattern(attribute.value))
    return "\n .{} ={}".format(attribute.id.name, value)
+
+
def serialize_pattern(pattern):
    """Serialize a Pattern's elements, indenting continuation lines."""
    content = indent_except_first_line(
        "".join(serialize_element(elem) for elem in pattern.elements))

    # Multiline patterns are pushed onto their own line.
    lead = '\n ' if should_start_on_new_line(pattern) else ' '
    return '{}{}'.format(lead, content)
+
+
def serialize_element(element):
    """Serialize a single pattern element (raw text or a placeable)."""
    if isinstance(element, ast.Placeable):
        return serialize_placeable(element)
    if isinstance(element, ast.TextElement):
        return element.value
    raise Exception('Unknown element type: {}'.format(type(element)))
+
+
def serialize_placeable(placeable):
    """Serialize a Placeable, controlling braces and inner whitespace."""
    expr = placeable.expression
    if isinstance(expr, ast.Placeable):
        return "{{{}}}".format(serialize_placeable(expr))
    if isinstance(expr, ast.SelectExpression):
        # Special-case select expressions to control the whitespace around
        # the opening and the closing brace.  (Fixed typo: "withespace".)
        return "{{ {}}}".format(serialize_expression(expr))
    if isinstance(expr, ast.Expression):
        return "{{ {} }}".format(serialize_expression(expr))
    # Previously this fell through and silently returned None; fail loudly
    # instead, consistent with serialize_element and serialize_expression.
    raise Exception('Unknown expression type: {}'.format(type(expr)))
+
+
def serialize_expression(expression):
    """Serialize any expression node to its Fluent source form.

    Handles literals, variable/term/message/function references,
    select expressions, and nested placeables; raises for unknown types.
    """
    if isinstance(expression, ast.StringLiteral):
        return '"{}"'.format(expression.value)
    if isinstance(expression, ast.NumberLiteral):
        # NumberLiteral keeps its value as the original source text.
        return expression.value
    if isinstance(expression, ast.VariableReference):
        return "${}".format(expression.id.name)
    if isinstance(expression, ast.TermReference):
        out = "-{}".format(expression.id.name)
        if expression.attribute is not None:
            out += ".{}".format(expression.attribute.name)
        # Terms may be parameterized with call arguments.
        if expression.arguments is not None:
            out += serialize_call_arguments(expression.arguments)
        return out
    if isinstance(expression, ast.MessageReference):
        out = expression.id.name
        if expression.attribute is not None:
            out += ".{}".format(expression.attribute.name)
        return out
    if isinstance(expression, ast.FunctionReference):
        args = serialize_call_arguments(expression.arguments)
        return "{}{}".format(expression.id.name, args)
    if isinstance(expression, ast.SelectExpression):
        out = "{} ->".format(
            serialize_expression(expression.selector))
        # Each variant begins on its own line (see serialize_variant).
        for variant in expression.variants:
            out += serialize_variant(variant)
        return "{}\n".format(out)
    if isinstance(expression, ast.Placeable):
        return serialize_placeable(expression)
    raise Exception('Unknown expression type: {}'.format(type(expression)))
+
+
def serialize_variant(variant):
    """Serialize one `[key] value` variant; the default is marked with *."""
    marker = " *" if variant.default else " "
    key = serialize_variant_key(variant.key)
    value = indent_except_first_line(serialize_pattern(variant.value))
    return "\n{}[{}]{}".format(marker, key, value)
+
+
def serialize_call_arguments(expr):
    """Serialize `(positional, named)`; positional arguments come first."""
    positional = ", ".join(
        serialize_expression(arg) for arg in expr.positional)
    named = ", ".join(
        serialize_named_argument(arg) for arg in expr.named)
    if expr.positional and expr.named:
        return '({}, {})'.format(positional, named)
    # At most one of the two groups is non-empty here.
    return '({})'.format(positional or named)
+
+
def serialize_named_argument(arg):
    """Serialize a `name: value` call argument."""
    return "{}: {}".format(arg.name.name, serialize_expression(arg.value))
+
+
def serialize_variant_key(key):
    """Variant keys are identifiers or number literals."""
    if isinstance(key, ast.NumberLiteral):
        return key.value
    if isinstance(key, ast.Identifier):
        return key.name
    raise Exception('Unknown variant key type: {}'.format(type(key)))
diff --git a/third_party/python/fluent.syntax/fluent/syntax/stream.py b/third_party/python/fluent.syntax/fluent/syntax/stream.py
new file mode 100644
index 0000000000..1f3852c8c2
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/stream.py
@@ -0,0 +1,282 @@
+from __future__ import unicode_literals
+from .errors import ParseError
+
+
class ParserStream(object):
    """Character cursor over a source string.

    Maintains two positions: `index`, the committed cursor, and
    `index + peek_offset`, a look-ahead that can be reset or committed
    with `skip_to_peek`.  A CRLF pair reads as a single logical '\\n'.
    """

    def __init__(self, string):
        self.string = string
        self.index = 0
        self.peek_offset = 0

    def get(self, offset):
        """Raw character at `offset`, or None past the end of input."""
        try:
            return self.string[offset]
        except IndexError:
            return None

    def char_at(self, offset):
        """Logical character at `offset`: a CRLF pair reads as '\\n'.

        The cursor is not moved; it keeps pointing at the start of the
        compound CRLF sequence, so [inclusive, exclusive) slices of the
        underlying string continue to work properly.
        """
        ch = self.get(offset)
        if ch == '\r' and self.get(offset + 1) == '\n':
            return '\n'
        return ch

    @property
    def current_char(self):
        """Logical character under the committed cursor."""
        return self.char_at(self.index)

    @property
    def current_peek(self):
        """Logical character under the look-ahead cursor."""
        return self.char_at(self.index + self.peek_offset)

    def next(self):
        """Advance the cursor one logical character; resets the peek."""
        self.peek_offset = 0
        # A CRLF pair advances by two raw characters.
        step = 2 if (self.get(self.index) == '\r'
                     and self.get(self.index + 1) == '\n') else 1
        self.index += step
        return self.get(self.index)

    def peek(self):
        """Advance the look-ahead one logical character."""
        pos = self.index + self.peek_offset
        step = 2 if (self.get(pos) == '\r'
                     and self.get(pos + 1) == '\n') else 1
        self.peek_offset += step
        return self.get(self.index + self.peek_offset)

    def reset_peek(self, offset=0):
        """Move the look-ahead back to `offset` characters past the cursor."""
        self.peek_offset = offset

    def skip_to_peek(self):
        """Commit the cursor to the current look-ahead position."""
        self.index += self.peek_offset
        self.peek_offset = 0
+
+
# The logical end-of-line character; ParserStream normalizes CRLF to this.
EOL = '\n'
# End of input is represented as None (what ParserStream.get returns
# past the end of the string).
EOF = None
# Characters which may not start a pattern continuation line; they mark
# the end of a placeable, an attribute, or a variant instead.
SPECIAL_LINE_START_CHARS = ('}', '.', '[', '*')
+
+
class FluentParserStream(ParserStream):
    """ParserStream subclass with Fluent-specific scanning helpers.

    Conventions: `peek_*` methods advance only the look-ahead cursor,
    `skip_*` methods commit the look-ahead, `is_*` predicates restore
    the peek position they consumed before returning, and `take_*`
    methods consume a matching character (raising or returning False
    otherwise).
    """

    def peek_blank_inline(self):
        """Peek past spaces on the current line; return the peeked slice."""
        start = self.index + self.peek_offset
        while self.current_peek == ' ':
            self.peek()
        return self.string[start:self.index + self.peek_offset]

    def skip_blank_inline(self):
        """Consume spaces on the current line; return the skipped slice."""
        blank = self.peek_blank_inline()
        self.skip_to_peek()
        return blank

    def peek_blank_block(self):
        """Peek past fully-blank lines; return one EOL per blank line seen."""
        blank = ""
        while True:
            line_start = self.peek_offset
            self.peek_blank_inline()

            if self.current_peek == EOL:
                blank += EOL
                self.peek()
                continue

            if self.current_peek is EOF:
                # Treat the blank line at EOF as a blank block.
                return blank

            # Any other char; reset to column 1 on this line.
            self.reset_peek(line_start)
            return blank

    def skip_blank_block(self):
        """Consume fully-blank lines; return one EOL per line skipped."""
        blank = self.peek_blank_block()
        self.skip_to_peek()
        return blank

    def peek_blank(self):
        """Peek past any run of spaces and newlines."""
        while self.current_peek in (" ", EOL):
            self.peek()

    def skip_blank(self):
        """Consume any run of spaces and newlines."""
        self.peek_blank()
        self.skip_to_peek()

    def expect_char(self, ch):
        """Consume the expected character or raise ParseError('E0003')."""
        if self.current_char == ch:
            self.next()
            return True

        raise ParseError('E0003', ch)

    def expect_line_end(self):
        """Consume an end of line (EOF also qualifies) or raise E0003."""
        if self.current_char is EOF:
            # EOF is a valid line end in Fluent.
            return True

        if self.current_char == EOL:
            self.next()
            return True

        # Unicode Character 'SYMBOL FOR NEWLINE' (U+2424)
        raise ParseError('E0003', '\u2424')

    def take_char(self, f):
        """Consume and return the current char if predicate f accepts it.

        Returns EOF (None) at end of input and False on a rejected char.
        """
        ch = self.current_char
        if ch is EOF:
            return EOF
        if f(ch):
            self.next()
            return ch
        return False

    def is_char_id_start(self, ch):
        """True if ch is an ASCII letter (a valid identifier start)."""
        if ch is EOF:
            return False

        # 97-122: 'a'-'z'; 65-90: 'A'-'Z'.
        cc = ord(ch)
        return (cc >= 97 and cc <= 122) or \
            (cc >= 65 and cc <= 90)

    def is_identifier_start(self):
        """True if the look-ahead cursor is at an identifier start."""
        return self.is_char_id_start(self.current_peek)

    def is_number_start(self):
        """True at a number literal, optionally prefixed with '-'."""
        ch = self.peek() if self.current_char == '-' else self.current_char
        if ch is EOF:
            self.reset_peek()
            return False

        # 48-57: '0'-'9'.
        cc = ord(ch)
        is_digit = cc >= 48 and cc <= 57
        self.reset_peek()
        return is_digit

    def is_char_pattern_continuation(self, ch):
        """True if ch may start a continuation line of a pattern value."""
        if ch is EOF:
            return False

        return ch not in SPECIAL_LINE_START_CHARS

    def is_value_start(self):
        # Inline Patterns may start with any char.
        return self.current_peek is not EOF and self.current_peek != EOL

    def is_value_continuation(self):
        """True if the (peeked) next line continues the pattern value."""
        column1 = self.peek_offset
        self.peek_blank_inline()

        # A placeable continues the value even without indentation.
        if self.current_peek == '{':
            self.reset_peek(column1)
            return True

        # No indentation at all: this line cannot be a continuation.
        if self.peek_offset - column1 == 0:
            return False

        if self.is_char_pattern_continuation(self.current_peek):
            self.reset_peek(column1)
            return True

        return False

    # -1 - any
    # 0 - comment
    # 1 - group comment
    # 2 - resource comment
    def is_next_line_comment(self, level=-1):
        """True if the next line starts a comment of the given level."""
        if self.current_peek != EOL:
            return False

        i = 0

        # Count '#' characters: up to level+1 for a specific level, or up
        # to 3 when any comment level is accepted.
        while (i <= level or (level == -1 and i < 3)):
            if self.peek() != '#':
                if i <= level and level != -1:
                    self.reset_peek()
                    return False
                break
            i += 1

        # The first char after #, ## or ###.
        if self.peek() in (' ', EOL):
            self.reset_peek()
            return True

        self.reset_peek()
        return False

    def is_variant_start(self):
        """True at a variant key: '[' or '*[', but not the legacy '[['."""
        current_peek_offset = self.peek_offset
        if self.current_peek == '*':
            self.peek()
        if self.current_peek == '[' and self.peek() != '[':
            self.reset_peek(current_peek_offset)
            return True

        self.reset_peek(current_peek_offset)
        return False

    def is_attribute_start(self):
        """True if the look-ahead cursor is at an attribute ('.name')."""
        return self.current_peek == '.'

    def skip_to_next_entry_start(self, junk_start):
        """Error recovery: advance to the next likely entry start line."""
        last_newline = self.string.rfind(EOL, 0, self.index)
        if junk_start < last_newline:
            # Last seen newline is _after_ the junk start. It's safe to rewind
            # without the risk of resuming at the same broken entry.
            self.index = last_newline

        while self.current_char:
            # We're only interested in beginnings of line.
            if self.current_char != EOL:
                self.next()
                continue

            # Break if the first char in this line looks like an entry start.
            first = self.next()
            if self.is_char_id_start(first) or first == '-' or first == '#':
                break

            # Syntax 0.4 compatibility
            peek = self.peek()
            self.reset_peek()
            if (first, peek) == ('/', '/') or (first, peek) == ('[', '['):
                break

    def take_id_start(self):
        """Consume an identifier-start letter or raise ParseError('E0004')."""
        if self.is_char_id_start(self.current_char):
            ret = self.current_char
            self.next()
            return ret

        raise ParseError('E0004', 'a-zA-Z')

    def take_id_char(self):
        """Consume an identifier character ([a-zA-Z0-9_-]) if present."""
        def closure(ch):
            cc = ord(ch)
            return ((cc >= 97 and cc <= 122) or
                    (cc >= 65 and cc <= 90) or
                    (cc >= 48 and cc <= 57) or
                    cc == 95 or cc == 45)
        return self.take_char(closure)

    def take_digit(self):
        """Consume a decimal digit if present."""
        def closure(ch):
            cc = ord(ch)
            return (cc >= 48 and cc <= 57)
        return self.take_char(closure)

    def take_hex_digit(self):
        """Consume a hexadecimal digit if present."""
        def closure(ch):
            cc = ord(ch)
            return (
                (cc >= 48 and cc <= 57)  # 0-9
                or (cc >= 65 and cc <= 70)  # A-F
                or (cc >= 97 and cc <= 102))  # a-f
        return self.take_char(closure)
diff --git a/third_party/python/fluent.syntax/fluent/syntax/visitor.py b/third_party/python/fluent.syntax/fluent/syntax/visitor.py
new file mode 100644
index 0000000000..491a79597c
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/visitor.py
@@ -0,0 +1,65 @@
+# coding=utf-8
+from __future__ import unicode_literals, absolute_import
+
+from .ast import BaseNode
+
+
class Visitor(object):
    '''Read-only visitor pattern.

    Subclass this to gather information from an AST.
    To generally define which nodes not to descend in to, overload
    `generic_visit`.
    To handle specific node types, add methods like `visit_Pattern`.
    If you want to still descend into the children of the node, call
    `generic_visit` of the superclass.
    '''

    def visit(self, node):
        """Dispatch to visit_<TypeName> if defined, else generic_visit."""
        if isinstance(node, list):
            for item in node:
                self.visit(item)
            return
        if not isinstance(node, BaseNode):
            # Scalars (strings, numbers, None) carry no children.
            return
        handler = getattr(
            self, 'visit_{}'.format(type(node).__name__), self.generic_visit)
        handler(node)

    def generic_visit(self, node):
        """Visit every attribute value of the node."""
        for value in vars(node).values():
            self.visit(value)
+
+
class Transformer(Visitor):
    '''In-place AST Transformer pattern.

    Subclass this to create an in-place modified variant
    of the given AST.
    If you need to keep the original AST around, pass
    a `node.clone()` to the transformer.
    '''

    def visit(self, node):
        # Unlike Visitor.visit, return the (possibly replaced) node so the
        # caller can splice the result back in; non-node values pass
        # through unchanged.
        if not isinstance(node, BaseNode):
            return node

        nodename = type(node).__name__
        visit = getattr(self, 'visit_{}'.format(nodename), self.generic_visit)
        return visit(node)

    def generic_visit(self, node):
        # Rewrite each attribute of the node with the result of visiting it.
        for propname, propvalue in vars(node).items():
            if isinstance(propvalue, list):
                new_vals = []
                for child in propvalue:
                    new_val = self.visit(child)
                    # A visit_* returning None drops the child from the list.
                    if new_val is not None:
                        new_vals.append(new_val)
                # in-place manipulation
                propvalue[:] = new_vals
            elif isinstance(propvalue, BaseNode):
                new_val = self.visit(propvalue)
                if new_val is None:
                    # A visit_* returning None removes the attribute.
                    delattr(node, propname)
                else:
                    setattr(node, propname, new_val)
        return node