From 61ac4388717c904501018533b43fc5de0cdfa06c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 15 Oct 2021 12:40:03 +0200
Subject: [PATCH] Remove the old templating system (#3445)

* Inline resolve_references in dump-swagger

Since this is the only bit of the old templating system we still use, let's
inline it.

OrderedLoader and OrderedDict are now redundant, because all dicts in Python
preserve insertion order.

* Remove the old templating system

We've now replaced the old templates with hugo, so we can get rid of this mess.
---
 .gitignore                                    |    3 -
 scripts/dump-swagger.py                       |   25 +-
 scripts/requirements.txt                      |    5 -
 scripts/templating/README.md                  |   88 --
 scripts/templating/batesian/__init__.py       |   38 -
 scripts/templating/batesian/sections.py       |   77 --
 scripts/templating/batesian/units.py          |   59 -
 scripts/templating/build.py                   |  286 ----
 .../templating/matrix_templates/__init__.py   |   22 -
 .../templating/matrix_templates/sections.py   |  244 ----
 .../matrix_templates/templates/apis.tmpl      |    4 -
 .../templates/common-event-fields.tmpl        |   12 -
 .../matrix_templates/templates/events.tmpl    |   32 -
 .../matrix_templates/templates/http-api.tmpl  |   94 --
 .../matrix_templates/templates/msgtypes.tmpl  |   15 -
 .../templates/schema-definition.tmpl          |   21 -
 .../matrix_templates/templates/tables.tmpl    |  106 --
 scripts/templating/matrix_templates/units.py  | 1153 -----------------
 18 files changed, 21 insertions(+), 2263 deletions(-)
 delete mode 100644 scripts/templating/README.md
 delete mode 100644 scripts/templating/batesian/__init__.py
 delete mode 100644 scripts/templating/batesian/sections.py
 delete mode 100644 scripts/templating/batesian/units.py
 delete mode 100644 scripts/templating/build.py
 delete mode 100644 scripts/templating/matrix_templates/__init__.py
 delete mode 100644 scripts/templating/matrix_templates/sections.py
 delete mode 100644 scripts/templating/matrix_templates/templates/apis.tmpl
 delete mode 100644 scripts/templating/matrix_templates/templates/common-event-fields.tmpl
 delete mode 100644 scripts/templating/matrix_templates/templates/events.tmpl
 delete mode 100644 scripts/templating/matrix_templates/templates/http-api.tmpl
 delete mode 100644 scripts/templating/matrix_templates/templates/msgtypes.tmpl
 delete mode 100644 scripts/templating/matrix_templates/templates/schema-definition.tmpl
 delete mode 100644 scripts/templating/matrix_templates/templates/tables.tmpl
 delete mode 100644 scripts/templating/matrix_templates/units.py

diff --git a/.gitignore b/.gitignore
index 2a58c6c1..6e5a25c7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,11 +4,8 @@ node_modules
 /data/msc
 /env*
 /resources
-/scripts/gen
-/scripts/continuserv/continuserv
 /scripts/swagger
 /scripts/tmp
-/templating/out
 /hugo-config.toml
 /public
 *.pyc
diff --git a/scripts/dump-swagger.py b/scripts/dump-swagger.py
index 784eb644..8b594eef 100755
--- a/scripts/dump-swagger.py
+++ b/scripts/dump-swagger.py
@@ -30,12 +30,29 @@ import yaml
 
 scripts_dir = os.path.dirname(os.path.abspath(__file__))
-templating_dir = os.path.join(scripts_dir, "templating")
 api_dir = os.path.join(os.path.dirname(scripts_dir), "data", "api")
 
-sys.path.insert(0, templating_dir)
+def resolve_references(path, schema):
+    if isinstance(schema, dict):
+        # do $ref first
+        if '$ref' in schema:
+            value = schema['$ref']
+            path = os.path.join(os.path.dirname(path), value)
+            with open(path, encoding="utf-8") as f:
+                ref = yaml.safe_load(f)
+            result = resolve_references(path, ref)
+            del schema['$ref']
+        else:
+            result = {}
+
+        for key, value in schema.items():
+            result[key] = resolve_references(path, value)
+        return result
+    elif isinstance(schema, list):
+        return [resolve_references(path, value) for value in schema]
+    else:
+        return schema
 
-from matrix_templates import units
 
 parser = argparse.ArgumentParser(
     "dump-swagger.py - assemble the Swagger specs into a single JSON file"
 )
@@ -103,7 +120,7 @@ for filename in os.listdir(cs_api_dir):
     print("Reading swagger API: %s" % filepath)
     with open(filepath, "r") as f:
         api = yaml.safe_load(f.read())
-        api = units.resolve_references(filepath, api)
+        api = resolve_references(filepath, api)
 
     basePath = api['basePath']
     for path, methods in api["paths"].items():
diff --git a/scripts/requirements.txt b/scripts/requirements.txt
index 66027f91..2af156cc 100644
--- a/scripts/requirements.txt
+++ b/scripts/requirements.txt
@@ -1,10 +1,6 @@
 # no doubt older versions would be fine for many of these but these were
 # current at the time of writing
-docutils >= 0.14
-pygments >= 2.2.0
-Jinja2 >= 2.9.6
-
 # jsonschema 3.0.0 objects to the $refs in our schema file. TODO: figure out
 # why.
 jsonschema >= 2.6.0, < 3.0.0
 
@@ -12,4 +8,3 @@ jsonschema >= 2.6.0, < 3.0.0
 PyYAML >= 3.12
 requests >= 2.18.4
 towncrier == 18.6.0
-six >= 1.11.0
diff --git a/scripts/templating/README.md b/scripts/templating/README.md
deleted file mode 100644
index a4d0881c..00000000
--- a/scripts/templating/README.md
+++ /dev/null
@@ -1,88 +0,0 @@
-This folder contains the templates and a home-brewed templating system called
-Batesian for creating the spec. Batesian uses the templating system Jinja2 in
-Python.
-
-Installation
-------------
-```
-    $ pip install Jinja2
-```
-
-Running
--------
-To pass arbitrary files (not limited to RST) through the templating system:
-```
-    $ python build.py -i matrix_templates /random/file/path/here.rst
-```
-
-The template output can be found at ``out/here.rst``. For a full list of
-options, type ``python build.py --help``.
-
-Developing
-----------
-
-### Sections and Units
-Batesian is built around the concept of Sections and Units. Sections are strings
-which will be inserted into the provided document. Every section has a unique
-key name which is the template variable that it represents. Units are arbitrary
-python data. They are also represented by unique key names.
-
-### Adding template variables
-If you want to add a new template variable e.g. `{{foo_bar}}` which is replaced
-with the text `foobar`, you need to add a new Section:
-
- - Open `matrix_templates/sections.py`.
- - Add a new function to `MatrixSections` called `render_foo_bar`. The function
-   name after `render_` determines the template variable name, and the return
-   value of this function determines what will be inserted.
-
-   ```python
-   def render_foo_bar(self):
-       return "foobar"
-   ```
- - Run `build.py` with a file which has `{{foo_bar}}` in it, and it will be
-   replaced with `foobar`.
-
-### Adding data for template variables
-If you want to expose arbitrary data which can be used by `MatrixSections`, you
-need to add a new Unit:
-
- - Open `matrix_templates/units.py`.
- - Add a new function to `MatrixUnits` called `load_some_data`. Similar to
-   sections, the function name after `load_` determines the unit name, and the
-   return value of this function determines the value of the unit.
-
-   ```python
-   def load_some_data(self):
-       return {
-           "data": "this could be JSON from file from json.loads()",
-           "processed_data": "this data may have helper keys added",
-           "types": "it doesn't even need to be a dict. Whatever you want!"
- } - ``` - - In `MatrixSections`, you can now call `self.units.get("some_data")` to - retrieve the value you returned. - -### Using Jinja templates -Sections can use Jinja templates to return text. Batesian will attempt to load -all templates from `matrix_templates/templates/`. These can be accessed in -Section code via `template = self.env.get_template("name_of_template.tmpl")`. At -this point, the `template` is just a standard `jinja2.Template`. In fact, -`self.env` is just a `jinja2.Environment`. - -### Debugging -If you don't know why your template isn't behaving as you'd expect, or you just -want to add some informative logging, use `self.log` in either the Sections -class or Units class. You'll need to add `-v` to `build.py` for these lines to -show. - -About ------ - -Batesian was designed to be extremely simple and just use Python as its language -rather than another intermediary language like some other templating systems. -This provides a **lot** of flexibility since you aren't contrained by a -templating language. Batesian provides a thin abstraction over Jinja which is -very useful when you want to do random bits of processing then dump the output -into a Jinja template. Its name is derived from Batesian mimicry due to how the -templating system uses Python as its language, but in a harmless way. diff --git a/scripts/templating/batesian/__init__.py b/scripts/templating/batesian/__init__.py deleted file mode 100644 index e901590f..00000000 --- a/scripts/templating/batesian/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -class AccessKeyStore(object): - """Storage for arbitrary data. Monitors get calls so we know if they - were used or not.""" - - def __init__(self, existing_data=None): - if not existing_data: - existing_data = {} - self.data = existing_data - self.accessed_set = set() - - def keys(self): - return self.data.keys() - - def add(self, key, unit_dict): - self.data[key] = unit_dict - - def get(self, key): - self.accessed_set.add(key) - return self.data[key] - - def get_unaccessed_set(self): - data_list = set(self.data.keys()) - return data_list - self.accessed_set \ No newline at end of file diff --git a/scripts/templating/batesian/sections.py b/scripts/templating/batesian/sections.py deleted file mode 100644 index 18a622f6..00000000 --- a/scripts/templating/batesian/sections.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Parent class for writing sections.""" -import inspect -import os - - -class Sections(object): - """A class which creates sections for each method starting with "render_". - The key for the section is the text after "render_" - e.g. "render_room_events" has the section key "room_events" - """ - - def __init__(self, env, units, debug=False): - self.env = env - self.units = units - self.debug = debug - - def log(self, text): - if self.debug: - print("batesian:sections: %s" % text) - - def get_sections(self): - render_list = inspect.getmembers(self, predicate=inspect.ismethod) - section_dict = {} - for (func_name, func) in render_list: - if not func_name.startswith("render_"): - continue - section_key = func_name[len("render_"):] - self.log("Generating section '%s'" % section_key) - section = func() - if isinstance(section, str): - if section_key in section_dict: - raise Exception( - ("%s : Section %s already exists. It must have been " + - "generated dynamically. Check which render_ methods " + - "return a dict.") % - (func_name, section_key) - ) - section_dict[section_key] = section - self.log( - " Generated. Snippet => %s" % section[:60].replace("\n","") - ) - elif isinstance(section, dict): - self.log(" Generated multiple sections:") - for (k, v) in section.items(): - if not isinstance(k, str) or not isinstance(v, str): - raise Exception( - ("Method %s returned multiple sections as a dict but " + - "expected the dict elements to be strings but they aren't.") % - (func_name, ) - ) - if k in section_dict: - raise Exception( - "%s tried to produce section %s which already exists." % - (func_name, k) - ) - section_dict[k] = v - self.log( - " %s => %s" % (k, v[:60].replace("\n","")) - ) - else: - raise Exception( - "Section function '%s' didn't return a string/dict!" % func_name - ) - return section_dict \ No newline at end of file diff --git a/scripts/templating/batesian/units.py b/scripts/templating/batesian/units.py deleted file mode 100644 index 82cc52f9..00000000 --- a/scripts/templating/batesian/units.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Parent class for writing units.""" -import inspect - - -class Units(object): - - @staticmethod - def prop(obj, path): - # Helper method to extract nested property values - nested_keys = path.split("/") - val = obj - for key in nested_keys: - val = val.get(key, {}) - return val - - - def __init__(self, debug=False, substitutions=None): - self.debug = debug - - if substitutions is None: - self.substitutions = {} - else: - self.substitutions = substitutions - - def log(self, text): - if self.debug: - func_name = "" - trace = inspect.stack() - if len(trace) > 1 and len(trace[1]) > 2: - func_name = trace[1][3] + ":" - print("batesian:units:%s %s" % (func_name, text)) - - def get_units(self, debug=False): - unit_list = inspect.getmembers(self, predicate=inspect.ismethod) - unit_dict = {} - for (func_name, func) in unit_list: - if not func_name.startswith("load_"): - continue - unit_key = func_name[len("load_"):] - if len(inspect.getargs(func.__code__).args) > 1: - unit_dict[unit_key] = func(self.substitutions) - else: - unit_dict[unit_key] = func() - self.log("Generated unit '%s'" % unit_key) - - return unit_dict diff --git a/scripts/templating/build.py b/scripts/templating/build.py deleted file mode 100644 index fae4db56..00000000 --- a/scripts/templating/build.py +++ /dev/null @@ -1,286 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Batesian: A simple templating system using Jinja. - -Architecture -============ - - INPUT FILE --------+ -+-------+ +----------+ | -| units |-+ | sections |-+ V -+-------+ |-+ == used to create ==> +----------- | == provides vars to ==> Jinja - +-------+ | +----------+ | - +--------+ V -RAW DATA (e.g. json) Blobs of text OUTPUT FILE - -Units -===== -Units are random bits of unprocessed data, e.g. schema JSON files. Anything can -be done to them, from processing it with Jinja to arbitrary python processing. -They are typically dicts. - -Sections -======== -Sections are strings, typically short segments of RST. They will be dropped in -to the provided input file based on their section key name (template var) -They typically use a combination of templates + units to construct bits of RST. - -Input File -========== -The input file is a text file which is passed through Jinja along with the -section keys as template variables. - -Processing -========== -- Execute all unit functions to load units into memory and process them. -- Execute all section functions (which can now be done because the units exist) -- Process the input file through Jinja, giving it the sections as template vars. 
-""" -from batesian import AccessKeyStore - -from jinja2 import Environment, FileSystemLoader, StrictUndefined, Template, meta -from argparse import ArgumentParser, FileType -import importlib -import json -import logging -import os -import sys -from textwrap import TextWrapper - -from matrix_templates.units import TypeTableRow -from functools import reduce - - -def create_from_template(template, sections): - return template.render(sections) - -def check_unaccessed(name, store): - unaccessed_keys = store.get_unaccessed_set() - if len(unaccessed_keys) > 0: - log("Found %s unused %s keys." % (len(unaccessed_keys), name)) - log(unaccessed_keys) - -def main(input_module, files=None, out_dir=None, verbose=False, substitutions={}): - if out_dir and not os.path.exists(out_dir): - os.makedirs(out_dir) - - in_mod = importlib.import_module(input_module) - - # add a template filter to produce pretty pretty JSON - def jsonify(input, indent=None, pre_whitespace=0): - code = json.dumps(input, indent=indent, sort_keys=True) - if pre_whitespace: - code = code.replace("\n", ("\n" +" "*pre_whitespace)) - - return code - - def indent_block(input, indent): - return input.replace("\n", ("\n" + " "*indent)) - - def indent(input, indent): - return " "*indent + input - - def wrap(input, wrap=80, initial_indent=""): - if len(input) == 0: - return initial_indent - # TextWrapper collapses newlines into single spaces; we do our own - # splitting on newlines to prevent this, so that newlines can actually - # be intentionally inserted in text. - input_lines = input.split('\n\n') - wrapper = TextWrapper(initial_indent=initial_indent, width=wrap) - output_lines = [wrapper.fill(line) for line in input_lines] - - for i in range(len(output_lines)): - line = output_lines[i] - in_bullet = line.startswith("- ") - if in_bullet: - output_lines[i] = line.replace("\n", "\n " + initial_indent) - - return '\n\n'.join(output_lines) - - def fieldwidths(input, keys, defaults=[], default_width=15): - """ - A template filter to help in the generation of tables. - - Given a list of rows, returns a list giving the maximum length of the - values in each column. - - :param list[TypeTableRow|dict[str,str]] input: - a list of rows - :param list[str] keys: the keys corresponding to the table columns - :param list[int] defaults: for each column, the default column width. - :param int default_width: if ``defaults`` is shorter than ``keys``, this - will be used as a fallback - """ - def getrowattribute(row, k): - # the row may be a dict (particularly the title row, which is - # generated by the template - if not isinstance(row, TypeTableRow): - return row[k] - return getattr(row, k) - - def colwidth(key, default): - rowwidths = (len(getrowattribute(row, key)) for row in input) - return reduce(max, rowwidths, - default if default is not None else default_width) - - results = list(map(colwidth, keys, defaults)) - return results - - # make Jinja aware of the templates and filters - env = Environment( - loader=FileSystemLoader(in_mod.exports["templates"]), - undefined=StrictUndefined - ) - env.filters["jsonify"] = jsonify - env.filters["indent"] = indent - env.filters["indent_block"] = indent_block - env.filters["wrap"] = wrap - env.filters["fieldwidths"] = fieldwidths - - # load up and parse the lowest single units possible: we don't know or care - # which spec section will use it, we just need it there in memory for when - # they want it. 
- units = AccessKeyStore( - existing_data=in_mod.exports["units"]( - debug=verbose, - substitutions=substitutions, - ).get_units() - ) - - # use the units to create RST sections - sections = in_mod.exports["sections"](env, units, debug=verbose).get_sections() - - # print out valid section keys if no file supplied - if not files: - print("\nValid template variables:") - for key in sections.keys(): - sec_text = "" if (len(sections[key]) > 75) else ( - "(Value: '%s')" % sections[key] - ) - sec_info = "%s characters" % len(sections[key]) - if sections[key].count("\n") > 0: - sec_info += ", %s lines" % sections[key].count("\n") - print(" %s" % key) - print(" %s %s" % (sec_info, sec_text)) - return - - # check the input files and substitute in sections where required - for input_filename in files: - output_filename = os.path.join(out_dir, - os.path.basename(input_filename)) - process_file(env, sections, input_filename, output_filename) - - check_unaccessed("units", units) - -def process_file(env, sections, filename, output_filename): - log("Parsing input template: %s" % filename) - - with open(filename, "rb") as file_stream: - temp_str = file_stream.read().decode('UTF-8') - - # do sanity checking on the template to make sure they aren't reffing things - # which will never be replaced with a section. - ast = env.parse(temp_str) - template_vars = meta.find_undeclared_variables(ast) - unused_vars = [var for var in template_vars if var not in sections] - if len(unused_vars) > 0: - raise Exception( - "You have {{ variables }} which are not found in sections: %s" % - (unused_vars,) - ) - # process the template - temp = Template(temp_str) - output = create_from_template(temp, sections) - - # Do these substitutions outside of the ordinary templating system because - # we want them to apply to things like the underlying swagger used to - # generate the templates, not just the top-level sections. - for old, new in substitutions.items(): - output = output.replace(old, new) - - with open(output_filename, "wb") as f: - f.write(output.encode('UTF-8')) - log("Output file for: %s" % output_filename) - - -def log(line): - print("batesian: %s" % line) - -if __name__ == '__main__': - parser = ArgumentParser( - "Processes a file (typically .rst) through Jinja to replace templated "+ - "areas with section information from the provided input module. For a "+ - "list of possible template variables, add --show-template-vars." - ) - parser.add_argument( - "files", nargs="+", - help="The input files to process. These will be passed through Jinja "+ - "then output under the same name to the output directory." - ) - parser.add_argument( - "--input", "-i", - help="The python module (not file) which contains the sections/units "+ - "classes. This module must have an 'exports' dict which has "+ - "{ 'units': UnitClass, 'sections': SectionClass, "+ - "'templates': 'template/dir' }" - ) - parser.add_argument( - "--out-directory", "-o", help="The directory to output the file to."+ - " Default: /out", - default="out" - ) - parser.add_argument( - "--show-template-vars", "-s", action="store_true", - help="Show a list of all possible variables (sections) you can use in"+ - " the input file." - ) - parser.add_argument( - "--verbose", "-v", action="store_true", - help="Turn on verbose mode." 
- ) - parser.add_argument( - "--substitution", action="append", - help="Substitutions to apply to the generated output, of form NEEDLE=REPLACEMENT.", - default=[], - ) - args = parser.parse_args() - - if args.verbose: - logging.basicConfig(level=logging.DEBUG) - else: - logging.basicConfig(level=logging.INFO) - - if not args.input: - raise Exception("Missing [i]nput python module.") - - if (args.show_template_vars): - main(args.input, verbose=args.verbose) - sys.exit(0) - - substitutions = {} - for substitution in args.substitution: - parts = substitution.split("=", 1) - if len(parts) != 2: - raise Exception("Invalid substitution") - substitutions[parts[0]] = parts[1] - - main( - args.input, files=args.files, out_dir=args.out_directory, - substitutions=substitutions, verbose=args.verbose - ) diff --git a/scripts/templating/matrix_templates/__init__.py b/scripts/templating/matrix_templates/__init__.py deleted file mode 100644 index b81c5a30..00000000 --- a/scripts/templating/matrix_templates/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from .sections import MatrixSections -from .units import MatrixUnits -import os - -exports = { - "units": MatrixUnits, - "sections": MatrixSections, - "templates": os.path.join(os.path.dirname(os.path.abspath(__file__)), "templates") -} \ No newline at end of file diff --git a/scripts/templating/matrix_templates/sections.py b/scripts/templating/matrix_templates/sections.py deleted file mode 100644 index 7000916b..00000000 --- a/scripts/templating/matrix_templates/sections.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Contains all the sections for the spec.""" -from batesian import AccessKeyStore -from batesian.sections import Sections -import inspect -import json -import os -import logging -import re - - -logger = logging.getLogger(__name__) - -class MatrixSections(Sections): - - # pass through git ver so it'll be dropped in the input file - def render_git_version(self): - return self.units.get("git_version")["string"] - - def render_git_rev(self): - return self.units.get("git_version")["revision"] - - def render_changelogs(self): - rendered = {} - changelogs = self.units.get("changelogs") - for spec, changelog_text in changelogs.items(): - spec_var = "%s_changelog" % spec - logger.info("Rendering changelog for spec: %s" % spec) - rendered[spec_var] = changelog_text - return rendered - - def _render_events(self, filterFn, sortFn): - template = self.env.get_template("events.tmpl") - examples = self.units.get("event_examples") - schemas = self.units.get("event_schemas") - subtitle_title_char = self.units.get("spec_targets")[ - "relative_title_styles" - ]["subtitle"] - sections = [] - for event_name in sortFn(schemas): - if not filterFn(event_name): - continue - sections.append(template.render( - examples=examples[event_name], - event=schemas[event_name], - title_kind=subtitle_title_char - )) - return "\n\n".join(sections) - - def _render_http_api_group(self, group, sortPathList=None): - template = self.env.get_template("http-api.tmpl") - http_api = self.units.get("swagger_apis")[group]["__meta"] - subtitle_title_char = self.units.get("spec_targets")[ - "relative_title_styles" - ]["subtitle"] - sections = [] - endpoints = [] - if sortPathList: - # list of substrings to sort by - sorted_endpoints = [] - for path_substr in sortPathList: - for e in http_api["endpoints"]: - if path_substr in e["path"]: - sorted_endpoints.append(e) # could have multiple - # dump rest - rest = [ - e for e in http_api["endpoints"] if e not in sorted_endpoints - ] - endpoints = sorted_endpoints + rest - else: - # sort alphabetically based on path - endpoints = http_api["endpoints"] - - for endpoint in endpoints: - sections.append(template.render( - endpoint=endpoint, - title_kind=subtitle_title_char - )) - return "\n\n".join(sections) - - - # Special function: Returning a dict will specify multiple sections where - # the key is the section name and the value is the value of the section - def render_group_http_apis(self): - # map all swagger_apis to the form $GROUP_http_api - swagger_groups = self.units.get("swagger_apis").keys() - renders = {} - for group in swagger_groups: - sortFnOrPathList = None - if group == "presence_cs": - sortFnOrPathList = ["status"] - elif group == "profile_cs": - sortFnOrPathList=["displayname", "avatar_url"] - renders[group + "_http_api"] = self._render_http_api_group( - group, sortFnOrPathList - ) - return renders - - # Special function: Returning a dict will specify multiple sections where - # the key is the section name and the value is the value of the section - def render_group_events(self): - # map all event schemata to the form $EVENTTYPE_event with s/.#/_/g - # e.g. 
m_room_topic_event or m_room_message_m_text_event - schemas = self.units.get("event_schemas") - renders = {} - for event_type in schemas: - underscored_event_type = event_type.replace(".", "_").replace("$", "_") - renders[underscored_event_type + "_event"] = self._render_events( - lambda x: x == event_type, sorted - ) - return renders - - def render_room_events(self): - def filterFn(eventType): - return ( - eventType.startswith("m.room") and - not eventType.startswith("m.room.message$m.") - ) - return self._render_events(filterFn, sorted) - - def render_msgtype_events(self): - template = self.env.get_template("msgtypes.tmpl") - examples = self.units.get("event_examples") - schemas = self.units.get("event_schemas") - subtitle_title_char = self.units.get("spec_targets")[ - "relative_title_styles" - ]["subtitle"] - sections = [] - msgtype_order = [ - "m.room.message$m.text", "m.room.message$m.emote", - "m.room.message$m.notice", "m.room.message$m.image", - "m.room.message$m.file" - ] - excluded_types = [ - # We exclude server notices from here because we handle them in a - # dedicated module. We do not want to confuse developers this early - # in the spec. - "m.room.message$m.server_notice", - ] - other_msgtypes = [ - k for k in schemas.keys() if k.startswith("m.room.message$") and - k not in msgtype_order and k not in excluded_types - ] - for event_name in (msgtype_order + other_msgtypes): - if not event_name.startswith("m.room.message$m."): - continue - sections.append(template.render( - example=examples[event_name][0], - event=schemas[event_name], - title_kind=subtitle_title_char - )) - return "\n\n".join(sections) - - def render_voip_events(self): - def filterFn(eventType): - return eventType.startswith("m.call") - def sortFn(eventTypes): - ordering = [ - "m.call.invite", "m.call.candidates", "m.call.answer", - "m.call.hangup" - ] - rest = [ - k for k in eventTypes if k not in ordering - ] - return ordering + rest - return self._render_events(filterFn, sortFn) - - def render_presence_events(self): - def filterFn(eventType): - return eventType.startswith("m.presence") - return self._render_events(filterFn, sorted) - - def _render_ce_type(self, type): - template = self.env.get_template("common-event-fields.tmpl") - ce_types = self.units.get("common_event_fields") - subtitle_title_char = self.units.get("spec_targets")[ - "relative_title_styles" - ]["subtitle"] - return template.render( - common_event=ce_types[type], title_kind=subtitle_title_char - ) - - def render_common_event_fields(self): - return self._render_ce_type("event") - - def render_common_room_event_fields(self): - return self._render_ce_type("room_event") - - def render_common_state_event_fields(self): - return self._render_ce_type("state_event") - - def render_apis(self): - template = self.env.get_template("apis.tmpl") - apis = self.units.get("apis") - return template.render(apis=apis) - - def render_unstable_warnings(self): - rendered = {} - blocks = self.units.get("unstable_warnings") - for var, text in blocks.items(): - rendered["unstable_warning_block_" + var] = text - return rendered - - def render_swagger_definition(self): - rendered = {} - template = self.env.get_template("schema-definition.tmpl") - subtitle_title_char = self.units.get("spec_targets")[ - "relative_title_styles" - ]["subtitle"] - definitions = self.units.get("swagger_definitions") - for group, swagger_def in definitions.items(): - rendered["definition_" + group] = template.render( - definition=swagger_def['definition'], - 
examples=swagger_def['examples'], - title_kind=subtitle_title_char) - return rendered - - def render_sas_emoji_table(self): - emoji = self.units.get("sas_emoji") - rendered = ".. csv-table::\n" - rendered += " :header: \"Number\", \"Emoji\", \"Unicode\", \"Description\"\n" - rendered += " :widths: 10, 10, 15, 20\n" - rendered += "\n" - for row in emoji: - rendered += " %d, \"%s\", \"``%s``\", \"%s\"\n" % ( - row['number'], - row['emoji'], - row['unicode'], - row['description'], - ) - rendered += "\n" - return rendered diff --git a/scripts/templating/matrix_templates/templates/apis.tmpl b/scripts/templating/matrix_templates/templates/apis.tmpl deleted file mode 100644 index 943aadc8..00000000 --- a/scripts/templating/matrix_templates/templates/apis.tmpl +++ /dev/null @@ -1,4 +0,0 @@ -{% import 'tables.tmpl' as tables -%} - -{{ tables.paramtable(apis.rows, ["API", "Version", "Description"]) }} - diff --git a/scripts/templating/matrix_templates/templates/common-event-fields.tmpl b/scripts/templating/matrix_templates/templates/common-event-fields.tmpl deleted file mode 100644 index f62f59ba..00000000 --- a/scripts/templating/matrix_templates/templates/common-event-fields.tmpl +++ /dev/null @@ -1,12 +0,0 @@ -{% import 'tables.tmpl' as tables -%} - -{{common_event.title}} Fields -{{(7 + common_event.title | length) * title_kind}} - -{{common_event.desc}} - -{% for table in common_event.tables %} - -{{ tables.paramtable(table.rows, ["Key", "Type", "Description"], (table.title or "")) }} - -{% endfor %} diff --git a/scripts/templating/matrix_templates/templates/events.tmpl b/scripts/templating/matrix_templates/templates/events.tmpl deleted file mode 100644 index f55be73f..00000000 --- a/scripts/templating/matrix_templates/templates/events.tmpl +++ /dev/null @@ -1,32 +0,0 @@ -{% import 'tables.tmpl' as tables -%} - -{% if (event.type_with_msgtype) %} -``{{event.type_with_msgtype}}`` -{{(4 + event.type_with_msgtype | length) * title_kind}} -{% endif -%} - -{% if (not event.type_with_msgtype) %} -``{{event.type}}`` -{{(4 + event.type | length) * title_kind}} -{% endif -%} - -{% if (event.typeof | length) %} -*{{event.typeof}}* - {{event.typeof_info | indent_block(4)}} - -{% endif -%} - -{{event.desc}} - -{% for table in event.content_fields %} - -{{ tables.paramtable(table.rows, [(table.title or "Content") ~ " Key", "Type", "Description"], (table.title or "")) }} - -{% endfor %} -Example{% if examples | length > 1 %}s{% endif %}: - -{% for example in examples %} -.. code:: json - - {{example | jsonify(4, 4)}} -{% endfor %} diff --git a/scripts/templating/matrix_templates/templates/http-api.tmpl b/scripts/templating/matrix_templates/templates/http-api.tmpl deleted file mode 100644 index d2ee3ff7..00000000 --- a/scripts/templating/matrix_templates/templates/http-api.tmpl +++ /dev/null @@ -1,94 +0,0 @@ -{% import 'tables.tmpl' as tables -%} - -{% if "deprecated" in endpoint and endpoint.deprecated -%} -Deprecated: ``{{endpoint.method}} {{endpoint.path}}`` -{{(17 + (endpoint.path | length) + (endpoint.method | length)) * title_kind}} - -.. WARNING:: - This API is deprecated and will be removed from a future release. - -{% else %} -``{{endpoint.method}} {{endpoint.path}}`` -{{(5 + (endpoint.path | length) + (endpoint.method | length)) * title_kind}} -{% endif -%} - -{{endpoint.desc}} - -{{":Rate-limited: Yes." if endpoint.rate_limited else ":Rate-limited: No." }} -{{":Requires auth: Yes." if endpoint.requires_auth else ":Requires auth: No." }} - -.. 
class:: httpheaders - - Request format: - -{% if (endpoint.req_param_by_loc | length) %} -{{ tables.split_paramtable(endpoint.req_param_by_loc) }} -{% if (endpoint.req_body_tables) %} -{% for table in endpoint.req_body_tables -%} -{{ tables.paramtable(table.rows, caption=(table.title or "")) }} - -{% endfor -%} -{% endif -%} - -{% else %} -`No parameters` -{% endif %} - -{% if endpoint.res_headers is not none -%} - -.. class:: httpheaders - - Response headers: - -{{ tables.paramtable(endpoint.res_headers.rows) }} -{% endif -%} - -{% if endpoint.res_tables|length > 0 -%} - -.. class:: httpheaders - - Response format: - -{% for table in endpoint.res_tables -%} - -{{ tables.paramtable(table.rows, caption=(table.title or "")) }} - - -{% endfor %} -{% endif -%} - -.. class:: httpheaders - - Example request: - -.. code:: http - - {{endpoint.example.req | indent_block(2)}} - -{% if endpoint.responses|length > 0 -%} - -.. class:: httpheaders - - Response{{"s" if endpoint.responses|length > 1 else "" }}: - -{% endif -%} - -{% for res in endpoint.responses -%} - -**Status code {{res["code"]}}:** - -{{res["description"]}} - -{% if res["example"] -%} - -.. class:: httpheaders - - Example - -.. code:: json - - {{res["example"] | indent_block(2)}} - -{% endif -%} - -{% endfor %} diff --git a/scripts/templating/matrix_templates/templates/msgtypes.tmpl b/scripts/templating/matrix_templates/templates/msgtypes.tmpl deleted file mode 100644 index 060a7db0..00000000 --- a/scripts/templating/matrix_templates/templates/msgtypes.tmpl +++ /dev/null @@ -1,15 +0,0 @@ -{% import 'tables.tmpl' as tables -%} - -``{{event.msgtype}}`` -{{(4 + event.msgtype | length) * title_kind}} -{{event.desc | wrap(80)}} -{% for table in event.content_fields -%} - -{{ tables.paramtable(table.rows, [(table.title or "Content") ~ " Key", "Type", "Description"], (table.title or "")) }} - -{% endfor %} -Example: - -.. code:: json - - {{example | jsonify(4, 4)}} diff --git a/scripts/templating/matrix_templates/templates/schema-definition.tmpl b/scripts/templating/matrix_templates/templates/schema-definition.tmpl deleted file mode 100644 index e2be12e8..00000000 --- a/scripts/templating/matrix_templates/templates/schema-definition.tmpl +++ /dev/null @@ -1,21 +0,0 @@ -{% import 'tables.tmpl' as tables -%} - -``{{definition.title}}`` schema -{{(11 + definition.title | length) * title_kind}} - -{% if 'description' in definition %} -{{definition.description}} -{% endif %} - -{% for table in definition.tables -%} -{{"``"+table.title+"``" if table.title else "" }} -{{ tables.paramtable(table.rows) }} -{% endfor %} - -Example{% if examples | length > 1 %}s{% endif %}: - -{% for example in examples %} -.. code:: json - - {{example | jsonify(4, 4)}} -{% endfor %} diff --git a/scripts/templating/matrix_templates/templates/tables.tmpl b/scripts/templating/matrix_templates/templates/tables.tmpl deleted file mode 100644 index fde546a7..00000000 --- a/scripts/templating/matrix_templates/templates/tables.tmpl +++ /dev/null @@ -1,106 +0,0 @@ -{# - # A set of macros for generating RST tables - #} - - -{# - # write a table for a list of parameters. - # - # 'rows' is the list of parameters. Each row should be a TypeTableRow. - #} -{% macro paramtable(rows, titles=["Parameter", "Type", "Description"], caption="") -%} -{{ split_paramtable({None: rows}, titles, caption) }} -{% endmacro %} - - -{# - # write a table for the request parameters, split by location. - # 'rows_by_loc' is a map from location to a list of parameters. 
- # - # As a special case, if a key of 'rows_by_loc' is 'None', no title row is - # written for that location. This is used by the standard 'paramtable' macro. - #} -{% macro split_paramtable(rows_by_loc, - titles=["Parameter", "Type", "Description"], caption="") -%} - -{% set rowkeys = ['key', 'title', 'desc'] %} -{% set titlerow = {'key': titles[0], 'title': titles[1], 'desc': titles[2]} %} - -{# We need the rows flattened into a single list. Abuse the 'sum' filter to - # join arrays instead of add numbers. -#} -{% set flatrows = rows_by_loc.values()|sum(start=[]) -%} - -{# Figure out the widths of the columns. The last column is always 50 characters - # wide; the others default to 10, but stretch if there is wider text in the - # column. -#} -{% set fieldwidths = (([titlerow] + flatrows) | - fieldwidths(rowkeys[0:-1], [10, 10])) + [50] -%} - -{{".. table:: "}}{{ caption }} -{{" :widths: auto"}} -{{""}} -{{ tableheader(fieldwidths) }} -{{ tablerow(fieldwidths, titlerow, rowkeys) }} -{{ tableheader(fieldwidths) }} -{% for loc in rows_by_loc -%} - -{% if loc != None -%} -{{ tablespan(fieldwidths, "*" ~ loc ~ " parameters*") }} -{% endif -%} - -{% for row in rows_by_loc[loc] -%} -{{ tablerow(fieldwidths, row, rowkeys) }} -{% endfor -%} -{% endfor -%} - -{{ tableheader(fieldwidths) }} -{% endmacro %} - - - -{# - # Write a table header row, for the given column widths - #} -{% macro tableheader(widths) -%} -{{" "}}{% for arg in widths -%} -{{"="*arg}} {% endfor -%} -{% endmacro %} - - - -{# - # Write a normal table row. Each of 'widths' and 'keys' should be sequences - # of the same length; 'widths' defines the column widths, and 'keys' the - # attributes of 'row' to look up for values to put in the columns. - #} -{% macro tablerow(widths, row, keys) -%} -{{" "}}{% for key in keys -%} -{% set value=row[key] -%} -{% if not loop.last -%} - {# the first few columns need space after them -#} - {{ value }}{{" "*(1+widths[loop.index0]-value|length) -}} -{% else -%} - {# the last column needs wrapping and indenting (by the sum of the widths of - the preceding columns, plus the number of preceding columns (for the - separators)) -#} - {{ value | wrap(widths[loop.index0]) | - indent_block(widths[0:-1]|sum + loop.index0 + 2) -}} -{% endif -%} -{% endfor -%} -{% endmacro %} - - - - -{# - # write a tablespan row. This is a single value which spans the entire table. - #} -{% macro tablespan(widths, value) -%} -{{" "}}{{value}} -{# we write a trailing space to stop the separator being misinterpreted - # as a header line. -#} -{{" "}}{{"-"*(widths|sum + widths|length -1)}} {% endmacro %} - - - - diff --git a/scripts/templating/matrix_templates/units.py b/scripts/templating/matrix_templates/units.py deleted file mode 100644 index ff614e34..00000000 --- a/scripts/templating/matrix_templates/units.py +++ /dev/null @@ -1,1153 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Contains all the units for the spec. 
- -This file loads swagger and JSON schema files and parses out the useful bits -and returns them as Units for use in Batesian. - -For the actual conversion of data -> RST (including templates), see the sections -file instead. -""" -from batesian.units import Units -from collections import OrderedDict -import logging -import json -import os -import os.path -import re -import subprocess -import sys -import yaml -from functools import reduce -from six.moves.urllib.parse import urlencode, quote - -matrix_doc_dir=reduce(lambda acc,_: os.path.dirname(acc), - range(1, 5), os.path.abspath(__file__)) - -HTTP_APIS = { - os.path.join(matrix_doc_dir, "api/application-service"): "as", - os.path.join(matrix_doc_dir, "api/client-server"): "cs", - os.path.join(matrix_doc_dir, "api/identity"): "is", - os.path.join(matrix_doc_dir, "api/push-gateway"): "push", - os.path.join(matrix_doc_dir, "api/server-server"): "ss", -} -SWAGGER_DEFINITIONS = { - os.path.join(matrix_doc_dir, "api/application-service/definitions"): "as", - os.path.join(matrix_doc_dir, "api/client-server/definitions"): "cs", - os.path.join(matrix_doc_dir, "api/identity/definitions"): "is", - os.path.join(matrix_doc_dir, "api/push-gateway/definitions"): "push", - os.path.join(matrix_doc_dir, "api/server-server/definitions"): "ss", -} -EVENT_EXAMPLES = os.path.join(matrix_doc_dir, "event-schemas/examples") -EVENT_SCHEMA = os.path.join(matrix_doc_dir, "event-schemas/schema") -CORE_EVENT_SCHEMA = os.path.join(matrix_doc_dir, "event-schemas/schema/core-event-schema") -CHANGELOG_DIR = os.path.join(matrix_doc_dir, "changelogs") -TARGETS = os.path.join(matrix_doc_dir, "specification/targets.yaml") - -ROOM_EVENT = "core-event-schema/room_event.yaml" -STATE_EVENT = "core-event-schema/state_event.yaml" - -SAS_EMOJI_JSON = os.path.join(matrix_doc_dir, "data-definitions/sas-emoji.json") - -logger = logging.getLogger(__name__) - -# a yaml Loader which loads mappings into OrderedDicts instead of regular -# dicts, so that we preserve the ordering of properties from the api files. 
-# -# with thanks to http://stackoverflow.com/a/21912744/637864 -class OrderedLoader(yaml.Loader): - pass -def construct_mapping(loader, node): - loader.flatten_mapping(node) - pairs = loader.construct_pairs(node) - return OrderedDict(pairs) -OrderedLoader.add_constructor( - yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, - construct_mapping) - - -class TypeTable(object): - """Describes a table documenting an object type - - Attributes: - title(str|None): Title of the table - normally the object type - desc(str|None): description of the object - rows(list[TypeTableRow]): the rows in the table - """ - def __init__(self, title=None, desc=None, rows=[]): - self.title=title - self.desc=desc - self._rows = [] - for row in rows: - self.add_row(row) - - def add_row(self, row): - if not isinstance(row, TypeTableRow): - raise ValueError("Can only add TypeTableRows to TypeTable") - - self._rows.append(row) - - def __getattr__(self, item): - if item == 'rows': - return list(self._rows) - return super(TypeTable, self).__getattr__(item) - - def __repr__(self): - return "TypeTable[%s, rows=%s]" % (self.title, self._rows) - - -class TypeTableRow(object): - """Describes an object field defined in the json schema - """ - def __init__(self, key, title, desc, required=False): - self.key = key - self.title = title - self.desc = desc - self.required = required - - def __repr__(self): - return "TypeTableRow[%s: %s]" % (self.key, self.desc) - - -def resolve_references(path, schema): - if isinstance(schema, dict): - # do $ref first - if '$ref' in schema: - value = schema['$ref'] - path = os.path.join(os.path.dirname(path), value) - with open(path, encoding="utf-8") as f: - ref = yaml.load(f, OrderedLoader) - result = resolve_references(path, ref) - del schema['$ref'] - else: - result = OrderedDict() - - for key, value in schema.items(): - result[key] = resolve_references(path, value) - return result - elif isinstance(schema, list): - return [resolve_references(path, value) for value in schema] - else: - return schema - - -def inherit_parents(obj): - """ - Recurse through the 'allOf' declarations in the object - """ - logger.debug("inherit_parents %r" % obj) - - parents = obj.get("allOf", []) - if not parents: - return obj - - result = {} - - # settings defined in the child take priority over the parents, so we - # iterate through the parents first, and then overwrite with the settings - # from the child. - for p in list(map(inherit_parents, parents)) + [obj]: - # child blats out type, title and description - for key in ('type', 'title', 'description'): - if p.get(key): - result[key] = p[key] - - # other fields get merged - for key in ('required', ): - if p.get(key): - result.setdefault(key, []).extend(p[key]) - - for key in ('properties', 'additionalProperties', 'patternProperties'): - if p.get(key): - result.setdefault(key, OrderedDict()).update(p[key]) - - return result - - -def get_json_schema_object_fields(obj, enforce_title=False): - """Parse a JSON schema object definition - - Args: - obj(dict): definition from the JSON schema file. $refs should already - have been resolved. - enforce_title (bool): if True, and the definition has no "title", - the 'title' result will be set to 'NO_TITLE' (otherwise it will be - set to None) - - Returns: - dict: with the following fields: - - title (str): title (normally the type name) for the object - - tables (list[TypeTable]): list of the tables for the type - definition - """ - # Algorithm: - # f.e. 
property => add field info (if field is object then recurse) - if obj.get("type") != "object": - raise Exception( - "get_json_schema_object_fields: Object %s isn't an object." % obj - ) - - obj_title = obj.get("title") - - logger.debug("Processing object with title '%s'", obj_title) - - additionalProps = obj.get("additionalProperties") - props = obj.get("properties") - if additionalProps and not props: - # not "really" an object, just a KV store - logger.debug("%s is a pseudo-object", obj_title) - - key_type = additionalProps.get("x-pattern", "string") - res = process_data_type(additionalProps) - tables = res["tables"] - val_title = res["title"] - gen_title = "{%s: %s}" % (key_type, val_title) - if res.get("enum_desc") and val_title != "enum": - # A map to enum needs another table with enum description - tables.append(TypeTable( - title=val_title, - rows=[TypeTableRow(key="(mapped value)", title="enum", desc=res["desc"])] - )) - return { - "title": obj_title if obj_title else gen_title, - "tables": tables, - } - - if not props: - props = obj.get("patternProperties") - if props: - # try to replace horrible regex key names with pretty x-pattern ones - for key_name in props.keys(): - pretty_key = props[key_name].get("x-pattern") - if pretty_key: - props[pretty_key] = props[key_name] - del props[key_name] - - - - # Sometimes you just want to specify that a thing is an object without - # doing all the keys. - if not props: - return { - "title": obj_title if obj_title else 'object', - "tables": [], - } - - if enforce_title and not obj_title: - # Force a default titile of "NO_TITLE" to make it obvious in the - # specification output which parts of the schema are missing a title - obj_title = 'NO_TITLE' - - required_keys = set(obj.get("required", [])) - - first_table_rows = [] - tables = [] - - for key_name in props: - try: - logger.debug("Processing property %s.%s", obj_title, key_name) - required = key_name in required_keys - res = process_data_type(props[key_name], required) - - first_table_rows.append(TypeTableRow( - key=key_name, - title=res["title"], - required=required, - desc=res["desc"], - )) - tables.extend(res["tables"]) - logger.debug("Done property %s" % key_name) - - except Exception as e: - e2 = Exception("Error reading property %s.%s: %s" % - (obj_title, key_name, str(e))) - # throw the new exception with the old stack trace, so that - # we don't lose information about where the error occurred. - raise e2.with_traceback(sys.exc_info()[2]) - - tables.insert(0, TypeTable(title=obj_title, rows=first_table_rows)) - - for table in tables: - assert isinstance(table, TypeTable) - - return { - "title": obj_title, - "tables": tables, - } - - -# process a data type definition. 
returns a dictionary with the keys: -# title: stringified type name -# desc: description -# enum_desc: description of permissible enum fields -# is_object: true if the data type is an object -# tables: list of additional table definitions -def process_data_type(prop, required=False, enforce_title=True): - prop = inherit_parents(prop) - - prop_type = prop.get('oneOf', prop.get('type', [])) - assert prop_type - - tables = [] - enum_desc = None - is_object = False - - if prop_type == "object": - res = get_json_schema_object_fields( - prop, - enforce_title=enforce_title, - ) - prop_title = res["title"] - tables = res["tables"] - is_object = True - - elif prop_type == "array": - items = prop["items"] - # Items can be a list of schemas or a schema itself - # http://json-schema.org/latest/json-schema-validation.html#rfc.section.6.4 - if isinstance(items, list): - nested_titles = [] - for i in items: - nested = process_data_type(i) - tables.extend(nested['tables']) - nested_titles.append(nested['title']) - prop_title = "[%s]" % (", ".join(nested_titles), ) - else: - nested = process_data_type(prop["items"]) - prop_title = "[%s]" % nested["title"] - tables = nested["tables"] - enum_desc = nested["enum_desc"] - - elif isinstance(prop_type, list): - prop_title = [] - for t in prop_type: - if isinstance(t, dict): - nested = process_data_type(t) - tables.extend(nested['tables']) - prop_title.append(nested['title']) - # Assuming there's at most one enum among type options - enum_desc = nested['enum_desc'] - if enum_desc: - enum_desc = "%s if the type is enum" % enum_desc - else: - prop_title.append(t) - else: - prop_title = prop_type - - if prop.get("enum"): - prop_title = prop.get("title", "enum") - if len(prop["enum"]) > 1: - enum_desc = ( - "One of: %s" % json.dumps(prop["enum"]) - ) - else: - enum_desc = ( - "Must be '%s'." % prop["enum"][0] - ) - - if isinstance(prop_title, list): - prop_title = " or ".join(prop_title) - - rq = "**Required.**" if required else None - desc = " ".join(x for x in [rq, prop.get("description"), enum_desc] if x) - - for table in tables: - assert isinstance(table, TypeTable) - - return { - "title": prop_title, - "desc": desc, - "enum_desc": enum_desc, - "is_object": is_object, - "tables": tables, - } - -def deduplicate_tables(tables): - # the result may contain duplicates, if objects are referred to more than - # once. Filter them out. - # - # Go through the tables backwards so that we end up with a breadth-first - # rather than depth-first ordering. - - titles = set() - filtered = [] - for table in reversed(tables): - if table.title in titles: - continue - - titles.add(table.title) - filtered.append(table) - filtered.reverse() - - return filtered - -def get_tables_for_schema(schema): - pv = process_data_type(schema, enforce_title=False) - return deduplicate_tables(pv["tables"]) - -def get_tables_for_response(schema): - pv = process_data_type(schema, enforce_title=False) - tables = deduplicate_tables(pv["tables"]) - - # make up the first table, with just the 'body' row in, unless the response - # is an object, in which case there's little point in having one. 
- if not pv["is_object"]: - first_table_row = TypeTableRow( - key="", title=pv["title"], desc=pv["desc"], - ) - tables.insert(0, TypeTable(None, rows=[first_table_row])) - - logger.debug("response: %r" % tables) - - return tables - -def get_example_for_schema(schema): - """Returns a python object representing a suitable example for this object""" - schema = inherit_parents(schema) - - if 'example' in schema: - example = schema['example'] - return example - - proptype = schema['type'] - - if proptype == 'object': - if 'properties' not in schema: - raise Exception('"object" property has neither properties nor example') - res = OrderedDict() - for prop_name, prop in schema['properties'].items(): - logger.debug("Parsing property %r" % prop_name) - prop_example = get_example_for_schema(prop) - res[prop_name] = prop_example - return res - - if proptype == 'array': - if 'items' not in schema: - raise Exception('"array" property has neither items nor example') - items = schema['items'] - if isinstance(items, list): - return [get_example_for_schema(i) for i in items] - return [get_example_for_schema(items)] - - if proptype == 'integer': - return 0 - - if proptype == 'string': - return proptype - - raise Exception("Don't know to make an example %s" % proptype) - -def get_example_for_param(param): - """Returns a stringified example for a parameter""" - if 'x-example' in param: - return param['x-example'] - schema = param.get('schema') - if not schema: - return None - - exampleobj = None - if 'example' in schema: - exampleobj = schema['example'] - - if exampleobj is None: - exampleobj = get_example_for_schema(schema) - - return json.dumps(exampleobj, indent=2) - -def get_example_for_response(response): - """Returns a stringified example for a response""" - exampleobj = None - if 'examples' in response: - exampleobj = response["examples"].get("application/json") - - if exampleobj is None: - schema = response.get('schema') - if schema: - if schema['type'] == 'file': - # no example for 'file' responses - return None - exampleobj = get_example_for_schema(schema) - - if exampleobj is None: - return None - - return json.dumps(exampleobj, indent=2) - -class MatrixUnits(Units): - def _load_swagger_meta(self, api, group_name): - endpoints = [] - base_path = api.get("basePath", "") - - for path in api["paths"]: - for method in api["paths"][path]: - logger.info(" ------- Endpoint: %s %s ------- " % (method, path)) - - try: - endpoint = self._handle_endpoint( - api["paths"][path][method], method, - base_path.rstrip("/") + path) - - endpoints.append(endpoint) - except Exception as e: - logger.error("Error handling endpoint %s %s: %s", - method, path, e) - raise - return { - "base": api.get("basePath").rstrip("/"), - "group": group_name, - "endpoints": endpoints, - } - - def _handle_endpoint(self, endpoint_swagger, method, path): - endpoint = { - "title": endpoint_swagger.get("summary", ""), - "deprecated": endpoint_swagger.get("deprecated", False), - "desc": endpoint_swagger.get("description", - endpoint_swagger.get("summary", "")), - "method": method.upper(), - "path": path.strip(), - "requires_auth": "security" in endpoint_swagger, - "rate_limited": 429 in endpoint_swagger.get("responses", {}), - "req_param_by_loc": {}, - "req_body_tables": [], - "res_headers": None, - "res_tables": [], - "responses": [], - "example": { - "req": "", - } - } - path_template = path - example_query_params = [] - example_body = "" - example_mime = "application/json" - for param in endpoint_swagger.get("parameters", []): - # even 
-            param_name = param["name"]
-
-            try:
-                param_loc = param["in"]
-
-                if param_loc == "body":
-                    self._handle_body_param(param, endpoint)
-                    example_body = get_example_for_param(param)
-                    continue
-
-                if param_loc == "header":
-                    if param["name"] == "Content-Type" and param["x-example"]:
-                        example_mime = param["x-example"]
-
-                # description
-                desc = param.get("description", "")
-                if param.get("required"):
-                    desc = "**Required.** " + desc
-
-                # assign value expected for this param
-                val_type = param.get("type") # integer/string
-
-                if val_type == "array":
-                    items = param.get("items")
-                    if items:
-                        if isinstance(items, list):
-                            types = ", ".join(i.get("type") for i in items)
-                            val_type = "[%s]" % (types,)
-                        else:
-                            val_type = "[%s]" % items.get("type")
-
-                if param.get("enum"):
-                    val_type = "enum"
-                    desc += (
-                        " One of: %s" % json.dumps(param.get("enum"))
-                    )
-
-                endpoint["req_param_by_loc"].setdefault(param_loc, []).append(
-                    TypeTableRow(key=param_name, title=val_type, desc=desc),
-                )
-
-                example = get_example_for_param(param)
-                if example is None:
-                    continue
-
-                if param_loc == "path":
-                    path_template = path_template.replace(
-                        "{%s}" % param_name, quote(example)
-                    )
-                elif param_loc == "query":
-                    if type(example) == list:
-                        for value in example:
-                            example_query_params.append((param_name, value))
-                    else:
-                        example_query_params.append((param_name, example))
-
-            except Exception as e:
-                raise Exception("Error handling parameter %s" % param_name, e)
-        # endfor[param]
-        good_response = None
-        endpoint_status_codes = endpoint_swagger.get("responses", {}).keys()
-        # Check to see if any of the status codes are strings ("4xx") and if
-        # so convert everything to a string to avoid comparing ints and strings.
-        has_string_status = False
-        for code in endpoint_status_codes:
-            if isinstance(code, str):
-                has_string_status = True
-                break
-        if has_string_status:
-            endpoint_status_codes = [str(i) for i in endpoint_status_codes]
-        for code in sorted(endpoint_status_codes):
-            # Convert numeric responses to ints, assuming they got converted
-            # above.
-            if isinstance(code, str) and code.isdigit():
-                code = int(code)
-            res = endpoint_swagger["responses"][code]
-            if not good_response and code == 200:
-                good_response = res
-            description = res.get("description", "")
-            example = get_example_for_response(res)
-            endpoint["responses"].append({
-                "code": code,
-                "description": description,
-                "example": example,
-            })
-
-        # add response params if this API has any.
-        if good_response:
-            if "schema" in good_response:
-                endpoint["res_tables"] = get_tables_for_response(
-                    good_response["schema"]
-                )
-            if "headers" in good_response:
-                headers = TypeTable()
-                for (header_name, header) in good_response["headers"].items():
-                    headers.add_row(
-                        TypeTableRow(key=header_name, title=header["type"],
-                                     desc=header["description"]),
-                    )
-                endpoint["res_headers"] = headers
-        query_string = "" if len(
-            example_query_params) == 0 else "?" + urlencode(
-            example_query_params)
-        if example_body:
-            endpoint["example"][
-                "req"] = "%s %s%s HTTP/1.1\nContent-Type: %s\n\n%s" % (
-                method.upper(), path_template, query_string, example_mime, example_body
-            )
-        else:
-            endpoint["example"]["req"] = "%s %s%s HTTP/1.1\n\n" % (
-                method.upper(), path_template, query_string
-            )
-        return endpoint
-
-    def _handle_body_param(self, param, endpoint_data):
-        """Update endpoint_data object with the details of the body param
-        :param string filepath path to the yaml
-        :param dict param the parameter data from the yaml
-        :param dict endpoint_data dictionary of endpoint data to be updated
-        """
-        try:
-            schema = inherit_parents(param["schema"])
-            if schema["type"] != "object":
-                logger.warn(
-                    "Unsupported body type %s for %s %s", schema["type"],
-                    endpoint_data["method"], endpoint_data["path"]
-                )
-                return
-
-            req_body_tables = get_tables_for_schema(schema)
-
-            if req_body_tables == []:
-                # no fields defined for the body.
-                return
-
-            # put the top-level parameters into 'req_param_by_loc', and the others
-            # into 'req_body_tables'
-            body_params = endpoint_data['req_param_by_loc'].setdefault("JSON body",[])
-            body_params.extend(req_body_tables[0].rows)
-
-            body_tables = req_body_tables[1:]
-            endpoint_data['req_body_tables'].extend(body_tables)
-
-        except Exception as e:
-            e2 = Exception(
-                "Error decoding body of API endpoint %s %s: %s" %
-                (endpoint_data["method"], endpoint_data["path"], e)
-            )
-            raise e2.with_traceback(sys.exc_info()[2])
-
-
-    def load_swagger_apis(self):
-        apis = {}
-        for path, suffix in HTTP_APIS.items():
-            for filename in os.listdir(path):
-                if not filename.endswith(".yaml"):
-                    continue
-                filepath = os.path.join(path, filename)
-                logger.info("Reading swagger API: %s" % filepath)
-                with open(filepath, "r", encoding="utf-8") as f:
-                    # strip .yaml
-                    group_name = filename[:-5].replace("-", "_")
-                    group_name = "%s_%s" % (group_name, suffix)
-                    api = yaml.load(f, OrderedLoader)
-                    api = resolve_references(filepath, api)
-                    api["__meta"] = self._load_swagger_meta(
-                        api, group_name
-                    )
-                    apis[group_name] = api
-        return apis
-
-
-    def load_swagger_definitions(self):
-        defs = {}
-        for path, prefix in SWAGGER_DEFINITIONS.items():
-            self._load_swagger_definitions_in_dir(defs, path, prefix)
-        return defs
-
-    def _load_swagger_definitions_in_dir(self, defs, path, prefix, recurse=True):
-        if not os.path.exists(path):
-            return defs
-        for filename in os.listdir(path):
-            filepath = os.path.join(path, filename)
-            if os.path.isdir(filepath) and recurse:
-                safe_name = re.sub(r"[^a-zA-Z0-9_]", "_", filename)
-                dir_prefix = "_".join([prefix, safe_name])
-                # We don't recurse because we have to stop at some point
-                self._load_swagger_definitions_in_dir(
-                    defs, filepath, dir_prefix, recurse=False)
-            if not filename.endswith(".yaml"):
-                continue
-            filepath = os.path.join(path, filename)
-            logger.info("Reading swagger definition: %s" % filepath)
-            with open(filepath, "r", encoding="utf-8") as f:
-                # strip .yaml
-                group_name = re.sub(r"[^a-zA-Z0-9_]", "_", filename[:-5])
-                group_name = "%s_%s" % (prefix, group_name)
-                definition = yaml.load(f, OrderedLoader)
-                definition = resolve_references(filepath, definition)
-                if 'type' not in definition:
-                    continue
-                try:
-                    example = get_example_for_schema(definition)
-                except:
-                    example = None
-                    pass # do nothing - we don't care
-                if 'title' not in definition:
-                    definition['title'] = "NO_TITLE"
-                definition['tables'] = get_tables_for_schema(definition)
-                defs[group_name] = {
-                    "definition": definition,
-                    "examples": [example] if example is not None else [],
-                }
-        return defs
-
-    def load_common_event_fields(self):
-        """Parse the core event schema files
-
-        Returns:
-            dict: with the following properties:
-                "title": Event title (from the 'title' field of the schema)
-                "desc": desc
-                "tables": list[TypeTable]
-        """
-        path = CORE_EVENT_SCHEMA
-        event_types = {}
-
-        for filename in os.listdir(path):
-            if not filename.endswith(".yaml"):
-                continue
-
-            filepath = os.path.join(path, filename)
-
-            event_type = filename[:-5] # strip the ".yaml"
-            logger.info("Reading event schema: %s" % filepath)
-
-            with open(filepath, encoding="utf-8") as f:
-                event_schema = yaml.load(f, OrderedLoader)
-            event_schema = resolve_references(filepath, event_schema)
-
-            schema_info = process_data_type(
-                event_schema,
-                enforce_title=True,
-            )
-            event_types[event_type] = schema_info
-        return event_types
-
-    def load_apis(self, substitutions):
-        cs_ver = substitutions.get("%CLIENT_RELEASE_LABEL%", "unstable")
-        fed_ver = substitutions.get("%SERVER_RELEASE_LABEL%", "unstable")
-        is_ver = substitutions.get("%IDENTITY_RELEASE_LABEL%", "unstable")
-        as_ver = substitutions.get("%APPSERVICE_RELEASE_LABEL%", "unstable")
-        push_gw_ver = substitutions.get("%PUSH_GATEWAY_RELEASE_LABEL%", "unstable")
-
-        # we abuse the typetable to return this info to the templates
-        return TypeTable(rows=[
-            TypeTableRow(
-                "`Client-Server API `_",
-                cs_ver,
-                "Interaction between clients and servers",
-            ), TypeTableRow(
-                "`Server-Server API `_",
-                fed_ver,
-                "Federation between servers",
-            ), TypeTableRow(
-                "`Application Service API `_",
-                as_ver,
-                "Privileged server plugins",
-            ), TypeTableRow(
-                "`Identity Service API `_",
-                is_ver,
-                "Mapping of third party IDs to Matrix IDs",
-            ), TypeTableRow(
-                "`Push Gateway API `_",
-                push_gw_ver,
-                "Push notifications for Matrix events",
-            ),
-        ])
-
-    def load_event_examples(self):
-        path = EVENT_EXAMPLES
-        examples = {}
-        for filename in os.listdir(path):
-            if not filename.startswith("m."):
-                continue
-
-            event_name = filename.split("$")[0]
-            filepath = os.path.join(path, filename)
-            logger.info("Reading event example: %s" % filepath)
-            try:
-                with open(filepath, "r", encoding="utf-8") as f:
-                    example = resolve_references(filepath, json.load(f))
-                    examples[filename] = examples.get(filename, [])
-                    examples[filename].append(example)
-                    if filename != event_name:
-                        examples[event_name] = examples.get(event_name, [])
-                        examples[event_name].append(example)
-            except Exception as e:
-                e2 = Exception("Error reading event example "+filepath+": "+
-                               str(e))
-                # throw the new exception with the old stack trace, so that
-                # we don't lose information about where the error occurred.
-                raise e2.with_traceback(sys.exc_info()[2])
-
-        return examples
-
-    def load_event_schemas(self):
-        path = EVENT_SCHEMA
-        schemata = {}
-
-        for filename in os.listdir(path):
-            if not filename.startswith("m."):
-                continue
-            filepath = os.path.join(path, filename)
-            try:
-                schemata[filename] = self.read_event_schema(filepath)
-            except Exception as e:
-                e2 = Exception("Error reading event schema "+filepath+": "+
-                               str(e))
-                # throw the new exception with the old stack trace, so that
-                # we don't lose information about where the error occurred.
-                raise e2.with_traceback(sys.exc_info()[2])
-
-        return schemata
-
-    def read_event_schema(self, filepath):
-        logger.info("Reading %s" % filepath)
-
-        with open(filepath, "r", encoding="utf-8") as f:
-            json_schema = yaml.load(f, OrderedLoader)
-
-        schema = {
-            # one of "Message Event" or "State Event"
-            "typeof": "",
-            "typeof_info": "",
-
-            # event type, eg "m.room.member". Note *not* the type of the
-            # event object (which should always be 'object').
-            "type": None,
-            "title": None,
-            "desc": None,
-            "msgtype": None,
-            "type_with_msgtype": None, # for the template's sake
-            "content_fields": [
-                #
-            ]
-        }
-
-        # before we resolve the references, see if the first reference is to
-        # the message event or state event schemas, and add typeof info if so.
-        base_defs = {
-            ROOM_EVENT: "Message Event",
-            STATE_EVENT: "State Event"
-        }
-        if type(json_schema.get("allOf")) == list:
-            firstRef = json_schema["allOf"][0]["$ref"]
-            if firstRef in base_defs:
-                schema["typeof"] = base_defs[firstRef]
-
-        json_schema = resolve_references(filepath, json_schema)
-
-        # add type
-        schema["type"] = Units.prop(
-            json_schema, "properties/type/enum"
-        )[0]
-
-        # add summary and desc
-        schema["title"] = json_schema.get("title")
-        schema["desc"] = json_schema.get("description", "")
-
-        # walk the object for field info
-        schema["content_fields"] = get_tables_for_schema(
-            Units.prop(json_schema, "properties/content")
-        )
-
-        # Include UnsignedData if it is present on the object
-        unsigned = Units.prop(json_schema, "properties/unsigned")
-        if unsigned:
-            tbls = get_tables_for_schema(unsigned)
-            for tbl in tbls:
-                schema["content_fields"].append(tbl)
-
-        # grab msgtype if it is the right kind of event
-        msgtype = Units.prop(
-            json_schema, "properties/content/properties/msgtype/enum"
-        )
-        if msgtype:
-            schema["msgtype"] = msgtype[0] # enum prop
-            schema["type_with_msgtype"] = schema["type"] + " (" + msgtype[0] + ")"
-
-        # link to msgtypes for m.room.message
-        if schema["type"] == "m.room.message" and not msgtype:
-            schema["desc"] += (
-                " For more information on ``msgtypes``, see "+
-                "`m.room.message msgtypes`_."
-            )
-
-        # method types for m.key.verification.start
-        if schema["type"] == "m.key.verification.start":
-            methods = Units.prop(
-                json_schema, "properties/content/properties/method/enum"
-            )
-            if methods:
-                schema["type_with_msgtype"] = schema["type"] + " (" + methods[0] + ")"
-
-        # Assign state key info if it has some
-        if schema["typeof"] == "State Event":
-            skey_desc = Units.prop(
-                json_schema, "properties/state_key/description"
-            )
-            if not skey_desc:
-                raise Exception("Missing description for state_key")
-            schema["typeof_info"] = "``state_key``: %s" % skey_desc
-
-        return schema
-
-    def load_changelogs(self, substitutions):
-        """Loads the changelog unit for later rendering in a section.
-
-        Args:
-            substitutions: dict of variable name to value. Provided by the gendoc script.
-
-        Returns:
-            A dict of API name ("client_server", for example) to changelog.
-        """
-        changelogs = {}
-
-        # The APIs and versions we'll prepare changelogs for. We use the substitutions
-        # to ensure that we pick up the right version for generated documentation. This
-        # defaults to "unstable" as a version for incremental generated documentation (CI).
-        prepare_versions = {
-            "server_server": substitutions.get("%SERVER_RELEASE_LABEL%", "unstable"),
-            "client_server": substitutions.get("%CLIENT_RELEASE_LABEL%", "unstable"),
-            "identity_service": substitutions.get("%IDENTITY_RELEASE_LABEL%", "unstable"),
-            "push_gateway": substitutions.get("%PUSH_GATEWAY_RELEASE_LABEL%", "unstable"),
-            "application_service": substitutions.get("%APPSERVICE_RELEASE_LABEL%", "unstable"),
-        }
-
-        # Changelogs are split into two places: towncrier for the unstable changelog and
-        # the RST file for historical versions. If the prepare_versions dict above has
-        # a version other than "unstable" specified for an API, we'll use the historical
-        # changelog and otherwise generate the towncrier log in-memory.
-
-        for api_name, target_version in prepare_versions.items():
-            logger.info("Generating changelog for %s at %s" % (api_name, target_version,))
-            changelog_lines = []
-            if target_version == 'unstable':
-                # generate towncrier log
-                changelog_lines = self._read_towncrier_changelog(api_name)
-            else:
-                # read in the existing RST changelog
-                changelog_lines = self._read_rst_changelog(api_name)
-
-            # Parse the changelog lines to find the header we're looking for and therefore
-            # the changelog body.
-            prev_line = None
-            title_part = None
-            changelog_body_lines = []
-            for line in changelog_lines:
-                if prev_line is None:
-                    prev_line = line
-                    continue
-                if re.match("^[=]{3,}$", line.strip()):
-                    # the last line was a header - use that as our new title_part
-                    title_part = prev_line.strip()
-                    # take off the last line from the changelog_body_lines because it's the title
-                    if len(changelog_body_lines) > 0:
-                        changelog_body_lines = changelog_body_lines[:len(changelog_body_lines) - 1]
-                    continue
-                if re.match("^[-]{3,}$", line.strip()):
-                    # the last line is a subheading - drop this line because it's the underline
-                    # and that causes problems with rendering. We'll keep the header text though.
-                    continue
-                if line.strip().startswith(".. "):
-                    # skip comments
-                    continue
-                if title_part == target_version:
-                    # if we made it this far, append the line to the changelog body. We indent it so
-                    # that it renders correctly in the section. We also add newlines so that there's
-                    # intentionally blank lines that make rst2html happy.
-                    changelog_body_lines.append(" " + line + '\n')
-                prev_line = line
-
-            if len(changelog_body_lines) > 0:
-                changelogs[api_name] = "".join(changelog_body_lines)
-            else:
-                raise ValueError("No changelog for %s at %s" % (api_name, target_version,))
-
-        # return our `dict[api_name] => changelog` as the last step.
-        return changelogs
-
-    def _read_towncrier_changelog(self, api_name):
-        tc_path = os.path.join(CHANGELOG_DIR, api_name)
-        if os.path.isdir(tc_path):
-            logger.info("Generating towncrier changelog for: %s" % api_name)
-            p = subprocess.Popen(
-                ['towncrier', '--version', 'unstable', '--name', api_name, '--draft'],
-                cwd=tc_path,
-                stderr=subprocess.PIPE,
-                stdout=subprocess.PIPE,
-            )
-            stdout, stderr = p.communicate()
-            if p.returncode != 0:
-                # Something broke - dump as much information as we can
-                logger.error("Towncrier exited with code %s" % p.returncode)
-                logger.error(stdout.decode('UTF-8'))
-                logger.error(stderr.decode('UTF-8'))
-                raw_log = ""
-            else:
-                raw_log = stdout.decode('UTF-8')
-
-                # This is a bit of a hack, but it does mean that the log at least gets *something*
-                # to tell us it broke
-                if not raw_log.startswith("unstable"):
-                    logger.error("Towncrier appears to have failed to generate a changelog")
-                    logger.error(raw_log)
-                    raw_log = ""
-            return raw_log.splitlines()
-        return []
-
-    def _read_rst_changelog(self, api_name):
-        logger.info("Reading changelog RST for %s" % api_name)
-        rst_path = os.path.join(CHANGELOG_DIR, "%s.rst" % api_name)
-        with open(rst_path, 'r', encoding="utf-8") as f:
-            return f.readlines()
-
-    def load_unstable_warnings(self, substitutions):
-        warning = """
-.. WARNING::
-  You are viewing an unstable version of this specification. Unstable
-  specifications may change at any time without notice. To view the
-  current specification, please `click here `_.
-"""
-        warnings = {}
-        for var in substitutions.keys():
-            key = var[1:-1] # take off the surrounding %-signs
-            if substitutions.get(var, "unstable") == "unstable":
-                warnings[key] = warning
-            else:
-                warnings[key] = ""
-        return warnings
-
-
-    def load_spec_targets(self):
-        with open(TARGETS, "r") as f:
-            return yaml.load(f.read())
-
-
-    def load_git_version(self):
-        null = open(os.devnull, 'w')
-        cwd = os.path.dirname(os.path.abspath(__file__))
-        try:
-            git_branch = subprocess.check_output(
-                ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
-                stderr=null,
-                cwd=cwd,
-            ).strip().decode('UTF-8')
-        except subprocess.CalledProcessError:
-            git_branch = ""
-        try:
-            git_tag = subprocess.check_output(
-                ['git', 'describe', '--exact-match'],
-                stderr=null,
-                cwd=cwd,
-            ).strip().decode('UTF-8')
-            git_tag = "tag=" + git_tag
-        except subprocess.CalledProcessError:
-            git_tag = ""
-        try:
-            git_commit = subprocess.check_output(
-                ['git', 'rev-parse', '--short', 'HEAD'],
-                stderr=null,
-                cwd=cwd,
-            ).strip().decode('UTF-8')
-        except subprocess.CalledProcessError:
-            git_commit = ""
-        try:
-            dirty_string = "-this_is_a_dirty_checkout"
-            is_dirty = subprocess.check_output(
-                ['git', 'describe', '--dirty=' + dirty_string, "--all"],
-                stderr=null,
-                cwd=cwd,
-            ).strip().decode('UTF-8').endswith(dirty_string)
-            git_dirty = "dirty" if is_dirty else ""
-        except subprocess.CalledProcessError:
-            git_dirty = ""
-
-        git_version = "Unknown"
-        if git_branch or git_tag or git_commit or git_dirty:
-            git_version = ",".join(
-                s for s in
-                (git_branch, git_tag, git_commit, git_dirty,)
-                if s
-            ).encode("ascii").decode('ascii')
-        return {
-            "string": git_version,
-            "revision": git_commit
-        }
-
-    def load_sas_emoji(self):
-        with open(SAS_EMOJI_JSON, 'r', encoding='utf-8') as sas_json:
-            emoji = json.load(sas_json)
-
-            # Verify the emoji matches the unicode
-            for c in emoji:
-                e = c['emoji']
-                logger.info("Checking emoji %s (%s)", e, c['description'])
-                u = re.sub(r'U\+([0-9a-fA-F]+)', lambda m: chr(int(m.group(1), 16)), c['unicode'])
-                if e != u:
-                    raise Exception("Emoji %s should be %s not %s" % (
-                        c['description'],
-                        repr(e),
-                        c['unicode'],
-                    ))
-
-            return emoji