Cut out legacy build scripts

pull/3098/head
Travis Ralston 3 years ago
parent 9d030ca844
commit 93f83aca3d

@@ -1,5 +1,5 @@
steps:
- label: ":books: Build the legacy spec"
- label: ":snake: Build swagger definitions for matrix.org"
command:
# Install the python dependencies necessary to build the spec
- python3 -m venv env && . env/bin/activate

@@ -1,9 +1,3 @@
genlegacydoc: &genlegacydoc
name: Generate the legacy docs
command: |
source /env/bin/activate
scripts/gendoc.py
gendoc: &gendoc
name: Generate the docs
# Note: Node dependencies are required for the hugo build.
@@ -54,12 +48,6 @@ buildspeculator: &buildspeculator
cd scripts/speculator
go build -v
buildcontinuserv: &buildcontinuserv
name: Build Continuserv
command: |
cd scripts/continuserv
go build -v
version: 2
jobs:
validate-docs:
@@ -74,17 +62,6 @@ jobs:
steps:
- checkout
- run: *checkexamples
build-legacy-docs:
docker:
- image: uhoreg/matrix-doc-build
steps:
- checkout
- run: *genlegacydoc
- store_artifacts:
path: scripts/gen
- run:
name: "Legacy doc build is available at:"
command: DOCS_URL="${CIRCLE_BUILD_URL}/artifacts/${CIRCLE_NODE_INDEX}/${CIRCLE_WORKING_DIRECTORY/#\~/$HOME}/scripts/gen/index.html"; echo $DOCS_URL
build-docs:
docker:
- image: alpine
@@ -121,8 +98,6 @@ jobs:
name: Install Dependencies
command: |
go get -v github.com/hashicorp/golang-lru
go get -v gopkg.in/fsnotify/fsnotify.v1
- run: *buildcontinuserv
- run: *buildspeculator
workflows:
@@ -130,7 +105,6 @@
build-spec:
jobs:
- build-legacy-docs
- build-docs
- build-swagger
- check-docs

@@ -1,3 +0,0 @@
continuserv proactively re-generates the spec on filesystem changes, and serves
it over HTTP. For notes on using it, see [the main
readme](../../README.rst#continuserv).
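A quick sketch of the workflow that README described, from the client side: save a source file, then fetch the regenerated output over HTTP. The default port 8000 comes from main.go below; the fetched paths are illustrative:

import urllib.request

# continuserv blocks the request until any in-flight regeneration finishes,
# so the response always reflects the most recently saved sources.
spec_html = urllib.request.urlopen("http://localhost:8000/index.html").read()
api_json = urllib.request.urlopen("http://localhost:8000/api-docs.json").read()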

@@ -1,15 +0,0 @@
<head>
<script>
window.onload = function() {
var url = new URL(window.location);
url.pathname += "api-docs.json";
var newLoc = "http://petstore.swagger.io/?url=" + encodeURIComponent(url);
document.getElementById("apidocs").href = newLoc;
};
</script>
</head>
<body><ul>
<li><a id="apidocs">api docs</a></li>
<li><a href="index.html">spec</a></li>
</ul>
</body>
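The page above simply points the public Swagger "petstore" viewer at the locally served api-docs.json. The encodeURIComponent call maps directly onto Python's urllib.parse.quote; a sketch of the same URL construction (the localhost address is illustrative):

from urllib.parse import quote

api_docs = "http://localhost:8000/api-docs.json"
viewer = "http://petstore.swagger.io/?url=" + quote(api_docs, safe="")
# -> http://petstore.swagger.io/?url=http%3A%2F%2Flocalhost%3A8000%2Fapi-docs.json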

@@ -1,274 +0,0 @@
// continuserv proactively re-generates the spec on filesystem changes, and serves it over HTTP.
// It will always serve the most recent version of the spec, and may block an HTTP request until regeneration is finished.
// It does not currently pre-empt stale generations, but will block until they are complete.
package main
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
fsnotify "gopkg.in/fsnotify/fsnotify.v1"
)
var (
port = flag.Int("port", 8000, "Port on which to serve HTTP")
mu sync.Mutex // Prevent multiple updates in parallel.
toServe atomic.Value // Always contains a bytesOrErr. May be stale unless wg is zero.
wgMu sync.Mutex // Prevent multiple calls to wg.Wait() or wg.Add(positive number) in parallel.
wg sync.WaitGroup // Indicates how many updates are pending.
)
func main() {
flag.Parse()
w, err := fsnotify.NewWatcher()
if err != nil {
log.Fatalf("Error making watcher: %v", err)
}
dir, err := os.Getwd()
if err != nil {
log.Fatalf("Error getting wd: %v", err)
}
for ; !exists(path.Join(dir, ".git")); dir = path.Dir(dir) {
if dir == "/" {
log.Fatalf("Could not find git root")
}
}
walker := makeWalker(dir, w)
paths := []string{"api", "changelogs", "event-schemas", "scripts",
"specification", "schemas", "data-definitions"}
for _, p := range paths {
filepath.Walk(path.Join(dir, p), walker)
}
wg.Add(1)
populateOnce(dir)
ch := make(chan struct{}, 100) // Buffered so a burst of pending writes can each be queued without blocking the watcher
go doPopulate(ch, dir)
go watchFS(ch, w)
fmt.Printf("Listening on port %d\n", *port)
http.HandleFunc("/", serve)
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", *port), nil))
}
func watchFS(ch chan struct{}, w *fsnotify.Watcher) {
for {
select {
case e := <-w.Events:
if filter(e) {
fmt.Printf("Noticed change to %s, re-generating spec\n", e.Name)
ch <- struct{}{}
}
}
}
}
func makeWalker(base string, w *fsnotify.Watcher) filepath.WalkFunc {
return func(path string, i os.FileInfo, err error) error {
if err != nil {
log.Fatalf("Error walking: %v", err)
}
if !i.IsDir() {
// we set watches on directories, not files
return nil
}
rel, err := filepath.Rel(base, path)
if err != nil {
log.Fatalf("Failed to get relative path of %s: %v", path, err)
}
// Normalize slashes
rel = filepath.ToSlash(rel)
// skip a few things that we know don't form part of the spec
if rel == "api/node_modules" ||
rel == "scripts/gen" ||
rel == "scripts/tmp" {
return filepath.SkipDir
}
// log.Printf("Adding watch on %s", path)
if err := w.Add(path); err != nil {
log.Fatalf("Failed to add watch on %s: %v", path, err)
}
return nil
}
}
// Return true if event should trigger re-population
func filter(e fsnotify.Event) bool {
// vim is *really* noisy about how it writes files
if e.Op != fsnotify.Write {
return false
}
_, fname := filepath.Split(e.Name)
// Avoid some temp files that vim or emacs writes
if strings.HasSuffix(e.Name, "~") || strings.HasSuffix(e.Name, ".swp") || strings.HasPrefix(fname, ".") ||
(strings.HasPrefix(fname, "#") && strings.HasSuffix(fname, "#")) {
return false
}
// Forcefully ignore directories we don't care about (Windows, at least, tries to notify about some directories)
filePath := filepath.ToSlash(e.Name) // normalize slashes
if strings.Contains(filePath, "/scripts/tmp") ||
strings.Contains(filePath, "/scripts/gen") ||
strings.Contains(filePath, "/api/node_modules") {
return false
}
return true
}
func serve(w http.ResponseWriter, req *http.Request) {
wgMu.Lock()
wg.Wait()
wgMu.Unlock()
m := toServe.Load().(bytesOrErr)
if m.err != nil {
w.Header().Set("Content-Type", "text/plain")
w.Write([]byte(m.err.Error()))
return
}
ok := true
var b []byte
file := req.URL.Path
if file[0] == '/' {
file = file[1:]
}
b, ok = m.bytes[filepath.FromSlash(file)] // de-normalize slashes
if ok && file == "api-docs.json" {
w.Header().Set("Access-Control-Allow-Origin", "*")
}
if ok {
w.Header().Set("Content-Type", "text/html")
w.Write([]byte(b))
return
}
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(404)
w.Write([]byte("Not found"))
}
func generate(dir string) (map[string][]byte, error) {
cmd := exec.Command("python", "gendoc.py")
cmd.Dir = path.Join(dir, "scripts")
var b bytes.Buffer
cmd.Stderr = &b
err := cmd.Run()
if err != nil {
return nil, fmt.Errorf("error generating spec: %v\nOutput from gendoc:\n%v", err, b.String())
}
// cheekily dump the swagger docs into the gen directory so that it is
// easy to serve
cmd = exec.Command("python", "dump-swagger.py", "-o", "gen/api-docs.json")
cmd.Dir = path.Join(dir, "scripts")
cmd.Stderr = &b
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("error generating api docs: %v\nOutput from dump-swagger:\n%v", err, b.String())
}
files := make(map[string][]byte)
base := path.Join(dir, "scripts", "gen")
walker := func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
rel, err := filepath.Rel(base, path)
if err != nil {
return fmt.Errorf("Failed to get relative path of %s: %v", path, err)
}
bytes, err := ioutil.ReadFile(path)
if err != nil {
return err
}
files[rel] = bytes
return nil
}
if err := filepath.Walk(base, walker); err != nil {
return nil, fmt.Errorf("error reading spec: %v", err)
}
// load the special index
indexpath := path.Join(dir, "scripts", "continuserv", "index.html")
bytes, err := ioutil.ReadFile(indexpath)
if err != nil {
return nil, fmt.Errorf("error reading index: %v", err)
}
files[""] = bytes
return files, nil
}
func populateOnce(dir string) {
defer wg.Done()
mu.Lock()
defer mu.Unlock()
files, err := generate(dir)
toServe.Store(bytesOrErr{files, err})
}
func doPopulate(ch chan struct{}, dir string) {
var pending int
for {
select {
case <-ch:
if pending == 0 {
wgMu.Lock()
wg.Add(1)
wgMu.Unlock()
}
pending++
case <-time.After(10 * time.Millisecond):
if pending > 0 {
pending = 0
populateOnce(dir)
}
}
}
}
func exists(path string) bool {
_, err := os.Stat(path)
return !os.IsNotExist(err)
}
type bytesOrErr struct {
bytes map[string][]byte // filename -> contents
err error
}
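The core trick in the file above is how doPopulate coalesces bursts of filesystem events: the first event in a burst increments the WaitGroup (so HTTP requests block until fresh output exists), and regeneration runs only once no further event has arrived for 10ms. A rough Python rendering of that loop, with illustrative names:

import queue

def do_populate(events: "queue.Queue[object]", regenerate, quiet_s=0.01):
    # Drain change notifications until the queue stays quiet for quiet_s,
    # then regenerate once, however many events arrived in the burst.
    pending = 0
    while True:
        try:
            events.get(timeout=quiet_s)
            pending += 1
        except queue.Empty:
            if pending > 0:
                pending = 0
                regenerate()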

@@ -1,6 +0,0 @@
with import <nixpkgs> {};
(python.buildEnv.override {
extraLibs = with pythonPackages;
[ docutils pyyaml jinja2 pygments ];
}).env

@@ -1,561 +0,0 @@
#! /usr/bin/env python
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from docutils.core import publish_file
import copy
import fileinput
import glob
import os
import os.path
import re
import shutil
import subprocess
import sys
import yaml
script_dir = os.path.dirname(os.path.abspath(__file__))
docs_dir = os.path.dirname(script_dir)
spec_dir = os.path.join(docs_dir, "specification")
tmp_dir = os.path.join(script_dir, "tmp")
changelog_dir = os.path.join(docs_dir, "changelogs")
VERBOSE = False
"""
Read an RST file and replace titles with a different title level if required.
Args:
filename: The name of the file being read (for debugging)
file_stream: The open file stream to read from.
title_level: The integer which determines the offset to *start* from.
title_styles: An array of characters detailing the right title styles to use
e.g. ["=", "-", "~", "+"]
Returns:
string: The file contents with titles adjusted.
Example:
Assume title_styles = ["=", "-", "~", "+"], title_level = 1, and the file
when read line-by-line encounters the titles "===", "---", "---", "===", "---".
This function will bump every title encountered down a sub-heading e.g.
"=" to "-" and "-" to "~" because title_level = 1, so the output would be
"---", "~~~", "~~~", "---", "~~~". There is no bumping "up" a title level.
"""
def load_with_adjusted_titles(filename, file_stream, title_level, title_styles):
rst_lines = []
prev_line_title_level = 0 # We expect the file to start with '=' titles
file_offset = None
prev_non_title_line = None
for i, line in enumerate(file_stream):
if (prev_non_title_line is None
or not is_title_line(prev_non_title_line, line, title_styles)
):
rst_lines.append(line)
prev_non_title_line = line
continue
line_title_style = line[0]
line_title_level = title_styles.index(line_title_style)
# Not all files will start with "===" and we should be flexible enough
# to allow that. The first title we encounter sets the "file offset"
# which is added to the title_level desired.
if file_offset is None:
file_offset = line_title_level
if file_offset != 0:
logv((" WARNING: %s starts with a title style of '%s' but '%s' " +
"is preferable.") % (filename, line_title_style, title_styles[0]))
# Sanity checks: Make sure that this file is obeying the title levels
# specified and bail if it isn't.
# The file is allowed to go 1 deeper or any number shallower
if prev_line_title_level - line_title_level < -1:
raise Exception(
("File '%s' line '%s' has a title " +
"style '%s' which doesn't match one of the " +
"allowed title styles of %s because the " +
"title level before this line was '%s'") %
(filename, (i + 1), line_title_style, title_styles,
title_styles[prev_line_title_level])
)
prev_line_title_level = line_title_level
adjusted_level = (
title_level + line_title_level - file_offset
)
# Sanity check: Make sure we can bump down the title and we aren't at the
# lowest level already
if adjusted_level >= len(title_styles):
raise Exception(
("Files '%s' line '%s' has a sub-title level too low and it " +
"cannot be adjusted to fit. You can add another level to the " +
"'title_styles' key in targets.yaml to fix this.") %
(filename, (i + 1))
)
if adjusted_level == line_title_level:
# no changes required
rst_lines.append(line)
continue
# Adjusting line levels
logv(
"File: %s Adjusting %s to %s because file_offset=%s title_offset=%s" %
(filename, line_title_style, title_styles[adjusted_level],
file_offset, title_level)
)
rst_lines.append(line.replace(
line_title_style,
title_styles[adjusted_level]
))
return "".join(rst_lines)
def is_title_line(prev_line, line, title_styles):
# The title underline must match at a minimum the length of the title
if len(prev_line) > len(line):
return False
line = line.rstrip()
# must be at least 3 chars long
if len(line) < 3:
return False
# must start with a title char
title_char = line[0]
if title_char not in title_styles:
return False
# all characters must be the same
for char in line[1:]:
if char != title_char:
return False
# looks like a title line
return True
def get_rst(file_info, title_level, title_styles, spec_dir, adjust_titles):
# strings are file paths to RST blobs
if isinstance(file_info, str):
log("%s %s" % (">" * (1 + title_level), file_info))
with open(os.path.join(spec_dir, file_info), "r", encoding="utf-8") as f:
rst = None
if adjust_titles:
rst = load_with_adjusted_titles(
file_info, f, title_level, title_styles
)
else:
rst = f.read()
rst += "\n\n"
return rst
# dicts look like {0: filepath, 1: filepath} where the key is the title level
elif isinstance(file_info, dict):
levels = sorted(file_info.keys())
rst = []
for l in levels:
rst.append(get_rst(file_info[l], l, title_styles, spec_dir, adjust_titles))
return "".join(rst)
# lists are multiple file paths e.g. [filepath, filepath]
elif isinstance(file_info, list):
rst = []
for f in file_info:
rst.append(get_rst(f, title_level, title_styles, spec_dir, adjust_titles))
return "".join(rst)
raise Exception(
"The following 'file' entry in this target isn't a string, list or dict. " +
"It really really should be. Entry: %s" % (file_info,)
)
def build_spec(target, out_filename):
log("Building templated file %s" % out_filename)
with open(out_filename, "w", encoding="utf-8") as outfile:
for file_info in target["files"]:
section = get_rst(
file_info=file_info,
title_level=0,
title_styles=target["title_styles"],
spec_dir=spec_dir,
adjust_titles=True
)
outfile.write(section)
"""
Replaces relative title styles with actual title styles.
The templating system has no idea what the right title style is when it produces
RST because it depends on the build target. As a result, it uses relative title
styles defined in targets.yaml to say "down a level, up a level, same level".
This function replaces these relative titles with actual title styles from the
array in targets.yaml.
"""
def fix_relative_titles(target, filename, out_filename):
log("Fix relative titles, %s -> %s" % (filename, out_filename))
title_styles = target["title_styles"]
relative_title_chars = [
target["relative_title_styles"]["subtitle"],
target["relative_title_styles"]["sametitle"],
target["relative_title_styles"]["supertitle"]
]
relative_title_matcher = re.compile(
"^[" + re.escape("".join(relative_title_chars)) + "]{3,}$"
)
title_matcher = re.compile(
"^[" + re.escape("".join(title_styles)) + "]{3,}$"
)
current_title_style = None
with open(filename, "r", encoding="utf-8") as infile:
with open(out_filename, "w", encoding="utf-8") as outfile:
for line in infile.readlines():
if not relative_title_matcher.match(line):
if title_matcher.match(line):
current_title_style = line[0]
outfile.write(line)
continue
line_char = line[0]
replacement_char = None
current_title_level = title_styles.index(current_title_style)
if line_char == target["relative_title_styles"]["subtitle"]:
if (current_title_level + 1) == len(title_styles):
raise Exception(
"Encountered sub-title line style but we can't go " +
"any lower."
)
replacement_char = title_styles[current_title_level + 1]
elif line_char == target["relative_title_styles"]["sametitle"]:
replacement_char = title_styles[current_title_level]
elif line_char == target["relative_title_styles"]["supertitle"]:
if (current_title_level - 1) < 0:
raise Exception(
"Encountered super-title line style but we can't go " +
"any higher."
)
replacement_char = title_styles[current_title_level - 1]
else:
raise Exception(
"Unknown relative line char %s" % (line_char,)
)
outfile.write(
line.replace(line_char, replacement_char)
)
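# The replacement above, reduced to a standalone sketch. The relative
# characters ("<", "/", ">") are illustrative; the real ones come from
# relative_title_styles in targets.yaml.
def resolve_relative(line_char, current_level, styles=("=", "-", "~", "+"),
                     offsets={"<": 1, "/": 0, ">": -1}):
    return styles[current_level + offsets[line_char]]

assert resolve_relative("<", 1) == "~"  # subtitle: one level deeper
assert resolve_relative("/", 1) == "-"  # sametitle: same level
assert resolve_relative(">", 1) == "="  # supertitle: one level up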
def rst2html(i, o, stylesheets):
log("rst2html %s -> %s" % (i, o))
with open(i, "r", encoding="utf-8") as in_file:
with open(o, "w", encoding="utf-8") as out_file:
publish_file(
source=in_file,
destination=out_file,
reader_name="standalone",
parser_name="restructuredtext",
writer_name="html",
settings_overrides={
"stylesheet_path": stylesheets,
"syntax_highlight": "short",
},
)
def addAnchors(path):
log("add anchors %s" % path)
with open(path, "r", encoding="utf-8") as f:
lines = f.readlines()
replacement = r'<p><a class="anchor" id="\2"></a></p>\n\1'
with open(path, "w", encoding="utf-8") as f:
for line in lines:
line = re.sub(r'(<h\d id="#?(.*?)">)', replacement, line.rstrip())
line = re.sub(r'(<div class="section" id="(.*?)">)', replacement, line.rstrip())
f.write(line + "\n")
def run_through_template(input_files, set_verbose, substitutions):
args = [
'python', script_dir+'/templating/build.py',
"-o", tmp_dir,
"-i", "matrix_templates",
]
for k, v in substitutions.items():
args.append("--substitution=%s=%s" % (k, v))
if set_verbose:
args.insert(2, "-v")
args.extend(input_files)
log("EXEC: %s" % " ".join(args))
log(" ==== build.py output ==== ")
subprocess.check_call(args)
"""
Extract and resolve groups for the given target in the given targets listing.
Args:
all_targets (dict): The parsed YAML file containing a list of targets
target_name (str): The name of the target to extract from the listings.
Returns:
dict: Containing "filees" (a list of file paths), "relative_title_styles"
(a dict of relative style keyword to title character) and "title_styles"
(a list of characters which represent the global title style to follow,
with the top section title first, the second section second, and so on.)
"""
def get_build_target(all_targets, target_name):
build_target = {
"title_styles": [],
"relative_title_styles": {},
"files": []
}
build_target["title_styles"] = all_targets["title_styles"]
build_target["relative_title_styles"] = all_targets["relative_title_styles"]
target = all_targets["targets"].get(target_name)
if not target:
raise Exception(
"No target by the name '" + target_name + "' exists in '" +
targets_listing + "'."
)
if not isinstance(target.get("files"), list):
raise Exception(
"Found target but 'files' key is not a list."
)
def get_group(group_id, depth):
group_name = group_id[len("group:"):]
group = all_targets.get("groups", {}).get(group_name)
if not group:
raise Exception(
"Tried to find group '%s' but it doesn't exist." % group_name
)
if not isinstance(group, list):
raise Exception(
"Expected group '%s' to be a list but it isn't." % group_name
)
# deep copy so changes to depths don't contaminate multiple uses of this group
group = copy.deepcopy(group)
# swap relative depths for absolute ones
for i, entry in enumerate(group):
if isinstance(entry, dict):
group[i] = {
(rel_depth + depth): v for (rel_depth, v) in entry.items()
}
return group
resolved_files = []
for file_entry in target["files"]:
# file_entry is a group id
if isinstance(file_entry, str) and file_entry.startswith("group:"):
group = get_group(file_entry, 0)
# The group may be resolved to a list of file entries, in which case
# we want to extend the array to insert each of them rather than
# insert the entire list as a single element (which is what append does)
if isinstance(group, list):
resolved_files.extend(group)
else:
resolved_files.append(group)
# file_entry is a dict which has more file entries as values
elif isinstance(file_entry, dict):
resolved_entry = {}
for (depth, entry) in file_entry.items():
if not isinstance(entry, str):
raise Exception(
"Double-nested depths are not supported. Entry: %s" % (file_entry,)
)
if entry.startswith("group:"):
resolved_entry[depth] = get_group(entry, depth)
else:
# map across without editing (e.g. normal file path)
resolved_entry[depth] = entry
resolved_files.append(resolved_entry)
continue
# file_entry is just a plain ol' file path
else:
resolved_files.append(file_entry)
build_target["files"] = resolved_files
return build_target
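# A toy illustration (hypothetical targets.yaml content) of the resolution
# above: "group:" references are inlined, and depth-keyed dicts keep their
# depths.
example_targets = {
    "title_styles": ["=", "-", "~"],
    "relative_title_styles": {"subtitle": "<", "sametitle": "/", "supertitle": ">"},
    "groups": {"modules": ["modules/a.rst", "modules/b.rst"]},
    "targets": {"demo": {"files": ["intro.rst", {1: "group:modules"}]}},
}
# get_build_target(example_targets, "demo")["files"] then resolves to:
#     ["intro.rst", {1: ["modules/a.rst", "modules/b.rst"]}]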
def log(line):
print("gendoc: %s" % line)
def logv(line):
if VERBOSE:
print("gendoc:V: %s" % line)
def cleanup_env():
shutil.rmtree(tmp_dir)
def mkdirp(d):
if not os.path.exists(d):
os.makedirs(d)
def main(targets, dest_dir, keep_intermediates, substitutions):
try:
mkdirp(dest_dir)
except Exception as e:
log("Error creating destination directory '%s': %s" % (dest_dir, str(e)))
return 1
try:
mkdirp(tmp_dir)
except Exception as e:
log("Error creating temporary directory '%s': %s" % (tmp_dir, str(e)))
return 1
with open(os.path.join(spec_dir, "targets.yaml"), "r") as targ_file:
target_defs = yaml.load(targ_file.read())
if targets == ["all"]:
targets = target_defs["targets"].keys()
log("Building spec [targets=%s]" % targets)
templated_files = {} # map from target name to templated file
for target_name in targets:
templated_file = os.path.join(tmp_dir, "templated_%s.rst" % (target_name,))
target = get_build_target(target_defs, target_name)
build_spec(target=target, out_filename=templated_file)
templated_files[target_name] = templated_file
# we do all the templating at once, because it's slow
run_through_template(templated_files.values(), VERBOSE, substitutions)
stylesheets = glob.glob(os.path.join(script_dir, "css", "*.css"))
for target_name, templated_file in templated_files.items():
target = target_defs["targets"].get(target_name)
version_label = None
if target:
version_label = target.get("version_label")
if version_label:
for old, new in substitutions.items():
version_label = version_label.replace(old, new)
rst_file = os.path.join(tmp_dir, "spec_%s.rst" % (target_name,))
if version_label:
d = os.path.join(dest_dir, target_name.split('@')[0])
if not os.path.exists(d):
os.mkdir(d)
html_file = os.path.join(d, "%s.html" % version_label)
else:
html_file = os.path.join(dest_dir, "%s.html" % (target_name, ))
fix_relative_titles(
target=target_defs, filename=templated_file,
out_filename=rst_file,
)
rst2html(rst_file, html_file, stylesheets=stylesheets)
addAnchors(html_file)
if not keep_intermediates:
cleanup_env()
return 0
def list_targets():
with open(os.path.join(spec_dir, "targets.yaml"), "r") as targ_file:
target_defs = yaml.load(targ_file.read())
targets = target_defs["targets"].keys()
print("\n".join(targets))
def extract_major(s):
major_version = s
match = re.match("^(r\d+)(\.\d+)*$", s)
if match:
major_version = match.group(1)
return major_version
if __name__ == '__main__':
parser = ArgumentParser(
"gendoc.py - Generate the Matrix specification as HTML."
)
parser.add_argument(
"--nodelete", "-n", action="store_true",
help="Do not delete intermediate files. They will be found in scripts/tmp/"
)
parser.add_argument(
"--target", "-t", action="append",
help="Specify the build target to build from specification/targets.yaml. " +
"The value 'all' will build all of the targets therein."
)
parser.add_argument(
"--verbose", "-v", action="store_true",
help="Turn on verbose mode."
)
parser.add_argument(
"--client_release", "-c", action="store", default="unstable",
help="The client-server release tag to generate, e.g. r1.2"
)
parser.add_argument(
"--server_release", "-s", action="store", default="unstable",
help="The server-server release tag to generate, e.g. r1.2"
)
parser.add_argument(
"--appservice_release", "-a", action="store", default="unstable",
help="The appservice release tag to generate, e.g. r1.2"
)
parser.add_argument(
"--push_gateway_release", "-p", action="store", default="unstable",
help="The push gateway release tag to generate, e.g. r1.2"
)
parser.add_argument(
"--identity_release", "-i", action="store", default="unstable",
help="The identity service release tag to generate, e.g. r1.2"
)
parser.add_argument(
"--list_targets", action="store_true",
help="Do not update the specification. Instead print a list of targets.",
)
parser.add_argument(
"--dest", "-d", default=os.path.join(script_dir, "gen"),
help="Set destination directory (default: scripts/gen)",
)
args = parser.parse_args()
VERBOSE = args.verbose
if args.list_targets:
list_targets()
exit(0)
substitutions = {
"%CLIENT_RELEASE_LABEL%": args.client_release,
# we hardcode the major versions. This ends up in the example
# API URLs. When we have released a new major version, we'll
# have to bump them.
"%CLIENT_MAJOR_VERSION%": "r0",
"%SERVER_RELEASE_LABEL%": args.server_release,
"%APPSERVICE_RELEASE_LABEL%": args.appservice_release,
"%IDENTITY_RELEASE_LABEL%": args.identity_release,
"%PUSH_GATEWAY_RELEASE_LABEL%": args.push_gateway_release,
}
exit(main(args.target or ["all"], args.dest, args.nodelete, substitutions))

@@ -8,16 +8,8 @@ cd `dirname $0`/..
mkdir -p assets
# generate specification/proposals.rst
./scripts/proposals.py
# generate the legacy spec docs
./scripts/gendoc.py -d assets/spec
# and the swagger
./scripts/dump-swagger.py -o assets/spec/client_server/unstable.json
# create a tarball of the assets. Exclude the spec index for now, as
# we want to leave it pointing at the release versions of the specs.
# (XXX: how to maintain this?)
tar -czf assets.tar.gz --exclude="assets/spec/index.html" assets
# create a tarball of the assets.
tar -czf assets.tar.gz assets

@@ -1,218 +0,0 @@
#!/usr/bin/env python
#
# proposals.py: generate an RST file (proposals.rst) from queries to github.com/matrix.org/matrix-doc/issues.
import requests
import re
from datetime import datetime
# a list of the labels we care about
LABELS_LIST=[
'proposal-in-review',
'proposed-final-comment-period',
'final-comment-period',
'finished-final-comment-period',
'spec-pr-missing',
'spec-pr-in-review',
'merged',
'proposal-postponed',
'abandoned',
'obsolete',
]
authors = set()
prs = set()
def getpage(url):
"""Request the given URL, and extract the pagecount from the response headers
Args:
url (str): URL to fetch
Returns:
Tuple[int, list]: number of pages, and the list of items on this page
"""
resp = requests.get(url)
pagecount = 1
for link in resp.links.values():
if link['rel'] == 'last':
# we extract the pagecount from the `page` param of the last url
# in the response, eg
# 'https://api.github.com/repositories/24998719/issues?state=all&labels=proposal&page=10'
pagecount = int(re.search('page=(\d+)', link['url']).group(1))
val = resp.json()
if not isinstance(val, list):
print(val) # Just dump the raw (likely error) response to the log
raise Exception("Error calling %s" % url)
return (pagecount, val)
def getbylabel(label):
"""Fetch all the issues with a given label
Args:
label (str): label to fetch
Returns:
Iterator[dict]: an iterator over the issue list.
"""
urlbase = 'https://api.github.com/repos/matrix-org/matrix-doc/issues?state=all&labels=' + label + '&page='
page = 1
while True:
(pagecount, results) = getpage(urlbase + str(page))
for i in results:
yield i
page += 1
if page > pagecount:
return
def print_issue_list(text_file, label, issues):
text_file.write(label + "\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n")
if len(issues) == 0:
text_file.write("No proposals.\n\n")
return
text_file.write(".. list-table::\n :header-rows: 1\n :widths: auto\n :stub-columns: 1\n\n")
text_file.write(" * - MSC\n")
text_file.write(" - Proposal Title\n")
text_file.write(" - Creation Date\n")
text_file.write(" - Update Date\n")
text_file.write(" - Documentation\n")
text_file.write(" - Author\n")
text_file.write(" - Shepherd\n")
text_file.write(" - PRs\n")
for item in issues:
# Set the created date: prefer a local "Date:" field in the issue body, otherwise fall back to GitHub's created_at
body = str(item['body'])
created = re.search('^Date: (.+?)\n', body, flags=re.MULTILINE)
if created is not None:
created = created.group(1).strip()
try:
created = datetime.strptime(created, "%d/%m/%Y")
created = created.strftime('%Y-%m-%d')
except:
pass
try:
created = datetime.strptime(created, "%Y-%m-%d")
created = created.strftime('%Y-%m-%d')
except:
pass
else:
created = datetime.strptime(item['created_at'], "%Y-%m-%dT%XZ")
created = created.strftime('%Y-%m-%d')
item['created'] = created
issues_to_print = sorted(issues, key=lambda issue_sort: issue_sort["created"])
for item in issues_to_print:
# MSC number
text_file.write(" * - `MSC" + str(item['number']) + " <" + item['html_url'] + ">`_\n")
# title from Github issue
text_file.write(" - " + item['title'] + "\n")
# created date
text_file.write(" - " + item['created'] + "\n")
# last updated, purely Github
updated = datetime.strptime(item['updated_at'], "%Y-%m-%dT%XZ")
text_file.write(" - " + updated.strftime('%Y-%m-%d') + "\n")
# list of document links (urls comma-separated)
maindoc = re.search('^Documentation: (.+?)$', str(item['body']), flags=re.MULTILINE)
if maindoc is not None:
maindoc = maindoc.group(1)
doc_list_formatted = ["`" + str(item['number']) + "-" + str(i) + " <" + x.strip() + ">`_" for i, x in enumerate(maindoc.split(','),1)]
text_file.write(" - " + ', '.join(doc_list_formatted))
else:
text_file.write(" - ")
text_file.write("\n")
# author list, if missing just use Github issue creator
author = re.search('^Author: (.+?)$', str(item['body']), flags=re.MULTILINE)
if author is not None:
author_list_formatted = set()
author_list = author.group(1)
for a in author_list.split(","):
authors.add(a.strip())
author_list_formatted.add("`" + str(a.strip()) + "`_")
text_file.write(" - " + ', '.join(author_list_formatted))
else:
author = "@" + item['user']['login']
authors.add(author)
text_file.write(" - `" + str(author) + "`_")
text_file.write("\n")
# shepherd (currently only one)
shepherd = re.search('Shepherd: (.+?)\n', str(item['body']))
if shepherd is not None:
authors.add(shepherd.group(1).strip())
shepherd = "`" + shepherd.group(1).strip() + "`_"
text_file.write(" - " + str(shepherd) + "\n")
# PRs
try:
pr_list = re.search('PRs: (.+?)$', str(item['body']))
if pr_list is not None:
pr_list_formatted = set()
pr_list = pr_list.group(1)
for p in pr_list.split(","):
if re.match(r"#\d", p.strip()):
prs.add(p.strip())
pr_list_formatted.add("`PR" + str(p.strip()) + "`_")
elif re.match(r"https://github.com/matrix-org/matrix-doc/pulls/\d", p.strip()):
pr = "#" + p.strip().replace('https://github.com/matrix-org/matrix-doc/pulls/', '')
prs.add(pr)
pr_list_formatted.add("`PR" + str(pr) + "`_")
else:
raise RuntimeWarning
text_file.write(" - " + ', '.join(pr_list_formatted))
text_file.write("\n")
else:
text_file.write(" - \n")
except:
print("exception parsing PRs for MSC" + str(item['number']))
text_file.write(" - \n")
text_file.write("\n\n\n")
# first get all of the issues, filtering by label
issues = {n: [] for n in LABELS_LIST}
# use the magic 'None' key for a proposal in progress
issues[None] = []
for prop in getbylabel('proposal'):
print("%s: %s" % (prop['number'], [l['name'] for l in prop['labels']]))
found_label = False
for label in prop['labels']:
label_name = label['name']
if label_name in issues:
issues[label_name].append(prop)
found_label = True
# if it doesn't have any other label, assume it's work-in-progress
if not found_label:
issues[None].append(prop)
text_file = open("specification/proposals.rst", "w")
text_file.write("Tables of Tracked Proposals\n---------------------------\n\n")
print_issue_list(text_file, "<work-in-progress>", issues[None])
for label in LABELS_LIST:
print_issue_list(text_file, label, issues[label])
text_file.write("\n")
for author in authors:
text_file.write("\n.. _" + author + ": https://github.com/" + author[1:])
for pr in prs:
text_file.write("\n.. _PR" + pr + ": https://github.com/matrix-org/matrix-doc/pull/" + pr.replace('#', ''))
text_file.close()
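One detail worth noting from the script above: getpage relies on the requests library parsing GitHub's Link header into resp.links, a dict keyed by rel type. A trimmed sketch (the query string is illustrative):

import requests

resp = requests.get(
    "https://api.github.com/repos/matrix-org/matrix-doc/issues?labels=proposal")
# resp.links is parsed from the Link response header, e.g.:
#   {'next': {'url': '...&page=2', 'rel': 'next'},
#    'last': {'url': '...&page=10', 'rel': 'last'}}
for rel, link in resp.links.items():
    print(rel, link["url"])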

@@ -1,38 +0,0 @@
#! /bin/bash
set -ex
cd `dirname $0`/..
virtualenv -p python3 env
. env/bin/activate
# Print out the python versions for debugging purposes
python --version
pip --version
# Install python dependencies
pip install -r scripts/requirements.txt
# Install node dependencies
npm install --prefix=scripts
# do sanity checks on the examples and swagger
scripts/check-event-schema-examples.py
scripts/check-swagger-sources.py
node scripts/validator.js --schema "data/api/client-server"
: ${GOPATH:=${WORKSPACE}/.gopath}
mkdir -p "${GOPATH}"
export GOPATH
go get github.com/hashicorp/golang-lru
go get gopkg.in/fsnotify/fsnotify.v1
# make sure that the scripts build
(cd scripts/continuserv && go build)
(cd scripts/speculator && go build)
# build the spec for matrix.org.
# (we don't actually use it on travis, but it's still useful to check we
# can build it. On Buildkite, this is then used to deploy to matrix.org).
./scripts/generate-matrix-org-assets