Experimental: mitogen: Remove minification of Python source code

Done on the basis that sending a few more bytes over the network is quicker than
tokenizing and untokenizing the modules.
pull/942/head
Alex Willmer 2 years ago
parent ec212a10d8
commit 2c7f2b4a99
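
Editor's note, not part of the commit: a rough sketch of the tradeoff the message describes, comparing the CPU cost of one minify pass against the compressed bytes it saves. It assumes the pre-commit mitogen.minify module is still importable.

# Not part of the commit: rough benchmark of the tradeoff, assuming the
# pre-commit mitogen.minify is importable. Times the tokenize/untokenize
# minify pass and reports the compressed bytes it would have saved.
import inspect
import time
import zlib

import mitogen.core
import mitogen.minify  # removed by this commit

source = inspect.getsource(mitogen.core)
t0 = time.time()
minimized = mitogen.minify.minimize_source(source)
minify_secs = time.time() - t0

raw_size = len(zlib.compress(source.encode('utf-8'), 9))
min_size = len(zlib.compress(minimized.encode('utf-8'), 9))
print('minify: %.3fs CPU, %d compressed bytes saved'
      % (minify_secs, raw_size - min_size))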

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 """
 These classes implement execution for each style of Ansible module. They are

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 """
 Classes in this file define Mitogen 'services' that run (initially) within the

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 """
 Helper functions intended to be executed on the target. These are entrypoints

@@ -273,10 +273,6 @@ Helpers
 .. currentmodule:: mitogen.master
 .. autofunction:: get_child_modules
-.. currentmodule:: mitogen.minify
-.. autofunction:: minimize_source
 .. _signals:
 Signals

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 """
 On the Mitogen master, this is imported from ``mitogen/__init__.py`` as would

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import logging

@@ -1,6 +1,5 @@
 """Utilities to support packages."""
-# !mitogen: minify_safe
 # NOTE: This module must remain compatible with Python 2.3, as it is shared
 # by setuptools for distribution with Python 2.3 and up.

@@ -1,453 +0,0 @@
-"""Tokenization help for Python programs.
-generate_tokens(readline) is a generator that breaks a stream of
-text into Python tokens. It accepts a readline-like method which is called
-repeatedly to get the next line of input (or "" for EOF). It generates
-5-tuples with these members:
-    the token type (see token.py)
-    the token (a string)
-    the starting (row, column) indices of the token (a 2-tuple of ints)
-    the ending (row, column) indices of the token (a 2-tuple of ints)
-    the original line (string)
-It is designed to match the working of the Python tokenizer exactly, except
-that it produces COMMENT tokens for comments and gives type OP for all
-operators
-Older entry points
-    tokenize_loop(readline, tokeneater)
-    tokenize(readline, tokeneater=printtoken)
-are the same, except instead of generating tokens, tokeneater is a callback
-function to which the 5 fields described above are passed as 5 arguments,
-each time a new token is found."""
-# !mitogen: minify_safe
-__author__ = 'Ka-Ping Yee <ping@lfw.org>'
-__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
-               'Skip Montanaro, Raymond Hettinger')
-from itertools import chain
-import string, re
-from token import *
-import token
-__all__ = [x for x in dir(token) if not x.startswith("_")]
-__all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
-del token
-COMMENT = N_TOKENS
-tok_name[COMMENT] = 'COMMENT'
-NL = N_TOKENS + 1
-tok_name[NL] = 'NL'
-N_TOKENS += 2
-def group(*choices): return '(' + '|'.join(choices) + ')'
-def any(*choices): return group(*choices) + '*'
-def maybe(*choices): return group(*choices) + '?'
-Whitespace = r'[ \f\t]*'
-Comment = r'#[^\r\n]*'
-Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
-Name = r'[a-zA-Z_]\w*'
-Hexnumber = r'0[xX][\da-fA-F]+[lL]?'
-Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?'
-Binnumber = r'0[bB][01]+[lL]?'
-Decnumber = r'[1-9]\d*[lL]?'
-Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
-Exponent = r'[eE][-+]?\d+'
-Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
-Expfloat = r'\d+' + Exponent
-Floatnumber = group(Pointfloat, Expfloat)
-Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
-Number = group(Imagnumber, Floatnumber, Intnumber)
-# Tail end of ' string.
-Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
-# Tail end of " string.
-Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
-# Tail end of ''' string.
-Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
-# Tail end of """ string.
-Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
-# Single-line ' or " string.
-String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
-               r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
-# Because of leftmost-then-longest match semantics, be sure to put the
-# longest operators first (e.g., if = came before ==, == would get
-# recognized as two instances of =).
-Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
-                 r"//=?",
-                 r"[+\-*/%&|^=<>]=?",
-                 r"~")
-Bracket = '[][(){}]'
-Special = group(r'\r?\n', r'[:;.,`@]')
-Funny = group(Operator, Bracket, Special)
-PlainToken = group(Number, Funny, String, Name)
-Token = Ignore + PlainToken
-# First (or only) line of ' or " string.
-ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
-                group("'", r'\\\r?\n'),
-                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
-                group('"', r'\\\r?\n'))
-PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
-PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
-tokenprog, pseudoprog, single3prog, double3prog = map(
-    re.compile, (Token, PseudoToken, Single3, Double3))
-endprogs = {"'": re.compile(Single), '"': re.compile(Double),
-            "'''": single3prog, '"""': double3prog,
-            "r'''": single3prog, 'r"""': double3prog,
-            "u'''": single3prog, 'u"""': double3prog,
-            "ur'''": single3prog, 'ur"""': double3prog,
-            "R'''": single3prog, 'R"""': double3prog,
-            "U'''": single3prog, 'U"""': double3prog,
-            "uR'''": single3prog, 'uR"""': double3prog,
-            "Ur'''": single3prog, 'Ur"""': double3prog,
-            "UR'''": single3prog, 'UR"""': double3prog,
-            "b'''": single3prog, 'b"""': double3prog,
-            "br'''": single3prog, 'br"""': double3prog,
-            "B'''": single3prog, 'B"""': double3prog,
-            "bR'''": single3prog, 'bR"""': double3prog,
-            "Br'''": single3prog, 'Br"""': double3prog,
-            "BR'''": single3prog, 'BR"""': double3prog,
-            'r': None, 'R': None, 'u': None, 'U': None,
-            'b': None, 'B': None}
-triple_quoted = {}
-for t in ("'''", '"""',
-          "r'''", 'r"""', "R'''", 'R"""',
-          "u'''", 'u"""', "U'''", 'U"""',
-          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
-          "uR'''", 'uR"""', "UR'''", 'UR"""',
-          "b'''", 'b"""', "B'''", 'B"""',
-          "br'''", 'br"""', "Br'''", 'Br"""',
-          "bR'''", 'bR"""', "BR'''", 'BR"""'):
-    triple_quoted[t] = t
-single_quoted = {}
-for t in ("'", '"',
-          "r'", 'r"', "R'", 'R"',
-          "u'", 'u"', "U'", 'U"',
-          "ur'", 'ur"', "Ur'", 'Ur"',
-          "uR'", 'uR"', "UR'", 'UR"',
-          "b'", 'b"', "B'", 'B"',
-          "br'", 'br"', "Br'", 'Br"',
-          "bR'", 'bR"', "BR'", 'BR"' ):
-    single_quoted[t] = t
-tabsize = 8
-class TokenError(Exception): pass
-class StopTokenizing(Exception): pass
-def printtoken(type, token, srow_scol, erow_ecol, line): # for testing
-    srow, scol = srow_scol
-    erow, ecol = erow_ecol
-    print("%d,%d-%d,%d:\t%s\t%s" % \
-        (srow, scol, erow, ecol, tok_name[type], repr(token)))
-def tokenize(readline, tokeneater=printtoken):
-    """
-    The tokenize() function accepts two parameters: one representing the
-    input stream, and one providing an output mechanism for tokenize().
-    The first parameter, readline, must be a callable object which provides
-    the same interface as the readline() method of built-in file objects.
-    Each call to the function should return one line of input as a string.
-    The second parameter, tokeneater, must also be a callable object. It is
-    called once for each token, with five arguments, corresponding to the
-    tuples generated by generate_tokens().
-    """
-    try:
-        tokenize_loop(readline, tokeneater)
-    except StopTokenizing:
-        pass
-# backwards compatible interface
-def tokenize_loop(readline, tokeneater):
-    for token_info in generate_tokens(readline):
-        tokeneater(*token_info)
-class Untokenizer:
-    def __init__(self):
-        self.tokens = []
-        self.prev_row = 1
-        self.prev_col = 0
-    def add_whitespace(self, start):
-        row, col = start
-        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
-            raise ValueError("start ({},{}) precedes previous end ({},{})"
-                             .format(row, col, self.prev_row, self.prev_col))
-        row_offset = row - self.prev_row
-        if row_offset:
-            self.tokens.append("\\\n" * row_offset)
-            self.prev_col = 0
-        col_offset = col - self.prev_col
-        if col_offset:
-            self.tokens.append(" " * col_offset)
-    def untokenize(self, iterable):
-        it = iter(iterable)
-        indents = []
-        startline = False
-        for t in it:
-            if len(t) == 2:
-                self.compat(t, it)
-                break
-            tok_type, token, start, end, line = t
-            if tok_type == ENDMARKER:
-                break
-            if tok_type == INDENT:
-                indents.append(token)
-                continue
-            elif tok_type == DEDENT:
-                indents.pop()
-                self.prev_row, self.prev_col = end
-                continue
-            elif tok_type in (NEWLINE, NL):
-                startline = True
-            elif startline and indents:
-                indent = indents[-1]
-                if start[1] >= len(indent):
-                    self.tokens.append(indent)
-                    self.prev_col = len(indent)
-                startline = False
-            self.add_whitespace(start)
-            self.tokens.append(token)
-            self.prev_row, self.prev_col = end
-            if tok_type in (NEWLINE, NL):
-                self.prev_row += 1
-                self.prev_col = 0
-        return "".join(self.tokens)
-    def compat(self, token, iterable):
-        indents = []
-        toks_append = self.tokens.append
-        startline = token[0] in (NEWLINE, NL)
-        prevstring = False
-        for tok in chain([token], iterable):
-            toknum, tokval = tok[:2]
-            if toknum in (NAME, NUMBER):
-                tokval += ' '
-            # Insert a space between two consecutive strings
-            if toknum == STRING:
-                if prevstring:
-                    tokval = ' ' + tokval
-                prevstring = True
-            else:
-                prevstring = False
-            if toknum == INDENT:
-                indents.append(tokval)
-                continue
-            elif toknum == DEDENT:
-                indents.pop()
-                continue
-            elif toknum in (NEWLINE, NL):
-                startline = True
-            elif startline and indents:
-                toks_append(indents[-1])
-                startline = False
-            toks_append(tokval)
-def untokenize(iterable):
-    """Transform tokens back into Python source code.
-    Each element returned by the iterable must be a token sequence
-    with at least two elements, a token number and token value. If
-    only two tokens are passed, the resulting output is poor.
-    Round-trip invariant for full input:
-        Untokenized source will match input source exactly
-    Round-trip invariant for limited input:
-        # Output text will tokenize back to the input
-        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
-        newcode = untokenize(t1)
-        readline = iter(newcode.splitlines(1)).next
-        t2 = [tok[:2] for tok in generate_tokens(readline)]
-        assert t1 == t2
-    """
-    ut = Untokenizer()
-    return ut.untokenize(iterable)
-def generate_tokens(readline):
-    """
-    The generate_tokens() generator requires one argument, readline, which
-    must be a callable object which provides the same interface as the
-    readline() method of built-in file objects. Each call to the function
-    should return one line of input as a string. Alternately, readline
-    can be a callable function terminating with StopIteration:
-        readline = open(myfile).next # Example of alternate readline
-    The generator produces 5-tuples with these members: the token type; the
-    token string; a 2-tuple (srow, scol) of ints specifying the row and
-    column where the token begins in the source; a 2-tuple (erow, ecol) of
-    ints specifying the row and column where the token ends in the source;
-    and the line on which the token was found. The line passed is the
-    logical line; continuation lines are included.
-    """
-    lnum = parenlev = continued = 0
-    namechars, numchars = string.ascii_letters + '_', '0123456789'
-    contstr, needcont = '', 0
-    contline = None
-    indents = [0]
-    while 1:                                   # loop over lines in stream
-        try:
-            line = readline()
-        except StopIteration:
-            line = ''
-        lnum += 1
-        pos, max = 0, len(line)
-        if contstr:                            # continued string
-            if not line:
-                raise TokenError("EOF in multi-line string", strstart)
-            endmatch = endprog.match(line)
-            if endmatch:
-                pos = end = endmatch.end(0)
-                yield (STRING, contstr + line[:end],
-                       strstart, (lnum, end), contline + line)
-                contstr, needcont = '', 0
-                contline = None
-            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
-                yield (ERRORTOKEN, contstr + line,
-                       strstart, (lnum, len(line)), contline)
-                contstr = ''
-                contline = None
-                continue
-            else:
-                contstr = contstr + line
-                contline = contline + line
-                continue
-        elif parenlev == 0 and not continued:  # new statement
-            if not line: break
-            column = 0
-            while pos < max:                   # measure leading whitespace
-                if line[pos] == ' ':
-                    column += 1
-                elif line[pos] == '\t':
-                    column = (column//tabsize + 1)*tabsize
-                elif line[pos] == '\f':
-                    column = 0
-                else:
-                    break
-                pos += 1
-            if pos == max:
-                break
-            if line[pos] in '#\r\n':           # skip comments or blank lines
-                if line[pos] == '#':
-                    comment_token = line[pos:].rstrip('\r\n')
-                    nl_pos = pos + len(comment_token)
-                    yield (COMMENT, comment_token,
-                           (lnum, pos), (lnum, pos + len(comment_token)), line)
-                    yield (NL, line[nl_pos:],
-                           (lnum, nl_pos), (lnum, len(line)), line)
-                else:
-                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
-                           (lnum, pos), (lnum, len(line)), line)
-                continue
-            if column > indents[-1]:           # count indents or dedents
-                indents.append(column)
-                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
-            while column < indents[-1]:
-                if column not in indents:
-                    raise IndentationError(
-                        "unindent does not match any outer indentation level",
-                        ("<tokenize>", lnum, pos, line))
-                indents = indents[:-1]
-                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
-        else:                                  # continued statement
-            if not line:
-                raise TokenError("EOF in multi-line statement", (lnum, 0))
-            continued = 0
-        while pos < max:
-            pseudomatch = pseudoprog.match(line, pos)
-            if pseudomatch:                    # scan for tokens
-                start, end = pseudomatch.span(1)
-                spos, epos, pos = (lnum, start), (lnum, end), end
-                if start == end:
-                    continue
-                token, initial = line[start:end], line[start]
-                if initial in numchars or \
-                   (initial == '.' and token != '.'):  # ordinary number
-                    yield (NUMBER, token, spos, epos, line)
-                elif initial in '\r\n':
-                    if parenlev > 0:
-                        n = NL
-                    else:
-                        n = NEWLINE
-                    yield (n, token, spos, epos, line)
-                elif initial == '#':
-                    assert not token.endswith("\n")
-                    yield (COMMENT, token, spos, epos, line)
-                elif token in triple_quoted:
-                    endprog = endprogs[token]
-                    endmatch = endprog.match(line, pos)
-                    if endmatch:               # all on one line
-                        pos = endmatch.end(0)
-                        token = line[start:pos]
-                        yield (STRING, token, spos, (lnum, pos), line)
-                    else:
-                        strstart = (lnum, start)  # multiple lines
-                        contstr = line[start:]
-                        contline = line
-                        break
-                elif initial in single_quoted or \
-                     token[:2] in single_quoted or \
-                     token[:3] in single_quoted:
-                    if token[-1] == '\n':      # continued string
-                        strstart = (lnum, start)
-                        endprog = (endprogs[initial] or endprogs[token[1]] or
-                                   endprogs[token[2]])
-                        contstr, needcont = line[start:], 1
-                        contline = line
-                        break
-                    else:                      # ordinary string
-                        yield (STRING, token, spos, epos, line)
-                elif initial in namechars:     # ordinary name
-                    yield (NAME, token, spos, epos, line)
-                elif initial == '\\':          # continued stmt
-                    continued = 1
-                else:
-                    if initial in '([{':
-                        parenlev += 1
-                    elif initial in ')]}':
-                        parenlev -= 1
-                    yield (OP, token, spos, epos, line)
-            else:
-                yield (ERRORTOKEN, line[pos],
-                       (lnum, pos), (lnum, pos+1), line)
-                pos += 1
-    for indent in indents[1:]:                 # pop remaining indent levels
-        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
-    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
-if __name__ == '__main__':                     # testing
-    import sys
-    if len(sys.argv) > 1:
-        tokenize(open(sys.argv[1]).readline)
-    else:
-        tokenize(sys.stdin.readline)
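
Editor's note, not part of the commit: a minimal usage sketch of the 5-tuple token API described in the docstrings above. The stdlib tokenize module exposes the same generate_tokens()/untokenize() interface as this removed compatibility copy.

# Not part of the commit: demonstrates the 5-tuple token stream and the
# untokenize() round trip described above, using the stdlib tokenize module.
import tokenize
try:
    from io import StringIO          # Python 3
except ImportError:
    from StringIO import StringIO    # Python 2

source = u"x = 1  # a comment\n"
tokens = list(tokenize.generate_tokens(StringIO(source).readline))
for typ, tok, start, end, line in tokens:
    print(tokenize.tok_name[typ], repr(tok), start, end)
# With full 5-tuples, untokenize() reproduces the input exactly.
assert tokenize.untokenize(tokens) == source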

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 """
 This module implements most package functionality, but remains separate from
@@ -1265,7 +1264,6 @@ class Importer(object):
             'lxc',
             'lxd',
             'master',
-            'minify',
             'os_fork',
             'parent',
             'podman',

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 """
 Basic signal handler for dumping thread stacks.

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import logging
 import re

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import logging

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 """
 :mod:`mitogen.fakessh` is a stream implementation that starts a subprocess with

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import errno
 import logging

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import mitogen.core
 import mitogen.parent

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import mitogen.parent

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import mitogen.parent

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import mitogen.parent

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 """
 This module implements functionality required by master processes, such as
@@ -62,7 +61,6 @@ if not hasattr(pkgutil, 'find_loader'):
 import mitogen
 import mitogen.core
-import mitogen.minify
 import mitogen.parent
 from mitogen.core import b
@@ -231,7 +229,7 @@ def _get_core_source():
     Master version of parent.get_core_source().
     """
     source = inspect.getsource(mitogen.core)
-    return mitogen.minify.minimize_source(source)
+    return source
 if mitogen.is_master:
@@ -980,8 +978,6 @@ class ModuleResponder(object):
         self.get_module_count = 0
         #: Total time spent in uncached GET_MODULE.
         self.get_module_secs = 0.0
-        #: Total time spent minifying modules.
-        self.minify_secs = 0.0
         #: Number of successful LOAD_MODULE messages sent.
         self.good_load_module_count = 0
         #: Total bytes in successful LOAD_MODULE payloads.
@@ -1040,8 +1036,6 @@ class ModuleResponder(object):
     def _make_negative_response(self, fullname):
         return (fullname, None, None, None, ())
-    minify_safe_re = re.compile(b(r'\s+#\s*!mitogen:\s*minify_safe'))
     def _build_tuple(self, fullname):
         if fullname in self._cache:
             return self._cache[fullname]
@@ -1066,12 +1060,6 @@ class ModuleResponder(object):
             self._cache[fullname] = tup
             return tup
-        if self.minify_safe_re.search(source):
-            # If the module contains a magic marker, it's safe to minify.
-            t0 = mitogen.core.now()
-            source = mitogen.minify.minimize_source(source).encode('utf-8')
-            self.minify_secs += mitogen.core.now() - t0
         if is_pkg:
             pkg_present = get_child_modules(path, fullname)
             self._log.debug('%s is a package at %s with submodules %r',
@@ -1310,7 +1298,6 @@ class Router(mitogen.parent.Router):
         super(Router, self)._on_broker_exit()
         dct = self.get_stats()
         dct['self'] = self
-        dct['minify_ms'] = 1000 * dct['minify_secs']
         dct['get_module_ms'] = 1000 * dct['get_module_secs']
         dct['good_load_module_size_kb'] = dct['good_load_module_size'] / 1024.0
         dct['good_load_module_size_avg'] = (
@@ -1325,7 +1312,6 @@ class Router(mitogen.parent.Router):
             '%(get_module_count)d module requests in '
             '%(get_module_ms)d ms, '
             '%(good_load_module_count)d sent '
-            '(%(minify_ms)d ms minify time), '
             '%(bad_load_module_count)d negative responses. '
             'Sent %(good_load_module_size_kb).01f kb total, '
             '%(good_load_module_size_avg).01f kb avg.'
@@ -1350,8 +1336,6 @@ class Router(mitogen.parent.Router):
           :data:`mitogen.core.LOAD_MODULE` message payloads.
         * `bad_load_module_count`: Integer count of negative
           :data:`mitogen.core.LOAD_MODULE` messages sent.
-        * `minify_secs`: CPU seconds spent minifying modules marked
-          minify-safe.
         """
         return {
             'get_module_count': self.responder.get_module_count,
@@ -1359,7 +1343,6 @@ class Router(mitogen.parent.Router):
             'good_load_module_count': self.responder.good_load_module_count,
             'good_load_module_size': self.responder.good_load_module_size,
             'bad_load_module_count': self.responder.bad_load_module_count,
-            'minify_secs': self.responder.minify_secs,
         }
     def enable_debug(self):

@@ -1,143 +0,0 @@
-# Copyright 2017, Alex Willmer
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# 1. Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# 3. Neither the name of the copyright holder nor the names of its contributors
-# may be used to endorse or promote products derived from this software without
-# specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
-import sys
-try:
-    from io import StringIO
-except ImportError:
-    from StringIO import StringIO
-import mitogen.core
-if sys.version_info < (2, 7, 11):
-    from mitogen.compat import tokenize
-else:
-    import tokenize
-def minimize_source(source):
-    """
-    Remove comments and docstrings from Python `source`, preserving line
-    numbers and syntax of empty blocks.
-    :param str source:
-        The source to minimize.
-    :returns str:
-        The minimized source.
-    """
-    source = mitogen.core.to_text(source)
-    tokens = tokenize.generate_tokens(StringIO(source).readline)
-    tokens = strip_comments(tokens)
-    tokens = strip_docstrings(tokens)
-    tokens = reindent(tokens)
-    return tokenize.untokenize(tokens)
-def strip_comments(tokens):
-    """
-    Drop comment tokens from a `tokenize` stream.
-    Comments on lines 1-2 are kept, to preserve hashbang and encoding.
-    Trailing whitespace is removed from all lines.
-    """
-    prev_typ = None
-    prev_end_col = 0
-    for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:
-        if typ in (tokenize.NL, tokenize.NEWLINE):
-            if prev_typ in (tokenize.NL, tokenize.NEWLINE):
-                start_col = 0
-            else:
-                start_col = prev_end_col
-            end_col = start_col + 1
-        elif typ == tokenize.COMMENT and start_row > 2:
-            continue
-        prev_typ = typ
-        prev_end_col = end_col
-        yield typ, tok, (start_row, start_col), (end_row, end_col), line
-def strip_docstrings(tokens):
-    """
-    Replace docstring tokens with NL tokens in a `tokenize` stream.
-    Any STRING token not part of an expression is deemed a docstring.
-    Indented docstrings are not yet recognised.
-    """
-    stack = []
-    state = 'wait_string'
-    for t in tokens:
-        typ = t[0]
-        if state == 'wait_string':
-            if typ in (tokenize.NL, tokenize.COMMENT):
-                yield t
-            elif typ in (tokenize.DEDENT, tokenize.INDENT, tokenize.STRING):
-                stack.append(t)
-            elif typ == tokenize.NEWLINE:
-                stack.append(t)
-                start_line, end_line = stack[0][2][0], stack[-1][3][0]+1
-                for i in range(start_line, end_line):
-                    yield tokenize.NL, '\n', (i, 0), (i, 1), '\n'
-                for t in stack:
-                    if t[0] in (tokenize.DEDENT, tokenize.INDENT):
-                        yield t[0], t[1], (i+1, t[2][1]), (i+1, t[3][1]), t[4]
-                del stack[:]
-            else:
-                stack.append(t)
-                for t in stack: yield t
-                del stack[:]
-                state = 'wait_newline'
-        elif state == 'wait_newline':
-            if typ == tokenize.NEWLINE:
-                state = 'wait_string'
-            yield t
-def reindent(tokens, indent='    '):
-    """
-    Replace existing indentation in a token stream, with `indent`.
-    """
-    old_levels = []
-    old_level = 0
-    new_level = 0
-    for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:
-        if typ == tokenize.INDENT:
-            old_levels.append(old_level)
-            old_level = len(tok)
-            new_level += 1
-            tok = indent * new_level
-        elif typ == tokenize.DEDENT:
-            old_level = old_levels.pop()
-            new_level -= 1
-        start_col = max(0, start_col - old_level + new_level)
-        if start_row == end_row:
-            end_col = start_col + len(tok)
-        yield typ, tok, (start_row, start_col), (end_row, end_col), line
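
Editor's note, not part of the commit: a short usage sketch of the removed helper, assuming the pre-commit mitogen.minify module is importable. It shows the property the deleted tests below assert: output line count matches input, so tracebacks from minified modules still point at the right lines.

# Not part of the commit: illustrates the removed API. Comments after
# line 2 and docstrings are stripped, but the line count is preserved.
import mitogen.minify  # pre-commit mitogen assumed

source = (
    "import os\n"
    "\n"
    "def add(a, b):\n"
    "    '''Return a + b.'''\n"
    "    # add the operands\n"
    "    return a + b\n"
)
minimized = mitogen.minify.minimize_source(source)
assert minimized.count('\n') == source.count('\n')
print(minimized)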

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 """
 Support for operating in a mixed threading/forking environment.

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 """
 This module defines functionality common to master and parent processes. It is
@@ -189,7 +188,7 @@ def _get_core_source():
 def get_core_source_partial():
     """
-    _get_core_source() is expensive, even with @lru_cache in minify.py, threads
+    _get_core_source() is expensive, even with caching, threads
     can enter it simultaneously causing severe slowdowns.
     """
     global _core_source_partial

@@ -27,7 +27,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import logging

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 """
 mitogen.profiler

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import mitogen.core

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import grp
 import logging
@@ -212,7 +211,7 @@ class Error(Exception):
     """
     Raised when an error occurs configuring a service or pool.
     """
-    pass  # cope with minify_source() bug.
+    pass
 class Policy(object):

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import ctypes
 import grp

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 """
 Construct new children via the OpenSSH client.

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import logging
 import re

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import base64
 import logging

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 """
 Permit connection of additional contexts that may act with the authority of

@@ -26,7 +26,6 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
-# !mitogen: minify_safe
 import datetime
 import functools

@@ -11,7 +11,6 @@ import zlib
 import mitogen.fakessh
 import mitogen.fork
 import mitogen.master
-import mitogen.minify
 import mitogen.parent
 import mitogen.select
 import mitogen.service
@@ -58,8 +57,8 @@ for mod in (
 ):
     original = inspect.getsource(mod)
     original_size = len(original)
-    minimized = mitogen.minify.minimize_source(original)
-    minimized_size = len(minimized)
+    minimized = original
+    minimized_size = original_size
     compressed = zlib.compress(minimized.encode(), 9)
     compressed_size = len(compressed)
     print(

@@ -1,112 +0,0 @@
-import codecs
-import glob
-import pprint
-import sys
-import mitogen.minify
-import testlib
-def read_sample(fname):
-    sample_path = testlib.data_path('minimize_samples/' + fname)
-    sample_file = open(sample_path)
-    sample = sample_file.read()
-    sample_file.close()
-    return sample
-class MinimizeSourceTest(testlib.TestCase):
-    func = staticmethod(mitogen.minify.minimize_source)
-    def test_class(self):
-        original = read_sample('class.py')
-        expected = read_sample('class_min.py')
-        self.assertEqual(expected, self.func(original))
-    def test_comment(self):
-        original = read_sample('comment.py')
-        expected = read_sample('comment_min.py')
-        self.assertEqual(expected, self.func(original))
-    def test_def(self):
-        original = read_sample('def.py')
-        expected = read_sample('def_min.py')
-        self.assertEqual(expected, self.func(original))
-    def test_hashbang(self):
-        original = read_sample('hashbang.py')
-        expected = read_sample('hashbang_min.py')
-        self.assertEqual(expected, self.func(original))
-    def test_mod(self):
-        original = read_sample('mod.py')
-        expected = read_sample('mod_min.py')
-        self.assertEqual(expected, self.func(original))
-    def test_pass(self):
-        original = read_sample('pass.py')
-        expected = read_sample('pass_min.py')
-        self.assertEqual(expected, self.func(original))
-    def test_obstacle_course(self):
-        original = read_sample('obstacle_course.py')
-        expected = read_sample('obstacle_course_min.py')
-        self.assertEqual(expected, self.func(original))
-class MitogenCoreTest(testlib.TestCase):
-    # Verify minimize_source() succeeds for all built-in modules.
-    func = staticmethod(mitogen.minify.minimize_source)
-    def read_source(self, name):
-        fp = codecs.open(name, encoding='utf-8')
-        try:
-            return fp.read()
-        finally:
-            fp.close()
-    def _test_syntax_valid(self, minified, name):
-        compile(minified, name, 'exec')
-    def _test_line_counts_match(self, original, minified):
-        self.assertEqual(original.count('\n'),
-                         minified.count('\n'))
-    def _test_non_blank_lines_match(self, name, original, minified):
-        # Verify first token matches. We just want to ensure line numbers make
-        # sense, this is good enough.
-        olines = original.splitlines()
-        mlines = minified.splitlines()
-        for i, (orig, mini) in enumerate(zip(olines, mlines)):
-            if i < 2:
-                assert orig == mini
-                continue
-            owords = orig.split()
-            mwords = mini.split()
-            assert len(mwords) == 0 or (mwords[0] == owords[0]), pprint.pformat({
-                'line': i+1,
-                'filename': name,
-                'owords': owords,
-                'mwords': mwords,
-            })
-    PY_24_25_SKIP = [
-        # cProfile unsupported on 2.4, 2.6+ syntax is fine here.
-        'mitogen/profiler.py',
-    ]
-    def test_minify_all(self):
-        for name in glob.glob('mitogen/*.py'):
-            if name in self.PY_24_25_SKIP and sys.version_info < (2, 6):
-                continue
-            original = self.read_source(name)
-            try:
-                minified = self.func(original)
-            except Exception:
-                print('file was: ' + name)
-                raise
-            self._test_syntax_valid(minified, name)
-            self._test_line_counts_match(original, minified)
-            self._test_non_blank_lines_match(name, original, minified)

@@ -285,7 +285,6 @@ class FindRelatedTest(testlib.TestCase):
     if sys.version_info < (2, 7):
         SIMPLE_EXPECT.add('mitogen.compat')
-        SIMPLE_EXPECT.add('mitogen.compat.tokenize')
     if sys.version_info < (2, 6):
         SIMPLE_EXPECT.add('mitogen.compat')
         SIMPLE_EXPECT.add('mitogen.compat.pkgutil')

@@ -193,7 +193,7 @@ class ForwardTest(testlib.RouterMixin, testlib.TestCase):
         self.assertEqual(2+os_fork, self.router.responder.get_module_count)
         self.assertEqual(2+os_fork, self.router.responder.good_load_module_count)
         self.assertLess(10000, self.router.responder.good_load_module_size)
-        self.assertGreater(40000, self.router.responder.good_load_module_size)
+        self.assertGreater(41000, self.router.responder.good_load_module_size)
 class BlacklistTest(testlib.TestCase):
