From 1871f2a9b1a08688c5b75059af53a593f757261a Mon Sep 17 00:00:00 2001 From: Alex Willmer Date: Mon, 12 Sep 2022 19:53:09 +0100 Subject: [PATCH 1/3] Remove vendored mitogen.compat.simplejson Python 2.6 added json to the stdlib. We no longer support Python <= 2.7 in Mitogen 0.3.x, so this fallback is unneeded complexity. Fixes #659 --- ansible_mitogen/compat/simplejson/__init__.py | 318 ------------- ansible_mitogen/compat/simplejson/decoder.py | 354 -------------- ansible_mitogen/compat/simplejson/encoder.py | 440 ------------------ ansible_mitogen/compat/simplejson/scanner.py | 65 --- ansible_mitogen/process.py | 37 -- ansible_mitogen/runner.py | 7 +- ansible_mitogen/target.py | 11 +- docs/changelog.rst | 2 +- .../lib/modules/custom_python_os_getcwd.py | 5 +- .../modules/custom_python_want_json_module.py | 6 +- 10 files changed, 5 insertions(+), 1240 deletions(-) delete mode 100644 ansible_mitogen/compat/simplejson/__init__.py delete mode 100644 ansible_mitogen/compat/simplejson/decoder.py delete mode 100644 ansible_mitogen/compat/simplejson/encoder.py delete mode 100644 ansible_mitogen/compat/simplejson/scanner.py diff --git a/ansible_mitogen/compat/simplejson/__init__.py b/ansible_mitogen/compat/simplejson/__init__.py deleted file mode 100644 index d5b4d399..00000000 --- a/ansible_mitogen/compat/simplejson/__init__.py +++ /dev/null @@ -1,318 +0,0 @@ -r"""JSON (JavaScript Object Notation) is a subset of -JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data -interchange format. - -:mod:`simplejson` exposes an API familiar to users of the standard library -:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained -version of the :mod:`json` library contained in Python 2.6, but maintains -compatibility with Python 2.4 and Python 2.5 and (currently) has -significant performance advantages, even without using the optional C -extension for speedups. - -Encoding basic Python object hierarchies:: - - >>> import simplejson as json - >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) - '["foo", {"bar": ["baz", null, 1.0, 2]}]' - >>> print json.dumps("\"foo\bar") - "\"foo\bar" - >>> print json.dumps(u'\u1234') - "\u1234" - >>> print json.dumps('\\') - "\\" - >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True) - {"a": 0, "b": 0, "c": 0} - >>> from StringIO import StringIO - >>> io = StringIO() - >>> json.dump(['streaming API'], io) - >>> io.getvalue() - '["streaming API"]' - -Compact encoding:: - - >>> import simplejson as json - >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':')) - '[1,2,3,{"4":5,"6":7}]' - -Pretty printing:: - - >>> import simplejson as json - >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4) - >>> print '\n'.join([l.rstrip() for l in s.splitlines()]) - { - "4": 5, - "6": 7 - } - -Decoding JSON:: - - >>> import simplejson as json - >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}] - >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj - True - >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar' - True - >>> from StringIO import StringIO - >>> io = StringIO('["streaming API"]') - >>> json.load(io)[0] == 'streaming API' - True - -Specializing JSON object decoding:: - - >>> import simplejson as json - >>> def as_complex(dct): - ... if '__complex__' in dct: - ... return complex(dct['real'], dct['imag']) - ... return dct - ... - >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}', - ... 
object_hook=as_complex) - (1+2j) - >>> import decimal - >>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1') - True - -Specializing JSON object encoding:: - - >>> import simplejson as json - >>> def encode_complex(obj): - ... if isinstance(obj, complex): - ... return [obj.real, obj.imag] - ... raise TypeError(repr(o) + " is not JSON serializable") - ... - >>> json.dumps(2 + 1j, default=encode_complex) - '[2.0, 1.0]' - >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j) - '[2.0, 1.0]' - >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j)) - '[2.0, 1.0]' - - -Using simplejson.tool from the shell to validate and pretty-print:: - - $ echo '{"json":"obj"}' | python -m simplejson.tool - { - "json": "obj" - } - $ echo '{ 1.2:3.4}' | python -m simplejson.tool - Expecting property name: line 1 column 2 (char 2) -""" -__version__ = '2.0.9' -__all__ = [ - 'dump', 'dumps', 'load', 'loads', - 'JSONDecoder', 'JSONEncoder', -] - -__author__ = 'Bob Ippolito ' - -from decoder import JSONDecoder -from encoder import JSONEncoder - -_default_encoder = JSONEncoder( - skipkeys=False, - ensure_ascii=True, - check_circular=True, - allow_nan=True, - indent=None, - separators=None, - encoding='utf-8', - default=None, -) - -def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, - allow_nan=True, cls=None, indent=None, separators=None, - encoding='utf-8', default=None, **kw): - """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a - ``.write()``-supporting file-like object). - - If ``skipkeys`` is true then ``dict`` keys that are not basic types - (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) - will be skipped instead of raising a ``TypeError``. - - If ``ensure_ascii`` is false, then the some chunks written to ``fp`` - may be ``unicode`` instances, subject to normal Python ``str`` to - ``unicode`` coercion rules. Unless ``fp.write()`` explicitly - understands ``unicode`` (as in ``codecs.getwriter()``) this is likely - to cause an error. - - If ``check_circular`` is false, then the circular reference check - for container types will be skipped and a circular reference will - result in an ``OverflowError`` (or worse). - - If ``allow_nan`` is false, then it will be a ``ValueError`` to - serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) - in strict compliance of the JSON specification, instead of using the - JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). - - If ``indent`` is a non-negative integer, then JSON array elements and object - members will be pretty-printed with that indent level. An indent level - of 0 will only insert newlines. ``None`` is the most compact representation. - - If ``separators`` is an ``(item_separator, dict_separator)`` tuple - then it will be used instead of the default ``(', ', ': ')`` separators. - ``(',', ':')`` is the most compact JSON representation. - - ``encoding`` is the character encoding for str instances, default is UTF-8. - - ``default(obj)`` is a function that should return a serializable version - of obj or raise TypeError. The default simply raises TypeError. - - To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the - ``.default()`` method to serialize additional types), specify it with - the ``cls`` kwarg. 
- - """ - # cached encoder - if (not skipkeys and ensure_ascii and - check_circular and allow_nan and - cls is None and indent is None and separators is None and - encoding == 'utf-8' and default is None and not kw): - iterable = _default_encoder.iterencode(obj) - else: - if cls is None: - cls = JSONEncoder - iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, - check_circular=check_circular, allow_nan=allow_nan, indent=indent, - separators=separators, encoding=encoding, - default=default, **kw).iterencode(obj) - # could accelerate with writelines in some versions of Python, at - # a debuggability cost - for chunk in iterable: - fp.write(chunk) - - -def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, - allow_nan=True, cls=None, indent=None, separators=None, - encoding='utf-8', default=None, **kw): - """Serialize ``obj`` to a JSON formatted ``str``. - - If ``skipkeys`` is false then ``dict`` keys that are not basic types - (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) - will be skipped instead of raising a ``TypeError``. - - If ``ensure_ascii`` is false, then the return value will be a - ``unicode`` instance subject to normal Python ``str`` to ``unicode`` - coercion rules instead of being escaped to an ASCII ``str``. - - If ``check_circular`` is false, then the circular reference check - for container types will be skipped and a circular reference will - result in an ``OverflowError`` (or worse). - - If ``allow_nan`` is false, then it will be a ``ValueError`` to - serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in - strict compliance of the JSON specification, instead of using the - JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). - - If ``indent`` is a non-negative integer, then JSON array elements and - object members will be pretty-printed with that indent level. An indent - level of 0 will only insert newlines. ``None`` is the most compact - representation. - - If ``separators`` is an ``(item_separator, dict_separator)`` tuple - then it will be used instead of the default ``(', ', ': ')`` separators. - ``(',', ':')`` is the most compact JSON representation. - - ``encoding`` is the character encoding for str instances, default is UTF-8. - - ``default(obj)`` is a function that should return a serializable version - of obj or raise TypeError. The default simply raises TypeError. - - To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the - ``.default()`` method to serialize additional types), specify it with - the ``cls`` kwarg. - - """ - # cached encoder - if (not skipkeys and ensure_ascii and - check_circular and allow_nan and - cls is None and indent is None and separators is None and - encoding == 'utf-8' and default is None and not kw): - return _default_encoder.encode(obj) - if cls is None: - cls = JSONEncoder - return cls( - skipkeys=skipkeys, ensure_ascii=ensure_ascii, - check_circular=check_circular, allow_nan=allow_nan, indent=indent, - separators=separators, encoding=encoding, default=default, - **kw).encode(obj) - - -_default_decoder = JSONDecoder(encoding=None, object_hook=None) - - -def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, - parse_int=None, parse_constant=None, **kw): - """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing - a JSON document) to a Python object. - - If the contents of ``fp`` is encoded with an ASCII based encoding other - than utf-8 (e.g. 
latin-1), then an appropriate ``encoding`` name must - be specified. Encodings that are not ASCII based (such as UCS-2) are - not allowed, and should be wrapped with - ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode`` - object and passed to ``loads()`` - - ``object_hook`` is an optional function that will be called with the - result of any object literal decode (a ``dict``). The return value of - ``object_hook`` will be used instead of the ``dict``. This feature - can be used to implement custom decoders (e.g. JSON-RPC class hinting). - - To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` - kwarg. - - """ - return loads(fp.read(), - encoding=encoding, cls=cls, object_hook=object_hook, - parse_float=parse_float, parse_int=parse_int, - parse_constant=parse_constant, **kw) - - -def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None, - parse_int=None, parse_constant=None, **kw): - """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON - document) to a Python object. - - If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding - other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name - must be specified. Encodings that are not ASCII based (such as UCS-2) - are not allowed and should be decoded to ``unicode`` first. - - ``object_hook`` is an optional function that will be called with the - result of any object literal decode (a ``dict``). The return value of - ``object_hook`` will be used instead of the ``dict``. This feature - can be used to implement custom decoders (e.g. JSON-RPC class hinting). - - ``parse_float``, if specified, will be called with the string - of every JSON float to be decoded. By default this is equivalent to - float(num_str). This can be used to use another datatype or parser - for JSON floats (e.g. decimal.Decimal). - - ``parse_int``, if specified, will be called with the string - of every JSON int to be decoded. By default this is equivalent to - int(num_str). This can be used to use another datatype or parser - for JSON integers (e.g. float). - - ``parse_constant``, if specified, will be called with one of the - following strings: -Infinity, Infinity, NaN, null, true, false. - This can be used to raise an exception if invalid JSON numbers - are encountered. - - To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` - kwarg. 
- - """ - if (cls is None and encoding is None and object_hook is None and - parse_int is None and parse_float is None and - parse_constant is None and not kw): - return _default_decoder.decode(s) - if cls is None: - cls = JSONDecoder - if object_hook is not None: - kw['object_hook'] = object_hook - if parse_float is not None: - kw['parse_float'] = parse_float - if parse_int is not None: - kw['parse_int'] = parse_int - if parse_constant is not None: - kw['parse_constant'] = parse_constant - return cls(encoding=encoding, **kw).decode(s) diff --git a/ansible_mitogen/compat/simplejson/decoder.py b/ansible_mitogen/compat/simplejson/decoder.py deleted file mode 100644 index b769ea48..00000000 --- a/ansible_mitogen/compat/simplejson/decoder.py +++ /dev/null @@ -1,354 +0,0 @@ -"""Implementation of JSONDecoder -""" -import re -import sys -import struct - -from simplejson.scanner import make_scanner -try: - from simplejson._speedups import scanstring as c_scanstring -except ImportError: - c_scanstring = None - -__all__ = ['JSONDecoder'] - -FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL - -def _floatconstants(): - _BYTES = '7FF80000000000007FF0000000000000'.decode('hex') - if sys.byteorder != 'big': - _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1] - nan, inf = struct.unpack('dd', _BYTES) - return nan, inf, -inf - -NaN, PosInf, NegInf = _floatconstants() - - -def linecol(doc, pos): - lineno = doc.count('\n', 0, pos) + 1 - if lineno == 1: - colno = pos - else: - colno = pos - doc.rindex('\n', 0, pos) - return lineno, colno - - -def errmsg(msg, doc, pos, end=None): - # Note that this function is called from _speedups - lineno, colno = linecol(doc, pos) - if end is None: - #fmt = '{0}: line {1} column {2} (char {3})' - #return fmt.format(msg, lineno, colno, pos) - fmt = '%s: line %d column %d (char %d)' - return fmt % (msg, lineno, colno, pos) - endlineno, endcolno = linecol(doc, end) - #fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})' - #return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end) - fmt = '%s: line %d column %d - line %d column %d (char %d - %d)' - return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end) - - -_CONSTANTS = { - '-Infinity': NegInf, - 'Infinity': PosInf, - 'NaN': NaN, -} - -STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS) -BACKSLASH = { - '"': u'"', '\\': u'\\', '/': u'/', - 'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t', -} - -DEFAULT_ENCODING = "utf-8" - -def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match): - """Scan the string s for a JSON string. End is the index of the - character in s after the quote that started the JSON string. - Unescapes all valid JSON string escape sequences and raises ValueError - on attempt to decode an invalid string. If strict is False then literal - control characters are allowed in the string. 
- - Returns a tuple of the decoded string and the index of the character in s - after the end quote.""" - if encoding is None: - encoding = DEFAULT_ENCODING - chunks = [] - _append = chunks.append - begin = end - 1 - while 1: - chunk = _m(s, end) - if chunk is None: - raise ValueError( - errmsg("Unterminated string starting at", s, begin)) - end = chunk.end() - content, terminator = chunk.groups() - # Content is contains zero or more unescaped string characters - if content: - if not isinstance(content, unicode): - content = unicode(content, encoding) - _append(content) - # Terminator is the end of string, a literal control character, - # or a backslash denoting that an escape sequence follows - if terminator == '"': - break - elif terminator != '\\': - if strict: - msg = "Invalid control character %r at" % (terminator,) - #msg = "Invalid control character {0!r} at".format(terminator) - raise ValueError(errmsg(msg, s, end)) - else: - _append(terminator) - continue - try: - esc = s[end] - except IndexError: - raise ValueError( - errmsg("Unterminated string starting at", s, begin)) - # If not a unicode escape sequence, must be in the lookup table - if esc != 'u': - try: - char = _b[esc] - except KeyError: - msg = "Invalid \\escape: " + repr(esc) - raise ValueError(errmsg(msg, s, end)) - end += 1 - else: - # Unicode escape sequence - esc = s[end + 1:end + 5] - next_end = end + 5 - if len(esc) != 4: - msg = "Invalid \\uXXXX escape" - raise ValueError(errmsg(msg, s, end)) - uni = int(esc, 16) - # Check for surrogate pair on UCS-4 systems - if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535: - msg = "Invalid \\uXXXX\\uXXXX surrogate pair" - if not s[end + 5:end + 7] == '\\u': - raise ValueError(errmsg(msg, s, end)) - esc2 = s[end + 7:end + 11] - if len(esc2) != 4: - raise ValueError(errmsg(msg, s, end)) - uni2 = int(esc2, 16) - uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00)) - next_end += 6 - char = unichr(uni) - end = next_end - # Append the unescaped character - _append(char) - return u''.join(chunks), end - - -# Use speedup if available -scanstring = c_scanstring or py_scanstring - -WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS) -WHITESPACE_STR = ' \t\n\r' - -def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR): - pairs = {} - # Use a slice to prevent IndexError from being raised, the following - # check will raise a more specific ValueError if the string is empty - nextchar = s[end:end + 1] - # Normally we expect nextchar == '"' - if nextchar != '"': - if nextchar in _ws: - end = _w(s, end).end() - nextchar = s[end:end + 1] - # Trivial empty object - if nextchar == '}': - return pairs, end + 1 - elif nextchar != '"': - raise ValueError(errmsg("Expecting property name", s, end)) - end += 1 - while True: - key, end = scanstring(s, end, encoding, strict) - - # To skip some function call overhead we optimize the fast paths where - # the JSON key separator is ": " or just ":". 
- if s[end:end + 1] != ':': - end = _w(s, end).end() - if s[end:end + 1] != ':': - raise ValueError(errmsg("Expecting : delimiter", s, end)) - - end += 1 - - try: - if s[end] in _ws: - end += 1 - if s[end] in _ws: - end = _w(s, end + 1).end() - except IndexError: - pass - - try: - value, end = scan_once(s, end) - except StopIteration: - raise ValueError(errmsg("Expecting object", s, end)) - pairs[key] = value - - try: - nextchar = s[end] - if nextchar in _ws: - end = _w(s, end + 1).end() - nextchar = s[end] - except IndexError: - nextchar = '' - end += 1 - - if nextchar == '}': - break - elif nextchar != ',': - raise ValueError(errmsg("Expecting , delimiter", s, end - 1)) - - try: - nextchar = s[end] - if nextchar in _ws: - end += 1 - nextchar = s[end] - if nextchar in _ws: - end = _w(s, end + 1).end() - nextchar = s[end] - except IndexError: - nextchar = '' - - end += 1 - if nextchar != '"': - raise ValueError(errmsg("Expecting property name", s, end - 1)) - - if object_hook is not None: - pairs = object_hook(pairs) - return pairs, end - -def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR): - values = [] - nextchar = s[end:end + 1] - if nextchar in _ws: - end = _w(s, end + 1).end() - nextchar = s[end:end + 1] - # Look-ahead for trivial empty array - if nextchar == ']': - return values, end + 1 - _append = values.append - while True: - try: - value, end = scan_once(s, end) - except StopIteration: - raise ValueError(errmsg("Expecting object", s, end)) - _append(value) - nextchar = s[end:end + 1] - if nextchar in _ws: - end = _w(s, end + 1).end() - nextchar = s[end:end + 1] - end += 1 - if nextchar == ']': - break - elif nextchar != ',': - raise ValueError(errmsg("Expecting , delimiter", s, end)) - - try: - if s[end] in _ws: - end += 1 - if s[end] in _ws: - end = _w(s, end + 1).end() - except IndexError: - pass - - return values, end - -class JSONDecoder(object): - """Simple JSON decoder - - Performs the following translations in decoding by default: - - +---------------+-------------------+ - | JSON | Python | - +===============+===================+ - | object | dict | - +---------------+-------------------+ - | array | list | - +---------------+-------------------+ - | string | unicode | - +---------------+-------------------+ - | number (int) | int, long | - +---------------+-------------------+ - | number (real) | float | - +---------------+-------------------+ - | true | True | - +---------------+-------------------+ - | false | False | - +---------------+-------------------+ - | null | None | - +---------------+-------------------+ - - It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as - their corresponding ``float`` values, which is outside the JSON spec. - - """ - - def __init__(self, encoding=None, object_hook=None, parse_float=None, - parse_int=None, parse_constant=None, strict=True): - """``encoding`` determines the encoding used to interpret any ``str`` - objects decoded by this instance (utf-8 by default). It has no - effect when decoding ``unicode`` objects. - - Note that currently only encodings that are a superset of ASCII work, - strings of other encodings should be passed in as ``unicode``. - - ``object_hook``, if specified, will be called with the result - of every JSON object decoded and its return value will be used in - place of the given ``dict``. This can be used to provide custom - deserializations (e.g. to support JSON-RPC class hinting). 
- - ``parse_float``, if specified, will be called with the string - of every JSON float to be decoded. By default this is equivalent to - float(num_str). This can be used to use another datatype or parser - for JSON floats (e.g. decimal.Decimal). - - ``parse_int``, if specified, will be called with the string - of every JSON int to be decoded. By default this is equivalent to - int(num_str). This can be used to use another datatype or parser - for JSON integers (e.g. float). - - ``parse_constant``, if specified, will be called with one of the - following strings: -Infinity, Infinity, NaN. - This can be used to raise an exception if invalid JSON numbers - are encountered. - - """ - self.encoding = encoding - self.object_hook = object_hook - self.parse_float = parse_float or float - self.parse_int = parse_int or int - self.parse_constant = parse_constant or _CONSTANTS.__getitem__ - self.strict = strict - self.parse_object = JSONObject - self.parse_array = JSONArray - self.parse_string = scanstring - self.scan_once = make_scanner(self) - - def decode(self, s, _w=WHITESPACE.match): - """Return the Python representation of ``s`` (a ``str`` or ``unicode`` - instance containing a JSON document) - - """ - obj, end = self.raw_decode(s, idx=_w(s, 0).end()) - end = _w(s, end).end() - if end != len(s): - raise ValueError(errmsg("Extra data", s, end, len(s))) - return obj - - def raw_decode(self, s, idx=0): - """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning - with a JSON document) and return a 2-tuple of the Python - representation and the index in ``s`` where the document ended. - - This can be used to decode a JSON document from a string that may - have extraneous data at the end. - - """ - try: - obj, end = self.scan_once(s, idx) - except StopIteration: - raise ValueError("No JSON object could be decoded") - return obj, end diff --git a/ansible_mitogen/compat/simplejson/encoder.py b/ansible_mitogen/compat/simplejson/encoder.py deleted file mode 100644 index cf582903..00000000 --- a/ansible_mitogen/compat/simplejson/encoder.py +++ /dev/null @@ -1,440 +0,0 @@ -"""Implementation of JSONEncoder -""" -import re - -try: - from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii -except ImportError: - c_encode_basestring_ascii = None -try: - from simplejson._speedups import make_encoder as c_make_encoder -except ImportError: - c_make_encoder = None - -ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') -ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') -HAS_UTF8 = re.compile(r'[\x80-\xff]') -ESCAPE_DCT = { - '\\': '\\\\', - '"': '\\"', - '\b': '\\b', - '\f': '\\f', - '\n': '\\n', - '\r': '\\r', - '\t': '\\t', -} -for i in range(0x20): - #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) - ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) - -# Assume this produces an infinity on all machines (probably not guaranteed) -INFINITY = float('1e66666') -FLOAT_REPR = repr - -def encode_basestring(s): - """Return a JSON representation of a Python string - - """ - def replace(match): - return ESCAPE_DCT[match.group(0)] - return '"' + ESCAPE.sub(replace, s) + '"' - - -def py_encode_basestring_ascii(s): - """Return an ASCII-only JSON representation of a Python string - - """ - if isinstance(s, str) and HAS_UTF8.search(s) is not None: - s = s.decode('utf-8') - def replace(match): - s = match.group(0) - try: - return ESCAPE_DCT[s] - except KeyError: - n = ord(s) - if n < 0x10000: - #return '\\u{0:04x}'.format(n) - return '\\u%04x' % (n,) - else: - # surrogate pair - n -= 
0x10000 - s1 = 0xd800 | ((n >> 10) & 0x3ff) - s2 = 0xdc00 | (n & 0x3ff) - #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2) - return '\\u%04x\\u%04x' % (s1, s2) - return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' - - -encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii - -class JSONEncoder(object): - """Extensible JSON encoder for Python data structures. - - Supports the following objects and types by default: - - +-------------------+---------------+ - | Python | JSON | - +===================+===============+ - | dict | object | - +-------------------+---------------+ - | list, tuple | array | - +-------------------+---------------+ - | str, unicode | string | - +-------------------+---------------+ - | int, long, float | number | - +-------------------+---------------+ - | True | true | - +-------------------+---------------+ - | False | false | - +-------------------+---------------+ - | None | null | - +-------------------+---------------+ - - To extend this to recognize other objects, subclass and implement a - ``.default()`` method with another method that returns a serializable - object for ``o`` if possible, otherwise it should call the superclass - implementation (to raise ``TypeError``). - - """ - item_separator = ', ' - key_separator = ': ' - def __init__(self, skipkeys=False, ensure_ascii=True, - check_circular=True, allow_nan=True, sort_keys=False, - indent=None, separators=None, encoding='utf-8', default=None): - """Constructor for JSONEncoder, with sensible defaults. - - If skipkeys is false, then it is a TypeError to attempt - encoding of keys that are not str, int, long, float or None. If - skipkeys is True, such items are simply skipped. - - If ensure_ascii is true, the output is guaranteed to be str - objects with all incoming unicode characters escaped. If - ensure_ascii is false, the output will be unicode object. - - If check_circular is true, then lists, dicts, and custom encoded - objects will be checked for circular references during encoding to - prevent an infinite recursion (which would cause an OverflowError). - Otherwise, no such check takes place. - - If allow_nan is true, then NaN, Infinity, and -Infinity will be - encoded as such. This behavior is not JSON specification compliant, - but is consistent with most JavaScript based encoders and decoders. - Otherwise, it will be a ValueError to encode such floats. - - If sort_keys is true, then the output of dictionaries will be - sorted by key; this is useful for regression tests to ensure - that JSON serializations can be compared on a day-to-day basis. - - If indent is a non-negative integer, then JSON array - elements and object members will be pretty-printed with that - indent level. An indent level of 0 will only insert newlines. - None is the most compact representation. - - If specified, separators should be a (item_separator, key_separator) - tuple. The default is (', ', ': '). To get the most compact JSON - representation you should specify (',', ':') to eliminate whitespace. - - If specified, default is a function that gets called for objects - that can't otherwise be serialized. It should return a JSON encodable - version of the object or raise a ``TypeError``. - - If encoding is not None, then all input strings will be - transformed into unicode using that encoding prior to JSON-encoding. - The default is UTF-8. 
- - """ - - self.skipkeys = skipkeys - self.ensure_ascii = ensure_ascii - self.check_circular = check_circular - self.allow_nan = allow_nan - self.sort_keys = sort_keys - self.indent = indent - if separators is not None: - self.item_separator, self.key_separator = separators - if default is not None: - self.default = default - self.encoding = encoding - - def default(self, o): - """Implement this method in a subclass such that it returns - a serializable object for ``o``, or calls the base implementation - (to raise a ``TypeError``). - - For example, to support arbitrary iterators, you could - implement default like this:: - - def default(self, o): - try: - iterable = iter(o) - except TypeError: - pass - else: - return list(iterable) - return JSONEncoder.default(self, o) - - """ - raise TypeError(repr(o) + " is not JSON serializable") - - def encode(self, o): - """Return a JSON string representation of a Python data structure. - - >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) - '{"foo": ["bar", "baz"]}' - - """ - # This is for extremely simple cases and benchmarks. - if isinstance(o, basestring): - if isinstance(o, str): - _encoding = self.encoding - if (_encoding is not None - and not (_encoding == 'utf-8')): - o = o.decode(_encoding) - if self.ensure_ascii: - return encode_basestring_ascii(o) - else: - return encode_basestring(o) - # This doesn't pass the iterator directly to ''.join() because the - # exceptions aren't as detailed. The list call should be roughly - # equivalent to the PySequence_Fast that ''.join() would do. - chunks = self.iterencode(o, _one_shot=True) - if not isinstance(chunks, (list, tuple)): - chunks = list(chunks) - return ''.join(chunks) - - def iterencode(self, o, _one_shot=False): - """Encode the given object and yield each string - representation as available. - - For example:: - - for chunk in JSONEncoder().iterencode(bigobject): - mysocket.write(chunk) - - """ - if self.check_circular: - markers = {} - else: - markers = None - if self.ensure_ascii: - _encoder = encode_basestring_ascii - else: - _encoder = encode_basestring - if self.encoding != 'utf-8': - def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding): - if isinstance(o, str): - o = o.decode(_encoding) - return _orig_encoder(o) - - def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY): - # Check for specials. Note that this type of test is processor- and/or - # platform-specific, so do tests which don't depend on the internals. 
- - if o != o: - text = 'NaN' - elif o == _inf: - text = 'Infinity' - elif o == _neginf: - text = '-Infinity' - else: - return _repr(o) - - if not allow_nan: - raise ValueError( - "Out of range float values are not JSON compliant: " + - repr(o)) - - return text - - - if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys: - _iterencode = c_make_encoder( - markers, self.default, _encoder, self.indent, - self.key_separator, self.item_separator, self.sort_keys, - self.skipkeys, self.allow_nan) - else: - _iterencode = _make_iterencode( - markers, self.default, _encoder, self.indent, floatstr, - self.key_separator, self.item_separator, self.sort_keys, - self.skipkeys, _one_shot) - return _iterencode(o, 0) - -def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, - ## HACK: hand-optimized bytecode; turn globals into locals - False=False, - True=True, - ValueError=ValueError, - basestring=basestring, - dict=dict, - float=float, - id=id, - int=int, - isinstance=isinstance, - list=list, - long=long, - str=str, - tuple=tuple, - ): - - def _iterencode_list(lst, _current_indent_level): - if not lst: - yield '[]' - return - if markers is not None: - markerid = id(lst) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = lst - buf = '[' - if _indent is not None: - _current_indent_level += 1 - newline_indent = '\n' + (' ' * (_indent * _current_indent_level)) - separator = _item_separator + newline_indent - buf += newline_indent - else: - newline_indent = None - separator = _item_separator - first = True - for value in lst: - if first: - first = False - else: - buf = separator - if isinstance(value, basestring): - yield buf + _encoder(value) - elif value is None: - yield buf + 'null' - elif value is True: - yield buf + 'true' - elif value is False: - yield buf + 'false' - elif isinstance(value, (int, long)): - yield buf + str(value) - elif isinstance(value, float): - yield buf + _floatstr(value) - else: - yield buf - if isinstance(value, (list, tuple)): - chunks = _iterencode_list(value, _current_indent_level) - elif isinstance(value, dict): - chunks = _iterencode_dict(value, _current_indent_level) - else: - chunks = _iterencode(value, _current_indent_level) - for chunk in chunks: - yield chunk - if newline_indent is not None: - _current_indent_level -= 1 - yield '\n' + (' ' * (_indent * _current_indent_level)) - yield ']' - if markers is not None: - del markers[markerid] - - def _iterencode_dict(dct, _current_indent_level): - if not dct: - yield '{}' - return - if markers is not None: - markerid = id(dct) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = dct - yield '{' - if _indent is not None: - _current_indent_level += 1 - newline_indent = '\n' + (' ' * (_indent * _current_indent_level)) - item_separator = _item_separator + newline_indent - yield newline_indent - else: - newline_indent = None - item_separator = _item_separator - first = True - if _sort_keys: - items = dct.items() - items.sort(key=lambda kv: kv[0]) - else: - items = dct.iteritems() - for key, value in items: - if isinstance(key, basestring): - pass - # JavaScript is weakly typed for these, so it makes sense to - # also allow them. Many encoders seem to do something like this. 
- elif isinstance(key, float): - key = _floatstr(key) - elif key is True: - key = 'true' - elif key is False: - key = 'false' - elif key is None: - key = 'null' - elif isinstance(key, (int, long)): - key = str(key) - elif _skipkeys: - continue - else: - raise TypeError("key " + repr(key) + " is not a string") - if first: - first = False - else: - yield item_separator - yield _encoder(key) - yield _key_separator - if isinstance(value, basestring): - yield _encoder(value) - elif value is None: - yield 'null' - elif value is True: - yield 'true' - elif value is False: - yield 'false' - elif isinstance(value, (int, long)): - yield str(value) - elif isinstance(value, float): - yield _floatstr(value) - else: - if isinstance(value, (list, tuple)): - chunks = _iterencode_list(value, _current_indent_level) - elif isinstance(value, dict): - chunks = _iterencode_dict(value, _current_indent_level) - else: - chunks = _iterencode(value, _current_indent_level) - for chunk in chunks: - yield chunk - if newline_indent is not None: - _current_indent_level -= 1 - yield '\n' + (' ' * (_indent * _current_indent_level)) - yield '}' - if markers is not None: - del markers[markerid] - - def _iterencode(o, _current_indent_level): - if isinstance(o, basestring): - yield _encoder(o) - elif o is None: - yield 'null' - elif o is True: - yield 'true' - elif o is False: - yield 'false' - elif isinstance(o, (int, long)): - yield str(o) - elif isinstance(o, float): - yield _floatstr(o) - elif isinstance(o, (list, tuple)): - for chunk in _iterencode_list(o, _current_indent_level): - yield chunk - elif isinstance(o, dict): - for chunk in _iterencode_dict(o, _current_indent_level): - yield chunk - else: - if markers is not None: - markerid = id(o) - if markerid in markers: - raise ValueError("Circular reference detected") - markers[markerid] = o - o = _default(o) - for chunk in _iterencode(o, _current_indent_level): - yield chunk - if markers is not None: - del markers[markerid] - - return _iterencode diff --git a/ansible_mitogen/compat/simplejson/scanner.py b/ansible_mitogen/compat/simplejson/scanner.py deleted file mode 100644 index adbc6ec9..00000000 --- a/ansible_mitogen/compat/simplejson/scanner.py +++ /dev/null @@ -1,65 +0,0 @@ -"""JSON token scanner -""" -import re -try: - from simplejson._speedups import make_scanner as c_make_scanner -except ImportError: - c_make_scanner = None - -__all__ = ['make_scanner'] - -NUMBER_RE = re.compile( - r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?', - (re.VERBOSE | re.MULTILINE | re.DOTALL)) - -def py_make_scanner(context): - parse_object = context.parse_object - parse_array = context.parse_array - parse_string = context.parse_string - match_number = NUMBER_RE.match - encoding = context.encoding - strict = context.strict - parse_float = context.parse_float - parse_int = context.parse_int - parse_constant = context.parse_constant - object_hook = context.object_hook - - def _scan_once(string, idx): - try: - nextchar = string[idx] - except IndexError: - raise StopIteration - - if nextchar == '"': - return parse_string(string, idx + 1, encoding, strict) - elif nextchar == '{': - return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook) - elif nextchar == '[': - return parse_array((string, idx + 1), _scan_once) - elif nextchar == 'n' and string[idx:idx + 4] == 'null': - return None, idx + 4 - elif nextchar == 't' and string[idx:idx + 4] == 'true': - return True, idx + 4 - elif nextchar == 'f' and string[idx:idx + 5] == 'false': - return False, idx + 5 - - m = 
match_number(string, idx) - if m is not None: - integer, frac, exp = m.groups() - if frac or exp: - res = parse_float(integer + (frac or '') + (exp or '')) - else: - res = parse_int(integer) - return res, m.end() - elif nextchar == 'N' and string[idx:idx + 3] == 'NaN': - return parse_constant('NaN'), idx + 3 - elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity': - return parse_constant('Infinity'), idx + 8 - elif nextchar == '-' and string[idx:idx + 9] == '-Infinity': - return parse_constant('-Infinity'), idx + 9 - else: - raise StopIteration - - return _scan_once - -make_scanner = c_make_scanner or py_make_scanner diff --git a/ansible_mitogen/process.py b/ansible_mitogen/process.py index 9276614b..63caa88a 100644 --- a/ansible_mitogen/process.py +++ b/ansible_mitogen/process.py @@ -180,42 +180,6 @@ def setup_pool(pool): LOG.debug('Service pool configured: size=%d', pool.size) -def _setup_simplejson(responder): - """ - We support serving simplejson for Python 2.4 targets on Ansible 2.3, at - least so the package's own CI Docker scripts can run without external - help, however newer versions of simplejson no longer support Python - 2.4. Therefore override any installed/loaded version with a - 2.4-compatible version we ship in the compat/ directory. - """ - responder.whitelist_prefix('simplejson') - - # issue #536: must be at end of sys.path, in case existing newer - # version is already loaded. - compat_path = os.path.join(os.path.dirname(__file__), 'compat') - sys.path.append(compat_path) - - for fullname, is_pkg, suffix in ( - (u'simplejson', True, '__init__.py'), - (u'simplejson.decoder', False, 'decoder.py'), - (u'simplejson.encoder', False, 'encoder.py'), - (u'simplejson.scanner', False, 'scanner.py'), - ): - path = os.path.join(compat_path, 'simplejson', suffix) - fp = open(path, 'rb') - try: - source = fp.read() - finally: - fp.close() - - responder.add_source_override( - fullname=fullname, - path=path, - source=source, - is_pkg=is_pkg, - ) - - def _setup_responder(responder): """ Configure :class:`mitogen.master.ModuleResponder` to only permit @@ -223,7 +187,6 @@ def _setup_responder(responder): """ responder.whitelist_prefix('ansible') responder.whitelist_prefix('ansible_mitogen') - _setup_simplejson(responder) # Ansible 2.3 is compatible with Python 2.4 targets, however # ansible/__init__.py is not. Instead, executor/module_common.py writes diff --git a/ansible_mitogen/runner.py b/ansible_mitogen/runner.py index 31ccf1cf..c4cb71ff 100644 --- a/ansible_mitogen/runner.py +++ b/ansible_mitogen/runner.py @@ -41,6 +41,7 @@ __metaclass__ = type import atexit import imp +import json import os import re import shlex @@ -63,12 +64,6 @@ except ImportError: # Python 2.4 ctypes = None -try: - import json -except ImportError: - # Python 2.4 - import simplejson as json - try: # Cannot use cStringIO as it does not support Unicode. 
from StringIO import StringIO diff --git a/ansible_mitogen/target.py b/ansible_mitogen/target.py index 2c65c516..7d907d62 100644 --- a/ansible_mitogen/target.py +++ b/ansible_mitogen/target.py @@ -38,6 +38,7 @@ __metaclass__ = type import errno import grp +import json import operator import os import pwd @@ -58,11 +59,6 @@ import mitogen.parent import mitogen.service from mitogen.core import b -try: - import json -except ImportError: - import simplejson as json - try: reduce except NameError: @@ -371,11 +367,6 @@ def init_child(econtext, log_level, candidate_temp_dirs): LOG.setLevel(log_level) logging.getLogger('ansible_mitogen').setLevel(log_level) - # issue #536: if the json module is available, remove simplejson from the - # importer whitelist to avoid confusing certain Ansible modules. - if json.__name__ == 'json': - econtext.importer.whitelist.remove('simplejson') - global _fork_parent if FORK_SUPPORTED: mitogen.parent.upgrade_router(econtext) diff --git a/docs/changelog.rst b/docs/changelog.rst index cadd1eef..b4ae9df5 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -26,7 +26,7 @@ v0.3.4.dev0 connection on destruction. This is expected to reduce cases of `mitogen.core.Error: An attempt was made to enqueue a message with a Broker that has already exitted`. However it may result in resource leaks. - +* :gh:issue:`659` Removed :mod:`mitogen.compat.simplejson`, not needed with Python 2.7+, contained Python 3.x syntax errors v0.3.3 (2022-06-03) ------------------- diff --git a/tests/ansible/lib/modules/custom_python_os_getcwd.py b/tests/ansible/lib/modules/custom_python_os_getcwd.py index b4e20afb..c5e264ae 100644 --- a/tests/ansible/lib/modules/custom_python_os_getcwd.py +++ b/tests/ansible/lib/modules/custom_python_os_getcwd.py @@ -2,12 +2,9 @@ # #591: call os.getcwd() before AnsibleModule ever gets a chance to fix up the # process environment. +import json import os -try: - import json -except ImportError: - import simplejson as json print(json.dumps({ 'cwd': os.getcwd() diff --git a/tests/ansible/lib/modules/custom_python_want_json_module.py b/tests/ansible/lib/modules/custom_python_want_json_module.py index 4a8dbd7e..f5e33862 100755 --- a/tests/ansible/lib/modules/custom_python_want_json_module.py +++ b/tests/ansible/lib/modules/custom_python_want_json_module.py @@ -1,13 +1,9 @@ #!/usr/bin/env python # I am an Ansible Python WANT_JSON module. I should receive a JSON-encoded file. +import json import sys -try: - import json -except ImportError: - import simplejson as json - WANT_JSON = 1 From 21cb4a347210f6047b22e0d10add3c90d880a7d8 Mon Sep 17 00:00:00 2001 From: Alex Willmer Date: Sun, 5 Feb 2023 14:25:46 +0000 Subject: [PATCH 2/3] CI: Remove faulthandler fallback requirement faulthandler is a stdlib module in Python 3.3+. For a long time a PyPI package of the same name was available for earlier Python releases. That package has since been removed from PyPI, and the source respoitory archived. So we should not rely on it. fixes #983 refs #970 --- docs/ansible_detailed.rst | 15 +++++++-------- docs/changelog.rst | 1 + tests/requirements.txt | 1 - 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/docs/ansible_detailed.rst b/docs/ansible_detailed.rst index dd569a76..449771b8 100644 --- a/docs/ansible_detailed.rst +++ b/docs/ansible_detailed.rst @@ -1248,18 +1248,17 @@ with ``-vvv``. However, certain controller hangs may render ``MITOGEN_DUMP_THREAD_STACKS`` ineffective, or occur too infrequently for interactive reproduction. 
In these -cases `faulthandler `_ may be used: +cases :py:mod:`faulthandler` may be used with Python >= 3.3: -1. For Python 2, ``pip install faulthandler``. This is unnecessary on Python 3. -2. Once the hang occurs, observe the process tree using ``pstree`` or ``ps +1. Once the hang occurs, observe the process tree using ``pstree`` or ``ps --forest``. -3. The most likely process to be hung is the connection multiplexer, which can +2. The most likely process to be hung is the connection multiplexer, which can easily be identified as the parent of all SSH client processes. -4. Send ``kill -SEGV `` to the multiplexer PID, causing it to print all +3. Send ``kill -SEGV `` to the multiplexer PID, causing it to print all thread stacks. -5. `File a bug `_ including a copy - of the stacks, along with a description of the last task executing prior to - the hang. +4. `File a bug `_ + including a copy of the stacks and a description of the last task executing + before the hang It is possible the hang occurred in a process on a target. If ``strace`` is available, look for the host name not listed in Ansible output as reporting a diff --git a/docs/changelog.rst b/docs/changelog.rst index b4ae9df5..da5cc5e4 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -27,6 +27,7 @@ v0.3.4.dev0 was made to enqueue a message with a Broker that has already exitted`. However it may result in resource leaks. * :gh:issue:`659` Removed :mod:`mitogen.compat.simplejson`, not needed with Python 2.7+, contained Python 3.x syntax errors +* :gh:issue:`983` CI: Removed PyPI faulthandler requirement from tests v0.3.3 (2022-06-03) ------------------- diff --git a/tests/requirements.txt b/tests/requirements.txt index 0e9a0f0a..7301bee1 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -2,7 +2,6 @@ psutil==5.4.8 coverage==5.5; python_version < '3.7' coverage==6.4.4; python_version >= '3.7' Django==1.6.11 # Last version supporting 2.6. -faulthandler==3.2; python_version < '3.3' mock==2.0.0 pytz==2018.5 cffi==1.14.3 # Random pin to try and fix pyparser==2.18 not having effect From 8151577b7559a5ba11afaca836edba1fa4a146cf Mon Sep 17 00:00:00 2001 From: Alex Willmer Date: Sun, 5 Feb 2023 15:35:41 +0000 Subject: [PATCH 3/3] CI: Limit to Tox < 4.0 to avoid plugin incompatibility I'm abandoning tox-factor because having any [tox] requires = ... causes tox 3.x to create an isolated virtualenv for running tox itself. Since Tox 4.x was released that virtualenv gets it, which is incompatible with the tox-factor plugin. e.g. ``` Traceback (most recent call last): File "/Users/runner/work/1/s/.tox/.tox/lib/python3.10/site-packages/tox_factor/compat.py", line 2, in from tox.config.parallel import ENV_VAR_KEY_PUBLIC as TOX_PARALLEL_ENV ModuleNotFoundError: No module named 'tox.config.parallel' ``` --- .ci/azure-pipelines-steps.yml | 9 ++++++++- tox.ini | 2 -- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/.ci/azure-pipelines-steps.yml b/.ci/azure-pipelines-steps.yml index f4570744..6aeeca93 100644 --- a/.ci/azure-pipelines-steps.yml +++ b/.ci/azure-pipelines-steps.yml @@ -1,3 +1,10 @@ +# Each step entry runs a task (Azure Pipelines analog of an Ansible module). +# https://learn.microsoft.com/en-us/azure/devops/pipelines/tasks/reference/?view=azure-pipelines&viewFallbackFrom=azure-devops#tool + +# `{script: ...}` is shorthand for `{task: CmdLine@, inputs: {script: ...}}`. 
+# https://learn.microsoft.com/en-us/azure/devops/pipelines/yaml-schema/steps-script?view=azure-pipelines +# https://learn.microsoft.com/en-us/azure/devops/pipelines/tasks/reference/cmd-line-v2?view=azure-pipelines + steps: - task: UsePythonVersion@0 displayName: Install python @@ -11,7 +18,7 @@ steps: displayName: Install build deps condition: and(eq(variables['python.version'], ''), eq(variables['Agent.OS'], 'Linux')) -- script: python -mpip install tox +- script: python -mpip install "tox<4.0" displayName: Install tooling - script: python -mtox -e "$(tox.env)" diff --git a/tox.ini b/tox.ini index 43170f9f..da710933 100644 --- a/tox.ini +++ b/tox.ini @@ -42,8 +42,6 @@ envlist = py{27,36,310}-mode_mitogen-distro_debian{9,10,11}, py{27,36,310}-mode_mitogen-distro_ubuntu{1604,1804,2004}, report, -requires = - tox-factor [testenv] basepython =
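
For context on the patch 1 changelog note that the vendored copy "contained Python 3.x syntax errors": the deleted `decoder.py` relies on Python 2-only tuple parameter unpacking (e.g. `def JSONArray((s, end), ...)`), which Python 3 rejects at compile time. A minimal illustration follows — not part of the patches themselves, just a sketch runnable on Python 3:

```python
# Hypothetical check: compile a Python 2-style function signature, of the kind
# used by the removed ansible_mitogen/compat/simplejson/decoder.py, under Python 3.
src = "def JSONArray((s, end), scan_once):\n    return [], end\n"

try:
    compile(src, "<decoder.py excerpt>", "exec")
except SyntaxError as exc:
    # Python 3 removed tuple parameter unpacking (PEP 3113), so the vendored
    # module could not even be byte-compiled on Python 3 targets.
    print("SyntaxError:", exc.msg)
```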