Only use the selectors code path when we are prompt matching (#79931)

Branch: pull/80401/head
Author: Matt Martz (committed by GitHub)
Parent: 460abe0cef
Commit: 6d1e355639

@@ -0,0 +1,4 @@
+bugfixes:
+  - AnsibleModule.run_command - Only use selectors when needed, and rely on Python
+    stdlib subprocess for the simple task of collecting stdout/stderr when prompt
+    matching is not required.
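For orientation, the sketch below is a minimal, hypothetical standalone helper illustrating the idea; it is not Ansible's run_command, and it simplifies the real code: selectors.DefaultSelector and BufferedReader.read1() stand in for the module's PollSelector plus fcntl non-blocking reads, and data/prompt_re are assumed to already be bytes and a compiled bytes pattern. When no prompt pattern is supplied, Popen.communicate() already drains stdout and stderr without deadlocking; the selector loop is only needed when output must be inspected incrementally so the call can abort instead of hanging on an unexpected prompt.

import selectors
import subprocess


def run(args, data=None, prompt_re=None):
    # data: optional bytes to feed to stdin; prompt_re: optional compiled bytes regex
    cmd = subprocess.Popen(args, stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    if not prompt_re:
        # Simple case: let the stdlib collect stdout/stderr for us.
        stdout, stderr = cmd.communicate(input=data)
        return cmd.returncode, stdout, stderr

    # Prompt matching: read incrementally so we can abort instead of hanging.
    stdout = stderr = b''
    sel = selectors.DefaultSelector()
    sel.register(cmd.stdout, selectors.EVENT_READ)
    sel.register(cmd.stderr, selectors.EVENT_READ)
    if data:
        cmd.stdin.write(data)
    cmd.stdin.close()
    while sel.get_map():
        for key, _ in sel.select(1):
            chunk = key.fileobj.read1(32768)
            if not chunk:
                # EOF on this pipe; stop watching it.
                sel.unregister(key.fileobj)
            elif key.fileobj is cmd.stdout:
                stdout += chunk
            else:
                stderr += chunk
        if prompt_re.search(stdout) and not data:
            cmd.kill()
            sel.close()
            return 257, stdout, b'prompt encountered with no input data'
    sel.close()
    return cmd.wait(), stdout, stderr

A caller would pass something like prompt_re=re.compile(rb'[Pp]assword:') to get the early 257 return instead of a hang; without it, the whole job is delegated to communicate().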

@@ -2043,53 +2043,59 @@ class AnsibleModule(object):
             # Select PollSelector which is supported by major platforms
             selector = selectors.PollSelector()
-            selector.register(cmd.stdout, selectors.EVENT_READ)
-            selector.register(cmd.stderr, selectors.EVENT_READ)
-            if os.name == 'posix':
-                fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
-                fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
-
             if data:
                 if not binary_data:
                     data += '\n'
                 if isinstance(data, text_type):
                     data = to_bytes(data)
-                cmd.stdin.write(data)
-                cmd.stdin.close()

-            while True:
-                events = selector.select(1)
-                for key, event in events:
-                    b_chunk = key.fileobj.read()
-                    if b_chunk == b(''):
-                        selector.unregister(key.fileobj)
-                    if key.fileobj == cmd.stdout:
-                        stdout += b_chunk
-                    elif key.fileobj == cmd.stderr:
-                        stderr += b_chunk
-                # if we're checking for prompts, do it now
-                if prompt_re:
-                    if prompt_re.search(stdout) and not data:
-                        if encoding:
-                            stdout = to_native(stdout, encoding=encoding, errors=errors)
-                        return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
-                # only break out if no pipes are left to read or
-                # the pipes are completely read and
-                # the process is terminated
-                if (not events or not selector.get_map()) and cmd.poll() is not None:
-                    break
-                # No pipes are left to read but process is not yet terminated
-                # Only then it is safe to wait for the process to be finished
-                # NOTE: Actually cmd.poll() is always None here if no selectors are left
-                elif not selector.get_map() and cmd.poll() is None:
-                    cmd.wait()
-                    # The process is terminated. Since no pipes to read from are
-                    # left, there is no need to call select() again.
-                    break
-
-            cmd.stdout.close()
-            cmd.stderr.close()
-            selector.close()
+            if not prompt_re:
+                stdout, stderr = cmd.communicate(input=data)
+            else:
+                # We only need this to look for a prompt, to abort instead of hanging
+                selector.register(cmd.stdout, selectors.EVENT_READ)
+                selector.register(cmd.stderr, selectors.EVENT_READ)
+                if os.name == 'posix':
+                    fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+                    fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+
+                if data:
+                    cmd.stdin.write(data)
+                    cmd.stdin.close()
+
+                while True:
+                    events = selector.select(1)
+                    for key, event in events:
+                        b_chunk = key.fileobj.read()
+                        if b_chunk == b(''):
+                            selector.unregister(key.fileobj)
+                        if key.fileobj == cmd.stdout:
+                            stdout += b_chunk
+                        elif key.fileobj == cmd.stderr:
+                            stderr += b_chunk
+                    # if we're checking for prompts, do it now
+                    if prompt_re:
+                        if prompt_re.search(stdout) and not data:
+                            if encoding:
+                                stdout = to_native(stdout, encoding=encoding, errors=errors)
+                            return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
+                    # only break out if no pipes are left to read or
+                    # the pipes are completely read and
+                    # the process is terminated
+                    if (not events or not selector.get_map()) and cmd.poll() is not None:
+                        break
+                    # No pipes are left to read but process is not yet terminated
+                    # Only then it is safe to wait for the process to be finished
+                    # NOTE: Actually cmd.poll() is always None here if no selectors are left
+                    elif not selector.get_map() and cmd.poll() is None:
+                        cmd.wait()
+                        # The process is terminated. Since no pipes to read from are
+                        # left, there is no need to call select() again.
+                        break
+
+                cmd.stdout.close()
+                cmd.stderr.close()
+                selector.close()

             rc = cmd.returncode
         except (OSError, IOError) as e:
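In practice, which branch of the hunk above runs is decided solely by whether the caller passes prompt_regex; a rough usage illustration follows (module is an AnsibleModule instance, and the command and pattern are made up for the example):

# Default path: no prompt matching, so stdout/stderr are gathered by
# subprocess.Popen.communicate().
rc, out, err = module.run_command(['/usr/bin/id', '-un'])

# Prompt-matching path: the selector loop watches stdout and returns rc 257
# if the pattern matches and no input data was supplied.
# '/usr/bin/provision-widget' and the pattern are hypothetical.
rc, out, err = module.run_command(
    ['/usr/bin/provision-widget', '--init'],
    prompt_regex=r'(?i)are you sure',
)
if rc == 257:
    module.fail_json(msg='command stopped to prompt: %s' % out)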

@@ -147,7 +147,7 @@ class TestRunCommandArgs:
                               for (arg, cmd_lst, cmd_str), sh in product(ARGS_DATA, (True, False))),
                              indirect=['stdin'])
     def test_args(self, cmd, expected, shell, rc_am):
-        rc_am.run_command(cmd, use_unsafe_shell=shell)
+        rc_am.run_command(cmd, use_unsafe_shell=shell, prompt_regex='i_dont_exist')
         assert rc_am._subprocess.Popen.called
         args, kwargs = rc_am._subprocess.Popen.call_args
         assert args == (expected, )
@@ -163,17 +163,17 @@ class TestRunCommandArgs:
 class TestRunCommandCwd:
     @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
     def test_cwd(self, mocker, rc_am):
-        rc_am.run_command('/bin/ls', cwd='/new')
+        rc_am.run_command('/bin/ls', cwd='/new', prompt_regex='i_dont_exist')
         assert rc_am._subprocess.Popen.mock_calls[0][2]['cwd'] == b'/new'

     @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
     def test_cwd_relative_path(self, mocker, rc_am):
-        rc_am.run_command('/bin/ls', cwd='sub-dir')
+        rc_am.run_command('/bin/ls', cwd='sub-dir', prompt_regex='i_dont_exist')
         assert rc_am._subprocess.Popen.mock_calls[0][2]['cwd'] == b'/home/foo/sub-dir'

     @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
     def test_cwd_not_a_dir(self, mocker, rc_am):
-        rc_am.run_command('/bin/ls', cwd='/not-a-dir')
+        rc_am.run_command('/bin/ls', cwd='/not-a-dir', prompt_regex='i_dont_exist')
         assert rc_am._subprocess.Popen.mock_calls[0][2]['cwd'] == b'/not-a-dir'

     @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
@@ -212,14 +212,14 @@ class TestRunCommandRc:
     @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
     def test_check_rc_false(self, rc_am):
         rc_am._subprocess.Popen.return_value.returncode = 1
-        (rc, _, _) = rc_am.run_command('/bin/false', check_rc=False)
+        (rc, _, _) = rc_am.run_command('/bin/false', check_rc=False, prompt_regex='i_dont_exist')
         assert rc == 1

     @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
     def test_check_rc_true(self, rc_am):
         rc_am._subprocess.Popen.return_value.returncode = 1
         with pytest.raises(SystemExit):
-            rc_am.run_command('/bin/false', check_rc=True)
+            rc_am.run_command('/bin/false', check_rc=True, prompt_regex='i_dont_exist')
         assert rc_am.fail_json.called
         args, kwargs = rc_am.fail_json.call_args
         assert kwargs['rc'] == 1
@@ -228,7 +228,7 @@ class TestRunCommandRc:
 class TestRunCommandOutput:
     @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
     def test_text_stdin(self, rc_am):
-        (rc, stdout, stderr) = rc_am.run_command('/bin/foo', data='hello world')
+        (rc, stdout, stderr) = rc_am.run_command('/bin/foo', data='hello world', prompt_regex='i_dont_exist')
         assert rc_am._subprocess.Popen.return_value.stdin.getvalue() == b'hello world\n'

     @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
@@ -237,7 +237,7 @@ class TestRunCommandOutput:
                 SpecialBytesIO(b'hello', fh=mocker.sentinel.stdout),
             mocker.sentinel.stderr:
                 SpecialBytesIO(b'', fh=mocker.sentinel.stderr)}
-        (rc, stdout, stderr) = rc_am.run_command('/bin/cat hello.txt')
+        (rc, stdout, stderr) = rc_am.run_command('/bin/cat hello.txt', prompt_regex='i_dont_exist')
         assert rc == 0
         # module_utils function. On py3 it returns text and py2 it returns
         # bytes because it's returning native strings
@@ -251,7 +251,7 @@ class TestRunCommandOutput:
             mocker.sentinel.stderr:
                 SpecialBytesIO(u'لرئيسية'.encode('utf-8'),
                                fh=mocker.sentinel.stderr)}
-        (rc, stdout, stderr) = rc_am.run_command('/bin/something_ugly')
+        (rc, stdout, stderr) = rc_am.run_command('/bin/something_ugly', prompt_regex='i_dont_exist')
         assert rc == 0
         # module_utils function. On py3 it returns text and py2 it returns
         # bytes because it's returning native strings
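The edits above pin prompt_regex='i_dont_exist' so the existing mocked-Popen tests keep exercising the selector branch. A complementary check for the new default branch might look like the hypothetical sketch below; the class and test names are invented, it reuses the rc_am and stdin fixtures from this file, and it assumes the mocked Popen's communicate() can simply be primed with a return value.

class TestRunCommandNoPrompt:
    @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
    def test_no_prompt_uses_communicate(self, rc_am):
        # Hypothetical: without prompt_regex, run_command should delegate
        # output collection to subprocess.Popen.communicate().
        rc_am._subprocess.Popen.return_value.communicate.return_value = (b'hello\n', b'')
        (rc, stdout, stderr) = rc_am.run_command('/bin/echo hello')
        assert rc_am._subprocess.Popen.return_value.communicate.called
        assert rc == 0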

Loading…
Cancel
Save